input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
== 0:
channel = 0
else:
channel = int(channelIndicator[0].strip('[]'))
outputFilename = outputFilename.split('[')[0]
outputFilename = os.path.basename(outputFilename)
dimensions = fields[3].split('+')[0].split('x')
w = int(dimensions[0])
h = int(dimensions[1])
outputText += 'Thumbnail width: {} height: {}'.format(w, h) + "\n"
record = models.ImageThumbnail(
image = image,
width = w,
height = h,
size = sizeString,
channel = channel,
filename = outputFilename
)
record.save()
return constructProcessOutput(outputText, errorText, time.time() - taskStartTime)
def initSourcefind(method, image):
    """
    This is a small helper function that parses the standard configuration options common
    to all or most of the source finding methods. This function returns a tuple of
    information containing:

    detectionThresholdMultiplier - the number of standard deviations above background
        to consider a valid source.
    shouldReturn - a bool, if true the source find method does not need to be run at
        all for some reason. For example, it is a calibration frame, or it already has
        found the number of sources the user said it should in a previous run.
    outputText and errorText - strings to be appended to the current output and error
        text streams for the task. These strings contain information on how the other
        returned values were determined.
    """
    # Map each method name onto the model that stores that method's results, so we
    # can count how many sources the previous run of the same method found.
    methodDict = {
        'sextractor': models.SextractorResult,
        'image2xy': models.Image2xyResult,
        'daofind': models.DaofindResult,
        'starfind': models.StarfindResult
        }

    shouldReturn = False
    outputText = ""
    errorText = ""
    outputText += "Running {}.\n\n".format(method)

    # If this is a calibration image we do not need to run this task.
    shouldReturn, retText = checkIfCalibrationImage(image, method, 'skippedCalibration')
    outputText += retText
    if shouldReturn:
        return (1, shouldReturn, outputText, errorText)

    # Check to see if we have run this task before and adjust the sensitivity higher or lower.
    # Start by seeing if we have a previously saved value, if not then load the default.
    detectThresholdMultiplier = image.getImageProperty(method + 'Multiplier')
    if detectThresholdMultiplier is None:
        detectThresholdMultiplier = models.CosmicVariable.getVariable(method + 'Threshold')
        outputText += 'Have not run before, setting default multiplier of {} standard deviations.\n'.format(detectThresholdMultiplier)
    else:
        detectThresholdMultiplier = float(detectThresholdMultiplier)
        outputText += "Last detect threshold was {}.\n".format(detectThresholdMultiplier)

    # Check to see if there is a recommendation from a method that got the sourcefind "about right".
    previousRunNumFound = methodDict[method].objects.filter(image=image).count()
    outputText += 'Previous run of this method found {} results.\n'.format(previousRunNumFound)
    numExpectedFeedback = image.getImageProperty('userNumExpectedResults', asList=True)
    minValid = 0
    maxValid = 1e9
    aboutRightRange = 0.2
    feedbackFound = False
    for feedback in numExpectedFeedback:
        feedbackFound = True
        numExpected, rangeString = feedback.value.split()
        numExpected = float(numExpected)
        # Bug fix: the original computed an unused `findFactor` here that divided by
        # numExpected, crashing with ZeroDivisionError when a user entered 0.
        outputText += 'User feedback indicates that {} results is {}.\n'.format(numExpected, rangeString)
        if rangeString == 'aboutRight':
            minValid = numExpected*(1-aboutRightRange)
            maxValid = numExpected*(1+aboutRightRange)
        elif rangeString in ['tooMany', 'wayTooMany']:
            maxValid = min(maxValid, numExpected*(1-aboutRightRange))
        elif rangeString in ['tooFew', 'wayTooFew']:
            minValid = max(minValid, numExpected*(1+aboutRightRange))

    #TODO: We should subtract the number of sources found by the previous run which are flagged as hot pixels, etc, before doing this comparison.
    if feedbackFound:
        outputText += "Valid range of results is between {} and {}.\n".format(minValid, maxValid)
        if previousRunNumFound <= 0.1*minValid:
            # Bug fix: guard against a previous run that found 0 sources; the original
            # divided by previousRunNumFound directly and raised ZeroDivisionError.
            detectThresholdMultiplier -= 0.7 + 0.3*(.1*minValid)/max(previousRunNumFound, 1)
            outputText += "Last run was less than 10% of the user submitted range, reducing detection threshold a lot.\n"
        elif previousRunNumFound <= minValid:
            detectThresholdMultiplier -= 0.25
            outputText += "Last run was less than {}% of the user submitted range, reducing detection threshold a little.\n".format(100*(1-aboutRightRange))
        elif previousRunNumFound <= maxValid:
            outputText += "Last run was within {}% of the user submitted range, not running again.".format(100*aboutRightRange)
            shouldReturn = True
        elif previousRunNumFound <= 10*maxValid:
            detectThresholdMultiplier += 0.25
            outputText += "Last run was more than {}% of the user submitted range, increasing detection threshold a little.\n".format(100*(1+aboutRightRange))
        else:
            detectThresholdMultiplier += 0.7 + 0.3*previousRunNumFound/(10*maxValid)
            outputText += "Last run was more than 10 times the user submitted figure, increasing detection threshold a lot.\n"

    # A threshold this low would detect essentially pure noise; refuse to run.
    if detectThresholdMultiplier < 0.1:
        outputText += "Not running threshold of {} standard deviations, exiting.\n".format(detectThresholdMultiplier)
        shouldReturn = True

    # Store the multiplier we decided to use in case we re-run this method in the future.
    image.addImageProperty(method + 'Multiplier', str(detectThresholdMultiplier))
    return (detectThresholdMultiplier, shouldReturn, outputText, errorText)
def checkIfCalibrationImage(image, propertyKeyToSet, propertyValueToSet):
"""
A simple helper function which returns a tuple whose first entry is True if the image
is a calibration image of some sort and False if it is unknown or a science image.
The second entry in the tuple is the output text to append to the output stream of the
task calling this function.
If the image is a calibration image then then in addition to returning true, the image
will get an image property added to it with the specified key and value. This can be
used to tag the image as having been skipped for processing by the calling routine if
it is a calibration image.
TODO: Allow propertyKeyToSet, etc, to be None and skip creating the image property if so.
"""
outputText = ''
imageType = image.getImageProperty('imageType')
outputText += "Image type is: " + str(imageType) + "\n"
if imageType in ('bias', 'dark', 'flat', 'masterBias', 'masterDark', 'masterFlat'):
outputText += "\n\n\nReturning, do not need to run this task on calibration images (bias, dark, flat, etc)\n"
image.addImageProperty(propertyKeyToSet, propertyValueToSet)
return (True, outputText)
else:
outputText += "\n\n\nNot returning, image is not known to be a calibration image (bias, dark, flat, etc)\n"
return (False, outputText)
@shared_task
def sextractor(filename, processInputId):
taskStartTime = time.time()
# Get the image record
image = models.Image.objects.get(fileRecord__onDiskFileName=filename)
#TODO: Handle multi-extension fits files.
channelInfos = models.ImageChannelInfo.objects.filter(image=image).order_by('index')
detectThresholdMultiplier, shouldReturn, outputText, errorText = initSourcefind('sextractor', image)
if shouldReturn:
return constructProcessOutput(outputText, errorText, time.time() - taskStartTime)
detectThreshold = detectThresholdMultiplier*channelInfos[0].bgStdDev
outputText += 'Final multiplier of {} standard deviations.\n'.format(detectThresholdMultiplier)
outputText += 'Final detect threshold of {} above background.\n'.format(detectThreshold)
#TODO: sextractor can only handle .fit files. Should autoconvert the file to .fit if necessary before running.
#TODO: sextractor has a ton of different modes and options, we should consider running
# it multiple times to detect point sources, then again for extended sources, etc.
# Each of these different settings options could be combined into a single output, or
# they could be independently matched against other detection algorithms.
catfileName = settings.MEDIA_ROOT + filename + ".cat"
proc = subprocess.Popen(['source-extractor', '-CATALOG_NAME', catfileName, settings.MEDIA_ROOT + filename,
'-THRESH_TYPE', 'ABSOLUTE', '-DETECT_THRESH', str(detectThreshold)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=settings.MEDIA_ROOT
)
output, error = proc.communicate()
output = output.decode('utf-8')
error = error.decode('utf-8')
proc.wait()
outputText += output
errorText += error
outputText += '\n ==================== End of process output ====================\n\n'
errorText += '\n ==================== End of process error =====================\n\n'
with open(catfileName, 'r') as catfile:
fieldDict = {}
with transaction.atomic():
models.SextractorResult.objects.filter(image=image).delete()
fwhmValues = []
ellipticityValues = []
sextractorResults = []
for line in catfile:
# Split the line into fields (space separated) and throw out empty fields caused by multiple spaces in a
# row. I.E. do a "combine consecutive delimeters" operation.
fields = line.split()
# Read the comment lines at the top of the file to record what fields are present and in what order.
if line.startswith("#"):
fieldDict[fields[2]] = int(fields[1]) - 1
#For lines that are not comments, use the fieldDict to determine what fields to read and store in the database.
else:
zPos = None #TODO: Add image layer number if this is a data cube, just leaving null for now.
sextractorResult = models.SextractorResult(
image = image,
pixelX = fields[fieldDict['X_IMAGE_DBL']],
pixelY = fields[fieldDict['Y_IMAGE_DBL']],
pixelZ = zPos,
fluxAuto = fields[fieldDict['FLUX_AUTO']],
fluxAutoErr = fields[fieldDict['FLUXERR_AUTO']],
fwhm = fields[fieldDict['FWHM_IMAGE']],
ellipticity = fields[fieldDict['ELLIPTICITY']],
flags = fields[fieldDict['FLAGS']],
boxXMin = fields[fieldDict['XMIN_IMAGE']],
boxYMin = fields[fieldDict['YMIN_IMAGE']],
boxXMax = fields[fieldDict['XMAX_IMAGE']],
boxYMax = fields[fieldDict['YMAX_IMAGE']]
)
sextractorResults.append(sextractorResult)
fwhmValues.append(parseFloat(fields[fieldDict['FWHM_IMAGE']]))
ellipticityValues.append(parseFloat(fields[fieldDict['ELLIPTICITY']]))
"""
fields[fieldDict['NUMBER']]
fields[fieldDict['FLUX_ISO']]
fields[fieldDict['FLUXERR_ISO']]
fields[fieldDict['MAG_ISO']]
fields[fieldDict['MAGERR_ISO']]
fields[fieldDict['MAG_AUTO']]
fields[fieldDict['MAGERR_AUTO']]
fields[fieldDict['FLUX_BEST']]
fields[fieldDict['FLUXERR_BEST']]
fields[fieldDict['MAG_BEST']]
fields[fieldDict['MAGERR_BEST']]
fields[fieldDict['THRESHOLD']]
fields[fieldDict['FLUX_MAX']]
fields[fieldDict['XPEAK_IMAGE']]
fields[fieldDict['YPEAK_IMAGE']]
fields[fieldDict['X_IMAGE']]
fields[fieldDict['Y_IMAGE']]
fields[fieldDict['ISO0']]
fields[fieldDict['ISO1']]
fields[fieldDict['ISO2']]
fields[fieldDict['ISO3']]
fields[fieldDict['ISO4']]
fields[fieldDict['ISO5']]
fields[fieldDict['ISO6']]
fields[fieldDict['ISO7']]
fields[fieldDict['IMAFLAGS_ISO']]
fields[fieldDict['NIMAFLAGS_ISO']]
fields[fieldDict['FLUX_GROWTH']]
fields[fieldDict['FLUX_GROWTHSTEP']]
fields[fieldDict['MAG_GROWTH']]
fields[fieldDict['MAG_GROWTHSTEP']]
fields[fieldDict['FLUX_RADIUS']]
fields[fieldDict['XPSF_IMAGE']]
fields[fieldDict['YPSF_IMAGE']]
fields[fieldDict['FLUX_PSF']]
fields[fieldDict['FLUXERR_PSF']]
fields[fieldDict['MAG_PSF']]
fields[fieldDict['MAGERR_PSF']]
fields[fieldDict['ERRAPSF_IMAGE']]
fields[fieldDict['ERRBPSF_IMAGE']]
fields[fieldDict['FLUX_MODEL']]
fields[fieldDict['FLUXERR_MODEL']]
fields[fieldDict['MAG_MODEL']]
fields[fieldDict['MAGERR_MODEL']]
fields[fieldDict['XMODEL_IMAGE']]
fields[fieldDict['YMODEL_IMAGE']]
fields[fieldDict['FLUX_POINTSOURCE']]
fields[fieldDict['FLUXERR_POINTSOURCE']]
fields[fieldDict['MAG_POINTSOURCE']]
fields[fieldDict['MAGERR_POINTSOURCE']]
"""
models.SextractorResult.objects.bulk_create(sextractorResults)
fwhmMean = numpy.nanmean(fwhmValues)
fwhmMedian = numpy.nanmedian(fwhmValues)
fwhmStdDev = numpy.nanstd(fwhmValues)
ellipticityMean = numpy.nanmean(ellipticityValues)
ellipticityMedian = numpy.nanmedian(ellipticityValues)
ellipticityStdDev = numpy.nanstd(ellipticityValues)
outputText += "\n\nBefore removing hot pixels:\n"
outputText += "FWHM Mean {:.3f} Median {:.3f} StdDev {:.3f}\n".format(fwhmMean, fwhmMedian, fwhmStdDev)
outputText += "Ellipticity Mean {:.3f} Median {:.3f} StdDev {:.3f}\n".format(ellipticityMean, ellipticityMedian, ellipticityStdDev)
#TODO: Recode this section to | |
<filename>solution.py
#!env/bin/python3.7
from copy import deepcopy as copy_deepcopy
import os.path
from time import time
import search
class state:
    """A class used to represent the state of each node in this search problem.

    Attributes
    ----------
    tod : list of strings
        Time of departure of the i-th plane (None when unused/unset)
    schedule : list of lists of dictionaries
        One schedule per plane; the first index is the plane, the second the
        leg, which is a dictionary
    remaining : list of dictionaries
        Legs not yet assigned to any plane
    g : float
        Value of the cost function
    h : float
        Value of the heuristic

    Methods
    -------
    __lt__(self, other)
        Compares each state through their evaluation function values: f(n)=g(n)+h(n)
    """

    def __init__(self, nplanes=None, legs=None, g=0, h=0):
        """
        Parameters
        ----------
        nplanes : int, optional
            The number of planes (default is None)
        legs : list of dictionaries, optional
            A list with the existing legs (default is None)
        """
        # With a plane count: one empty tod slot and one empty schedule per plane;
        # otherwise both stay None (e.g. for a state filled in later).
        self.tod = [None] * nplanes if nplanes else None
        self.schedule = [[] for _ in range(nplanes)] if nplanes else None
        # Shallow copy so mutating `remaining` never touches the caller's list.
        self.remaining = list(legs) if legs else None
        self.g = g
        self.h = h

    def __lt__(self, other):
        """Compares each state through their evaluation function values: f(n)=g(n)+h(n)

        Returns
        -------
        bool
            True if the evaluation function of this state is less than the other's
        """
        return self.g + self.h < other.g + other.h
class ASARProblem(search.Problem):
"""A class used to represent the ASAR problem, derived from the abstract class search.Problem (https://github.com/aimacode/aima-python)
...
Attributes
----------
A : dictionary
Dictionary with available airports. The key is the airport code and the value is a dictionary with keys: start and end times
C : dictionary
Dictionary where the keys are the airplanes classes and the values are their rotation times
L : list of dictionaries
List of dictionaries where each dictionary represents a leg.
Each leg has as keys the departure and arrival airports and the available classes (which values correspond to the profits associated)
P : list of dictionaries
List of dictionaries where each dictionary represents an airplane. Each airplane has as keys its name and class
maxprofitall : float
Corresponds to the maximum profit of all legs +1.
This value will be used as a bound to calculate the linear cost with the given profit: cost = maxprofitall - profit
Methods
-------
actions(state)
Returns the actions that can be executed in the given state
result(state, action)
Computes the state that results from executing a given action in the given state
goal_test(state)
Checks if the state is a goal state
path_cost(c, s1, a, s2)
Calculates the cost of a solution path that arrives at state2 from state1 via action a.
Assumes cost c to get up to state1
heuristic(n, state=None)
Computes the heuristic of node n, which encapsulates a given state
load(f)
Loads a problem from a (opened) file object f (the formatting is specified in the Mini-Project statement).
Gets the max profit of each leg. Initializes the initial state of this problem
save(f)
Saves a solution state s to a (opened) file object f (the formatting is specified in the Mini-Project statement).
calculate_profit(s)
Calculates the profit of the provided state (which corresponds to the airplanes schedules)
nextleg_dep_time(leg, idx, dep_time)
Computes the time at which the airplane can start the next leg
formatted_schedule(i, schedule)
Makes a string which represents an airplane schedule, that will be written int the output file
(with the formatting specified in the Mini-Project statement)
"""
def __init__(self):
super().__init__(None)
self.A = self.C = {}
self.L = self.P = []
self.maxprofitall = 0
    def actions(self, state):
        """Returns the actions that can be executed in the given
        state. The result would typically be a list, but if there are
        many actions, consider yielding them one at a time in an
        iterator, rather than building them all at once.

        Checks done before returning an action to decrease expanded nodes include:
            Added leg is compatible with airport opening/closing times
            Matching "current leg arrival" and "next leg departure" airports
            Plane can make more trips (current airport is not closed)
            If added leg is the last of the airplane, must match with the first airport
            Before assigning a leg to "empty" airplane, there must be at least two legs left to close the loop

        Parameters
        ----------
        state : object
            State of node chosen for expansion

        Yields
        -------
        tuple
            A tuple containing the index of the airplane to which the leg will
            be added, the leg to be added and the new tod of the airplane.
            Note: tod semantics — None/unset means the plane has not flown yet,
            a time value means it is mid-rotation, and '' (empty string, set
            below) marks the plane's schedule as closed.
        """
        for idx, airplane_legs in enumerate(state.schedule):
            if not airplane_legs:
                # Plane has no legs yet: it may start from any remaining leg's
                # departure airport, at that airport's opening time.
                if len(state.remaining) == 1: # One leg left and empty airplane, don't add
                    continue
                for next_leg in state.remaining:
                    dep_time = self.A[next_leg['dep']]['start']
                    new_tod = self.nextleg_dep_time(next_leg, idx, dep_time)
                    if new_tod == -1: # Conflict regarding times, don't add
                        continue
                    yield (idx, next_leg, new_tod)
            else:
                if not state.tod[idx]: # Empty string, schedule for this airplane is full
                    continue
                for next_leg in state.remaining:
                    # Next leg must depart from where the last one arrived.
                    if next_leg['dep'] != airplane_legs[-1]['arr']:
                        continue
                    new_tod = self.nextleg_dep_time(next_leg, idx, state.tod[idx])
                    if new_tod == -1: # Conflict regarding times, don't add
                        continue
                    if new_tod >= self.A[next_leg['arr']]['end']: # Will be the plane's last airport
                        if airplane_legs[0]['dep'] != next_leg['arr']: # Does not loop back, invalid node
                            continue
                        # Close this plane's schedule (see the tod guard above).
                        new_tod = ''
                    yield (idx, next_leg, new_tod)
def result(self, state, action):
"""Computes the state that results from executing a given
action in the given state. The action must be one of
self.actions(state).
Parameters
----------
state : object
action : tuple
Returns
----------
new_state : object
"""
new_state = copy_deepcopy(state)
idx_airplane = action[0]
new_leg = action[1]
new_tod = action[2]
new_state.tod[idx_airplane] = new_tod
new_state.schedule[idx_airplane].append(new_leg)
new_state.remaining.remove(new_leg)
new_state.g = self.path_cost(state.g, state, action, new_state)
new_state.h = self.heuristic(None, new_state)
return new_state
def goal_test(self, state):
"""Checks if the state is a goal state
Returns
-------
bool
True if we are in a goal state, or False otherwise
"""
if not state.remaining:
# There are no remaining legs to add
# We can check the validity of our solution
for plane in state.schedule:
if not plane:
continue
if plane[0]['dep'] != plane[-1]['arr']:
# Departure airport is not the same as the arrival
return False
return True
else:
return False
def path_cost(self, c, s1, a, s2):
"""Calculates the cost of a solution path that arrives at state2 from
state1 via action a, assuming cost c to get up to state1.
Receives a = (index of airplane, leg, ...) e.g. (3, {'dep': 'LPPT', 'arr': ...}, ...)
Goes to the list of airplanes in self and figures out the class of airplane
With the class information goes to the leg to add and figures out the profit
For clarity: self.P[a[0]] = {'airplane': 'CS-TUA', 'class': 'a320'}
Parameters
----------
c : float
a : tuple
s1, s2 : object
Returns
-------
float
"""
return c + self.maxprofitall - a[1][self.P[a[0]]['class']]
def heuristic(self, n, state=None):
"""Computes the heuristic of node n, which encapsulates a given state
Parameters
----------
n : object
state : object
Returns
-------
heurfun : float
"""
if n is None:
curr_state = state
else:
curr_state = n.state
heurfun = 0
for leg in curr_state.remaining:
heurfun += self.maxprofitall - leg['maxprofit']
return heurfun
def load(self, f):
"""Loads a problem from a (opened) file object f (the formatting is specified in the Mini-Project statement).
Gets the max profit of each leg. Initializes the initial state of this problem
Parameters
----------
f : file
"""
self.A, self.C, self.P, self.L = read_input_from_file(f)
self.L = get_maxprofits(self.L, self.C)
self.maxprofitall = max([leg['maxprofit'] for leg in self.L]) + 1
self.initial = state(len(self.P), self.L)
def save(self, f, s):
"""Saves a solution state s to a (opened) file object f (the formatting is specified in the Mini-Project statement).
Parameters
----------
f : file
s : state | |
<gh_stars>0
import numpy as np
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
import time
def sub2d(img, value):
    """Subtract a scalar from every pixel of a 2-D image.

    Args:
        img: 2-D array-like image.
        value: scalar to subtract from each pixel.
    Returns:
        numpy.ndarray: float array of the same shape, img - value element-wise.
    """
    # Vectorized replacement for the original O(N*M) Python double loop.
    # Converting to float first matches the original float result buffer
    # (np.zeros) and avoids unsigned-integer wraparound on uint8 images.
    return np.asarray(img, dtype=float) - value
def add2d(img, value):
    """Add a scalar to every pixel of a 2-D image.

    Args:
        img: 2-D array-like image.
        value: scalar to add to each pixel.
    Returns:
        numpy.ndarray: float array of the same shape, img + value element-wise.
    """
    # Vectorized replacement for the original O(N*M) Python double loop.
    # Converting to float first matches the original float result buffer
    # (np.zeros) and avoids unsigned-integer wraparound on uint8 images.
    return np.asarray(img, dtype=float) + value
def min1d(arr):
    """Return the smallest element of a non-empty 1-D sequence."""
    smallest = arr[0]
    for element in arr:
        if element < smallest:
            smallest = element
    return smallest
def max1d(arr):
    """Return the largest element of a non-empty 1-D sequence."""
    largest = arr[0]
    for element in arr:
        if element > largest:
            largest = element
    return largest
def flatten(img):
    """Unroll a 2-D (list-of-rows) image into one flat list, row-major."""
    flat_list = []
    for row in img:
        flat_list.extend(row)
    return flat_list
def min2d(img):
    """Return the minimum pixel value of a 2-D image (via flatten + min1d)."""
    return min1d(flatten(img))
def max2d(img):
    """Return the maximum pixel value of a 2-D image (via flatten + max1d)."""
    return max1d(flatten(img))
def normalize(img):
    '''
    Linearly rescale the pixel intensity range of an image to [0, 1].

    Uses the hand-rolled helpers:
    - min2d / max2d : scan the 2-D array for its extreme pixel values
    - sub2d : element-wise subtraction of the minimum
    '''
    lo = min2d(img)
    hi = max2d(img)
    return sub2d(img, lo) / (hi - lo)
def get_histogram(image, bins):
    """Count pixel intensities of `image` into `bins` buckets.

    Each pixel value is used directly as its bucket index, so pixels must be
    integers in [0, bins).
    """
    histogram = np.zeros(bins)
    for pixel in flatten(image):
        histogram[pixel] += 1
    return histogram
def cumsum(hist):
    """Return the running (cumulative) sum of `hist` as a numpy array."""
    totals = []
    running = 0
    for count in hist:
        running = running + count
        totals.append(running)
    return np.array(totals)
def cumSumNorm(hist):
    """Cumulative histogram rescaled to 0-255 and cast to uint8 (a CDF LUT)."""
    cs = cumsum(hist)
    lo = min1d(cs)
    span = max1d(cs) - lo
    # Stretch the cumulative counts onto the displayable 0-255 range.
    rescaled = (cs - lo) * 255 / span
    # Images cannot hold floating point intensities, so drop back to uint8.
    return rescaled.astype('uint8')
def histEqualize(image):
    """Histogram-equalize an image.

    Builds the 256-bin histogram, turns its normalized cumulative sum into a
    lookup table, and maps every pixel of the flattened image through it.
    Returns the equalized image as a flat (1-D) array.
    """
    lut = cumSumNorm(get_histogram(image, 256))
    # Fancy-index the LUT with every pixel value of the flattened image.
    return lut[flatten(image)]
# seyam
def conv_step(img_slice , kernel, filter_type):
    '''
    One convolution step: combine a single image window with the kernel.

    Args:
        img_slice: slice of image that will convolved with image
        kernel : filter / mask (must have the same shape as img_slice)
        filter_type : type of filter ("average", "median" or "gaussian")
    Returns:
        the filter response for this window (a scalar).
        NOTE(review): on a shape mismatch, or an unrecognized filter_type,
        this function only prints / falls through and implicitly returns
        None -- callers should be prepared for that.
    '''
    if img_slice.shape != kernel.shape:
        print("the two shapes are different\n",img_slice, kernel)
    else :
        nrows,ncols = img_slice.shape
        result = np.zeros((nrows,ncols))
        # Element-wise product of the window and the kernel.
        for row in range(nrows):
            for col in range(ncols):
                result[row][col] = img_slice[row][col] * kernel[row][col]
        if filter_type == "average":
            # Mean of the window: sum divided by the kernel area.
            sum_img = img_sum(result)
            return sum_img/(kernel.shape[0]*kernel.shape[1])
        if filter_type == "median":
            # Median of the (kernel-weighted) window values.
            med_img = img_median(result)
            return med_img
        if filter_type == "gaussian":
            # The gaussian kernel is normalized to sum to 1, so the plain
            # sum of the products is the filter response.
            guass_img = img_sum(result)
            return guass_img
def img_flatten(img):
    '''Unroll a 2-D matrix into a flat Python list (row-major order).

    Args:
        img : matrix with a .shape attribute
    Return:
        flattened_img : list of all elements of img
    '''
    nrows, ncols = img.shape
    return [img[row][col] for row in range(nrows) for col in range(ncols)]
def img_sum(img):
    '''Sum of all elements of a 2-D matrix.

    Args:
        img (matrix)
    Return:
        total : value of the summation
    '''
    nrows, ncols = img.shape
    total = 0
    for row in range(nrows):
        for col in range(ncols):
            total = total + img[row][col]
    return total
def guassian_kernel(kernel_size, sigma):
    '''
    Build a normalized 2-D Gaussian kernel.

    Args:
        kernel_size: tuple of size of kernel (only the first entry is used,
            so the kernel is always square -- assumes callers pass (k, k),
            TODO confirm)
        sigma : the standard deviation (larger sigma -- more blurring)
    Return:
        kernel: auto generated kernel from the normal distribution,
            normalized so its entries sum to 1
    '''
    size,_ = kernel_size
    # Center the Gaussian on the middle cell of the kernel.
    x0 = y0 = size // 2
    kernel = np.zeros((size, size))
    for i in range(size):
        for j in range(size):
            # NOTE(review): 22/7 approximates pi here; since the constant
            # factor cancels in the normalization below, the final kernel
            # values are unaffected by the approximation.
            kernel[i, j] = np.exp(-(0.5/(sigma*sigma)) * (np.square(i-x0) +
                np.square(j-y0))) / np.sqrt(2*(22/7)*sigma*sigma)
    # Normalize so the kernel sums to 1 (preserves overall image brightness).
    kernel = kernel/img_sum(kernel)
    return kernel
def img_median(img):
    '''Median value of all elements of a 2-D matrix.

    Args:
        img: matrix
    Return:
        value : median value (mean of the two middle elements when the
            element count is even)
    '''
    sorted_image = sorted(img_flatten(img))  # unroll the matrix, then sort
    img_size = len(sorted_image)
    if img_size % 2 == 0:
        # Bug fix: the original indexed with the float `img_size/2`, which
        # raises TypeError in Python 3. Use integer (floor) division.
        mid = img_size // 2
        return (sorted_image[mid - 1] + sorted_image[mid]) / 2
    return sorted_image[img_size // 2]
def convolution(img, filter_type , filter_size, sigma = 1):
    '''
    Apply a "valid" (no padding) 2-D filter to an image.

    Args:
        img : the image that will be filtered (nd.array of int8)
        filter_type : one of three filters ("average", "gaussian", "median")
        filter_size : the size of the filter, as a (height, width) tuple
        sigma : std-dev of the gaussian kernel (ignored for the other filters)
    Return:
        filtered_img : uint8 image after filtering, with shape
            (nrows - filter_height + 1, ncols - filter_width + 1)
    '''
    nrows, ncols = img.shape
    filter_height, filter_width = filter_size
    out_width = ncols - filter_width + 1
    out_height = nrows - filter_height + 1
    print(out_height, out_width)
    # Bug fix: build the kernel ONCE instead of re-creating it for every
    # output pixel (it never changes during the convolution).
    if filter_type in ["average", "median"]:
        kernel = np.uint8(np.ones(filter_size))
    if filter_type == "gaussian":
        kernel = guassian_kernel(filter_size, sigma)
    filtered_img = np.zeros((out_height, out_width))  # "valid" output size
    for row in range(out_height):
        vert_start = row
        vert_end = row + filter_height
        for col in range(out_width):
            horiz_start = col
            horiz_end = col + filter_width
            # Bug fix: the original sliced img[horiz, vert] and assigned to
            # filtered_img[col][row], transposing window and output; that
            # raised IndexError for any non-square image. Rows index the
            # first axis, columns the second.
            img_slice = img[vert_start:vert_end, horiz_start:horiz_end]
            filtered_img[row][col] = conv_step(img_slice, kernel, filter_type)
    return np.uint8(filtered_img)
# Galloul
def compare_after_noise(image, function):
    '''
    Show the original image next to the output of `function` applied to a copy,
    so the two can be compared side by side.

    Args:
        image (np.array): the original image before any change.
        function (fun): transformation producing the second image for comparison.
    '''
    _, axes = plt.subplots(1, 2, figsize=(14, 7))
    # Left: untouched original; right: the transformed copy (the copy keeps
    # `function` from mutating the caller's image).
    axes[0].imshow(image, cmap="gray")
    axes[1].imshow(function(image.copy()), cmap="gray")
def add_uniform_noise(image, offset=0):
    '''
    Add a constant (uniform) offset to every pixel of a given image.

    Args:
        image (np.array): the image to add noise to.
        offset (int, default=0): how much noise to add to every pixel;
            when left at 0, a random offset in [0, 255] is drawn instead.
    Returns:
        noised_image (np.array): the image after uniform noise was added.
    '''
    noise = np.ones_like(image, dtype=np.uint8)
    if (offset == 0):
        # No explicit offset given: pick one at random.
        offset = np.random.randint(0, 256)
    noise *= offset
    print("Uniform noise value is:", offset)
    return image + noise
def add_saltNpepper_noise(image, Ws=0.1, Wp=0.1):
    '''
    Add salt & pepper noise to an image, with a separate weight for each type.

    Args:
        image (np.array): the image to add noise to (modified in place).
        Ws (float, default=0.1): fraction of pixels set to salt (255).
        Wp (float, default=0.1): fraction of pixels set to pepper (0).
    Returns:
        image (np.array): the same array, after noise was added.
    '''
    w, h = image.shape[0], image.shape[1]
    n_salt = int(w * h * Ws)
    n_pepper = int(w * h * Wp)
    # Splash white pixels at random positions...
    for _ in range(n_salt):
        image[np.random.randint(0, w), np.random.randint(0, h)] = 255
    # ...then black ones (pepper may overwrite salt at a shared position).
    for _ in range(n_pepper):
        image[np.random.randint(0, w), np.random.randint(0, h)] = 0
    print(f" Adding noise with salt weight: {Ws} and pepper weight:{Wp}")
    return image
def add_gaussian_noise(image, mean=0, std=10):
    '''
    Add gaussian noise to an image, drawn from a N(mean, std) distribution.

    Args:
        image (np.array): the image to add noise to.
        mean (float, default=0): mean of the gaussian noise distribution.
        std (float, default=10): standard deviation of the noise distribution.
    Returns:
        noised_image (np.array): the image after gaussian noise was added.
    '''
    # Re-seed from the clock so successive calls draw different noise.
    np.random.seed(int(time.time()))
    noise = np.random.normal(mean, std, size=image.shape)
    print(f"Adding gaussian noise with mean={mean} and std={std}")
    return image + noise
def gradient_detector(image, detector_type='sobel'):
    '''
    Apply a gradient detector to a given image to extract edges in it.

    Args:
        image (np.array): the image to detect edges in.
        detector_type (str, default='sobel', options={'sobel', 'prewitt', 'roberts'}):
            the type of kernel applied to extract edges.
    Returns:
        gradients_mag (np.array): the gradient magnitude for every pixel after applying the selected edge kernel,
            given by (sqrt(Ix^2 + Iy^2))
        gradients_angle (np.array): the gradient direction for every pixel after applying the selected edge kernel,
            calculated in rads.
        Returns None (not a tuple) when detector_type is unsupported.
    '''
    if detector_type == 'sobel':
        kernel_x = np.array([[-1, 0, 1],
            [-2, 0, 2],
            [-1, 0, 1]])
        # Vertical kernel is the (negated) transpose of the horizontal one.
        kernel_y = kernel_x.T * -1  # equivelent to -> np.array([[1, 2, 1],
                                    #                            [0, 0, 0],
                                    #                            [-1, -2, -1]])
    elif detector_type == 'prewitt':
        kernel_x = np.array([[-1, 0, 1],
            [-1, 0, 1],
            [-1, 0, 1]])
        kernel_y = kernel_x.T * -1  # equivelent to -> np.array([[1, 1, 1],
                                    #                            [0, 0, 0],
                                    #                            [-1, -1, -1]])
    elif detector_type == 'roberts':
        kernel_x = np.array([[0, 1],
            [-1, 0]])
        kernel_y = np.array([[1, 0],
            [0, -1]])
    else:
        print("Unsupported detector, please choose either 'sobel', 'roberts', or 'prewitt' ")
        return None
    image_h, image_w = image.shape[0], image.shape[1]
    kernel_h, kernel_w = kernel_x.shape
    # Half-kernel margins; border pixels inside these margins are left at 0.
    h, w = kernel_h // 2, kernel_w // 2
    # NOTE(review): all four buffers are uint8, so negative per-pixel gradient
    # responses wrap around and magnitudes above 255 truncate; the angle below
    # is then computed from the wrapped values. Confirm whether this is the
    # intended behavior before relying on gradients_angle.
    gradients_x = np.zeros_like(image, dtype=np.uint8)
    gradients_y = np.zeros_like(image, dtype=np.uint8)
    gradients_mag = np.zeros_like(image, dtype=np.uint8)
    gradients_angle = np.zeros_like(image, dtype=np.uint8)
    for i in range(h, image_h - h):
        for j in range(w, image_w - w):
            # Correlate both kernels with the window centered on (i, j).
            conv_sum_x = 0
            conv_sum_y = 0
            for m in range(kernel_h):
                for n in range(kernel_w):
                    conv_sum_x += kernel_x[m][n] * image[i - h + m][j - w + n]
                    conv_sum_y += kernel_y[m][n] * image[i - h + m][j - w + n]
            gradients_x[i][j] = conv_sum_x
            gradients_y[i][j] = conv_sum_y
            gradients_mag[i][j] = (conv_sum_x ** 2 + conv_sum_y ** 2) ** 0.5
    gradients_angle = np.arctan2(gradients_y, gradients_x)
    return gradients_mag, gradients_angle
def non_max_suppression(gradient_mag, gradient_direction_rad):
'''
Apply non-maximum suppression for the gradients magnitude of an image using its gradients direction
Args:
gradients_mag (np.array): the gradient magnitude at every pixel, given by (sqrt(Ix^2 + Iy^2))
gradient_direction_rad (np.array): the gradient direction at every pixel, calculated in rads.
Returns:
suppressed_img (np.array): the new image after | |
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (c) Copyright 2003-2015 HP Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: <NAME>, <NAME>
#
from __future__ import print_function
__version__ = '15.1'
__title__ = 'Dependency/Version Check Utility'
__mod__ = 'hp-check'
__doc__ = """Checks dependency versions,permissions of HPLIP. (Run as 'python ./check.py' from the HPLIP tarball before installation.)"""
# Std Lib
import sys
import os
import getopt
import re
from base.sixext import PY3, to_string_utf8
from base.sixext import to_string_utf8
# Local
from base.g import *
from base import utils, tui, queues, smart_install
from installer.core_install import *
from prnt import cups
# Probe for optional device support at import time: the 'device'/'pml'
# modules need the hpmudext native extension, which may not be installed.
device_avail = False
try:
    from base import device, pml
# This import can fail due to hpmudext not being present
except ImportError:
    log.debug("Device library is not avail.")
else:
    device_avail = True
################ Global variables ############
# Usage table rendered by utils.format_text()/usage(); rows are
# (text, option-text, style, flag) tuples or shared utils.USAGE_* entries.
USAGE = [(__doc__, "", "name", True),
         ("Usage: %s [OPTIONS]" % __mod__, "", "summary", True),
         utils.USAGE_OPTIONS,
         ("Compile-time check:", "-c or --compile", "option", False),
         ("Run-time check:", "-r or --run or --runtime", "option", False),
         ("Compile and run-time checks:", "-b or --both (default)", "option", False),
         utils.USAGE_LOGGING1, utils.USAGE_LOGGING2, utils.USAGE_LOGGING3,
         utils.USAGE_LOGGING_PLAIN,
         utils.USAGE_HELP,
         utils.USAGE_NOTES,
         ("1. For checking for the proper build environment for the HPLIP supplied tarball (.tar.gz or .run),", "", "note", False),
         ("use the --compile or --both switches.", "", "note", False),
         ("2. For checking for the proper runtime environment for a distro supplied package (.deb, .rpm, etc),", "", "note", False),
         ("use the --runtime switch.", "", "note", False),
         ]

# Matches 'FUNC#<name>' version entries, which are resolved through
# CoreInstall.version_func instead of by running an external command.
Ver_Func_Pat = re.compile('''FUNC#(.*)''')

# Whether HPLIP was built against libusb-0.1; read by get_comment().
# NOTE(review): validate() assigns a local of the same name from sys_conf
# without a 'global' statement, so this module-level default is never
# updated there -- confirm whether that is intended.
IS_LIBUSB01_ENABLED = 'no'
############ Functions #########
# Usage function
def usage(typ='text'):
    """Print the hp-check usage text in the requested format and exit(0).

    typ -- output format passed through to utils.format_text();
           'text' additionally prints the title banner first.
    """
    if typ == 'text':
        utils.log_title(__title__, __version__)
    utils.format_text(USAGE, typ, __title__, __mod__, __version__)
    sys.exit(0)
# Displays the hp-check title, the three run modes, check types and status legend.
def show_title():
    """Print the hp-check banner followed by an overview of its run modes,
    check types and the meaning of each status value."""
    utils.log_title(__title__, __version__)

    log.info(log.bold("Note: hp-check can be run in three modes:"))
    mode_notes = (
        "1. Compile-time check mode (-c or --compile): Use this mode before compiling the HPLIP supplied tarball (.tar.gz or .run) to determine if the proper dependencies are installed to successfully compile HPLIP.",
        "2. Run-time check mode (-r or --run): Use this mode to determine if a distro supplied package (.deb, .rpm, etc) or an already built HPLIP supplied tarball has the proper dependencies installed to successfully run.",
        "3. Both compile- and run-time check mode (-b or --both) (Default): This mode will check both of the above cases (both compile- and run-time dependencies).",
    )
    for note in mode_notes:
        for line in tui.format_paragraph(note):
            log.info(line)
    log.info()

    check_type_notes = (
        "Check types:",
        "a. EXTERNALDEP - External Dependencies",
        "b. GENERALDEP - General Dependencies (required both at compile and run time)",
        "c. COMPILEDEP - Compile time Dependencies",
        "d. [All are run-time checks]",
        "PYEXT\nSCANCONF\nQUEUES\nPERMISSION",
    )
    for note in check_type_notes:
        for line in tui.format_paragraph(note):
            log.info(line)
    log.info()

    log.info("Status Types:")
    log.info(" OK")
    log.info(" MISSING - Missing Dependency or Permission or Plug-in")
    log.info(" INCOMPAT - Incompatible dependency-version or Plugin-version")
    log.info()
# Status_Type function. --> Returns the package installed status information
def Status_Type(Installedsts, min_ver, Installed_ver):
    """Classify one dependency's state as 'OK', 'INCOMPAT' or 'MISSING'.

    Installedsts  -- presence indicator from the dependency's check callable
                     (any non-zero/True-ish value counts as installed).
    min_ver       -- minimum required version string; '-' means "no minimum".
    Installed_ver -- detected version string.
    """
    installed = Installedsts is True or Installedsts != 0
    if not installed:
        return "MISSING"
    # No minimum requirement, or the installed version satisfies it.
    if min_ver == '-':
        return "OK"
    return "OK" if check_version(Installed_ver, min_ver) else "INCOMPAT"
# get_comment function --> Returns the 'comments' corresponding to the function.
def get_comment(package, Inst_status, installed_ver):
    """Return the human-readable remark shown next to a dependency's status.

    package       -- dependency name.
    Inst_status   -- status string from Status_Type().
    installed_ver -- detected version string ('-' when unknown).
    """
    ok = Inst_status == 'OK'

    if package in ('pyqt', 'pyqt4'):
        if ok:
            if not check_version(installed_ver, '2.3') and check_version(installed_ver, '2.2'):
                return "Fax is not supported if version is lessthan 2.3"
            if not check_version(installed_ver, '2.2'):
                return "Python Programming is not supported if version is lessthan 2.2"
        return "-"

    if package == 'hpaio':
        if ok:
            return "'hpaio found in /etc/sane.d/dll.conf'"
        return "'hpaio not found in /etc/sane.d/dll.conf. hpaio needs to be added in this file.'"

    if package in ('cupsext', 'pcardext', 'hpmudext'):
        return "-" if ok else "'Not Found or Failed to load, Please reinstall HPLIP'"

    if package == 'cups':
        return "'CUPS Scheduler is running'" if ok else "'CUPS may not be installed or not running'"

    # Only meaningful when HPLIP was built against libusb-0.1.
    if package == 'libusb' and IS_LIBUSB01_ENABLED == "yes":
        return "-" if ok else "'libusb-1.0 needs to be installed'"

    if package == 'dbus':
        return "-" if ok else "'DBUS may not be installed or not running'"

    # Generic fallback for every other package.
    return "-" if ok else "'%s needs to be installed'" % package
########## Classes ###########
# DependenciesCheck class -- wraps a CoreInstall instance (composition, not inheritance)
class DependenciesCheck(object):
def __init__(self, mode=MODE_CHECK, ui_mode=INTERACTIVE_MODE, ui_toolkit='qt4'):
# CoreInstall.__init__(self,mode,ui_mode,ui_toolkit)
self.num_errors = 0
self.num_warns = 0
self.core = CoreInstall(mode, ui_mode, ui_toolkit)
# self.missing_user_grps = ''
self.ui_toolkit = ui_toolkit
# self.disable_selinux = False
self.req_deps_to_be_installed = []
self.opt_deps_to_be_installed =[]
self.cmds_to_be_run = []
self.comm_error_devices = {}
self.plugin_status = ''
self.smart_install_devices = []
self.user_grps_cmd = ''
def __update_deps_info(self, sup_dist_vers, d, deps_info):
if d == 'cups-ddk' and self.cups_ddk_not_req == True:
return
elif self.ui_toolkit != 'qt5' and self.ui_toolkit != 'qt4' and self.ui_toolkit != 'qt3' and d == 'pyqt':
return
elif d == 'pyqt' and self.ui_toolkit == 'qt5':
return
elif d == 'pyqt' and self.ui_toolkit == 'qt4':
return
elif d == 'pyqt4' and self.ui_toolkit == 'qt3':
return
elif d == 'hpaio' and not self.scanning_enabled:
return
elif self.core.distro =="rhel" and "5." in self.distro_version:
if d in ['dbus','python-devel','python-dbus','pyqt4-dbus','libnetsnmp-devel','gcc','make','reportlab','policykit','sane-devel','cups-ddk']:
return
if deps_info[6] is None:
installed_ver = '-'
elif Ver_Func_Pat.search(deps_info[6]):
if deps_info[6] in self.core.version_func:
installed_ver = self.core.version_func[deps_info[6]]()
else:
installed_ver = '-'
else:
installed_ver = get_version(deps_info[6])
Status = Status_Type(deps_info[3](),deps_info[5],installed_ver)
comment = get_comment(d, Status, installed_ver)
packages_to_install, commands=[],[]
if self.core.is_auto_installer_support():
packages_to_install, commands = self.core.get_dependency_data(d)
if not packages_to_install and d == 'hpaio':
packages_to_install.append(d)
else:
packages_to_install, commands = self.core.get_dependency_data(d,sup_dist_vers)
if not packages_to_install and d == 'hpaio':
packages_to_install.append(d)
if deps_info[0]:
package_type = "REQUIRED"
else:
package_type = "OPTIONAL"
if d == 'cups' and ((installed_ver == '-') or check_version(installed_ver,'1.4')):
self.cups_ddk_not_req = True
log.debug("cups -ddk not required as cups version [%s] is => 1.4 "%installed_ver)
if d == 'hpmudext' and Status == 'OK':
self.hpmudext_avail = True
if Status == 'OK':
log.info(" %-20s %-60s %-15s %-15s %-15s %-10s %s" %(d,deps_info[2], package_type,deps_info[5],installed_ver,Status,comment))
else:
log.info(log.red(" error: %-13s %-60s %-15s %-15s %-15s %-10s %s" %(d,deps_info[2], package_type,deps_info[5],installed_ver,Status,comment)))
self.num_errors += 1
for cmd in commands:
if cmd:
self.cmds_to_be_run.append(cmd)
if package_type == "OPTIONAL":
for pkg in packages_to_install:
if pkg:
self.opt_deps_to_be_installed.append(pkg)
else:
for pkg in packages_to_install:
if pkg:
self.req_deps_to_be_installed.append(pkg)
    def get_required_deps(self):
        """Return the packages recorded for REQUIRED dependencies."""
        return self.req_deps_to_be_installed
    def get_optional_deps(self):
        """Return the packages recorded for OPTIONAL dependencies."""
        return self.opt_deps_to_be_installed
    def get_cmd_to_run(self):
        """Return the commands accumulated while checking dependencies."""
        return self.cmds_to_be_run
# def get_disable_selinux_status(self):
# return self.disable_selinux
    def get_communication_error_devs(self):
        """Return the dict of devices that hit communication errors."""
        return self.comm_error_devices
# def get_missing_user_grps(self):
# return self.missing_user_grps
    def get_user_grp_cmd(self):
        """Return the recorded user-group command string."""
        return self.user_grps_cmd
    def get_plugin_status(self):
        """Return the recorded plug-in status string."""
        return self.plugin_status
    def get_smart_install_devices(self):
        """Return the list of devices found in smart-install mode."""
        return self.smart_install_devices
def validate(self,time_flag=DEPENDENCY_RUN_AND_COMPILE_TIME, is_quiet_mode= False):
############ Variables #######################
self.cups_ddk_not_req = False
self.hpmudext_avail = False
self.ui_toolkit = sys_conf.get('configure','ui-toolkit')
org_log_location = log.get_where()
if is_quiet_mode:
log.set_where(log.LOG_TO_FILE)
IS_LIBUSB01_ENABLED = sys_conf.get('configure', 'libusb01-build', 'no')
vrs =self.core.get_distro_data('versions_list')
supported_distro_vrs= self.core.distro_version
if self.core.distro_version not in vrs and len(vrs):
supported_distro_vrs= vrs[len(vrs)-1]
log.warn(log.bold("%s-%s version is not supported. Using %s-%s versions dependencies to verify and install..." \
%(self.core.distro, self.core.distro_version, self.core.distro, supported_distro_vrs)))
tui.header("SYSTEM INFO")
Sts, Kernel_info =utils.run("uname -r -v -o")
Sts, Host_info =utils.run("uname -n")
Sts, Proc_info =utils.run("uname -r -v -o")
log.info(" Kernel: %s Host: %s Proc: %s Distribution: %s %s"\
%(Kernel_info,Host_info,Proc_info,self.core.distro, self.core.distro_version))
log.info(" Bitness: %s bit\n"%utils.getBitness())
tui.header("HPLIP CONFIGURATION")
v = sys_conf.get('hplip', 'version')
if v:
home = sys_conf.get('dirs', 'home')
log.info("HPLIP-Version: HPLIP %s" %v)
log.info("HPLIP-Home: %s" %home)
if self.core.is_auto_installer_support():
log.info("HPLIP-Installation: Auto installation is supported for %s distro %s version " %(self.core.distro_name, self.core.distro_version))
else:
log.warn("HPLIP-Installation: Auto installation is not supported for %s distro %s version " %(self.core.distro, self.core.distro_version))
log.info()
log.info(log.bold("Current contents of '/etc/hp/hplip.conf' file:"))
try:
output = open('/etc/hp/hplip.conf', 'r').read()
except (IOError, OSError) as e:
log.error("Could not access file: %s. Check HPLIP installation." % e.strerror)
self.num_errors += 1
else:
log.info(output)
log.info()
log.info(log.bold("Current contents of '/var/lib/hp/hplip.state' file:"))
try:
| |
"""
pyarchive.submission
A Python library which provides an interface for uploading files to the
Internet Archive.
copyright 2004-2006, Creative Commons, <NAME>
"""
__id__ = "$Id: submission.py 640 2006-06-23 02:34:35Z nyergler $"
__version__ = "$Revision: 640 $"
__copyright__ = '(c) 2004, Creative Commons, <NAME>'
__license__ = 'licensed under the GNU GPL2'
import cStringIO as StringIO
import cb_ftp
import httplib
import socket
import urllib
import urllib2
import xml.parsers.expat
import xml.sax.saxutils
import elementtree.ElementTree as etree
import os.path
import string
import types
import codecs
import time
from pyarchive.exceptions import MissingParameterException
from pyarchive.exceptions import SubmissionError
import pyarchive.utils
import pyarchive.identifier
import pyarchive.const
import exceptions
MAX_RETRY = 10
class UploadApplication(object):
    """Wraps the identifying information (name and version) of the
    application performing an upload, all exposed as read-only properties."""

    def __init__(self, application_name, application_version):
        self._name = application_name
        self._version = application_version

    @property
    def application(self):
        """The uploading application's name."""
        return self._name

    @property
    def version(self):
        """The uploading application's version string."""
        return self._version

    @property
    def user_agent(self):
        """A user-agent string of the form '<name> <version>'."""
        return "%s %s" % (self._name, self._version)
class ArchiveItem:
    """One archive.org submission: item-level metadata plus the files to
    upload.  The generated _meta.xml has the shape:

    <metadata>
      <collection>opensource_movies</collection>
      <mediatype>movies</mediatype>
      <title>My Home Movie</title>
      <runtime>2:30</runtime>
      <director><NAME></director>
    </metadata>
    """

    # ***
    # old constructor signature:
    #
    #def __init__(self, uploader, identifier, collection, mediatype,
    #             title, runtime=None, adder=None, license=None):
    # ***

    def __init__(self, uploader, license=None):
        """Initialize the submission; uploader should be an instance of
        UploadApplication."""
        self.files = []             # ArchiveFile objects added via addFile()
        self.uploader = uploader
        self.__identifier = None    # set/validated via the 'identifier' property
        self.collection = None
        self.mediatype = None
        self.title = None
        self.metadata = {}
        self.metadata['licenseurl'] = license
        self.archive_url = None     # filled in after a successful submit()

    def __setitem__(self, key, value):
        # 'subjects' is split on commas and stored as a list under the
        # singular key 'subject'; everything else is stored verbatim.
        if key == 'subjects':
            subjects = [n.strip() for n in value.split(',')]
            self.metadata['subject'] = subjects
        else:
            self.metadata[key] = value

    def __getitem__(self, key):
        return self.metadata[key]

    def __getIdentifier(self):
        """Return the current IA identifier for the submission, or
        None if an identifier has not been successfully set."""
        return self.__identifier

    def __setIdentifier(self, identifier):
        """Check if the identifier is available by calling create.
        If it is, store the FTP information and return True. If the
        identifier is not available or does not meet standards, throw
        an exception."""
        if pyarchive.identifier.conforms(identifier) and \
           pyarchive.identifier.available(identifier):
            self.__identifier = identifier
            return True
        # NOTE(review): bare Exception with no message; a dedicated
        # exception type would make failures easier to handle upstream.
        raise Exception()

    identifier = property(__getIdentifier, __setIdentifier)

    def addFile(self, filename, source, format=None, claim=None):
        """Add a local file to the submission; returns the new ArchiveFile."""
        self.files.append(ArchiveFile(filename, source, format, claim))
        # set the running time to defaults from the item metadata, if any
        if 'runtime' in self.metadata:
            self.files[-1].runtime = self.metadata['runtime']
        # return the added file object
        return self.files[-1]

    def metaxml(self, username=None):
        """Generates _meta.xml to use in submission;
        returns a file-like object."""
        # define a convenience handle to XML escape routine
        xe = xml.sax.saxutils.escape
        meta_out = StringIO.StringIO()
        # wrap the buffer so unicode written below is UTF-8 encoded
        result = codecs.getwriter('UTF-8')(meta_out)
        result.write('<metadata>')
        # write the required keys
        result.write(u"""
<identifier>%s</identifier>
<title>%s</title>
<collection>%s</collection>
<mediatype>%s</mediatype>
<resource>%s</resource>
<upload_application appid="%s" version="%s" />
""" % (self.identifier,
       xe(self.title),
       self.collection,
       self.mediatype,
       self.mediatype,
       self.uploader.application,
       self.uploader.version) )
        if username is not None:
            result.write(u"<uploader>%s</uploader>\n" % username)
        # write any additional metadata
        for key in self.metadata:
            if self.metadata[key] is not None:
                value = self.metadata[key]
                # check if value is a list
                if type(value) in [types.ListType, types.TupleType]:
                    # this is a sequence: one element per entry
                    for n in value:
                        result.write(u'<%s>%s</%s>\n' % (
                            key,
                            xe(str(n)),
                            key)
                            )
                else:
                    result.write(u'<%s>%s</%s>\n' % (
                        key,
                        xe(str(value)),
                        key) )
        result.write(u'</metadata>\n')
        # rewind both the writer and the underlying buffer before returning
        result.seek(0)
        meta_out.seek(0)
        return meta_out

    def filesxml(self):
        """Generates _files.xml to use in submission;
        returns a file-like object."""
        result = StringIO.StringIO()
        result.write('<files>\n')
        for archivefile in self.files:
            result.write(archivefile.fileNode())
        result.write('</files>\n')
        result.seek(0)
        return result

    def sanityCheck(self):
        """Perform sanity checks before submitting to archive.org"""
        # check for required fields
        if self.identifier is None:
            raise MissingParameterException("No identifier specified.")
        if self.collection is None:
            raise MissingParameterException("No collection specified.")
        if self.mediatype is None:
            raise MissingParameterException("No mediatype specified.")
        if self.metadata['licenseurl'] is None:
            raise MissingParameterException("No licenseurl specified.")
        # check that files were specified
        if len(self.files) < 1:
            raise MissingParameterException("No files selected.")
        # perform sanity checks for each file
        for archivefile in self.files:
            archivefile.sanityCheck()

    def createSubmission(self, username, identifier):
        """Create a new submission at archive.org.

        If successful returns a tuple containing (server, path)."""
        retry_count = 0
        new_url = "/create.php"
        headers = {"Content-type": "application/x-www-form-urlencoded",
                   "Accept": "text/plain",
                   "User-Agent": self.uploader.user_agent}
        params = urllib.urlencode({'xml':1,
                                   'user':username,
                                   'identifier':identifier}
                                  )
        conn = httplib.HTTPConnection('www.archive.org')
        conn.request('POST', new_url, params, headers)
        # NOTE(review): the POST is issued once, outside the loop; each
        # retry only re-reads the response on the same connection --
        # confirm this is the intended retry behavior.
        while retry_count < MAX_RETRY:
            try:
                resp = conn.getresponse()
                response = resp.read()
                # successfully read
                break
            except (socket.error, httplib.HTTPException), e:
                # increment the retry count
                retry_count = retry_count + 1
                # short delay to prevent hammering server...
                time.sleep(1)
        # make sure we were successful
        if retry_count == MAX_RETRY:
            # unsuccessful
            # NOTE(review): 'exceptions' is the Python 2 builtin module,
            # which defines no CommunicationsError -- this likely meant
            # pyarchive.exceptions; confirm.
            raise exceptions.CommunicationsError(
                "Unable to create submission.")
        # parse the response
        try:
            result = etree.fromstring(response)
        except xml.parsers.expat.ExpatError, e:
            # XML format error occurred... raise our own Exception
            raise SubmissionError("Invalid response format.", response)
        # result = etree.fromstring(response).getroot()
        if result.tag != 'result':
            raise SubmissionError("Unknown response format: %s" %
                                  etree.tostring(result))
        if result.attrib['type'] == "success":
            url = result.find('url').text
            print url
            return url.split('/')
        else:
            # some error occured; throw an exception with the message
            raise Exception(result.find('message').text)

    def completeSubmission(self, username):
        """Complete the submission at archive.org; return True if successful,
        otherwise raise an exception."""
        retry_count = 0
        # call the import url, check the return result
        importurl = "http://www.archive.org/checkin.php?" \
                    "xml=1&identifier=%s&user=%s" % (self.identifier, username)
        # attempt to complete the submission
        while retry_count < MAX_RETRY:
            try:
                response = etree.parse(urllib2.urlopen(importurl))
                break
            except (socket.error, httplib.HTTPException), e:
                # increment the retry count
                retry_count = retry_count + 1
                # short delay to prevent hammering the server
                time.sleep(1)
        # make sure we were successful
        if retry_count == MAX_RETRY:
            # unsuccessful
            # NOTE(review): same 'exceptions' module concern as in
            # createSubmission above.
            raise exceptions.CommunicationsError(
                "Unable to complete submission.")
        # our response should be encapsulated in a <result> tag
        result = response.getroot()
        if result.tag != 'result':
            raise SubmissionError("Unknown response format: %s" %
                                  etree.tostring(result))
        # check the response status
        result_type = result.attrib['type']
        if result_type == 'success':
            # successfully completed
            return True
        else:
            # an error occured; raise an exception
            raise SubmissionError(result.find('message').text)

    def submit(self, username, password, server=None, callback=None):
        """Submit the files to archive.org.

        NOTE(review): 'callback' is used unconditionally (reset/increment/
        finish), so the default of None would raise AttributeError -- a
        progress callback appears to be effectively required.
        """
        # set the adder (if necessary)
        if self.metadata.get('adder', None) is None:
            self.metadata['adder'] = username
        # make sure we're ready to submit
        self.sanityCheck()
        # reset the status
        callback.reset(steps=10)
        # create the submission on the server
        ftp_server, ftp_path = self.createSubmission(username, self.identifier)
        # connect to the FTP server
        callback.increment(status='connecting to archive.org...')
        ftp = cb_ftp.FTP(ftp_server)
        ftp.login(username, password)
        ftp.cwd(ftp_path)
        # upload the XML files
        callback.increment(status='uploading metadata...')
        ftp.storlines("STOR %s_meta.xml" % self.identifier,
                      self.metaxml(username))
        ftp.storlines("STOR %s_files.xml" % self.identifier,
                      self.filesxml())
        # upload each file
        callback.increment(status='uploading files...')
        for archivefile in self.files:
            # determine the local path name and switch directories
            localpath, fname = os.path.split(archivefile.filename)
            os.chdir(localpath)
            # reset the gauge for this file
            callback.reset(filename=archivefile.filename)
            ftp.storbinary("STOR %s" % archivefile.archiveFilename(),
                           file(fname, 'rb'), callback=callback)
        ftp.quit()
        # complete the submission
        callback.increment(status='completing upload...')
        if self.completeSubmission(username):
            self.archive_url = pyarchive.identifier.verify_url(self.identifier)
        callback.finish()
        return self.archive_url
class ArchiveFile:
    def __init__(self, filename, source = None, format = None, claim = None):
        """Wrap a local file destined for upload; raises IOError when the
        file does not exist.  When no format is supplied it is detected
        from the file's audio info (MP3 constants)."""
        # make sure the file exists
        if not(os.path.exists(filename)):
            # can not find the file; raise an exception
            raise IOError
        # set object properties from supplied parameters
        self.filename = filename
        self.runtime = None      # may be filled in by ArchiveItem.addFile()
        self.source = source
        self.format = format
        self.__claim = claim     # optional license/claim text for files.xml
        if self.format is None:
            self.__detectFormat()
    def __detectFormat(self):
        """Set self.format from the file's audio info (MP3 constants only)."""
        info = pyarchive.utils.getFileInfo(os.path.split(self.filename)[1],
                                           self.filename)
        # info[2] is the bitrate entry; None when it could not be determined.
        bitrate = info[2]
        if bitrate is not None:
            if bitrate[1]:
                # bitrate[1] is treated as a VBR flag -- TODO confirm against
                # pyarchive.utils.getFileInfo.
                self.format = pyarchive.const.MP3['VBR']
            else:
                try:
                    self.format = pyarchive.const.MP3[bitrate[0]]
                except KeyError, e:
                    # unknown constant bitrate; fall back to the VBR label
                    self.format = pyarchive.const.MP3['VBR']
    def fileNode(self):
        """Generates the XML to represent this file in files.xml."""
        # NOTE(review): name/source are not XML-escaped here, unlike the
        # license/format fields below -- confirm filenames are always safe.
        result = '<file name="%s" source="%s">\n' % (
            self.archiveFilename(), self.source)
        if self.runtime is not None:
            result = result + '<runtime>%s</runtime>\n' % self.runtime
        # removing metadata dependency for stand-alone-ish-ness
        #if self.__claim is None:
        #    try:
        #        self.__claim = metadata(self.filename).getClaim()
        #    except NotImplementedError, e:
        #        pass
        if self.__claim:
            result = result + '<license>%s</license>\n' % \
                xml.sax.saxutils.escape(self.__claim)
        result = result + '<format>%s</format>\n</file>\n' % \
            xml.sax.saxutils.escape(self.format)
        return result
def sanityCheck(self):
"""Perform simple sanity checks before uploading."""
# make sure the file exists
if not(os.path.exists(self.filename)):
# can not find the file; raise an exception
raise IOError
# ensure necessary parameters have been supplied
if None in (self.filename, self.source, self.format):
raise MissingParameterException
def archiveFilename(self):
localpath, fname = os.path.split(self.filename)
fname = fname.replace(' ', '_')
chars = [n for n in fname if n in
(string.ascii_letters + string.digits + '._')]
result = "".join(chars)
if result[0] | |
double > const *
"""
return _simbody.Transform_p(self)
    def setP(self, p):
        """setP(Transform self, Vec3 p) -> Transform

        p: SimTK::Vec< 3,double > const &
        """
        # SWIG-generated proxy: the work happens in the native extension.
        return _simbody.Transform_setP(self, p)
    def pInv(self):
        """pInv(Transform self) -> Vec3"""
        # SWIG-generated proxy into the native simbody extension.
        return _simbody.Transform_pInv(self)
    def setPInv(self, p_FB):
        """setPInv(Transform self, Vec3 p_FB) -> Transform

        p_FB: SimTK::Vec< 3,double > const &
        """
        # SWIG-generated proxy into the native simbody extension.
        return _simbody.Transform_setPInv(self, p_FB)
    def asMat34(self):
        """asMat34(Transform self) -> SimTK::Mat< 3,4,double > const &"""
        # SWIG-generated proxy into the native simbody extension.
        return _simbody.Transform_asMat34(self)
    def toMat34(self):
        """toMat34(Transform self) -> SimTK::Mat< 3,4,double >"""
        # SWIG-generated proxy into the native simbody extension.
        return _simbody.Transform_toMat34(self)
    def toMat44(self):
        """toMat44(Transform self) -> SimTK::Mat< 4,4,double >"""
        # SWIG-generated proxy into the native simbody extension.
        return _simbody.Transform_toMat44(self)
    def T(self):
        """T(Transform self) -> Vec3"""
        # SWIG-generated proxy into the native simbody extension.
        return _simbody.Transform_T(self)
__swig_destroy__ = _simbody.delete_Transform
__del__ = lambda self: None
Transform_swigregister = _simbody.Transform_swigregister
Transform_swigregister(Transform)
class Inertia(_object):
    """Proxy of C++ SimTK::Inertia_<(double)> class.

    SWIG-generated wrapper; every method forwards to the compiled
    _simbody extension module.
    """
    # SWIG attribute plumbing: route attribute get/set through the
    # shadowed native object.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Inertia, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Inertia, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        """__init__(self, *args) -> Inertia

        Overloads (dispatched by SWIG on the runtime argument types):
          Inertia()
          Inertia(moment)                   # double
          Inertia(p, mass)                  # Vec3, double
          Inertia(moments)                  # Vec3
          Inertia(moments, products)        # Vec3, Vec3
          Inertia(xx, yy, zz)               # doubles
          Inertia(xx, yy, zz, xy, xz, yz)   # doubles
        """
        this = _simbody.new_Inertia(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this

    def getMoments(self):
        """getMoments(Inertia self) -> Vec3"""
        return _simbody.Inertia_getMoments(self)

    def getProducts(self):
        """getProducts(Inertia self) -> Vec3"""
        return _simbody.Inertia_getProducts(self)

    def isNaN(self):
        """isNaN(Inertia self) -> bool"""
        return _simbody.Inertia_isNaN(self)

    def isInf(self):
        """isInf(Inertia self) -> bool"""
        return _simbody.Inertia_isInf(self)

    def isFinite(self):
        """isFinite(Inertia self) -> bool"""
        return _simbody.Inertia_isFinite(self)

    # Destruction is handled by SWIG; __del__ is a no-op placeholder.
    __swig_destroy__ = _simbody.delete_Inertia
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime.
Inertia_swigregister = _simbody.Inertia_swigregister
Inertia_swigregister(Inertia)
class MassProperties(_object):
    """Proxy of C++ SimTK::MassProperties_<(double)> class.

    SWIG-generated wrapper; every method forwards to the compiled
    _simbody extension module.
    """
    # SWIG attribute plumbing: route attribute get/set through the
    # shadowed native object.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, MassProperties, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, MassProperties, name)
    __repr__ = _swig_repr

    def __init__(self):
        """__init__(SimTK::MassProperties_<(double)> self) -> MassProperties"""
        this = _simbody.new_MassProperties()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this

    def isExactlyMassless(self):
        """isExactlyMassless(MassProperties self) -> bool"""
        return _simbody.MassProperties_isExactlyMassless(self)

    def isNearlyMassless(self, *args):
        """isNearlyMassless(MassProperties self, double const & tol) -> bool
        isNearlyMassless(MassProperties self) -> bool

        Optional 'tol' overload dispatched by SWIG.
        """
        return _simbody.MassProperties_isNearlyMassless(self, *args)

    def isExactlyCentral(self):
        """isExactlyCentral(MassProperties self) -> bool"""
        return _simbody.MassProperties_isExactlyCentral(self)

    def isNearlyCentral(self, *args):
        """isNearlyCentral(MassProperties self, double const & tol) -> bool
        isNearlyCentral(MassProperties self) -> bool

        Optional 'tol' overload dispatched by SWIG.
        """
        return _simbody.MassProperties_isNearlyCentral(self, *args)

    def isNaN(self):
        """isNaN(MassProperties self) -> bool"""
        return _simbody.MassProperties_isNaN(self)

    def isInf(self):
        """isInf(MassProperties self) -> bool"""
        return _simbody.MassProperties_isInf(self)

    def isFinite(self):
        """isFinite(MassProperties self) -> bool"""
        return _simbody.MassProperties_isFinite(self)

    # Destruction is handled by SWIG; __del__ is a no-op placeholder.
    __swig_destroy__ = _simbody.delete_MassProperties
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime.
MassProperties_swigregister = _simbody.MassProperties_swigregister
MassProperties_swigregister(MassProperties)
# Constant re-exported from the native _simbody extension.
SimTK_DEFAULT_PRECISION = _simbody.SimTK_DEFAULT_PRECISION
def SimTK_version_SimTKcommon(major, minor, build):
    """SimTK_version_SimTKcommon(int * major, int * minor, int * build)

    Queries the SimTKcommon version numbers through int out-parameters
    (C signature above).
    """
    # SWIG-generated wrapper around the native library call.
    return _simbody.SimTK_version_SimTKcommon(major, minor, build)
def SimTK_about_SimTKcommon(key, maxlen, value):
    """SimTK_about_SimTKcommon(char const * key, int maxlen, char * value)

    Writes at most 'maxlen' characters of build information for 'key'
    into the 'value' out-buffer (C signature above).
    """
    # SWIG-generated wrapper around the native library call.
    return _simbody.SimTK_about_SimTKcommon(key, maxlen, value)
def canStoreInNonnegativeInt(*args):
    """canStoreInNonnegativeInt(value) -> bool

    Overloaded for every integral C type: bool, char, signed char,
    unsigned char, short, unsigned short, int, unsigned int, long,
    unsigned long, long long, unsigned long long.  SWIG selects the
    overload from the runtime argument type.
    """
    return _simbody.canStoreInNonnegativeInt(*args)
def isSizeInRange(*args):
    """isSizeInRange(sz, mx) -> bool

    Overloaded for matched pairs of every integral C type (char,
    signed char, short, int, long, long long and their unsigned
    counterparts).  SWIG selects the overload from the runtime
    argument types.
    """
    return _simbody.isSizeInRange(*args)
def isIndexInRange(*args):
    """isIndexInRange(ix, sz) -> bool

    Overloaded for matched pairs of every integral C type (char,
    signed char, short, int, long, long long and their unsigned
    counterparts).  SWIG selects the overload from the runtime
    argument types.
    """
    return _simbody.isIndexInRange(*args)
def isNonnegative(*args):
"""
isNonnegative(bool arg1) -> bool
Parameters
----------
arg1: bool
isNonnegative(char n) -> bool
Parameters
----------
n: char
isNonnegative(signed char n) -> bool
Parameters
----------
n: signed char
isNonnegative(short n) -> bool
Parameters
----------
n: short
isNonnegative(int n) -> bool
Parameters
----------
n: int
isNonnegative(long n) -> bool
Parameters
----------
n: long
isNonnegative(long long n) -> bool
Parameters
----------
n: long long
isNonnegative(unsigned char arg1) -> bool
Parameters
----------
arg1: unsigned char
isNonnegative(unsigned short arg1) -> bool
Parameters
----------
arg1: unsigned short
isNonnegative(unsigned int arg1) -> bool
Parameters
----------
| |
# Python Code implementation for Class_Reg ALGORITHM
import pandas as pd
import numpy as np
import itertools
from sklearn.model_selection import train_test_split
from models import *
from sklearn.metrics import accuracy_score,mean_squared_error
from tqdm import tqdm
import random
from sklearn.metrics import median_absolute_error
from sklearn.metrics import mean_absolute_error
import statistics
class class_reg(object):
    """Combine a classifier with per-class regressors.

    The target column is bucketed into ordinal classes ('Class 1', ...),
    a classifier is trained to predict the bucket, and one regressor is
    trained per bucket.  Prediction first picks a bucket, then applies
    that bucket's regressor.

    NOTE(review): depends on the `processing`, `training`, `analysis`
    and `metric` helpers; `training`/`analysis`/model factories are
    presumably pulled in via `from models import *` — confirm.
    """
    def __init__(self, data = None, X_cols = None,
                 y_col = None, test_size = 0.3,
                 validation_size = 0.2, epochs = 5,
                 metrics = 'wmape'):
        # Raw configuration, consumed by fitted()/performance().
        self.data = data
        self.X_cols = X_cols
        self.y_col = y_col
        self.test_size = test_size
        self.validation_size = validation_size
        self.epochs = epochs
        self.metrics = metrics
        # Populated by fitted().
        self.test_X = None
        self.test_y = None
        self.classifier = None
        self.regressor = None
        self.mets = None

    def fitted(self):
        """Run model selection over `epochs` rounds and fit the winners.

        Splits `self.data` into train/test, repeatedly trains candidate
        classifier/regressor combinations on sub-splits, picks the best
        (via analysis.analyse), then refits the winning pair on the full
        training partition.  Results are stored on the instance
        (classifier, regressor, n_classes, acc_vals, test_X/test_y).
        """
        data = self.data
        X_cols = self.X_cols
        y_col = self.y_col
        test_size = self.test_size
        validation_size = self.validation_size
        epochs = self.epochs
        metrics = self.metrics
        mape_vals = []
        epoch_num = 0
        X = data[X_cols]
        y = pd.DataFrame(data[y_col])
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            y,
                                                            test_size = test_size,
                                                            random_state = 0)
        # Impute missing values in the held-out target with the median.
        y_test = list(processing.avgfit(list(y_test[y_col])))
        # Rebuild the training partition as a fresh, 0-indexed DataFrame.
        dataset = []
        for i in range(len(X_train)):
            dataset.append(list(X_train.iloc[i]))
        dataset = pd.DataFrame(dataset)
        cols = []
        for i in X_cols:
            cols.append(i)
        cols.append(y_col)
        dataset[y_col] = y_train
        dataset.columns = cols
        self.test_X = X_test
        self.test_y = y_test
        self.train_X = X_train
        self.train_y = y_train
        # Each epoch re-splits the training data with a random seed and
        # scores every candidate model combination.
        # NOTE(review): X_train/X_test/y_train/y_test are re-bound inside
        # this loop, shadowing the outer hold-out split — confirm intended.
        for random_state in np.random.randint(0, 10000, epochs):
            epoch_num = epoch_num + 1
            X,y,n_classes = processing.split(dataset,
                                             X_cols,
                                             y_col)
            X_train, X_test, y_train, y_test = processing.train_test(X,
                                                                     y,
                                                                     validation_size,
                                                                     random_state)
            X_train_list, y_train_list = processing.dataset_split_class(X_train,
                                                                        y_train[1],
                                                                        y,
                                                                        len(y),
                                                                        n_classes,
                                                                        'train')
            epoch = str(epoch_num) + '/' + str(epochs)
            print(' ')
            print("Epoch " + epoch + ' :')
            acc_conf, clf, reg = training.train(y, y_col,
                                                X_train, y_train,
                                                X_train_list, y_train_list,
                                                X_test, y_test,
                                                n_classes, random_state, metrics)
            for acc_ in acc_conf:
                mape_vals.append(acc_)
        # Pick the best classifier index c and regressor index r over all
        # epochs; clf/reg themselves come from the *last* epoch.
        acc_vals, c, r = analysis.analyse(mape_vals, epochs)
        self.acc_vals = acc_vals
        classifier = clf[c]
        regressor = []
        for i in range(n_classes):
            regressor.append(reg[i][r])
        # Refit the winners on the full training partition.
        X_train = self.train_X
        y_train = self.train_y
        train = X_train
        train[y_col] = y_train
        X_train,y_train,n_classes = processing.split(train,
                                                     X_cols,
                                                     y_col)
        classifier.fit(X_train, pd.DataFrame(y_train[1]))
        X_train = processing.rem_col_name(X_train)
        y_train = processing.rem_col_name(y_train)
        X_train.columns = X_cols
        #y_train.columns = [y_col]
        X_train_list, y_train_list = processing.dataset_split_class(X_train,
                                                                    y_train[1],
                                                                    y,
                                                                    len(y),
                                                                    n_classes,
                                                                    'train')
        for i in range(n_classes):
            (regressor[i]).fit(X_train_list[i], y_train_list[i][0])
        self.classifier = classifier
        self.regressor = regressor
        self.n_classes = n_classes

    def fit(self, X, y, validation_size = 0.3, epochs = 1):
        """sklearn-style fit: select and train models on (X, y).

        Unlike fitted(), takes the features/target directly instead of
        reading them from `self.data`.  Always scores with 'wmape'.
        """
        X_cols = X.columns
        y_col = y.columns
        # Reset to positional indices so .loc-based bucketing lines up.
        X = processing.rem_col_name(X)
        y = processing.rem_col_name(y)
        X.columns = X_cols
        y.columns = y_col
        dataset = X
        dataset[y_col] = y
        epoch_num = 0
        mape_vals = []
        for random_state in np.random.randint(0, 10000, epochs):
            epoch_num = epoch_num + 1
            X,y,n_classes = processing.split(dataset,
                                             X_cols,
                                             y_col)
            X_train, X_test, y_train, y_test = processing.train_test(X,
                                                                     y,
                                                                     validation_size,
                                                                     random_state)
            X_train_list, y_train_list = processing.dataset_split_class(X_train,
                                                                        y_train[1],
                                                                        pd.DataFrame(y),
                                                                        len(y),
                                                                        n_classes,
                                                                        'train')
            epoch = str(epoch_num) + '/' + str(epochs)
            print(' ')
            print("Epoch " + epoch + ' :')
            metrics = 'wmape'
            acc_conf, clf, reg = training.train(y, y_col,
                                                X_train, y_train,
                                                X_train_list, y_train_list,
                                                X_test, y_test,
                                                n_classes, random_state, metrics)
            for acc_ in acc_conf:
                mape_vals.append(acc_)
        # Best classifier/regressor indices across epochs (candidates
        # themselves come from the last epoch).
        acc_vals, c, r = analysis.analyse(mape_vals, epochs)
        self.acc_vals = acc_vals
        classifier = clf[c]
        regressor = []
        for i in range(n_classes):
            regressor.append(reg[i][r])
        # Refit winners on the full dataset.
        X_train,y_train,n_classes = processing.split(dataset, X_cols, y_col)
        classifier.fit(X_train, pd.DataFrame(y_train[1]))
        X_train.columns = X_cols
        X_train_list, y_train_list = processing.dataset_split_class(X_train,
                                                                    y_train[1],
                                                                    y,
                                                                    len(y),
                                                                    n_classes,
                                                                    'train')
        for i in range(n_classes):
            (regressor[i]).fit(X_train_list[i], y_train_list[i][0])
        self.classifier = classifier
        self.regressor = regressor
        self.n_classes = n_classes

    def predict(self, X):
        """Predict targets for a DataFrame (row-wise) or a single sample.

        Returns a list of predictions for a DataFrame input, or a single
        scalar for an array-like single sample.
        """
        clf = self.classifier
        reg = self.regressor
        if isinstance(X, pd.DataFrame):
            pred = []
            for i in range(len(X)):
                arr = list(X.iloc[i])
                pred.append(class_reg.pred(clf, reg, arr))
        else:
            X = ((np.array(X).reshape(1,-1)))
            # Classifier emits a label like 'Class 3'; extract the number
            # to choose the matching regressor.
            clf_pred = (clf.predict(X))[0]
            class_ = ([int(s) for s in clf_pred.split() if s.isdigit()])[0]
            pred = (reg[class_ - 1].predict(X))[0]
        return(pred)

    @classmethod
    def pred(self, clf, reg, X):
        """Predict one sample: classify into a bucket, then regress."""
        X = ((np.array(X).reshape(1,-1)))
        clf_pred = (clf.predict(X))[0]
        # Parse the bucket number out of the 'Class k' label.
        class_ = ([int(s) for s in clf_pred.split() if s.isdigit()])[0]
        pred = (reg[class_ - 1].predict(X))[0]
        return(pred)

    def performance(self):
        """Estimate WMAPE and a normalized squared error over 20 resplits.

        Returns a dict {'WMAPE': ..., 'MSE': ...} and caches it in
        `self.mets`.  Refits the stored classifier/regressors on each
        random split of `self.data`.
        """
        clf = self.classifier
        reg = self.regressor
        data = self.data
        X_cols = self.X_cols
        y_col = self.y_col
        test_size = self.test_size
        X,y,n_classes = processing.split(data,
                                         X_cols,
                                         y_col)
        mape_list = []
        mse_list = []
        for random_state in np.random.randint(0, 10000, 20):
            X_train, X_test, y_train, y_test = processing.train_test(X,
                                                                     y,
                                                                     test_size,
                                                                     random_state)
            X_train_list, y_train_list = processing.dataset_split_class(X_train,
                                                                        y_train[1],
                                                                        y,
                                                                        len(y),
                                                                        n_classes,
                                                                        'train')
            classi = clf
            classi.fit(X_train, y_train[1])
            regr = []
            for i in range(n_classes):
                regre_ = reg[i]
                regre_.fit(X_train_list[i], y_train_list[i][0])
                regr.append(regre_)
            pred = []
            for i in range(len(X_test)):
                arr = list(X_test.iloc[i])
                pred.append(class_reg.pred(classi, regr, arr))
            mape = metric.wmape(list(y_test[0]), list(pred))
            # NOTE(review): squared=False already yields RMSE, so the
            # np.sqrt below takes a second root before min-max scaling —
            # confirm this is intentional.
            mse = mean_squared_error(list(y_test[0]),
                                     pred,
                                     squared = False)
            mse = (np.sqrt(mse) - min(y_test[0]))/((max(y_test[0])) - min(y_test[0]))
            mse = mse**2
            mape_list.append(mape)
            mse_list.append(mse)
        mape = sum(mape_list)/len(mape_list)
        mse = sum(mse_list)/len(mse_list)
        mets = {'WMAPE' : mape, 'MSE' : mse}
        self.mets = mets
        return(mets)
class processing(object):
    """Data-preparation helpers for class_reg.

    All methods are classmethods.  A few also store their inputs/outputs
    as class attributes; those side effects are kept for backward
    compatibility with any external readers.
    """

    @classmethod
    def avgfit(cls, l):
        """Return *l* with every missing value replaced by the median.

        Parameters
        ----------
        l : sequence
            Numeric values, possibly containing None/NaN.

        Returns
        -------
        list
            Same length as *l*, missing entries imputed with the median
            of the present values (median was chosen over the mean for
            robustness to outliers).

        Raises
        ------
        statistics.StatisticsError
            If *l* contains no non-missing values.
        """
        cls.l = l  # kept: historical class-level cache
        na = pd.isna(l)
        present = [l[i] for i in range(len(l)) if not na[i]]
        avg = statistics.median(present)
        fit_arr = [avg if na[i] else l[i] for i in range(len(l))]
        cls.fit_arr = fit_arr  # kept: historical class-level cache
        return fit_arr

    @classmethod
    def class_split(cls, l, l_):
        """Bucket the values of *l* into equal-size ordinal classes.

        Parameters
        ----------
        l : list
            Target values in original order.
        l_ : list
            The same values sorted ascending (its length sets the bin
            count: 5 up to 1000 rows, 10 up to 10000, else 100).

        Returns
        -------
        (list, int)
            Per-element 'Class k' labels (c_list permuted by
            np.argsort(l), as in the original implementation) and the
            number of classes.
        """
        cls.l = l_  # kept: historical side effect (last assignment wins)
        length = len(l_)
        if length <= 1000:
            n_classes = 5
        elif length <= 10000:
            n_classes = 10
        else:
            n_classes = 100
        class_size = int(length / n_classes)
        # The previous implementation first built `indices` with an
        # O(n^2) l_.index() loop and then immediately overwrote it with
        # np.argsort(l); only the argsort result was ever used.
        indices = list(np.argsort(l))
        c_list = []
        for j in range(1, n_classes + 1):
            c_list.extend(['Class ' + str(j)] * class_size)
        # Pad the division remainder with the last label.
        l_diff = length - len(c_list)
        if l_diff:
            c_list.extend([c_list[-1]] * l_diff)
        class_list = [c_list[i] for i in indices]
        return (class_list, n_classes)

    @classmethod
    def class_weight(cls, arr):
        """Return a {label: occurrence-count} dict for *arr*."""
        values = list(arr)
        return {label: values.count(label) for label in set(values)}

    @classmethod
    def dataset_split_class(cls, X, y, Y, size, n_classes, mode):
        """Partition rows by the class number embedded in 'Class k' labels.

        Parameters
        ----------
        X : pandas.DataFrame
            Feature rows, indexed compatibly with the collected indices.
        y : labels ('train': y[i] is a 'Class k' string; 'test': y[0][i]
            is the label and y[1][i] the value to collect).
        Y : pandas.DataFrame
            Frame sliced with .loc to produce the per-class targets.
        size : int
            Number of rows to scan.
        n_classes : int
            Number of class buckets.
        mode : str
            'train' or 'test' (anything else yields empty buckets).

        Returns
        -------
        (list, list)
            Per-class X and Y selections (one entry per class).
        """
        buckets = [[] for _ in range(n_classes)]
        if mode == 'train':
            for i in range(size):
                try:
                    label = y[i]
                    class_num = [int(s) for s in label.split() if s.isdigit()][0]
                    buckets[class_num - 1].append(i)
                except Exception:
                    # Best effort: rows with missing/unparsable labels are skipped.
                    continue
        elif mode == 'test':
            for i in range(size):
                try:
                    label = y[0][i]
                    class_num = [int(s) for s in label.split() if s.isdigit()][0]
                    buckets[class_num - 1].append(y[1][i])
                except Exception:
                    continue
        X_ = [X.loc[buckets[i]] for i in range(n_classes)]
        y_ = [Y.loc[buckets[i]] for i in range(n_classes)]
        return (X_, y_)

    @classmethod
    def rem_col_name(cls, df):
        """Return a copy of *df* re-indexed with positional 0..n-1 columns."""
        rows = [list(df.iloc[i]) for i in range(len(df))]
        return pd.DataFrame(rows)

    @classmethod
    def tolist(cls, df):
        """Flatten a one-column DataFrame (column label 0) into a list."""
        return [df.iloc[i][0] for i in range(len(df))]

    @classmethod
    def split(cls, dataset, X_cols, y_col):
        """Split *dataset* into features X and a labeled target frame.

        Returns (X, y, n_classes) where y has column 0 = imputed values
        and column 1 = 'Class k' labels.
        """
        try:
            y_d = cls.tolist(dataset[y_col])
        except Exception:
            # dataset[y_col] is a Series rather than a one-column frame.
            y_d = list(dataset[y_col])
        X = dataset[X_cols]
        y, n_classes = cls.transform(y_d)
        return (X, y, n_classes)

    @classmethod
    def train_test(cls, X, y, test_size = 0.3, random_state = 0):
        """Thin wrapper over sklearn's train_test_split."""
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            y,
                                                            test_size = test_size,
                                                            random_state = random_state)
        return (X_train, X_test, y_train, y_test)

    @classmethod
    def transform(cls, data):
        """Impute *data* and attach rank-based class labels.

        Returns (y, n_classes) with y[0] = imputed values and
        y[1] = 'Class k' labels.
        """
        cls.data = data  # kept: historical class-level cache
        values = cls.avgfit(data)
        c_list, n_classes = cls.class_split(values, sorted(values))
        y = pd.DataFrame()
        y[0] = values
        y[1] = c_list
        return (y, n_classes)
class metric(object):
    """Scoring metrics shared by the training and evaluation loops."""

    @classmethod
    def wmape(cls, y_true, y_pred):
        """Weighted mean absolute percentage error, as a percentage."""
        y_true, y_pred = list(y_true), list(y_pred)
        numerator = sum(abs(y_pred[i] - y_true[i]) for i in range(len(y_true)))
        denominator = sum(y_true)
        return abs(numerator / denominator) * 100

    @classmethod
    def rmse(cls, y_true, y_pred):
        """Root mean squared error (sklearn, squared=False)."""
        return mean_squared_error(list(y_true), list(y_pred), squared = False)

    @classmethod
    def me_ae(cls, y_true, y_pred):
        """Median absolute error."""
        return median_absolute_error(list(y_true), list(y_pred))

    @classmethod
    def mae(cls, y_true, y_pred):
        """Mean absolute error."""
        return mean_absolute_error(list(y_true), list(y_pred))
class training(object):
@classmethod
def train(self, y, y_col,
X_train, y_train,
X_train_list, y_train_list,
X_test, y_test, n_classes,
random_state, metrics):
Classifiers = models.classifiers(X_train,pd.DataFrame(y_train[1]))
Regressors = []
for i in range(n_classes):
Regressors.append(models.regressors(X_train_list[i],
pd.DataFrame(y_train_list[i][0])))
Regressors_ = list(map(list, itertools.zip_longest(*Regressors,
fillvalue=None)))
acc_conf = []
for clf in tqdm(Classifiers, leave = True):
try:
classifier = clf
# | |
import boto3
from time import time
import torch
from torch.nn.functional import conv2d
import json
import tenacity
import operator
import numpy as np
from copy import deepcopy
from os.path import join
from functools import partial
from mipless_cloudvolume import deserialize_miplessCV as DCV
from cloudvolume import Storage
from cloudvolume.lib import scatter
from boundingbox import BoundingBox, deserialize_bbox
from fcorr import fcorr_conjunction
from scipy import ndimage
from taskqueue import RegisteredTask, TaskQueue, LocalTaskQueue, GreenTaskQueue
from concurrent.futures import ProcessPoolExecutor
# from taskqueue.taskqueue import _scatter as scatter
def remote_upload(queue_name, ptasks):
    """Insert every task in *ptasks* into the named remote TaskQueue."""
    with TaskQueue(queue_name=queue_name) as queue:
        for ptask in ptasks:
            queue.insert(ptask)
def green_upload(ptask, aligner):
    """Dispatch *ptask* via a green queue when distributed, else locally."""
    if aligner.distributed:
        queue = GreenTaskQueue(aligner.queue_name)
        queue.insert_all(ptask, parallel=aligner.threads)
        return
    queue = LocalTaskQueue(parallel=1)
    queue.insert_all(ptask, args=[aligner])
def run(aligner, tasks):
    """Fan *tasks* out to remote workers when distributed, else run inline."""
    if not aligner.distributed:
        with LocalTaskQueue(queue_name=aligner.queue_name, parallel=1) as queue:
            for task in tasks:
                queue.insert(task, args=[aligner])
        return
    # Distributed: shard the task list and upload shards in parallel.
    shards = scatter(tasks, aligner.threads)
    upload = partial(remote_upload, aligner.queue_name)
    with ProcessPoolExecutor(max_workers=aligner.threads) as executor:
        executor.map(upload, shards)
class PredictImageTask(RegisteredTask):
    """Run a model over one image chunk and write the prediction.

    Constructor arguments are serialized by RegisteredTask and
    rehydrated inside execute() on the worker.
    """
    def __init__(self, model_path, src_cv, dst_cv, z, mip, bbox):
        super().__init__(model_path, src_cv, dst_cv, z, mip, bbox)

    def execute(self, aligner):
        # Rehydrate CloudVolume handles and the chunk bounding box.
        src_cv = DCV(self.src_cv)
        dst_cv = DCV(self.dst_cv)
        z = self.z
        patch_bbox = deserialize_bbox(self.bbox)
        mip = self.mip
        print("\nPredict Image\n"
              "src {}\n"
              "dst {}\n"
              "at z={}\n"
              "MIP{}\n".format(src_cv, dst_cv, z, mip), flush=True)
        start = time()
        image = aligner.predict_image_chunk(self.model_path, src_cv, z, mip, patch_bbox)
        image = image.cpu().numpy()  # move off the device before saving
        aligner.save_image(image, dst_cv, z, patch_bbox, mip)
        end = time()
        diff = end - start
        print(':{:.3f} s'.format(diff))
class CopyTask(RegisteredTask):
    """Copy one chunk from src_cv to dst_cv.

    Depending on flags, the chunk is copied as a vector field
    (is_field), as a masked uint8 image (to_uint8), or as raw data.
    """
    def __init__(self, src_cv, dst_cv, src_z, dst_z, patch_bbox, mip,
                 is_field, to_uint8, mask_cv, mask_mip, mask_val):
        super().__init__(src_cv, dst_cv, src_z, dst_z, patch_bbox, mip,
                         is_field, to_uint8, mask_cv, mask_mip, mask_val)

    def execute(self, aligner):
        src_cv = DCV(self.src_cv)
        dst_cv = DCV(self.dst_cv)
        src_z = self.src_z
        dst_z = self.dst_z
        patch_bbox = deserialize_bbox(self.patch_bbox)
        mip = self.mip
        is_field = self.is_field
        to_uint8 = self.to_uint8
        # The mask volume is optional.
        mask_cv = None
        if self.mask_cv:
            mask_cv = DCV(self.mask_cv)
        mask_mip = self.mask_mip
        mask_val = self.mask_val
        print("\nCopy\n"
              "src {}\n"
              "dst {}\n"
              "mask {}, val {}, MIP{}\n"
              "z={} to z={}\n"
              "MIP{}\n".format(src_cv, dst_cv, mask_cv, mask_val, mask_mip,
                               src_z, dst_z, mip), flush=True)
        start = time()
        if not aligner.dry_run:
            if is_field:
                # Vector fields use the field-specific load/save API.
                field = aligner.get_field(src_cv, src_z, patch_bbox, mip, relative=False,
                                          to_tensor=False)
                aligner.save_field(field, dst_cv, dst_z, patch_bbox, mip, relative=False)
            elif to_uint8:
                image = aligner.get_masked_image(src_cv, src_z, patch_bbox, mip,
                                                 mask_cv=mask_cv, mask_mip=mask_mip,
                                                 mask_val=mask_val,
                                                 to_tensor=False, normalizer=None)
                aligner.save_image(image, dst_cv, dst_z, patch_bbox, mip, to_uint8=True)
            else:
                image = aligner.get_data(src_cv, src_z, patch_bbox, mip, mip, to_float=False,
                                         to_tensor=False, normalizer=None)
                aligner.save_image(image, dst_cv, dst_z, patch_bbox, mip, to_uint8=False)
        end = time()
        diff = end - start
        print(':{:.3f} s'.format(diff))
class ComputeFieldTask(RegisteredTask):
    """Estimate the alignment vector field between a src and tgt chunk
    and store it in field_cv at src_z."""
    def __init__(self, model_path, src_cv, tgt_cv, field_cv, src_z, tgt_z,
                 patch_bbox, mip, pad, src_mask_cv, src_mask_val, src_mask_mip,
                 tgt_mask_cv, tgt_mask_val, tgt_mask_mip,
                 prev_field_cv, prev_field_z, prev_field_inverse):
        super().__init__(model_path, src_cv, tgt_cv, field_cv, src_z, tgt_z,
                         patch_bbox, mip, pad, src_mask_cv, src_mask_val, src_mask_mip,
                         tgt_mask_cv, tgt_mask_val, tgt_mask_mip,
                         prev_field_cv, prev_field_z, prev_field_inverse)

    def execute(self, aligner):
        model_path = self.model_path
        src_cv = DCV(self.src_cv)
        tgt_cv = DCV(self.tgt_cv)
        field_cv = DCV(self.field_cv)
        # The previous field is optional.
        if self.prev_field_cv is not None:
            prev_field_cv = DCV(self.prev_field_cv)
        else:
            prev_field_cv = None
        src_z = self.src_z
        tgt_z = self.tgt_z
        prev_field_z = self.prev_field_z
        prev_field_inverse = self.prev_field_inverse
        patch_bbox = deserialize_bbox(self.patch_bbox)
        mip = self.mip
        pad = self.pad
        # Source and target masks are optional too.
        src_mask_cv = None
        if self.src_mask_cv:
            src_mask_cv = DCV(self.src_mask_cv)
        src_mask_mip = self.src_mask_mip
        src_mask_val = self.src_mask_val
        tgt_mask_cv = None
        if self.tgt_mask_cv:
            tgt_mask_cv = DCV(self.tgt_mask_cv)
        tgt_mask_mip = self.tgt_mask_mip
        tgt_mask_val = self.tgt_mask_val
        print("\nCompute field\n"
              "model {}\n"
              "src {}\n"
              "tgt {}\n"
              "field {}\n"
              "src_mask {}, val {}, MIP{}\n"
              "tgt_mask {}, val {}, MIP{}\n"
              "z={} to z={}\n"
              "MIP{}\n".format(model_path, src_cv, tgt_cv, field_cv, src_mask_cv, src_mask_val,
                               src_mask_mip, tgt_mask_cv, tgt_mask_val, tgt_mask_mip,
                               src_z, tgt_z, mip), flush=True)
        start = time()
        if not aligner.dry_run:
            field = aligner.compute_field_chunk(model_path, src_cv, tgt_cv, src_z, tgt_z,
                                                patch_bbox, mip, pad,
                                                src_mask_cv, src_mask_mip, src_mask_val,
                                                tgt_mask_cv, tgt_mask_mip, tgt_mask_val,
                                                None, prev_field_cv, prev_field_z,
                                                prev_field_inverse)
            aligner.save_field(field, field_cv, src_z, patch_bbox, mip, relative=False)
        end = time()
        diff = end - start
        print('ComputeFieldTask: {:.3f} s'.format(diff))
class RenderTask(RegisteredTask):
    """Warp a src chunk with a vector field and write the rendered image."""
    def __init__(self, src_cv, field_cv, dst_cv, src_z, field_z, dst_z, patch_bbox, src_mip,
                 field_mip, mask_cv, mask_mip, mask_val, affine, use_cpu=False):
        super().__init__(src_cv, field_cv, dst_cv, src_z, field_z, dst_z, patch_bbox, src_mip,
                         field_mip, mask_cv, mask_mip, mask_val, affine, use_cpu)

    def execute(self, aligner):
        src_cv = DCV(self.src_cv)
        field_cv = DCV(self.field_cv)
        dst_cv = DCV(self.dst_cv)
        src_z = self.src_z
        field_z = self.field_z
        dst_z = self.dst_z
        patch_bbox = deserialize_bbox(self.patch_bbox)
        src_mip = self.src_mip
        field_mip = self.field_mip
        # Optional mask and optional affine pre-transform.
        mask_cv = None
        if self.mask_cv:
            mask_cv = DCV(self.mask_cv)
        mask_mip = self.mask_mip
        mask_val = self.mask_val
        affine = None
        if self.affine:
            affine = np.array(self.affine)
        print("\nRendering\n"
              "src {}\n"
              "field {}\n"
              "dst {}\n"
              "z={} to z={}\n"
              "MIP{} to MIP{}\n"
              "\n".format(src_cv.path, field_cv.path, dst_cv.path, src_z, dst_z,
                          field_mip, src_mip), flush=True)
        start = time()
        if not aligner.dry_run:
            image = aligner.cloudsample_image(src_cv, field_cv, src_z, field_z,
                                              patch_bbox, src_mip, field_mip,
                                              mask_cv=mask_cv, mask_mip=mask_mip,
                                              mask_val=mask_val, affine=affine,
                                              use_cpu=self.use_cpu)
            image = image.cpu().numpy()  # move off the device before saving
            aligner.save_image(image, dst_cv, dst_z, patch_bbox, src_mip)
        end = time()
        diff = end - start
        print('RenderTask: {:.3f} s'.format(diff))
class VectorVoteTask(RegisteredTask):
    """Combine pairwise fields for one chunk via vector voting and save
    the consensus field."""
    def __init__(self, pairwise_cvs, vvote_cv, z, patch_bbox, mip, inverse, serial,
                 softmin_temp, blur_sigma):
        super().__init__(pairwise_cvs, vvote_cv, z, patch_bbox, mip, inverse, serial,
                         softmin_temp, blur_sigma)

    def execute(self, aligner):
        # pairwise_cvs arrives with string keys after serialization.
        pairwise_cvs = {int(k): DCV(v) for k,v in self.pairwise_cvs.items()}
        vvote_cv = DCV(self.vvote_cv)
        z = self.z
        patch_bbox = deserialize_bbox(self.patch_bbox)
        mip = self.mip
        inverse = bool(self.inverse)
        serial = bool(self.serial)
        softmin_temp = self.softmin_temp
        blur_sigma = self.blur_sigma
        print("\nVector vote\n"
              "fields {}\n"
              "dst {}\n"
              "z={}\n"
              "MIP{}\n"
              "inverse={}\n"
              "serial={}\n"
              "softmin_temp={}\n"
              "blur_sigma={}\n".format(pairwise_cvs.keys(), vvote_cv, z,
                                       mip, inverse, serial, softmin_temp,
                                       blur_sigma), flush=True)
        start = time()
        if not aligner.dry_run:
            field = aligner.vector_vote_chunk(pairwise_cvs, vvote_cv, z, patch_bbox, mip,
                                              inverse=inverse, serial=serial,
                                              softmin_temp=softmin_temp, blur_sigma=blur_sigma)
            field = field.data.cpu().numpy()
            aligner.save_field(field, vvote_cv, z, patch_bbox, mip, relative=False)
        end = time()
        diff = end - start
        print('VectorVoteTask: {:.3f} s'.format(diff))
class CloudComposeTask(RegisteredTask):
    """Compose two cloud-stored fields (f o g) for one chunk and save
    the result at dst_mip."""
    def __init__(self, f_cv, g_cv, dst_cv, f_z, g_z, dst_z, patch_bbox, f_mip, g_mip,
                 dst_mip, factor, affine, pad):
        super().__init__(f_cv, g_cv, dst_cv, f_z, g_z, dst_z, patch_bbox, f_mip, g_mip,
                         dst_mip, factor, affine, pad)

    def execute(self, aligner):
        f_cv = DCV(self.f_cv)
        g_cv = DCV(self.g_cv)
        dst_cv = DCV(self.dst_cv)
        f_z = self.f_z
        g_z = self.g_z
        dst_z = self.dst_z
        patch_bbox = deserialize_bbox(self.patch_bbox)
        f_mip = self.f_mip
        g_mip = self.g_mip
        dst_mip = self.dst_mip
        factor = self.factor
        pad = self.pad
        # Optional affine pre-transform.
        affine = None
        if self.affine:
            affine = np.array(self.affine)
        print("\nCompose\n"
              "f {}\n"
              "g {}\n"
              "f_z={}, g_z={}\n"
              "f_MIP{}, g_MIP{}\n"
              "dst {}\n"
              "dst_MIP {}\n".format(f_cv, g_cv, f_z, g_z, f_mip, g_mip, dst_cv,
                                    dst_mip), flush=True)
        start = time()
        if not aligner.dry_run:
            h = aligner.cloudsample_compose(f_cv, g_cv, f_z, g_z, patch_bbox, f_mip,
                                            g_mip, dst_mip, factor=factor,
                                            affine=affine, pad=pad)
            h = h.data.cpu().numpy()
            aligner.save_field(h, dst_cv, dst_z, patch_bbox, dst_mip, relative=False)
        end = time()
        diff = end - start
        print('ComposeTask: {:.3f} s'.format(diff))
class CloudMultiComposeTask(RegisteredTask):
    """Compose a list of cloud-stored fields for one chunk and save the
    result at dst_mip."""
    def __init__(self, cv_list, dst_cv, z_list, dst_z, patch_bbox, mip_list,
                 dst_mip, factors, pad):
        super().__init__(cv_list, dst_cv, z_list, dst_z, patch_bbox, mip_list,
                         dst_mip, factors, pad)

    def execute(self, aligner):
        cv_list = [DCV(f) for f in self.cv_list]
        dst_cv = DCV(self.dst_cv)
        z_list = self.z_list
        dst_z = self.dst_z
        patch_bbox = deserialize_bbox(self.patch_bbox)
        mip_list = self.mip_list
        dst_mip = self.dst_mip
        factors = self.factors
        pad = self.pad
        print("\nCompose\n"
              "cv {}\n"
              "z={}\n"
              "MIPs={}\n"
              "dst {}\n"
              "dst_MIP {}\n"
              .format(cv_list, z_list, mip_list, dst_cv, dst_mip),
              flush=True)
        start = time()
        if not aligner.dry_run:
            h = aligner.cloudsample_multi_compose(cv_list, z_list, patch_bbox,
                                                  mip_list, dst_mip, factors,
                                                  pad)
            h = h.data.cpu().numpy()
            aligner.save_field(h, dst_cv, dst_z, patch_bbox, dst_mip,
                               relative=False)
        end = time()
        diff = end - start
        print('MultiComposeTask: {:.3f} s'.format(diff))
class CPCTask(RegisteredTask):
    """Compute a chunked pearson correlation (cpc_chunk) between a src
    and tgt chunk and save the result image.

    Note: unlike the other tasks, this one does not print timing info.
    """
    def __init__(self, src_cv, tgt_cv, dst_cv, src_z, tgt_z, patch_bbox,
                 src_mip, dst_mip, norm):
        super().__init__(src_cv, tgt_cv, dst_cv, src_z, tgt_z, patch_bbox,
                         src_mip, dst_mip, norm)

    def execute(self, aligner):
        src_cv = DCV(self.src_cv)
        tgt_cv = DCV(self.tgt_cv)
        dst_cv = DCV(self.dst_cv)
        src_z = self.src_z
        tgt_z = self.tgt_z
        patch_bbox = deserialize_bbox(self.patch_bbox)
        src_mip = self.src_mip
        dst_mip = self.dst_mip
        norm = self.norm
        print("\nCPC\n"
              "src {}\n"
              "tgt {}\n"
              "src_z={}, tgt_z={}\n"
              "src_MIP{} to dst_MIP{}\n"
              "norm={}\n"
              "dst {}\n".format(src_cv, tgt_cv, src_z, tgt_z, src_mip, dst_mip, norm,
                                dst_cv), flush=True)
        if not aligner.dry_run:
            r = aligner.cpc_chunk(src_cv, tgt_cv, src_z, tgt_z, patch_bbox, src_mip,
                                  dst_mip, norm)
            r = r.cpu().numpy()
            # When the result is normalized it is written as uint8.
            aligner.save_image(r, dst_cv, src_z, patch_bbox, dst_mip, to_uint8=norm)
class BatchRenderTask(RegisteredTask):
    """Warp a batch of consecutive sections with one field and write the
    rendered images, chunk by chunk via the aligner's pool."""
    def __init__(
        self, z, field_cv, field_z, patches,
        mip, dst_cv, dst_z, batch
    ):
        super().__init__(
            z, field_cv, field_z, patches,
            mip, dst_cv, dst_z, batch
        )
        #self.patches = [p.serialize() for p in patches]

    def execute(self, aligner):
        src_z = self.z
        patches = [deserialize_bbox(p) for p in self.patches]
        batch = self.batch
        field_cv = DCV(self.field_cv)
        mip = self.mip
        field_z = self.field_z
        dst_cv = DCV(self.dst_cv)
        dst_z = self.dst_z
        def chunkwise(patch_bbox):
            # One chunk: warp the batch, then save sections
            # [dst_z, dst_z + batch).
            print ("Rendering {} at mip {}".format(patch_bbox.__str__(mip=0), mip),
                   end='', flush=True)
            warped_patch = aligner.warp_patch_batch(src_z, field_cv, field_z,
                                                    patch_bbox, mip, batch)
            aligner.save_image_patch_batch(dst_cv, (dst_z, dst_z + batch),
                                           warped_patch, patch_bbox, mip)
        aligner.pool.map(chunkwise, patches)
class DownsampleTask(RegisteredTask):
def __init__(self, cv, | |
# Repository: FowlerLab/hgvs-patterns
import unittest
import re
from mavehgvs.patterns.dna import (
dna_equal_c,
dna_equal_n,
dna_equal_gmo,
dna_sub_c,
dna_sub_n,
dna_sub_gmo,
dna_del_c,
dna_del_n,
dna_del_gmo,
dna_dup_c,
dna_dup_n,
dna_dup_gmo,
dna_ins_c,
dna_ins_n,
dna_ins_gmo,
dna_delins_c,
dna_delins_n,
dna_delins_gmo,
dna_variant_c,
dna_variant_n,
dna_variant_gmo,
dna_single_variant,
dna_multi_variant,
)
class TestDnaEqualC(unittest.TestCase):
    """Match/reject cases for the ``dna_equal_c`` pattern."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_equal_c, flags=re.ASCII)
        cls.valid_strings = [
            "=",
            "18=",
            "10_14=",
            "122-6=",
            "*24=",
            "19+22=",
            "19+22_88=",
            "-27+3=",
        ]
        cls.invalid_strings = ["=22", "(=)", "18(=)"]

    def test_valid_strings(self):
        for case in self.valid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNotNone(result, msg=f'failed to match "{case}"')

    def test_invalid_strings(self):
        for case in self.invalid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNone(result, msg=f'incorrectly matched "{case}"')
class TestDnaEqualN(unittest.TestCase):
    """Match/reject cases for the ``dna_equal_n`` pattern."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_equal_n, flags=re.ASCII)
        cls.valid_strings = ["="]
        cls.invalid_strings = [
            "=22",
            "(=)",
            "18(=)",
            "-27+3=",
            "*24=",
            "18=",
            "10_14=",
            "122-6=",
            "19+22=",
            "19+22_88=",
        ]

    def test_valid_strings(self):
        for case in self.valid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNotNone(result, msg=f'failed to match "{case}"')

    def test_invalid_strings(self):
        for case in self.invalid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNone(result, msg=f'incorrectly matched "{case}"')
class TestDnaEqualGMO(unittest.TestCase):
    """Match/reject cases for the ``dna_equal_gmo`` pattern."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_equal_gmo, flags=re.ASCII)
        cls.valid_strings = ["=", "18=", "10_14="]
        cls.invalid_strings = [
            "=22",
            "(=)",
            "18(=)",
            "122-6=",
            "*24=",
            "19+22=",
            "19+22_88=",
            "-27+3=",
        ]

    def test_valid_strings(self):
        for case in self.valid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNotNone(result, msg=f'failed to match "{case}"')

    def test_invalid_strings(self):
        for case in self.invalid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNone(result, msg=f'incorrectly matched "{case}"')
class TestDnaSubC(unittest.TestCase):
    """Match/reject cases for the ``dna_sub_c`` pattern."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_sub_c, flags=re.ASCII)
        cls.valid_strings = ["48C>A", "122-6T>A", "*24G>C", "19+22A>G", "-27+3T>C"]
        cls.invalid_strings = ["22g>u", "48C>W", "122=/T>A"]

    def test_valid_strings(self):
        for case in self.valid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNotNone(result, msg=f'failed to match "{case}"')

    def test_invalid_strings(self):
        for case in self.invalid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNone(result, msg=f'incorrectly matched "{case}"')
class TestDnaSubN(unittest.TestCase):
    """Match/reject cases for the ``dna_sub_n`` pattern."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_sub_n, flags=re.ASCII)
        cls.valid_strings = ["48C>A", "122-6T>A", "19+22A>G"]
        cls.invalid_strings = ["22g>u", "48C>W", "122=/T>A", "*24G>C", "-27+3T>C"]

    def test_valid_strings(self):
        for case in self.valid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNotNone(result, msg=f'failed to match "{case}"')

    def test_invalid_strings(self):
        for case in self.invalid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNone(result, msg=f'incorrectly matched "{case}"')
class TestDnaSubGmo(unittest.TestCase):
    """Match/reject cases for the ``dna_sub_gmo`` pattern."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_sub_gmo, flags=re.ASCII)
        cls.valid_strings = ["48C>A"]
        cls.invalid_strings = ["122-6T>A", "22g>u", "48C>W", "22=", "122=/T>A", "0C>T"]

    def test_valid_strings(self):
        for case in self.valid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNotNone(result, msg=f'failed to match "{case}"')

    def test_invalid_strings(self):
        for case in self.invalid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNone(result, msg=f'incorrectly matched "{case}"')
class TestDnaDelC(unittest.TestCase):
    """Match/reject cases for the ``dna_del_c`` pattern."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_del_c, flags=re.ASCII)
        cls.valid_strings = [
            "44del",
            "1_95del",
            "78+5_78+10del",
            "-25+1_-25+3del",
            "*17del",
        ]
        cls.invalid_strings = [
            "(78+1_79-1)_(124+1_125-1)del",
            "(?_85)_(124_?)del",
            "122=/del",
        ]

    def test_valid_strings(self):
        for case in self.valid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNotNone(result, msg=f'failed to match "{case}"')

    def test_invalid_strings(self):
        for case in self.invalid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNone(result, msg=f'incorrectly matched "{case}"')
class TestDnaDelN(unittest.TestCase):
    """Match/reject cases for the ``dna_del_n`` pattern."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_del_n, flags=re.ASCII)
        cls.valid_strings = ["44del", "1_95del", "78+5_78+10del"]
        cls.invalid_strings = [
            "(78+1_79-1)_(124+1_125-1)del",
            "(?_85)_(124_?)del",
            "122=/del",
            "-25+1_-25+3del",
            "*17del",
        ]

    def test_valid_strings(self):
        for case in self.valid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNotNone(result, msg=f'failed to match "{case}"')

    def test_invalid_strings(self):
        for case in self.invalid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNone(result, msg=f'incorrectly matched "{case}"')
class TestDnaDelGmo(unittest.TestCase):
    """Match/reject cases for the ``dna_del_gmo`` pattern."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_del_gmo, flags=re.ASCII)
        cls.valid_strings = ["44del", "1_95del"]
        cls.invalid_strings = [
            "78+5_78+10del",
            "-25+1_-25+3del",
            "*17del",
            "(78+1_79-1)_(124+1_125-1)del",
            "(?_85)_(124_?)del",
            "122=/del",
        ]

    def test_valid_strings(self):
        for case in self.valid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNotNone(result, msg=f'failed to match "{case}"')

    def test_invalid_strings(self):
        for case in self.invalid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNone(result, msg=f'incorrectly matched "{case}"')
class TestDnaDupC(unittest.TestCase):
    """Match/reject cases for the ``dna_dup_c`` pattern."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_dup_c, flags=re.ASCII)
        cls.valid_strings = [
            "22_24dup",
            "77dup",
            "101+1_101+7dup",
            "-25+1_-25+3dup",
            "*17dup",
        ]
        cls.invalid_strings = [
            "(78+1_79-1)_(124+1_125-1)dup",
            "(?_85)_(124_?)dup",
            "122_125=//dup",
        ]

    def test_valid_strings(self):
        for case in self.valid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNotNone(result, msg=f'failed to match "{case}"')

    def test_invalid_strings(self):
        for case in self.invalid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNone(result, msg=f'incorrectly matched "{case}"')
class TestDnaDupN(unittest.TestCase):
    """Match/reject cases for the ``dna_dup_n`` pattern."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_dup_n, flags=re.ASCII)
        cls.valid_strings = ["22_24dup", "77dup", "101+1_101+7dup"]
        cls.invalid_strings = [
            "(78+1_79-1)_(124+1_125-1)dup",
            "(?_85)_(124_?)dup",
            "122_125=//dup",
            "-25+1_-25+3dup",
            "*17dup",
        ]

    def test_valid_strings(self):
        for case in self.valid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNotNone(result, msg=f'failed to match "{case}"')

    def test_invalid_strings(self):
        for case in self.invalid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNone(result, msg=f'incorrectly matched "{case}"')
class TestDnaDupGmo(unittest.TestCase):
    """Match/reject cases for the ``dna_dup_gmo`` pattern."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_dup_gmo, flags=re.ASCII)
        cls.valid_strings = ["22_24dup", "77dup"]
        cls.invalid_strings = [
            "(78+1_79-1)_(124+1_125-1)dup",
            "(?_85)_(124_?)dup",
            "122_125=//dup",
            "101+1_101+7dup",
            "-25+1_-25+3dup",
            "*17dup",
        ]

    def test_valid_strings(self):
        for case in self.valid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNotNone(result, msg=f'failed to match "{case}"')

    def test_invalid_strings(self):
        for case in self.invalid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNone(result, msg=f'incorrectly matched "{case}"')
class TestDnaInsC(unittest.TestCase):
    """Match/reject cases for the ``dna_ins_c`` pattern."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_ins_c, flags=re.ASCII)
        cls.valid_strings = [
            "234_235insT",
            "84_85insCTG",
            "*84_*85insCTG",
            "99+6_99+7insA",
            "124+100_124-100insTTG",
            "124+101_124-100insTTG",
        ]
        cls.invalid_strings = ["84_85ins100_125", "234_235ins(10)", "234_235ins(?)"]

    def test_valid_strings(self):
        for case in self.valid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNotNone(result, msg=f'failed to match "{case}"')

    def test_invalid_strings(self):
        for case in self.invalid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNone(result, msg=f'incorrectly matched "{case}"')
class TestDnaInsN(unittest.TestCase):
    """Match/reject cases for the ``dna_ins_n`` pattern."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_ins_n, flags=re.ASCII)
        cls.valid_strings = [
            "234_235insT",
            "84_85insCTG",
            "99+6_99+7insA",
            "124+100_124-100insTTG",
            "124+101_124-100insTTG",
        ]
        cls.invalid_strings = [
            "84_85ins100_125",
            "234_235ins(10)",
            "234_235ins(?)",
            "*84_*85insCTG",
        ]

    def test_valid_strings(self):
        for case in self.valid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNotNone(result, msg=f'failed to match "{case}"')

    def test_invalid_strings(self):
        for case in self.invalid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNone(result, msg=f'incorrectly matched "{case}"')
class TestDnaInsGmo(unittest.TestCase):
    """Match/reject cases for the ``dna_ins_gmo`` pattern."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_ins_gmo, flags=re.ASCII)
        cls.valid_strings = ["234_235insT", "84_85insCTG"]
        cls.invalid_strings = [
            "99+6_99+7insA",
            "84_85ins100_125",
            "234_235ins(10)",
            "234_235ins(?)",
        ]

    def test_valid_strings(self):
        for case in self.valid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNotNone(result, msg=f'failed to match "{case}"')

    def test_invalid_strings(self):
        for case in self.invalid_strings:
            with self.subTest(s=case):
                result = self.pattern.fullmatch(case)
                self.assertIsNone(result, msg=f'incorrectly matched "{case}"')
class TestDnaDelinsC(unittest.TestCase):
    """Exercise the ``dna_delins_c`` pattern against accepted and rejected strings."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_delins_c, flags=re.ASCII)
        cls.valid_strings = [
            "22delinsAACG",
            "83_85delinsT",
            "43-6_595+12delinsCTT",
            "*788delinsA",
        ]
        cls.invalid_strings = ["84_85delinsAAN", "234delinsW"]

    def test_valid_strings(self):
        for candidate in self.valid_strings:
            with self.subTest(s=candidate):
                self.assertIsNotNone(
                    self.pattern.fullmatch(candidate),
                    msg=f'failed to match "{candidate}"',
                )

    def test_invalid_strings(self):
        for candidate in self.invalid_strings:
            with self.subTest(s=candidate):
                self.assertIsNone(
                    self.pattern.fullmatch(candidate),
                    msg=f'incorrectly matched "{candidate}"',
                )
class TestDnaDelinsN(unittest.TestCase):
    """Exercise the ``dna_delins_n`` pattern against accepted and rejected strings."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_delins_n, flags=re.ASCII)
        cls.valid_strings = ["22delinsAACG", "83_85delinsT", "43-6_595+12delinsCTT"]
        # BUGFIX: a missing comma previously fused "234delinsW" and
        # "*788delinsA" into the single string "234delinsW*788delinsA"
        # (implicit string concatenation), so the UTR-position case
        # "*788delinsA" was never actually tested as invalid.
        cls.invalid_strings = ["84_85delinsAAN", "234delinsW", "*788delinsA"]

    def test_valid_strings(self):
        for s in self.valid_strings:
            with self.subTest(s=s):
                self.assertIsNotNone(
                    self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
                )

    def test_invalid_strings(self):
        for s in self.invalid_strings:
            with self.subTest(s=s):
                self.assertIsNone(
                    self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
                )
class TestDnaDelinsGmo(unittest.TestCase):
    """Exercise the ``dna_delins_gmo`` pattern against accepted and rejected strings."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_delins_gmo, flags=re.ASCII)
        cls.valid_strings = ["22delinsAACG", "83_85delinsT"]
        cls.invalid_strings = [
            "43-6_595+12delinsCTT",
            "*788delinsA",
            "84_85delinsAAN",
            "234delinsW",
        ]

    def test_valid_strings(self):
        for candidate in self.valid_strings:
            with self.subTest(s=candidate):
                self.assertIsNotNone(
                    self.pattern.fullmatch(candidate),
                    msg=f'failed to match "{candidate}"',
                )

    def test_invalid_strings(self):
        for candidate in self.invalid_strings:
            with self.subTest(s=candidate):
                self.assertIsNone(
                    self.pattern.fullmatch(candidate),
                    msg=f'incorrectly matched "{candidate}"',
                )
class TestDnaVariantC(unittest.TestCase):
    """Exercise the combined ``dna_variant_c`` pattern (sub/del/dup/ins/delins)."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_variant_c, flags=re.ASCII)
        cls.valid_strings = [
            "48C>A",
            "=",
            "22=",
            "4_6=",
            "122-6T>A",
            "*24G>C",
            "19+22A>G",
            "-27+3T>C",
            "44del",
            "1_95del",
            "78+5_78+10del",
            "-25+1_-25+3del",
            "*17del",
            "22_24dup",
            "77dup",
            "101+1_101+7dup",
            "-25+1_-25+3dup",
            "*17dup",
            "234_235insT",
            "84_85insCTG",
            "99+6_99+7insA",
            "22delinsAACG",
            "83_85delinsT",
            "43-6_595+12delinsCTT",
            "*788delinsA",
        ]
        cls.invalid_strings = [
            "22g>u",
            "48C>W",
            "122=/T>A",
            "(78+1_79-1)_(124+1_125-1)del",
            "(?_85)_(124_?)del",
            "122=/del",
            "(78+1_79-1)_(124+1_125-1)dup",
            "(?_85)_(124_?)dup",
            "122_125=//dup",
            "84_85ins100_125",
            "234_235ins(10)",
            "234_235ins(?)",
            "84_85delinsAAN",
            "234delinsW",
        ]

    def test_valid_strings(self):
        for candidate in self.valid_strings:
            with self.subTest(s=candidate):
                self.assertIsNotNone(
                    self.pattern.fullmatch(candidate),
                    msg=f'failed to match "{candidate}"',
                )

    def test_invalid_strings(self):
        for candidate in self.invalid_strings:
            with self.subTest(s=candidate):
                self.assertIsNone(
                    self.pattern.fullmatch(candidate),
                    msg=f'incorrectly matched "{candidate}"',
                )
class TestDnaVariantN(unittest.TestCase):
    """Exercise the combined ``dna_variant_n`` pattern (sub/del/dup/ins/delins)."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_variant_n, flags=re.ASCII)
        cls.valid_strings = [
            "48C>A",
            "=",
            "122-6T>A",
            "19+22A>G",
            "44del",
            "1_95del",
            "78+5_78+10del",
            "22_24dup",
            "77dup",
            "101+1_101+7dup",
            "234_235insT",
            "84_85insCTG",
            "99+6_99+7insA",
            "22delinsAACG",
            "83_85delinsT",
            "43-6_595+12delinsCTT",
        ]
        cls.invalid_strings = [
            "22=",
            "1_3=",
            "22g>u",
            "48C>W",
            "122=/T>A",
            "(78+1_79-1)_(124+1_125-1)del",
            "(?_85)_(124_?)del",
            "122=/del",
            "(78+1_79-1)_(124+1_125-1)dup",
            "(?_85)_(124_?)dup",
            "122_125=//dup",
            "84_85ins100_125",
            "234_235ins(10)",
            "234_235ins(?)",
            "84_85delinsAAN",
            "234delinsW",
            "*24G>C",
            "-27+3T>C",
            "-25+1_-25+3del",
            "*17del",
            "-25+1_-25+3dup",
            "*17dup",
            "*788delinsA",
        ]

    def test_valid_strings(self):
        for candidate in self.valid_strings:
            with self.subTest(s=candidate):
                self.assertIsNotNone(
                    self.pattern.fullmatch(candidate),
                    msg=f'failed to match "{candidate}"',
                )

    def test_invalid_strings(self):
        for candidate in self.invalid_strings:
            with self.subTest(s=candidate):
                self.assertIsNone(
                    self.pattern.fullmatch(candidate),
                    msg=f'incorrectly matched "{candidate}"',
                )
class TestDnaVariantGmo(unittest.TestCase):
    """Exercise the combined ``dna_variant_gmo`` pattern (sub/del/dup/ins/delins)."""

    @classmethod
    def setUpClass(cls):
        cls.pattern = re.compile(dna_variant_gmo, flags=re.ASCII)
        cls.valid_strings = [
            "48C>A",
            "=",
            "22=",
            "1_3=",
            "44del",
            "1_95del",
            "22_24dup",
            "77dup",
            "234_235insT",
            "84_85insCTG",
            "22delinsAACG",
            "83_85delinsT",
        ]
        cls.invalid_strings = [
            "43-6_595+12delinsCTT",
            "*788delinsA",
            "99+6_99+7insA",
            "101+1_101+7dup",
            "-25+1_-25+3dup",
            "*17dup",
            "78+5_78+10del",
            "-25+1_-25+3del",
            "*17del",
            "*24G>C",
            "19+22A>G",
            "122-6T>A",
            "-27+3T>C",
            "22g>u",
            "48C>W",
            "122=/T>A",
            "(78+1_79-1)_(124+1_125-1)del",
            "(?_85)_(124_?)del",
            "122=/del",
            "(78+1_79-1)_(124+1_125-1)dup",
            "(?_85)_(124_?)dup",
            "122_125=//dup",
            "84_85ins100_125",
            "234_235ins(10)",
            "234_235ins(?)",
            "84_85delinsAAN",
            "234delinsW",
        ]

    def test_valid_strings(self):
        for candidate in self.valid_strings:
            with self.subTest(s=candidate):
                self.assertIsNotNone(
                    self.pattern.fullmatch(candidate),
                    msg=f'failed to match "{candidate}"',
                )

    def test_invalid_strings(self):
        for candidate in self.invalid_strings:
            with self.subTest(s=candidate):
                self.assertIsNone(
                    self.pattern.fullmatch(candidate),
                    msg=f'incorrectly matched "{candidate}"',
                )
class TestDnaSingleVariant(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Compile once for all tests; ASCII keeps \d etc. byte-oriented.
        cls.pattern = re.compile(dna_single_variant, flags=re.ASCII)
        # Strings expected to match under every coordinate prefix (c./n./g./m./o.).
        cls.valid_strings = [
            "48C>A",
            "=",
            "44del",
            "1_95del",
            "22_24dup",
            "77dup",
            "234_235insT",
            "84_85insCTG",
            "22delinsAACG",
            "83_85delinsT",
        ]
        # Only valid with the c. prefix (UTR '*' / '-' positions).
        cls.valid_strings_c_only = [
            "*788delinsA",
            "-25+1_-25+3dup",
            "*17dup",
            "-25+1_-25+3del",
            "*17del",
            "*24G>C",
            "-27+3T>C",
        ]
        # Valid for c. and n. only (intronic +/- offset positions).
        cls.valid_strings_cn_only = [
            "43-6_595+12delinsCTT",
            "99+6_99+7insA",
            "101+1_101+7dup",
            "19+22A>G",
            "122-6T>A",
        ]
        # Positional '=' forms — name suggests valid for c./g./m./o. prefixes;
        # NOTE(review): confirm intended prefix set against the test bodies below.
        cls.valid_strings_cgmo_only = ["22=", "4_6="]
        # Strings that must be rejected under every prefix.
        cls.invalid_strings = [
            "22g>u",
            "48C>W",
            "122=/T>A",
            "(78+1_79-1)_(124+1_125-1)del",
            "(?_85)_(124_?)del",
            "122=/del",
            "(78+1_79-1)_(124+1_125-1)dup",
            "(?_85)_(124_?)dup",
            "122_125=//dup",
            "84_85ins100_125",
            "234_235ins(10)",
            "234_235ins(?)",
            "84_85delinsAAN",
            "234delinsW",
        ]
def test_valid_strings(self):
for p in "cngmo":
for s in self.valid_strings:
with self.subTest(s=s, p=p):
v = f"{p}.{s}"
| |
# Repository: reiterl/openslides-backend
import time
from copy import deepcopy
from typing import Any, Dict
from openslides_backend.action.motion.delete import MotionDelete
from openslides_backend.action.motion.sort import MotionSort
from openslides_backend.action.motion.update import MotionUpdate, MotionUpdateMetadata
from openslides_backend.shared.exceptions import ActionException, PermissionDenied
from tests.system.action.base import BaseActionTestCase
from tests.util import Client, get_fqfield, get_fqid
from ..fake_services.database import DatabaseTestAdapter
from ..fake_services.permission import PermissionTestAdapter
from ..util import create_test_application_old as create_test_application
# TODO: These tests use all old style datastore testing.
# Fix this (do not use create_test_applicaton_old and do not use old_style_testing=True any more).
class BaseMotionUpdateActionTester(BaseActionTestCase):
    """
    Tests the motion update action.
    """

    def setUp(self) -> None:
        # Shared fixture: one valid motion.update payload used by subclasses.
        payload_item = {
            "id": 2995885358,
            "title": "title_pheK0Ja3ai",
            "statute_paragraph_id": None,
        }
        self.valid_payload_1 = [payload_item]
class MotionUpdateActionUnitTester(BaseMotionUpdateActionTester):
    """Unit tests for validate() and prepare_dataset() of MotionUpdate."""
    def setUp(self) -> None:
        super().setUp()
        user_id = 7826715669
        self.action = MotionUpdate(
            PermissionTestAdapter(superuser=user_id),
            DatabaseTestAdapter(old_style_testing=True),
        )
        self.action.user_id = user_id
    def test_validation_correct_1(self) -> None:
        # validate() must accept the valid payload without raising.
        self.action.validate(self.valid_payload_1)
    def test_prepare_dataset_1(self) -> None:
        dataset = self.action.prepare_dataset(self.valid_payload_1)
        instance = deepcopy(self.valid_payload_1[0])
        # NOTE(review): comparing against round(time.time()) taken *after*
        # prepare_dataset() can flake if the call crosses a second boundary.
        instance["last_modified"] = round(time.time())
        self.assertEqual(
            dataset["data"],
            [
                {
                    "instance": instance,
                    "relations": {
                        get_fqfield("motion_statute_paragraph/8264607531/motion_ids"): {
                            "type": "remove",
                            "value": [],
                        }
                    },
                }
            ],
        )
class MotionUpdateActionPerformTester(BaseMotionUpdateActionTester):
    """Tests perform() of MotionUpdate: the generated write-request elements."""
    def setUp(self) -> None:
        super().setUp()
        self.user_id = 7826715669
        self.action = MotionUpdate(
            PermissionTestAdapter(superuser=self.user_id),
            DatabaseTestAdapter(old_style_testing=True),
        )
    def test_perform_correct_1(self) -> None:
        write_request_elements = self.action.perform(
            self.valid_payload_1, user_id=self.user_id
        )
        # NOTE(review): round(time.time()) is evaluated after perform(); this
        # comparison can flake across a second boundary.
        expected = [
            {
                "events": [
                    {
                        "type": "update",
                        "fqid": get_fqid("motion/2995885358"),
                        "fields": {
                            "title": "title_pheK0Ja3ai",
                            "last_modified": round(time.time()),
                            "statute_paragraph_id": None,
                        },
                    },
                    {
                        "type": "update",
                        "fqid": get_fqid("motion_statute_paragraph/8264607531"),
                        "fields": {"motion_ids": []},
                    },
                ],
                "information": {
                    get_fqid("motion/2995885358"): ["Object updated"],
                    get_fqid("motion_statute_paragraph/8264607531"): [
                        "Object attachment to motion reset"
                    ],
                },
                "user_id": self.user_id,
            },
        ]
        self.assertEqual(list(write_request_elements), expected)
class MotionUpdateActionWSGITester(BaseMotionUpdateActionTester):
    """End-to-end WSGI test for the motion.update action."""

    def setUp(self) -> None:
        super().setUp()
        self.user_id = 7826715669
        self.application = create_test_application(
            user_id=self.user_id, view_name="ActionView", superuser=self.user_id
        )

    def test_wsgi_request_correct_1(self) -> None:
        client = Client(self.application)
        request_body = [{"action": "motion.update", "data": self.valid_payload_1}]
        response = client.post("/", json=request_body)
        self.assert_status_code(response, 200)
class BaseMotionUpdateMetadataActionTester(BaseActionTestCase):
    """
    Tests the motion update metadata action.
    """

    def setUp(self) -> None:
        # Shared fixtures: one payload changing category/block, one changing
        # supporters, both targeting the same motion.
        motion_id = 2995885358
        self.valid_payload_1 = [
            {"id": motion_id, "category_id": None, "block_id": 4740630442}
        ]
        self.valid_payload_2 = [{"id": motion_id, "supporter_ids": [7268025091]}]
class MotionUpdateMetadataActionUnitTester(BaseMotionUpdateMetadataActionTester):
    """Unit tests for validate() and prepare_dataset() of MotionUpdateMetadata."""
    def setUp(self) -> None:
        super().setUp()
        user_id = 7826715669
        self.action = MotionUpdateMetadata(
            PermissionTestAdapter(superuser=user_id),
            DatabaseTestAdapter(old_style_testing=True),
        )
        self.action.user_id = user_id
    def test_validation_correct_1(self) -> None:
        self.action.validate(self.valid_payload_1)
    def test_validation_correct_2(self) -> None:
        self.action.validate(self.valid_payload_2)
    def test_prepare_dataset_1(self) -> None:
        # Changing the block must remove the motion from the old block
        # (4116433002) and category, and add it to the new block (4740630442).
        dataset = self.action.prepare_dataset(self.valid_payload_1)
        instance = deepcopy(self.valid_payload_1[0])
        # NOTE(review): time-based equality can flake across a second boundary.
        instance["last_modified"] = round(time.time())
        expected = [
            {
                "instance": instance,
                "relations": {
                    get_fqfield("motion_category/8734727380/motion_ids"): {
                        "type": "remove",
                        "value": [],
                    },
                    get_fqfield("motion_block/4116433002/motion_ids"): {
                        "type": "remove",
                        "value": [],
                    },
                    get_fqfield("motion_block/4740630442/motion_ids"): {
                        "type": "add",
                        "value": [2995885358],
                    },
                },
            }
        ]
        self.assertEqual(dataset["data"], expected)
    def test_prepare_dataset_2(self) -> None:
        # Adding a supporter must create the reverse relation on the user.
        dataset = self.action.prepare_dataset(self.valid_payload_2)
        instance = deepcopy(self.valid_payload_2[0])
        instance["last_modified"] = round(time.time())
        expected = [
            {
                "instance": instance,
                "relations": {
                    get_fqfield("user/7268025091/supported_motion_5562405520_ids"): {
                        "type": "add",
                        "value": [2995885358],
                    },
                },
            }
        ]
        self.assertEqual(dataset["data"], expected)
class MotionUpdateMetadataActionPerformTester(BaseMotionUpdateMetadataActionTester):
    """Tests perform() of MotionUpdateMetadata, including permission denial."""
    def setUp(self) -> None:
        super().setUp()
        self.user_id = 7826715669
        self.action = MotionUpdateMetadata(
            PermissionTestAdapter(superuser=self.user_id),
            DatabaseTestAdapter(old_style_testing=True),
        )
    def test_perform_correct_1(self) -> None:
        write_request_elements = self.action.perform(
            self.valid_payload_1, user_id=self.user_id
        )
        # NOTE(review): round(time.time()) comparison can flake across a
        # second boundary.
        expected = [
            {
                "events": [
                    {
                        "type": "update",
                        "fqid": get_fqid("motion/2995885358"),
                        "fields": {
                            "last_modified": round(time.time()),
                            "category_id": None,
                            "block_id": 4740630442,
                        },
                    },
                    {
                        "type": "update",
                        "fqid": get_fqid("motion_block/4116433002"),
                        "fields": {"motion_ids": []},
                    },
                    {
                        "type": "update",
                        "fqid": get_fqid("motion_block/4740630442"),
                        "fields": {"motion_ids": [2995885358]},
                    },
                    {
                        "type": "update",
                        "fqid": get_fqid("motion_category/8734727380"),
                        "fields": {"motion_ids": []},
                    },
                ],
                "information": {
                    get_fqid("motion/2995885358"): ["Object updated"],
                    get_fqid("motion_block/4116433002"): [
                        "Object attachment to motion reset"
                    ],
                    get_fqid("motion_block/4740630442"): ["Object attached to motion"],
                    get_fqid("motion_category/8734727380"): [
                        "Object attachment to motion reset"
                    ],
                },
                "user_id": self.user_id,
            },
        ]
        self.assertEqual(
            list(write_request_elements), expected,
        )
    def test_perform_correct_2(self) -> None:
        write_request_elements = self.action.perform(
            self.valid_payload_2, user_id=self.user_id
        )
        expected = [
            {
                "events": [
                    {
                        "type": "update",
                        "fqid": get_fqid("motion/2995885358"),
                        "fields": {
                            "last_modified": round(time.time()),
                            "supporter_ids": [7268025091],
                        },
                    },
                    {
                        "type": "update",
                        "fqid": get_fqid("user/7268025091"),
                        "fields": {"supported_motion_5562405520_ids": [2995885358]},
                    },
                ],
                "information": {
                    get_fqid("motion/2995885358"): ["Object updated"],
                    get_fqid("user/7268025091"): ["Object attached to motion"],
                },
                "user_id": self.user_id,
            }
        ]
        self.assertEqual(
            list(write_request_elements), expected,
        )
    def test_perform_no_permission_1(self) -> None:
        # 4796568680 is not the superuser configured in setUp, so the
        # permission adapter must reject the action.
        with self.assertRaises(PermissionDenied) as context_manager:
            self.action.perform(self.valid_payload_1, user_id=4796568680)
        self.assertEqual(
            context_manager.exception.message,
            "You are not allowed to perform action motion.update_metadata.",
        )
class BaseMotionDeleteActionTester(BaseActionTestCase):
    """
    Tests the motion delete action.
    """

    def setUp(self) -> None:
        # Shared fixture: one valid motion.delete payload.
        self.valid_payload_1 = [{"id": 2995885358}]
class MotionDeleteActionUnitTester(BaseMotionDeleteActionTester):
    """Unit tests for validate() and prepare_dataset() of MotionDelete."""
    def setUp(self) -> None:
        super().setUp()
        user_id = 7826715669
        self.action = MotionDelete(
            PermissionTestAdapter(superuser=user_id),
            DatabaseTestAdapter(old_style_testing=True),
        )
        self.action.user_id = user_id
    def test_validation_correct_1(self) -> None:
        self.action.validate(self.valid_payload_1)
    def test_prepare_dataset_1(self) -> None:
        dataset = self.action.prepare_dataset(self.valid_payload_1)
        # Deleting a motion nulls every relational field of the instance and
        # removes the motion from all back-relations listed below.
        expected = [
            {
                "instance": {
                    "id": self.valid_payload_1[0]["id"],
                    "meeting_id": None,
                    "statute_paragraph_id": None,
                    "sort_parent_id": None,
                    "lead_motion_id": None,
                    "category_id": None,
                    "change_recommendation_ids": None,
                    "current_projector_ids": None,
                    "comment_ids": None,
                    "block_id": None,
                    "origin_id": None,
                    "state_id": None,
                    "recommendation_id": None,
                    "personal_note_ids": None,
                    "poll_ids": None,
                    "projection_ids": None,
                    "recommendation_extension_reference_ids": None,
                    "referenced_in_motion_recommendation_extension_ids": None,
                    "submitter_ids": None,
                    "attachment_ids": None,
                    "tag_ids": None,
                    "amendment_ids": None,
                    "derived_motion_ids": None,
                    "sort_child_ids": None,
                    "agenda_item_id": None,
                    "list_of_speakers_id": None,
                },
                "relations": {
                    get_fqfield("meeting/5562405520/motion_ids"): {
                        "type": "remove",
                        "value": [],
                    },
                    get_fqfield("motion_statute_paragraph/8264607531/motion_ids"): {
                        "type": "remove",
                        "value": [],
                    },
                    get_fqfield("motion_state/5205893377/motion_ids"): {
                        "type": "remove",
                        "value": [],
                    },
                    get_fqfield("motion_state/5205893377/motion_recommendation_ids"): {
                        "type": "remove",
                        "value": [],
                    },
                    get_fqfield("motion_category/8734727380/motion_ids"): {
                        "type": "remove",
                        "value": [],
                    },
                    get_fqfield("motion_block/4116433002/motion_ids"): {
                        "type": "remove",
                        "value": [],
                    },
                },
            }
        ]
        # Show the full diff on failure (the expected structure is large).
        self.maxDiff = None
        self.assertEqual(dataset["data"], expected)
class MotionDeleteActionPerformTester(BaseMotionDeleteActionTester):
    """Tests perform() of MotionDelete: delete event plus relation clean-up."""
    def setUp(self) -> None:
        super().setUp()
        self.user_id = 7826715669
        self.action = MotionDelete(
            PermissionTestAdapter(superuser=self.user_id),
            DatabaseTestAdapter(old_style_testing=True),
        )
    def test_perform_correct_1(self) -> None:
        write_request_elements = self.action.perform(
            self.valid_payload_1, user_id=self.user_id
        )
        expected = [
            {
                "events": [
                    {"type": "delete", "fqid": get_fqid("motion/2995885358")},
                    {
                        "type": "update",
                        "fqid": get_fqid("motion_block/4116433002"),
                        "fields": {"motion_ids": []},
                    },
                    {
                        "type": "update",
                        "fqid": get_fqid("motion_category/8734727380"),
                        "fields": {"motion_ids": []},
                    },
                    {
                        "type": "update",
                        "fqid": get_fqid("meeting/5562405520"),
                        "fields": {"motion_ids": []},
                    },
                    {
                        "type": "update",
                        "fqid": get_fqid("motion_state/5205893377"),
                        "fields": {"motion_recommendation_ids": []},
                    },
                    {
                        "type": "update",
                        "fqid": get_fqid("motion_state/5205893377"),
                        "fields": {"motion_ids": []},
                    },
                    {
                        "type": "update",
                        "fqid": get_fqid("motion_statute_paragraph/8264607531"),
                        "fields": {"motion_ids": []},
                    },
                ],
                "information": {
                    get_fqid("motion/2995885358"): ["Object deleted"],
                    get_fqid("meeting/5562405520"): [
                        "Object attachment to motion reset"
                    ],
                    get_fqid("motion_statute_paragraph/8264607531"): [
                        "Object attachment to motion reset"
                    ],
                    # Two entries: the state is referenced both as state_id
                    # and as recommendation_id of the deleted motion.
                    get_fqid("motion_state/5205893377"): [
                        "Object attachment to motion reset",
                        "Object attachment to motion reset",
                    ],
                    get_fqid("motion_category/8734727380"): [
                        "Object attachment to motion reset"
                    ],
                    get_fqid("motion_block/4116433002"): [
                        "Object attachment to motion reset"
                    ],
                },
                "user_id": self.user_id,
            },
        ]
        self.assertEqual(
            list(write_request_elements), expected,
        )
class MotionDeleteActionWSGITester(BaseMotionDeleteActionTester):
    """End-to-end WSGI test for the motion.delete action."""

    def setUp(self) -> None:
        super().setUp()
        self.user_id = 7826715669
        self.application = create_test_application(
            user_id=self.user_id, view_name="ActionView", superuser=self.user_id
        )

    def test_wsgi_request_correct_1(self) -> None:
        client = Client(self.application)
        request_body = [{"action": "motion.delete", "data": self.valid_payload_1}]
        response = client.post("/", json=request_body)
        self.assert_status_code(response, 200)
class BaseMotionSortActionTester(BaseActionTestCase):
    """
    Tests the motion sort action.
    """
    def setUp(self) -> None:
        self.meeting_id = 5562405520
        # Flat tree: five root-level motions in the given order.
        self.valid_payload_1 = {
            "meeting_id": self.meeting_id,
            "nodes": [
                {"id": 3265963568},
                {"id": 2279328478},
                {"id": 1082050467},
                {"id": 8000824551},
                {"id": 2995885358},
            ],
        }
        # Nested tree: 3265963568 > 2279328478 > (8000824551, 1082050467),
        # plus 2995885358 at root level.
        self.valid_payload_2 = {
            "meeting_id": self.meeting_id,
            "nodes": [
                {
                    "id": 3265963568,
                    "children": [
                        {
                            "id": 2279328478,
                            "children": [{"id": 8000824551}, {"id": 1082050467}],
                        }
                    ],
                },
                {"id": 2995885358},
            ],
        }
        # Invalid tree: 3265963568 appears as its own (grand)child.
        self.circular_payload = {
            "meeting_id": self.meeting_id,
            "nodes": [
                {
                    "id": 3265963568,
                    "children": [{"id": 2279328478, "children": [{"id": 3265963568}]}],
                },
            ],
        }
class MotionSortActionUnitTester(BaseMotionSortActionTester):
    """Unit tests for validate() and prepare_dataset() of MotionSort."""
    def setUp(self) -> None:
        super().setUp()
        user_id = 7826715669
        self.action = MotionSort(
            PermissionTestAdapter(superuser=user_id),
            DatabaseTestAdapter(old_style_testing=True),
        )
        self.action.user_id = user_id
    def test_validation_correct_1(self) -> None:
        self.action.validate(self.valid_payload_1)
    def test_validation_correct_2(self) -> None:
        self.action.validate(self.valid_payload_2)
    def test_prepare_dataset_1(self) -> None:
        # Flat tree: weights are assigned in traversal order as 2, 4, 6, ...
        dataset = self.action.prepare_dataset(self.valid_payload_1)
        expected: Dict[int, Dict[str, Any]] = {
            3265963568: {
                "sort_parent_id": None,
                "sort_weight": 2,
                "sort_children_ids": [],
            },
            2279328478: {
                "sort_parent_id": None,
                "sort_weight": 4,
                "sort_children_ids": [],
            },
            1082050467: {
                "sort_parent_id": None,
                "sort_weight": 6,
                "sort_children_ids": [],
            },
            8000824551: {
                "sort_parent_id": None,
                "sort_weight": 8,
                "sort_children_ids": [],
            },
            2995885358: {
                "sort_parent_id": None,
                "sort_weight": 10,
                "sort_children_ids": [],
            },
        }
        self.assertEqual(dataset["data"], expected)
    def test_prepare_dataset_2(self) -> None:
        # Nested tree: parent/children links must mirror the payload structure.
        dataset = self.action.prepare_dataset(self.valid_payload_2)
        expected = {
            3265963568: {
                "sort_parent_id": None,
                "sort_weight": 2,
                "sort_children_ids": [2279328478],
            },
            2279328478: {
                "sort_parent_id": 3265963568,
                "sort_weight": 4,
                "sort_children_ids": [8000824551, 1082050467],
            },
            1082050467: {
                "sort_parent_id": 2279328478,
                "sort_weight": 8,
                "sort_children_ids": [],
            },
            8000824551: {
                "sort_parent_id": 2279328478,
                "sort_weight": 6,
                "sort_children_ids": [],
            },
            2995885358: {
                "sort_parent_id": None,
                "sort_weight": 10,
                "sort_children_ids": [],
            },
        }
        self.assertEqual(dataset["data"], expected)
    def test_circular_dataset(self) -> None:
        # A node occurring twice in the tree must raise an ActionException.
        with self.assertRaises(ActionException) as context_manager:
            self.action.prepare_dataset(self.circular_payload)
        self.assertEqual(
            context_manager.exception.message, "Duplicate id in sort tree: 3265963568"
        )
class MotionSortActionPerformTester(BaseMotionSortActionTester):
    def setUp(self) -> None:
        # Perform-level tests need a concrete user id for permission checks.
        super().setUp()
        self.user_id = 7826715669
        self.action = MotionSort(
            PermissionTestAdapter(superuser=self.user_id),
            DatabaseTestAdapter(old_style_testing=True),
        )
def test_perform_correct_1(self) -> None:
write_request_elements = self.action.perform(
self.valid_payload_1, user_id=self.user_id
)
expected = [
{
"events": [
{
"type": "update",
"fqid": get_fqid("motion/3265963568"),
"fields": {
"sort_parent_id": None,
"sort_children_ids": [],
"sort_weight": 2,
},
}
],
"information": {get_fqid("motion/3265963568"): ["Object sorted"]},
"user_id": self.user_id,
},
{
"events": [
{
"type": "update",
"fqid": get_fqid("motion/2279328478"),
"fields": {
"sort_parent_id": None,
"sort_weight": 4,
"sort_children_ids": [],
},
}
],
"information": {get_fqid("motion/2279328478"): ["Object sorted"]},
"user_id": self.user_id,
},
{
"events": [
{
"type": "update",
"fqid": get_fqid("motion/1082050467"),
"fields": {
"sort_weight": 6,
"sort_parent_id": None,
"sort_children_ids": [],
},
}
],
"information": {get_fqid("motion/1082050467"): ["Object sorted"]},
"user_id": self.user_id,
},
{
"events": [
{
"type": "update",
"fqid": get_fqid("motion/8000824551"),
"fields": {
| |
"""Definition of volume phase scattering functions"""
import numpy as np
import sympy as sp
from functools import partial, update_wrapper
from .scatter import Scatter
from .rtplots import polarplot
class Volume(Scatter):
"""basic volume class"""
    def __init__(self, **kwargs):
        # NOTE(review): omega and tau are only stored here; their semantics
        # (presumably single-scattering albedo and optical depth of the RT1
        # model) are defined by the consumers of this class — confirm.
        self.omega = kwargs.pop('omega', None)
        self.tau = kwargs.pop('tau', None)
        # set scattering angle generalization-matrix to [-1,1,1] if it is not
        # explicitly provided by the chosen class this results in a peak in
        # forward-direction which is suitable for describing volume-scattering
        # phase-functions
        # (getattr on self: a subclass that assigns self.a BEFORE calling
        # super().__init__() keeps its own value — initialization order matters)
        self.a = getattr(self, 'a', [-1., 1., 1.])
        # add a quick way for visualizing the functions as polarplot
        self.polarplot = partial(polarplot, X=self)
        update_wrapper(self.polarplot, polarplot)
def p(self, t_0, t_ex, p_0, p_ex, param_dict={}):
"""
Calculate numerical value of the volume-scattering phase-function
for chosen incidence- and exit angles.
Parameters
----------
t_0 : array_like(float)
array of incident zenith-angles in radians
p_0 : array_like(float)
array of incident azimuth-angles in radians
t_ex : array_like(float)
array of exit zenith-angles in radians
p_ex : array_like(float)
array of exit azimuth-angles in radians
Returns
-------
array_like(float)
Numerical value of the volume-scattering phase-function
"""
# define sympy objects
theta_0 = sp.Symbol('theta_0')
theta_ex = sp.Symbol('theta_ex')
phi_0 = sp.Symbol('phi_0')
phi_ex = sp.Symbol('phi_ex')
# replace arguments and evaluate expression
# sp.lambdify is used to allow array-inputs
# for python > 3.5 unpacking could be used, i.e.:
# pfunc = sp.lambdify((theta_0, theta_ex, phi_0, phi_ex,
# *param_dict.keys()),
# self._func, modules=["numpy", "sympy"])
args = (theta_0, theta_ex, phi_0, phi_ex) + tuple(param_dict.keys())
pfunc = sp.lambdify(args, self._func, modules=["numpy", "sympy"])
# in case _func is a constant, lambdify will produce a function with
# scalar output which is not suitable for further processing
# (this happens e.g. for the Isotropic brdf).
# The following query is implemented to ensure correct array-output:
# TODO this is not a proper test !
if not isinstance(pfunc(np.array([.1, .2, .3]), .1, .1, .1,
**{key: .12 for key in param_dict.keys()}
), np.ndarray):
pfunc = np.vectorize(pfunc)
return pfunc(t_0, t_ex, p_0, p_ex, **param_dict)
def p_theta_diff(self, t_0, t_ex, p_0, p_ex, geometry,
param_dict={}, return_symbolic=False, n=1):
"""
Calculation of the derivative of p with respect to
the scattering-angles t_ex
Parameters
----------
t_0 : array_like(float)
array of incident zenith-angles in radians
p_0 : array_like(float)
array of incident azimuth-angles in radians
t_ex : array_like(float)
array of exit zenith-angles in radians
p_ex : array_like(float)
array of exit azimuth-angles in radians
geometry : str
4 character string specifying which components of the angles should
be fixed or variable. This is done to significantly speed up the
evaluation-process of the fn-coefficient generation
The 4 characters represent in order the properties of:
t_0, t_ex, p_0, p_ex
- 'f' indicates that the angle is treated 'fixed'
(i.e. as a numerical constant)
- 'v' indicates that the angle is treated 'variable'
(i.e. as a sympy-variable)
- Passing geometry = 'mono' indicates a monstatic geometry
(i.e.: t_ex = t_0, p_ex = p_0 + pi)
If monostatic geometry is used, the input-values of t_ex and p_ex
have no effect on the calculations!
For detailed information on the specification of the
geometry-parameter, please have a look at the
"Evaluation Geometries" section of the documentation
(http://rt1.readthedocs.io/en/latest/model_specification.html#evaluation-geometries)
return_symbolic : bool (default = False)
indicator if symbolic result
should be returned
n : int (default = 1)
order of derivatives (d^n / d_theta^n)
Returns
-------
sympy - expression
The derivative of the BRDF with espect to the excident angle
t_ex for the chosen geometry
"""
# define sympy variables based on chosen geometry
if geometry == 'mono':
assert len(np.unique(p_0)) == 1, 'p_0 must contain only a ' + \
'single unique value for monostatic geometry'
theta_0 = sp.Symbol('theta_0')
theta_ex = theta_0
phi_0 = np.unique(p_0)[0]
phi_ex = np.unique(p_0)[0] + sp.pi
t_ex = t_0
p_ex = p_0 + np.pi
else:
if geometry[0] == 'v':
theta_0 = sp.Symbol('theta_0')
elif geometry[0] == 'f':
assert len(np.unique(t_0)) == 1, 't_0 must contain only a ' + \
'single unique value for geometry[0] == f'
theta_0 = np.unique(t_0)[0]
else:
raise AssertionError('wrong choice of theta_0 geometry')
if geometry[1] == 'v':
theta_ex = sp.Symbol('theta_ex')
elif geometry[1] == 'f':
assert len(np.unique(t_ex)) == 1, 't_ex must contain only' + \
' a single unique value for geometry[1] == f'
theta_ex = np.unique(t_ex)[0]
else:
raise AssertionError('wrong choice of theta_ex geometry')
if geometry[2] == 'v':
phi_0 = sp.Symbol('phi_0')
elif geometry[2] == 'f':
assert len(np.unique(p_0)) == 1, 'p_0 must contain only' + \
' a single unique value for geometry[2] == f'
phi_0 = np.unique(p_0)[0]
else:
raise AssertionError('wrong choice of phi_0 geometry')
if geometry[3] == 'v':
phi_ex = sp.Symbol('phi_ex')
elif geometry[3] == 'f':
assert len(np.unique(p_0)) == 1, 'p_ex must contain only' + \
' a single unique value for geometry[3] == f'
phi_ex = np.unique(p_ex)[0]
else:
raise AssertionError('wrong choice of phi_ex geometry')
if geometry[1] == 'f':
dfunc_dtheta_0 = 0.
else:
func = self._func.xreplace({sp.Symbol('theta_0'): theta_0,
sp.Symbol('theta_ex'): theta_ex,
sp.Symbol('phi_0'): phi_0,
sp.Symbol('phi_ex'): phi_ex})
dfunc_dtheta_0 = sp.diff(func, theta_ex, n)
if return_symbolic is True:
return dfunc_dtheta_0
else:
args = (sp.Symbol('theta_0'),
sp.Symbol('theta_ex'),
sp.Symbol('phi_0'),
sp.Symbol('phi_ex')) + tuple(param_dict.keys())
pfunc = sp.lambdify(args, dfunc_dtheta_0,
modules=["numpy", "sympy"])
# in case _func is a constant, lambdify will produce a function
# with scalar output which is not suitable for further processing
# (this happens e.g. for the Isotropic brdf).
# The following query is implemented to ensure correct array-output
# TODO this is not a proper test !
if not isinstance(pfunc(
np.array([.1, .2, .3]), .1, .1, .1,
**{key: .12 for key in param_dict.keys()}), np.ndarray):
pfunc = np.vectorize(pfunc)
return pfunc(t_0, t_ex, p_0, p_ex, **param_dict)
def legexpansion(self, t_0, t_ex, p_0, p_ex, geometry):
"""
Definition of the legendre-expansion of the
volume-scattering phase-function
.. note::
The output represents the legendre-expansion as needed to compute
the fn-coefficients for the chosen geometry!
(http://rt1.readthedocs.io/en/latest/theory.html#equation-fn_coef_definition)
The incidence-angle argument of the legexpansion() is different to
the documentation due to the direct definition of the argument as
the zenith-angle (t_0) instead of the incidence-angle defined in a
spherical coordinate system (t_i).
They are related via: t_i = pi - t_0
Parameters
----------
t_0 : array_like(float)
array of incident zenith-angles in radians
p_0 : array_like(float)
array of incident azimuth-angles in radians
t_ex : array_like(float)
array of exit zenith-angles in radians
p_ex : array_like(float)
array of exit azimuth-angles in radians
geometry : str
4 character string specifying which components of the angles should
be fixed or variable. This is done to significantly speed up the
evaluation-process of the fn-coefficient generation
The 4 characters represent in order the properties of:
t_0, t_ex, p_0, p_ex
- 'f' indicates that the angle is treated 'fixed'
(i.e. as a numerical constant)
- 'v' indicates that the angle is treated 'variable'
(i.e. as a sympy-variable)
- Passing geometry = 'mono' indicates a monstatic geometry
(i.e.: t_ex = t_0, p_ex = p_0 + pi)
If monostatic geometry is used, the input-values of t_ex and p_ex
have no effect on the calculations!
For detailed information on the specification of the
geometry-parameter, please have a look at the
"Evaluation Geometries" section of the documentation
(http://rt1.readthedocs.io/en/latest/model_specification.html#evaluation-geometries)
Returns
-------
sympy-expression
The legendre-expansion of the volume-scattering phase-function
for the chosen geometry
"""
assert self.ncoefs > 0
theta_s = sp.Symbol('theta_s')
phi_s = sp.Symbol('phi_s')
NP = self.ncoefs
n = sp.Symbol('n')
# define sympy variables based on chosen geometry
if geometry == 'mono':
assert len(np.unique(p_0)) == 1, 'p_0 must contain only a ' + \
'single unique value for monostatic geometry'
theta_0 = sp.Symbol('theta_0')
theta_ex = theta_0
phi_0 = np.unique(p_0)[0]
phi_ex = np.unique(p_0)[0] + sp.pi
else:
if geometry[0] == 'v':
theta_0 = sp.Symbol('theta_0')
elif geometry[0] == 'f':
assert len(np.unique(t_0)) == 1, 't_0 must contain only a ' + \
'single unique value for geometry[0] == f'
theta_0 = np.unique(t_0)[0]
else:
raise AssertionError('wrong choice of theta_i geometry')
if geometry[1] == 'v':
theta_ex = sp.Symbol('theta_ex')
elif geometry[1] == 'f':
assert len(np.unique(t_ex)) == 1, 't_ex must contain only' + \
' a single unique value for geometry[1] == f'
theta_ex = np.unique(t_ex)[0]
else:
raise AssertionError('wrong choice of theta_ex geometry')
if geometry[2] == 'v':
phi_0 = sp.Symbol('phi_0')
elif geometry[2] == 'f':
| |
# Repository: bednie/mango-explorer
# # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:<EMAIL>)
import pyserum.enums
import typing
from datetime import datetime
from decimal import Decimal
from pyserum._layouts.instructions import (
INSTRUCTIONS_LAYOUT as PYSERUM_INSTRUCTIONS_LAYOUT,
InstructionType as PySerumInstructionType,
)
from pyserum.enums import OrderType as PySerumOrderType, Side as PySerumSide
from pyserum.instructions import (
settle_funds as pyserum_settle_funds,
SettleFundsParams as PySerumSettleFundsParams,
)
from pyserum.market.market import Market as PySerumMarket
from pyserum.open_orders_account import (
make_create_account_instruction as pyserum_make_create_account_instruction,
)
from solana.keypair import Keypair
from solana.publickey import PublicKey
from solana.system_program import CreateAccountParams, create_account
from solana.transaction import AccountMeta, TransactionInstruction
from spl.token.constants import (
ACCOUNT_LEN,
TOKEN_PROGRAM_ID,
)
from spl.token.instructions import (
CloseAccountParams,
InitializeAccountParams,
TransferParams,
close_account,
create_associated_token_account,
initialize_account,
transfer,
)
from .combinableinstructions import CombinableInstructions
from .constants import I64_MAX, SYSTEM_PROGRAM_ADDRESS
from .context import Context
from .layouts import layouts
from .orders import Order, OrderType, Side
from .perpmarketdetails import PerpMarketDetails
# from .spotmarket import SpotMarket
from .tokens import Token
from .tokenaccount import TokenAccount
from .tokenbank import TokenBank, NodeBank, RootBank
from .wallet import Wallet
# 🥭 Interfaces
#
# To avoid circular dependencies, here we specify the bare interfaces we need for some
# of the objects. For instance, where a function takes an IAccount, you can (and probably
# should) safely pass in a concrete Account implementation.
#
class IAccount(typing.Protocol):
    """
    Bare structural interface for a Mango Account, declared here to avoid a
    circular import. Only the members the instruction builders below actually
    use are named; concrete implementations live elsewhere in the package.
    """

    # Address of the Group this account belongs to.
    group_address: PublicKey

    @property
    def address(self) -> PublicKey:
        # Public address of the account itself.
        raise NotImplementedError(
            "IAccount.address is not implemented on the Protocol."
        )

    @property
    def spot_open_orders_by_index(self) -> typing.Sequence[typing.Optional[PublicKey]]:
        # One (possibly empty) slot per market index.
        raise NotImplementedError(
            "IAccount.spot_open_orders_by_index is not implemented on the Protocol."
        )

    @property
    def spot_open_orders(self) -> typing.Sequence[PublicKey]:
        # Only the populated open-orders addresses.
        raise NotImplementedError(
            "IAccount.spot_open_orders is not implemented on the Protocol."
        )

    def update_spot_open_orders_for_market(
        self, spot_market_index: int, spot_open_orders: PublicKey
    ) -> None:
        # Records a freshly created open-orders account against its market slot.
        raise NotImplementedError(
            "IAccount.update_spot_open_orders_for_market() is not implemented on the Protocol."
        )
class IGroupSlot(typing.Protocol):
    """Bare interface for a Group slot; only the slot index is needed here."""

    # Position of this slot within its Group.
    index: int


class IGroup(typing.Protocol):
    """
    Bare structural interface for a Mango Group, declared here to avoid a
    circular import. Only the members used by the instruction builders are
    named.
    """

    cache: PublicKey
    signer_key: PublicKey
    fees_vault: PublicKey
    shared_quote: TokenBank

    @property
    def address(self) -> PublicKey:
        raise NotImplementedError("IGroup.address is not implemented on the Protocol.")

    @property
    def tokens_by_index(self) -> typing.Sequence[typing.Optional[TokenBank]]:
        raise NotImplementedError(
            "IGroup.tokens_by_index is not implemented on the Protocol."
        )

    @property
    def base_tokens_by_index(self) -> typing.Sequence[typing.Optional[TokenBank]]:
        raise NotImplementedError(
            "IGroup.base_tokens_by_index is not implemented on the Protocol."
        )

    def slot_by_spot_market_address(self, spot_market_address: PublicKey) -> IGroupSlot:
        # Looks up the Group slot that corresponds to a given spot market.
        raise NotImplementedError(
            "IGroup.slot_by_spot_market_address() is not implemented on the Protocol."
        )
class IPerpMarket(typing.Protocol):
    """Bare interface for a perp market; exposes its on-chain details and address."""

    underlying_perp_market: PerpMarketDetails

    @property
    def address(self) -> PublicKey:
        raise NotImplementedError(
            "IPerpMarket.address is not implemented on the Protocol."
        )


class ISpotMarket(typing.Protocol):
    """
    Bare interface for a spot market. Wraps a pyserum ``Market`` and exposes
    the addresses the instruction builders need.
    """

    underlying_serum_market: PySerumMarket

    @property
    def address(self) -> PublicKey:
        raise NotImplementedError(
            "ISpotMarket.address is not implemented on the Protocol."
        )

    @property
    def bids_address(self) -> PublicKey:
        raise NotImplementedError(
            "ISpotMarket.bids_address is not implemented on the Protocol."
        )

    @property
    def asks_address(self) -> PublicKey:
        raise NotImplementedError(
            "ISpotMarket.asks_address is not implemented on the Protocol."
        )

    @property
    def event_queue_address(self) -> PublicKey:
        raise NotImplementedError(
            "ISpotMarket.event_queue_address is not implemented on the Protocol."
        )
# 🥭 Instructions
#
# This file contains the low-level instruction functions that build the raw instructions
# to send to Solana.
#
# One important distinction between these functions and the more common `create instruction functions` in
# Solana is that these functions *all return a combinable of instructions and signers*.
#
# It's likely that some operations will require actions split across multiple instructions because of
# instruction size limitations, so all our functions are prepared for this without having to change
# the function signature in future.
#
# #
# # 🥭 SOLANA instruction builders
# #
# # 🥭 build_solana_create_account_instructions function
#
# Creates and funds a raw Solana account owned by the given program. Can add additional lamports
# too but that's usually not necessary.
#
def build_solana_create_account_instructions(
    context: Context,
    wallet: Wallet,
    mango_program_address: PublicKey,
    size: int,
    lamports: int = 0,
) -> CombinableInstructions:
    """
    Builds an instruction creating a new Solana account of ``size`` bytes, owned
    by ``mango_program_address`` and funded by ``wallet``.

    The account is funded with enough lamports for rent exemption, plus any
    extra ``lamports`` the caller requests. The freshly generated keypair is
    returned as a signer alongside the instruction.
    """
    rent_exempt_minimum = context.client.get_minimum_balance_for_rent_exemption(size)
    new_account = Keypair()
    params = CreateAccountParams(
        wallet.address,
        new_account.public_key,
        lamports + rent_exempt_minimum,
        size,
        mango_program_address,
    )
    return CombinableInstructions(
        signers=[new_account], instructions=[create_account(params)]
    )
# #
# # 🥭 SPL instruction builders
# #
# # 🥭 build_spl_create_account_instructions function
#
# Creates and initializes an SPL token account. Can add additional lamports too but that's usually not
# necessary.
#
# Prefer `build_spl_create_associated_account_instructions()` over this function. This function
# should be reserved for cases where you specifically don't want the associated token account.
#
def build_spl_create_account_instructions(
    context: Context, wallet: Wallet, token: Token, lamports: int = 0
) -> CombinableInstructions:
    """
    Builds the instructions to create and initialize a (non-associated) SPL
    token account for ``token``, owned by ``wallet``. Extra ``lamports`` may be
    deposited on top of the rent-exempt minimum.
    """
    creation = build_solana_create_account_instructions(
        context, wallet, TOKEN_PROGRAM_ID, ACCOUNT_LEN, lamports
    )
    # The new account's keypair is the (only) signer added by the creation step.
    new_account_address = creation.signers[0].public_key
    init_params = InitializeAccountParams(
        TOKEN_PROGRAM_ID,
        new_account_address,
        token.mint,
        wallet.address,
    )
    initialization = CombinableInstructions(
        signers=[], instructions=[initialize_account(init_params)]
    )
    return creation + initialization
# # 🥭 build_spl_create_associated_account_instructions function
#
# Creates and initializes an 'associated' SPL token account. This is the usual way of creating a
# token account now. `build_spl_create_account_instructions()` should be reserved for cases where
# you specifically don't want the associated token account.
#
def build_spl_create_associated_account_instructions(
    context: Context, wallet: Wallet, owner: PublicKey, token: Token
) -> CombinableInstructions:
    """
    Builds the instruction to create ``owner``'s associated token account for
    ``token``, paid for by ``wallet``. This is the usual way of creating a
    token account now.
    """
    # Note: this helper returns a single TransactionInstruction, not a bundle.
    create_instruction = create_associated_token_account(
        wallet.address, owner, token.mint
    )
    return CombinableInstructions(signers=[], instructions=[create_instruction])
# # 🥭 build_spl_transfer_tokens_instructions function
#
# Creates an instruction to transfer SPL tokens from one account to another.
#
def build_spl_transfer_tokens_instructions(
    context: Context,
    wallet: Wallet,
    token: Token,
    source: PublicKey,
    destination: PublicKey,
    quantity: Decimal,
) -> CombinableInstructions:
    """
    Builds an instruction transferring ``quantity`` of ``token`` (expressed in
    UI units, shifted to the mint's native units here) from ``source`` to
    ``destination``, authorized by ``wallet``.
    """
    native_amount = int(token.shift_to_native(quantity))
    params = TransferParams(
        TOKEN_PROGRAM_ID,
        source,
        destination,
        wallet.address,
        native_amount,
        [],
    )
    return CombinableInstructions(signers=[], instructions=[transfer(params)])
# # 🥭 build_spl_close_account_instructions function
#
# Creates an instruction to close an SPL token account and transfers any remaining lamports to the wallet.
#
def build_spl_close_account_instructions(
    context: Context, wallet: Wallet, address: PublicKey
) -> CombinableInstructions:
    """
    Builds an instruction closing the SPL token account at ``address``; any
    remaining lamports are sent to ``wallet``, which is also the authority.
    """
    params = CloseAccountParams(
        TOKEN_PROGRAM_ID,
        address,
        wallet.address,
        wallet.address,
    )
    close_instruction = close_account(params)
    return CombinableInstructions(signers=[], instructions=[close_instruction])
# # 🥭 build_spl_faucet_airdrop_instructions function
#
# Creates an airdrop instruction for compatible faucets (those based on https://github.com/paul-schaaf/spl-token-faucet)
#
def build_spl_faucet_airdrop_instructions(
    token_mint: PublicKey, destination: PublicKey, faucet: PublicKey, quantity: Decimal
) -> CombinableInstructions:
    """
    Builds an airdrop instruction for faucets compatible with
    https://github.com/paul-schaaf/spl-token-faucet.

    :param token_mint: mint of the token to be airdropped.
    :param destination: token account that receives the minted tokens.
    :param faucet: the faucet account to mint from.
    :param quantity: passed straight into the FAUCET_AIRDROP layout build.
    """
    # NOTE(review): "<KEY>" appears to be a redacted base58 program ID for the
    # faucet program -- restore the real address before using this function.
    faucet_program_address: PublicKey = PublicKey(
        "<KEY>"
    )
    # The faucet's mint authority is a PDA derived from the literal seed b"faucet".
    authority_and_nonce: typing.Tuple[PublicKey, int] = PublicKey.find_program_address(
        [b"faucet"], faucet_program_address
    )
    authority: PublicKey = authority_and_nonce[0]
    # Mints and airdrops tokens from a faucet.
    #
    # SPL instruction is at:
    #   https://github.com/paul-schaaf/spl-token-faucet/blob/main/src/program/src/instruction.rs
    #
    # ///
    # /// Mints Tokens
    # ///
    # /// 0. `[]` The mint authority - Program Derived Address
    # /// 1. `[writable]` Token Mint IAccount
    # /// 2. `[writable]` Destination IAccount
    # /// 3. `[]` The SPL Token Program
    # /// 4. `[]` The Faucet IAccount
    # /// 5. `[optional/signer]` Admin IAccount
    faucet_airdrop_instruction = TransactionInstruction(
        keys=[
            AccountMeta(is_signer=False, is_writable=False, pubkey=authority),
            AccountMeta(is_signer=False, is_writable=True, pubkey=token_mint),
            AccountMeta(is_signer=False, is_writable=True, pubkey=destination),
            AccountMeta(is_signer=False, is_writable=False, pubkey=TOKEN_PROGRAM_ID),
            AccountMeta(is_signer=False, is_writable=False, pubkey=faucet),
        ],
        program_id=faucet_program_address,
        data=layouts.FAUCET_AIRDROP.build({"quantity": quantity}),
    )
    return CombinableInstructions(signers=[], instructions=[faucet_airdrop_instruction])
# #
# # 🥭 Serum instruction builders
# #
# # 🥭 build_serum_create_openorders_instructions function
#
# Creates a Serum openorders-creating instruction.
#
def build_serum_create_openorders_instructions(
    context: Context, wallet: Wallet, market: PySerumMarket
) -> CombinableInstructions:
    """
    Builds the instruction creating a new, rent-exempt Serum OpenOrders
    account for ``wallet`` on the given market. The new account's keypair is
    returned as a signer.
    """
    open_orders_keypair = Keypair()
    rent = context.client.get_minimum_balance_for_rent_exemption(
        layouts.OPEN_ORDERS.sizeof()
    )
    create_instruction = pyserum_make_create_account_instruction(
        owner_address=wallet.address,
        new_account_address=open_orders_keypair.public_key,
        lamports=rent,
        program_id=market.state.program_id(),
    )
    return CombinableInstructions(
        signers=[open_orders_keypair], instructions=[create_instruction]
    )
# # 🥭 build_serum_place_order_instructions function
#
# Creates a Serum order-placing instruction using V3 of the NewOrder instruction.
#
def build_serum_place_order_instructions(
    context: Context,
    wallet: Wallet,
    market: PySerumMarket,
    source: PublicKey,
    open_orders_address: PublicKey,
    order_type: OrderType,
    side: Side,
    price: Decimal,
    quantity: Decimal,
    client_id: int,
    fee_discount_address: PublicKey,
) -> CombinableInstructions:
    """
    Builds a Serum order-placing instruction (NewOrder V3) on the given
    market, paying from ``source`` and using the ``open_orders_address``
    account.
    """
    # Map our order type onto pyserum's; anything that is not POST_ONLY or
    # IOC is submitted as a plain limit order.
    if order_type == OrderType.POST_ONLY:
        serum_order_type: PySerumOrderType = PySerumOrderType.POST_ONLY
    elif order_type == OrderType.IOC:
        serum_order_type = PySerumOrderType.IOC
    else:
        serum_order_type = PySerumOrderType.LIMIT

    if side == Side.SELL:
        serum_side: PySerumSide = PySerumSide.SELL
    else:
        serum_side = PySerumSide.BUY

    place_order = market.make_place_order_instruction(
        source,
        wallet.keypair,
        serum_order_type,
        serum_side,
        float(price),
        float(quantity),
        client_id,
        open_orders_address,
        fee_discount_address,
    )
    return CombinableInstructions(signers=[], instructions=[place_order])
# # 🥭 build_serum_consume_events_instructions function
#
# Creates an event-consuming 'crank' instruction.
#
def build_serum_consume_events_instructions(
    context: Context,
    market_address: PublicKey,
    event_queue_address: PublicKey,
    open_orders_addresses: typing.Sequence[PublicKey],
    limit: int = 32,
) -> CombinableInstructions:
    """
    Builds a Serum event-consuming 'crank' instruction, processing up to
    ``limit`` events from the market's event queue on behalf of the given
    open-orders accounts.
    """
    writable_accounts = [
        AccountMeta(pubkey=address, is_signer=False, is_writable=True)
        for address in [*open_orders_addresses, market_address, event_queue_address]
    ]
    consume_data = PYSERUM_INSTRUCTIONS_LAYOUT.build(
        dict(
            instruction_type=PySerumInstructionType.CONSUME_EVENTS,
            args=dict(limit=limit),
        )
    )
    instruction = TransactionInstruction(
        keys=writable_accounts,
        program_id=context.serum_program_address,
        data=consume_data,
    )
    # The interface accepts (and currently requires) two trailing accounts,
    # but never reads them -- one throwaway key fills both slots.
    throwaway = Keypair().public_key
    for _ in range(2):
        instruction.keys.append(
            AccountMeta(throwaway, is_signer=False, is_writable=True)
        )
    return CombinableInstructions(signers=[], instructions=[instruction])
# # 🥭 build_serum_settle_instructions function
#
# Creates a 'settle' instruction.
#
def build_serum_settle_instructions(
    context: Context,
    wallet: Wallet,
    market: PySerumMarket,
    open_orders_address: PublicKey,
    base_token_account_address: PublicKey,
    quote_token_account_address: PublicKey,
) -> CombinableInstructions:
    """
    Builds a Serum 'settle funds' instruction, moving any settled balances
    from the market's vaults into the caller's base and quote token accounts.
    """
    # The vault signer is the PDA derived from the market's address and its
    # stored vault_signer_nonce (serialized as a little-endian u64 seed).
    vault_signer = PublicKey.create_program_address(
        [
            bytes(market.state.public_key()),
            market.state.vault_signer_nonce().to_bytes(8, byteorder="little"),
        ],
        market.state.program_id(),
    )
    instruction = pyserum_settle_funds(
        PySerumSettleFundsParams(
            market=market.state.public_key(),
            open_orders=open_orders_address,
            owner=wallet.address,
            base_vault=market.state.base_vault(),
            quote_vault=market.state.quote_vault(),
            base_wallet=base_token_account_address,
            quote_wallet=quote_token_account_address,
            vault_signer=vault_signer,
            program_id=market.state.program_id(),
        )
    )
    return CombinableInstructions(signers=[], instructions=[instruction])
# #
# # 🥭 Spot instruction builders
# #
# # 🥭 build_spot_settle_instructions function
#
# Creates a 'settle' instruction for spot markets.
#
# /// Settle all funds from serum dex open orders
# | |
"""
Contains base level parents that aren't to be used directly.
"""
from twisted.internet.defer import inlineCallbacks, returnValue
from fuzzywuzzy.process import QRatio
from fuzzywuzzy import utils as fuzz_utils
from src.daemons.server.ansi import ANSI_HILITE, ANSI_NORMAL
from src.daemons.server.objects.exceptions import ObjectHasZoneMembers, NoSuchObject
from src.daemons.server.protocols.proxyamp import EmitToObjectCmd
#noinspection PyShadowingBuiltins
class BaseObject(object):
"""
This is the base parent for every in-game "object". Rooms, Players, and
Things are all considered objects. Behaviors here are very low level.
"""
# Holds this object's command table. Any objects inside of this object
# will check this for command matches before the global table.
local_command_table = None
# Same as above, but for admin-only commands.
local_admin_command_table = None
def __init__(self, mud_service, id, parent, name, description=None,
             internal_description=None,
             location_id=None, destination_id=None, zone_id=None,
             aliases=None, originally_controlled_by_account_id=None,
             controlled_by_account_id=None, attributes=None,
             created_time=None):
    """
    :param MudService mud_service: The MudService class running the game.
    :param int id: A unique ID for the object, or None if this is
        a new object (saving then performs an insert).
    :param str parent: The Python path to the parent class for this
        instantiated object.
    :param str name: The object's name.
    :param str description: The object's (externally visible) description.
    :param str internal_description: Presumably the description shown from
        inside the object -- TODO confirm against callers.
    :keyword int location_id: The ID of the object this object resides within.
        None if this object is location-less.
    :keyword int destination_id: Used to determine where an exit leads.
    :keyword int zone_id: Optional zone ID (ID of another BaseObject).
    :keyword list aliases: Alternative names this object answers to.
    :keyword int originally_controlled_by_account_id: Account ID that
        first controlled this object (if it was created in conjunction
        with an account).
    :keyword int controlled_by_account_id: If this object is being controlled
        by an account, this will be populated.
    :keyword dict attributes: The object's core and userspace attribute
        values, as stored in the DB.
    :keyword datetime.datetime created_time: The time the object was
        created.
    """
    self.mud_service = mud_service
    # This mirrors the 'id' field in dott_objects. If this is set to None
    # and the instance is saved, an insert is done.
    self.id = id
    self.name = name
    self.description = description
    self.internal_description = internal_description
    self.parent = parent
    self.location_id = location_id
    self.destination_id = destination_id
    self.zone_id = zone_id
    self.aliases = aliases or []
    self.originally_controlled_by_account_id = originally_controlled_by_account_id
    self.controlled_by_account_id = controlled_by_account_id
    # This stores all of the object's data. This includes core and
    # userspace attributes.
    self._attributes = attributes or {}
    self.created_time = created_time
    assert isinstance(self._attributes, dict)
def __str__(self):
    """Human-readable form: class name, object name, and ID."""
    return "<{0}: {1} (#{2:d})>".format(
        self.__class__.__name__, self.name, self.id)
def __repr__(self):
    """Debug form is identical to the display form."""
    return str(self)
#
## Begin properties.
#
@property
def _object_store(self):
    """
    Convenience accessor for the global object store.

    :rtype: ObjectStore
    :returns: Reference to the global object store instance.
    """
    service = self.mud_service
    return service.object_store
@property
def _command_handler(self):
    """
    Convenience accessor for the global command handler.

    :rtype: CommandHandler
    :returns: Reference to the global command handler instance.
    """
    service = self.mud_service
    return service.command_handler
def _generic_id_to_baseobject_property_getter(self, attrib_name):
    """
    Generic getter for attributes that hold object IDs: resolves the stored
    ID to its ``BaseObject`` instance.

    :rtype: BaseObject or None
    :returns: The instance for the stored ID, or None when the attribute
        is unset.
    :raises: NoSuchObject if the stored object ID has no match in
        the DB.
    """
    obj_id = getattr(self, attrib_name)
    if not obj_id:
        return None
    #noinspection PyTypeChecker
    return self._object_store.get_object(obj_id)
def _generic_baseobject_to_id_property_setter(self, attrib_name, obj_or_id):
    """
    Generic setter for attributes that hold object IDs: stores the ID of
    the given object (or the ID itself) on ``attrib_name``.

    Note: the original docstring said "Sets this object's zone" -- this is
    actually shared by the zone and location setters.

    :param str attrib_name: Name of the ID attribute to set.
    :param obj_or_id: The object (its ID is stored), an object ID, or None
        to clear the attribute.
    :type obj_or_id: A ``BaseObject`` sub-class, an ``int``, or None.
    :raises TypeError: If a string is passed where an object/ID belongs.
    """
    if isinstance(obj_or_id, int):
        # Already an int, assume this is an object ID.
        setattr(self, attrib_name, obj_or_id)
    elif isinstance(obj_or_id, basestring):
        # Strings are always a programming error here; fail loudly with a
        # more precise exception type (still caught by `except Exception`).
        raise TypeError("BaseObject.set_%s() can't accept strings for object IDs: %s" % (
            attrib_name, obj_or_id))
    elif obj_or_id is None:
        setattr(self, attrib_name, None)
    else:
        # Looks like a BaseObject sub-class. Grab the object ID.
        setattr(self, attrib_name, obj_or_id.id)
@property
def attributes(self):
    """
    The object's full attribute dict (core plus userspace values).

    :rtype: dict
    """
    return self._attributes
def get_location(self):
    """
    Resolves this object's stored location ID to an object instance.

    :returns: The ``BaseObject`` sub-class instance this object currently
        resides in. Typically a ``RoomObject``, but can also be other types.
    :raises: NoSuchObject if the stored object ID has no match in
        the DB.
    """
    return self._generic_id_to_baseobject_property_getter('location_id')
def set_location(self, obj_or_id):
    """
    Sets this object's location.

    :param obj_or_id: The object or object ID to set as the
        object's location.
    :type obj_or_id: A ``BaseObject`` sub-class or an ``int``.
    """
    # Rooms never have a location of their own; silently ignore.
    if self.base_type == 'room':
        return
    self._generic_baseobject_to_id_property_setter('location_id', obj_or_id)
location = property(get_location, set_location)
def get_zone(self):
    """
    Resolves this object's stored zone ID to its zone master object.

    :rtype: BaseObject or None
    :returns: The ``BaseObject`` instance (sub-class) that is this object's
        zone master object, or None when no zone is set.
    :raises: NoSuchObject if the stored object ID has no match in
        the DB.
    """
    return self._generic_id_to_baseobject_property_getter('zone_id')
def set_zone(self, obj_or_id):
    """
    Sets this object's zone.

    :param obj_or_id: The object or object ID to set as the
        object's zone master.
    :type obj_or_id: A ``BaseObject`` sub-class or an ``int``.
    """
    self._generic_baseobject_to_id_property_setter('zone_id', obj_or_id)
zone = property(get_zone, set_zone)
#noinspection PyPropertyDefinition
@property
def base_type(self):
    """
    BaseObject's primary sub-classes are Room, Player, Exit, and Thing.
    These are all considered the top-level children, and everything else
    will be children of them. Room, Player, Exit, and Thing are the only
    four valid base types, and each parent should return one of the
    following for quick-and-easy type checking:

    * room
    * player
    * exit
    * thing

    This should only be used for display, never for inheritance checking!
    isinstance and friends are there for that.

    :rtype: str
    """
    raise NotImplementedError('Over-ride in sub-class.')
#
## Begin regular methods.
#
@inlineCallbacks
def save(self):
    """
    Shortcut for saving an object to the object store it's a member of.

    :returns: A Deferred that fires with the saved object.
    """
    saved_obj = yield self._object_store.save_object(self)
    returnValue(saved_obj)
@inlineCallbacks
def destroy(self):
    """
    Destroys the object, cleaning up any exits that link to it first.

    :raises: ObjectHasZoneMembers if other objects still belong to this
        object's zone -- nothing is destroyed in that case.
    """
    # Check the zone-member precondition BEFORE any destructive work.
    # Previously this check ran after linked exits had already been
    # destroyed, leaving the world half-cleaned-up on failure.
    zone_members = self._object_store.find_objects_in_zone(self)
    if zone_members:
        raise ObjectHasZoneMembers(
            "Object has zone members. @zmo/empty first, or use "
            "@zmo/delete instead.")
    # Destroy all exits that were linked to this object.
    # (Renamed loop variable: 'exit' shadowed the builtin.)
    if self.base_type not in ['exit', 'player']:
        for linked_exit in self._object_store.find_exits_linked_to_obj(self):
            yield linked_exit.destroy()
    # Destroys this object, once all cleanup is done.
    yield self._object_store.destroy_object(self)
def execute_command(self, command_string):
    """
    Routes a raw input string through the command handler on behalf of
    this object. The handler parses the input and dispatches it through
    the various command tables.

    :param str command_string: The command to run.
    """
    handled = self._command_handler.handle_input(self, command_string)
    if not handled:
        self.emit_to('Huh?')
def emit_to(self, message):
    """
    Sends a message to any Session objects attached to this object, via
    the proxy AMP connection.

    :param str message: The message to emit.
    """
    assert self.id is not None, "Attempting to emit to an object with no ID."
    self.mud_service.proxyamp.callRemote(
        EmitToObjectCmd, object_id=self.id, message=message)
def emit_to_contents(self, message, exclude=None):
    """
    Emits the given message to everything in this object's inventory.

    :param str message: The message to emit to any object within
        this one.
    :param list exclude: Optional list of ``BaseObject`` instances to be
        excluded from the emit. These objects will not see the message.
        (The old docstring mis-declared this as a single BaseObject.)
    """
    # Use a set of IDs for O(1) membership tests instead of a list.
    if exclude:
        excluded_ids = set(obj.id for obj in exclude)
    else:
        excluded_ids = set()
    for obj in self.get_contents():
        if obj.id not in excluded_ids:
            obj.emit_to(message)
def move_to(self, destination_obj, force_look=True):
    """
    Moves this object to the given destination, firing the before/after
    leave/enter events on both the old and new locations.

    :param BaseObject destination_obj: Where to move this object to.
    :param bool force_look: If True, the object will run the "look"
        command once the move completes.
    """
    old_location_obj = self.location
    #noinspection PyUnresolvedReferences
    old_location_obj.before_object_leaves_event(self)
    destination_obj.before_object_enters_event(self)
    self.set_location(destination_obj)
    # NOTE(review): save() returns a Deferred that is not yielded here, so
    # persistence is fire-and-forget -- confirm this is intentional.
    self.save()
    #noinspection PyUnresolvedReferences
    old_location_obj.after_object_leaves_event(self)
    destination_obj.after_object_enters_event(self)
    if force_look:
        self.execute_command('look')
def is_admin(self):
    """
    Whether this object has administrative powers. Plain objects never do.

    :rtype: bool
    :returns: ``False``
    """
    return False
def get_contents(self):
    """
    Looks up the objects currently located inside this object.

    :rtype: list
    :returns: A list of :class:`BaseObject` instances whose location is
        this object.
    """
    return self._object_store.get_object_contents(self)
#noinspection PyUnusedLocal
def get_description(self, invoker, from_inside=False):
"""
Returns the description of this object.
:param BaseObject invoker: The object asking for the description.
:param bool from_inside: If True, use | |
c.append(c[-1] + x)
a = b = 0
best = 0
while b <= N:
if a == b or c[b] - c[a] <= total / 3:
b += 1
if b > N:
break
else:
a += 1
best = max(best, total - max((c[a], c[b] - c[a], total - c[b])))(
'Case #%d: %.10f' % (testCase, float(best) / float(total)))
return a
def func_5e3caad5f10949b4a7bf6adc141d6507(t, c, total, testCase, N):
    """
    Extends the prefix-sum list ``c`` (which must start as ``[0]``; mutated
    in place) with cumulative sums of ``t``, then runs a two-pointer scan for
    the best three-way split of ``total``: the value kept after discarding
    the largest of the three segment sums.

    :param t: sequence of N values.
    :param c: prefix-sum accumulator, ``[0]`` on entry; mutated in place.
    :param total: sum of t.
    :param testCase: test-case index (unused; kept for interface compatibility).
    :param N: len(t).
    :returns: the best achievable split value.
    """
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    best = 0
    while b <= N:
        if a == b or c[b] - c[a] <= total / 3:
            b += 1
            if b > N:
                break
        else:
            a += 1
        # BUG FIX: the original wrote
        #     best = max(...)('Case #%d: %.10f' % (testCase, ...))
        # i.e. it *called* the integer result of max() with a string argument
        # (a mangled print statement), raising TypeError on every iteration.
        best = max(best, total - max((c[a], c[b] - c[a], total - c[b])))
    return best
def func_4c8e2d5a756b4bd89a16bf0c14a8d348(t, c, total, testCase, N):
    """
    Identical two-pointer split scan over ``t`` / prefix sums ``c`` (mutated
    in place), but returns the final right pointer ``b`` rather than the best
    split value.

    :param t: sequence of N values.
    :param c: prefix-sum accumulator, ``[0]`` on entry; mutated in place.
    :param total: sum of t.
    :param testCase: test-case index (unused; kept for interface compatibility).
    :param N: len(t).
    :returns: the right pointer's final value.
    """
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    best = 0
    while b <= N:
        if a == b or c[b] - c[a] <= total / 3:
            b += 1
            if b > N:
                break
        else:
            a += 1
        # BUG FIX: the original called the integer result of max() with a
        # string argument (a mangled print statement), raising TypeError on
        # every iteration. Keep only the assignment.
        best = max(best, total - max((c[a], c[b] - c[a], total - c[b])))
    return b
# --- Auto-generated variants ------------------------------------------------
# The twelve functions below are machine-generated mutations of one solver.
# Each reads (N, p, q, r, s) from `infile` via `get` (defined elsewhere in
# this file), builds the pseudo-random sequence t, its total and prefix sums
# c, and then returns a different intermediate value; only the final `return`
# differs between them. The quirky returns (loop/comprehension variables) are
# preserved deliberately.
def func_5eab9f7b320e48fd879cb72b2a894a6a(infile):
    # Returns the modulus parameter r.
    N, p, q, r, s = get(infile)
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    return r
def func_becd3957036f485eaa1bf3fe0b369a1b(infile):
    # Returns the element count N.
    N, p, q, r, s = get(infile)
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    return N
def func_a8850c84fc9947f3bbe020eb20ef4b28(infile):
    # Returns the multiplier parameter p.
    N, p, q, r, s = get(infile)
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    return p
def func_fad58e2d2a1b40b58e64c0dcc651ae34(infile):
    # Returns the generated sequence t.
    N, p, q, r, s = get(infile)
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    return t
def func_83b45d1a6b86428388ab34dcd7dab94f(infile):
    # Returns the prefix-sum list c (c[k] == sum(t[:k])).
    N, p, q, r, s = get(infile)
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    return s
def func_e91541b90e734c448d47605c4c8ad2f7(infile):
    # Returns b, which is always 0 here (the scan loop was removed).
    N, p, q, r, s = get(infile)
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    return b
def func_51b7f135f7c8423a934b9f8af08db784(infile):
    # Returns the sum of t.
    N, p, q, r, s = get(infile)
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    return total
def func_0be2e6040ae440b2a66f7d73224c5ac2(infile):
    # NOTE(review): 'x' is the leaked for-loop variable (last element of t);
    # this raises NameError when t is empty.
    N, p, q, r, s = get(infile)
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    return x
def func_84da67faa56a4066be8fe731ca2acb2c(infile):
    # Returns a, which is always 0 here (the scan loop was removed).
    N, p, q, r, s = get(infile)
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    return a
def func_c4dc48a6a011450290ef2effd88d8054(infile):
    # NOTE(review): 'i' is comprehension-scoped in Python 3, so this raises
    # NameError at runtime -- presumably written for Python 2; confirm intent.
    N, p, q, r, s = get(infile)
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    return i
def func_ba5e9694d94745979cf9a9d4ddefa3b4(infile):
    # Returns the offset parameter q.
    N, p, q, r, s = get(infile)
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    return q
# The eight functions below are further machine-generated mutations of the
# same solver, taking (s, q, r, p, N) directly instead of reading them from a
# file. Each builds t, total and prefix sums c, then returns a different
# intermediate value; `best` is always 0 because the scan loop was removed.
# Quirky returns are preserved deliberately.
def func_16c3dbeb6e5344458fe56e245ee9f017(s, q, r, p, N):
    # Returns the prefix-sum list c (c[k] == sum(t[:k])).
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    best = 0
    return c
def func_bbc97581cfc548b3b50c6aa2b0170e5b(s, q, r, p, N):
    # Returns b, always 0 (no scan loop).
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    best = 0
    return b
def func_d83b79de8d0b4c37bcf692919d23b92d(s, q, r, p, N):
    # Returns best, always 0 (no scan loop).
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    best = 0
    return best
def func_60a90e1921214646a2430f0e59c7665c(s, q, r, p, N):
    # Returns the generated sequence t.
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    best = 0
    return t
def func_e0f144d5b12f4802b60c02ceead825a7(s, q, r, p, N):
    # Returns the sum of t.
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    best = 0
    return total
def func_45606bd48326415c8106567d6a266583(s, q, r, p, N):
    # Returns a, always 0 (no scan loop).
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    best = 0
    return a
def func_d5297eaab41943cb9ee1cbefe8756e2d(s, q, r, p, N):
    # NOTE(review): 'i' is comprehension-scoped in Python 3, so this raises
    # NameError at runtime -- presumably written for Python 2; confirm intent.
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    best = 0
    return i
def func_6cff14fcaff04caa94aa1bf66a380dea(s, q, r, p, N):
    # NOTE(review): 'x' is the leaked for-loop variable (last element of t);
    # this raises NameError when t is empty (N == 0).
    t = [((i * p + q) % r + s) for i in range(N)]
    total = sum(t)
    c = [0]
    for x in t:
        c.append(c[-1] + x)
    a = b = 0
    best = 0
    return x
def func_6b63e648c3bb4200b08258fbafff6de8(t, N):
    """
    Runs the two-pointer three-way-split scan over ``t`` (N == len(t)) and
    returns the final position of the left pointer.
    """
    total = sum(t)
    prefix = [0]
    running = 0
    for value in t:
        running += value
        prefix.append(running)
    left = right = 0
    best = 0
    while right <= N:
        if left == right or prefix[right] - prefix[left] <= total / 3:
            right += 1
            if right > N:
                break
        else:
            left += 1
        worst_piece = max(
            prefix[left], prefix[right] - prefix[left], total - prefix[right])
        best = max(best, total - worst_piece)
    return left
def func_287b0305205d43b188fbd35aaa169717(t, N):
    """
    Runs the two-pointer three-way-split scan over ``t`` (N == len(t)) and
    returns the final position of the right pointer.
    """
    total = sum(t)
    prefix = [0]
    running = 0
    for value in t:
        running += value
        prefix.append(running)
    left = right = 0
    best = 0
    while right <= N:
        if left == right or prefix[right] - prefix[left] <= total / 3:
            right += 1
            if right > N:
                break
        else:
            left += 1
        worst_piece = max(
            prefix[left], prefix[right] - prefix[left], total - prefix[right])
        best = max(best, total - worst_piece)
    return right
def func_9dfcee1a2871469691a934a9a1845d37(t, N):
    """
    Runs the two-pointer three-way-split scan over ``t`` (N == len(t)) and
    returns the total of ``t``; the scan itself is executed only for parity
    with the sibling variants.
    """
    total = sum(t)
    prefix = [0]
    running = 0
    for value in t:
        running += value
        prefix.append(running)
    left = right = 0
    best = 0
    while right <= N:
        if left == right or prefix[right] - prefix[left] <= total / 3:
            right += 1
            if right > N:
                break
        else:
            left += 1
        worst_piece = max(
            prefix[left], prefix[right] - prefix[left], total - prefix[right])
        best = max(best, total - worst_piece)
    return total
def func_3b2923ddc1a943cdbabb5144324d0b19(t, N):
total = sum(t)
c = [0]
for x in t:
c.append(c[-1] + x)
a = b = 0
best = 0
while | |
from enum import Enum
import random
import collections
import numpy as np
#
#######################################################################
# Data Types
#######################################################################
#
class DivideRatio(Enum):
DR_8 = ('0', 8.0, '8')
DR_643 = ('1', 64.0/3, '64/3')
# noinspection PyInitNewSignature
def __init__(self, code, value, string):
self._code = code
self._value = value
self._string = string
@property
def code(self):
return self._code
def eval(self):
return self._value
def __str__(self):
return self._string
class _Session(object):
def __init__(self, code, index, string):
self._code = code
self._index = index
self._string = string
@property
def code(self): return self._code
@property
def index(self): return self._index
@property
def string(self): return self._string
def power_on_value(self, interval, persistence, stored_value):
stored_value = stored_value if stored_value is not None \
else InventoryFlag.A
return InventoryFlag.A if interval > persistence else stored_value
def __str__(self):
return self._string
class _Session0(_Session):
def __init__(self):
super().__init__('00', 0, 'S0')
def power_on_value(self, interval, persistence, stored_value):
return InventoryFlag.A
class Session(Enum):
S0 = _Session0()
S1 = _Session('01', 1, 'S1')
S2 = _Session('10', 2, 'S2')
S3 = _Session('11', 3, 'S3')
# noinspection PyInitNewSignature
def __init__(self, session_obj):
assert isinstance(session_obj, _Session)
self.__session__ = session_obj
@property
def code(self):
return self.__session__.code
@property
def index(self):
return self.__session__.index
@property
def string(self):
return self.__session__.string
def power_on_value(self, interval, persistence, stored_value):
return self.__session__.power_on_value(
interval, persistence, stored_value)
def __str__(self):
return self.__session__.__str__()
class TagEncoding(Enum):
FM0 = ('00', 1, "FM0")
M2 = ('01', 2, "M2")
M4 = ('10', 4, "M4")
M8 = ('11', 8, "M8")
# noinspection PyInitNewSignature
def __init__(self, code, symbols_per_bit, string):
self._code = code
self._symbols_per_bit = symbols_per_bit
self._string = string
@property
def code(self):
return self._code
@property
def symbols_per_bit(self):
return self._symbols_per_bit
def __str__(self):
return self._string
@staticmethod
def get(m):
if m == 1:
return TagEncoding.FM0
elif m == 2:
return TagEncoding.M2
elif m == 4:
return TagEncoding.M4
elif m == 8:
return TagEncoding.M8
else:
raise ValueError("m must be 1,2,4 or 8, but {} found".format(m))
class _InvFlag(object):
def __init__(self, value, name, code):
self._value, self._name, self._code = value, name, code
@property
def value(self): return self._value
@property
def code(self): return self._code
@property
def name(self): return self._name
def invert(self): raise NotImplementedError
def __str__(self): return self._name
class _InvFlagA(_InvFlag):
    """Inventoried flag A (value 0, code bit '0'); inverts to B."""
    def __init__(self): super().__init__(0, 'A', '0')
    def invert(self): return InventoryFlag.B
class _InvFlagB(_InvFlag):
    """Inventoried flag B (value 1, code bit '1'); inverts to A."""
    def __init__(self): super().__init__(1, 'B', '1')
    def invert(self): return InventoryFlag.A
class InventoryFlag(Enum):
    """
    Inventoried flag (A/B) wrapping the _InvFlag helper objects.

    NOTE(review): 'value' and 'name' are reserved Enum attributes resolved
    before __getattr__ is consulted, so InventoryFlag.A.value likely yields
    the wrapped _InvFlagA instance rather than its numeric value, and .name
    the member name 'A' -- confirm this is the intended behaviour.  'invert'
    and 'code' do reach __getattr__ and delegate as expected.
    """
    A = _InvFlagA()
    B = _InvFlagB()
    # noinspection PyInitNewSignature
    def __init__(self, obj):
        self._obj = obj
    def __getattr__(self, item):
        # Delegate selected attribute lookups to the wrapped flag object.
        if item in {'value', 'name', 'invert', 'code'}:
            return getattr(self._obj, item)
        else:
            raise AttributeError
    def __str__(self):
        return self._obj.__str__()
class _SelFlag(object):
def __init__(self, code, string):
self._code = code
self._string = string
@property
def code(self): return self._code
def __str__(self): return self._string
def match(self, flag):
raise NotImplementedError
class _SelAll(_SelFlag):
    """Sel value 'ALL' (code '00'): matches any tag regardless of SL flag."""
    def __init__(self): super().__init__('00', 'ALL')
    def match(self, flag): return True
class _SelTrue(_SelFlag):
    """Sel value 'SL' (code '11'): matches tags whose SL flag is asserted."""
    def __init__(self): super().__init__('11', 'SL')
    def match(self, flag): return flag
class _SelFalse(_SelFlag):
    """Sel value '~SL' (code '10'): matches tags whose SL flag is deasserted."""
    def __init__(self): super().__init__('10', '~SL')
    def match(self, flag): return not flag
class SelFlag(Enum):
    """Sel field values for Query commands, delegating to _SelFlag objects."""
    ALL = _SelAll()
    NOT_SEL = _SelFalse()
    SEL = _SelTrue()
    # noinspection PyInitNewSignature
    def __init__(self, sel_obj):
        assert isinstance(sel_obj, _SelFlag)
        self.__sel__ = sel_obj
    @property
    def code(self): return self.__sel__.code
    def __str__(self): return self.__sel__.__str__()
    # True when a tag with SL flag `flag` is selected by this Sel value.
    def match(self, flag): return self.__sel__.match(flag)
class MemoryBank(Enum):
    """Tag memory banks with their 2-bit command-field codes."""
    RESERVED = ('00', 'Reserved')
    EPC = ('01', 'EPC')
    TID = ('10', 'TID')
    USER = ('11', 'User')

    # noinspection PyInitNewSignature
    def __init__(self, code, string):
        self._code, self._string = code, string

    @property
    def code(self):
        """2-bit bank selector used when encoding Read commands."""
        return self._code

    def __str__(self):
        return self._string
class CommandCode(Enum):
    """Reader command codes: variable-length bit-string prefixes plus names."""
    QUERY = ('1000', 'Query')
    QUERY_REP = ('00', 'QueryRep')
    ACK = ('01', 'ACK')
    REQ_RN = ('11000001', 'Req_RN')
    READ = ('11000010', 'Read')

    # noinspection PyInitNewSignature
    def __init__(self, code, string):
        self._code, self._string = code, string

    @property
    def code(self):
        """Bit-string prefix identifying the command on the air interface."""
        return self._code

    def __str__(self):
        return self._string
class TempRange(Enum):
    """Tag operating temperature range: nominal or extended."""
    NOMINAL = (False, "nominal")
    EXTENDED = (True, "extended")

    # noinspection PyInitNewSignature
    def __init__(self, extended, string):
        self._extended, self._string = extended, string

    @property
    def extended(self):
        """True when the tag is rated for the extended temperature range."""
        return self._extended

    def __str__(self):
        return self._string
#
#######################################################################
# Default system-wide Reader Parameters
#######################################################################
#
class StdParams:
    """
    Default system-wide reader parameters.

    Used as fall-back values by the command constructors below when a
    field is not supplied explicitly by the caller.
    """
    # Reader-to-tag preamble timing parameters (seconds)
    tari = 6.25e-6
    rtcal = 1.5625e-05
    trcal = 3.125e-05
    delim = 12.5e-6
    Q = 4  # slot-count parameter for inventory rounds
    divide_ratio = DivideRatio.DR_8
    tag_encoding = TagEncoding.FM0
    sel = SelFlag.ALL
    session = Session.S0
    target = InventoryFlag.A
    trext = False  # no extended pilot tone by default
    read_default_bank = MemoryBank.TID
    read_default_word_ptr = 0
    read_default_word_count = 4  # FIXME: check this!
    temp_range = TempRange.NOMINAL
    access_ops = []  # this list contains reader commands for tag access
    default_epc = "FF" * 12  # placeholder 96-bit EPC (12 bytes)
    default_read_data = "FF" * 8
    default_rn = 0x0000
    default_crc5 = 0x00
    default_crc16 = 0x0000
# Shared module-level instance consulted by the command constructors.
stdParams = StdParams()
#
#######################################################################
# Tag Operations
#######################################################################
#
class TagOp:
    """Base marker class for tag access operations."""
    pass
class TagReadOp(TagOp):
    """
    Tag Read operation: read `word_count` 16-bit words starting at
    `word_ptr` within memory bank `bank`.
    """
    # Class-level defaults; instances may be given per-operation values.
    bank = MemoryBank.TID
    word_ptr = 0
    word_count = 0
    def __init__(self):
        super().__init__()
#
#######################################################################
# API for encoding basic types
#######################################################################
#
def encode_bool(value):
    """Encode a truth value as a one-character bit string ('1' or '0')."""
    return str(int(bool(value)))
def encode_int(value, n_bits):
    """
    Encode an integer as a zero-padded binary string of width n_bits.

    Values outside [0, 2**n_bits) wrap modulo 2**n_bits, so negative
    values yield their two's-complement bit pattern.
    """
    mask = (1 << n_bits) - 1
    return format(value & mask, '0{}b'.format(n_bits))
def encode_word(value):
    """Encode a 16-bit word as a binary string (wraps modulo 2**16)."""
    return "{:016b}".format(value % 0x10000)
def encode_byte(value):
    """Encode an 8-bit byte as a binary string (wraps modulo 256)."""
    return "{:08b}".format(value % 0x100)
def encode_ebv(value, first_block=True):
    """
    Encode an integer in EBV-8 (Extended Bit Vector) format.

    Seven data bits per 8-bit block, most significant block first; the
    leading "extension" bit is 1 on every block except the final one of a
    top-level (first_block=True) encoding.
    """
    if value >= 128:
        # Emit the higher-order blocks first (always with the extension bit
        # set), then the low 7 bits with this level's extension bit.
        high_blocks = encode_ebv(value >> 7, first_block=False)
        low_block = encode_ebv(value % 128, first_block=first_block)
        return high_blocks + low_block
    extension_bit = '0' if first_block else '1'
    return extension_bit + format(value, '07b')
#
#######################################################################
# Commands
#######################################################################
#
class Command:
    """
    Base class for reader commands.

    Stores the command code; subclasses implement encode() to produce the
    command's bit string.
    """

    def __init__(self, code):
        super().__init__()
        self._code = code

    @property
    def code(self):
        """The command code supplied at construction."""
        return self._code

    def encode(self):
        """Return the command as a bit string; must be overridden."""
        raise NotImplementedError

    @property
    def bitlen(self):
        """Number of bits in the encoded command."""
        return len(self.encode())
class Query(Command):
    """
    Query command: starts a new inventory round.

    Any parameter left as None falls back to the corresponding stdParams
    default: dr (divide ratio), m (tag encoding), trext (pilot tone flag),
    sel, session, target (inventoried flag), q (slot-count exponent) and
    crc (5-bit CRC value).
    """
    def __init__(self, dr=None, m=None, trext=None, sel=None, session=None,
                 target=None, q=None, crc=None):
        super().__init__(CommandCode.QUERY)
        self.dr = dr if dr is not None else stdParams.divide_ratio
        self.m = m if m is not None else stdParams.tag_encoding
        self.trext = trext if trext is not None else stdParams.trext
        self.sel = sel if sel is not None else stdParams.sel
        self.session = session if session is not None else stdParams.session
        self.target = target if target is not None else stdParams.target
        self.q = q if q is not None else stdParams.Q
        self.crc = crc if crc is not None else stdParams.default_crc5
    def encode(self):
        # Field order: code, DR, M, TRext, Sel, Session, Target,
        # Q (4 bits), CRC-5.
        return (self.code.code + self.dr.code + self.m.code +
                encode_bool(self.trext) + self.sel.code + self.session.code +
                self.target.code + encode_int(self.q, 4) +
                encode_int(self.crc, 5))
    def __str__(self):
        return "{o.code}{{DR({o.dr}),{o.m},TRext({trext}),{o.sel}," \
               "{o.session},{o.target},Q({o.q}),CRC(0x{o.crc:02X})}}" \
               "".format(o=self, trext=(1 if self.trext else 0))
class QueryRep(Command):
    """QueryRep command: advances the inventory round in the given session."""

    def __init__(self, session=None):
        super().__init__(CommandCode.QUERY_REP)
        if session is None:
            session = stdParams.session
        self.session = session

    def encode(self):
        # 2-bit command code followed by the 2-bit session code.
        return "".join((self.code.code, self.session.code))

    def __str__(self):
        return "{o.code}{{{o.session}}}".format(o=self)
class Ack(Command):
    """
    ACK command: acknowledges a tag by echoing its 16-bit RN16.

    rn: RN16 to echo; defaults to stdParams.default_rn when omitted.
    Previously `rn` was a required positional argument even though the
    body already handled None and every sibling command (QueryRep, ReqRN,
    Read) defaults its parameters -- made optional for consistency; all
    existing callers passing `rn` are unaffected.
    """
    def __init__(self, rn=None):
        super().__init__(CommandCode.ACK)
        self.rn = rn if rn is not None else stdParams.default_rn
    def encode(self):
        # 2-bit command code followed by the 16-bit RN.
        return self.code.code + encode_int(self.rn, 16)
    def __str__(self):
        return "{o.code}{{0x{o.rn:04X}}}".format(o=self)
class ReqRN(Command):
    """Req_RN command: requests a new handle, carrying an RN and a CRC-16."""

    def __init__(self, rn=None, crc=None):
        super().__init__(CommandCode.REQ_RN)
        if rn is None:
            rn = stdParams.default_rn
        if crc is None:
            crc = stdParams.default_crc16
        self.rn = rn
        self.crc = crc

    def encode(self):
        # Command code, 16-bit RN, 16-bit CRC.
        parts = (self.code.code, encode_word(self.rn), encode_word(self.crc))
        return "".join(parts)

    def __str__(self):
        return "{o.code}{{RN(0x{o.rn:04X}),CRC(0x{o.crc:04X})}}".format(o=self)
class Read(Command):
    """
    Read command: read `word_count` words starting at `word_ptr` within
    memory bank `bank`, authenticated by `rn` and `crc`.  Parameters left
    as None fall back to the stdParams defaults.
    """
    def __init__(self, bank=None, word_ptr=None, word_count=None,
                 rn=None, crc=None):
        super().__init__(CommandCode.READ)
        self.bank = (bank if bank is not None
                     else stdParams.read_default_bank)
        self.word_ptr = (word_ptr if word_ptr is not None
                         else stdParams.read_default_word_ptr)
        self.word_count = (word_count if word_count is not None
                           else stdParams.read_default_word_count)
        self.rn = rn if rn is not None else stdParams.default_rn
        self.crc = crc if crc is not None else stdParams.default_crc16
    def encode(self):
        # Code, 2-bit bank, EBV-encoded word pointer, 8-bit count,
        # 16-bit RN, 16-bit CRC.
        return (self.code.code + self.bank.code + encode_ebv(self.word_ptr) +
                encode_byte(self.word_count) + encode_word(self.rn) +
                encode_word(self.crc))
    def __str__(self):
        return "{o.code}{{{o.bank},WordPtr(0x{o.word_ptr:02X})," \
               "WordCount({o.word_count}),RN(0x{o.rn:04X})," \
               "CRC(0x{o.crc:04X})}}".format(o=self)
#
#######################################################################
# Tag replies
#######################################################################
#
class ReplyType(Enum):
    """Discriminator for the kind of tag reply a Reply object represents."""
    QUERY_REPLY = 0
    ACK_REPLY = 1
    REQRN_REPLY = 2
    READ_REPLY = 3
class Reply:
    """
    Base class for tag replies.

    Stores the reply-type tag; subclasses provide the concrete bitlen.
    """

    def __init__(self, reply_type):
        super().__init__()
        self.__reply_type = reply_type

    @property
    def bitlen(self):
        """Number of bits in the reply; must be overridden."""
        raise NotImplementedError()

    @property
    def reply_type(self):
        """The ReplyType tag supplied at construction."""
        return self.__reply_type
class QueryReply(Reply):
    """Tag reply to Query/QueryRep: a bare 16-bit RN16."""
    def __init__(self, rn=0x0000):
        super().__init__(ReplyType.QUERY_REPLY)
        self.rn = rn
    @property
    def bitlen(self):
        # RN16 only
        return 16
    def __str__(self):
        return "Reply(0x{o.rn:04X})".format(o=self)
def to_bytes(value):
    """
    Normalize *value* to a list of byte values (ints in [0, 255]).

    value may be a hex string (e.g. "AABB") or an iterable of ints.

    Raises ValueError for malformed hex strings, non-int or out-of-range
    elements, and unsupported types.
    """
    # Local import: collections.Iterable was removed in Python 3.10; the
    # ABCs have lived in collections.abc since 3.3.
    from collections.abc import Iterable
    if isinstance(value, str):
        return list(bytearray.fromhex(value))
    elif isinstance(value, Iterable):
        value = list(value)
        for b in value:
            if not isinstance(b, int) or not (0 <= b < 256):
                raise ValueError("each array element must represent a byte")
        return value
    else:
        raise ValueError("value must be a hex string or bytes collections")
class AckReply(Reply):
    """Tag reply to ACK: PC word, EPC bytes and CRC-16."""
    def __init__(self, epc="", pc=0x0000, crc=0x0000):
        super().__init__(ReplyType.ACK_REPLY)
        self._data = to_bytes(epc)  # EPC held as a list of byte values
        self.pc = pc
        self.crc = crc
    @property
    def bitlen(self):
        # 16-bit PC + 16-bit CRC + 8 bits per EPC byte
        return 32 + len(self._data) * 8
    @property
    def epc(self):
        return self._data
    def get_epc_string(self, byte_separator=""):
        """EPC as upper-case hex, bytes joined by `byte_separator`."""
        return byte_separator.join("{:02X}".format(b) for b in self._data)
    def __str__(self):
        return "Reply{{PC(0x{o.pc:04X}),EPC({epc})," \
               "CRC(0x{o.crc:04X})}}".format(
                o=self, epc=self.get_epc_string())
class ReqRnReply(Reply):
    """Tag reply to Req_RN: a new 16-bit handle plus CRC-16."""
    def __init__(self, rn=0x0000, crc=0x0000):
        super().__init__(ReplyType.REQRN_REPLY)
        self.rn = rn
        self.crc = crc
    @property
    def bitlen(self):
        # 16-bit RN + 16-bit CRC
        return 32
    def __str__(self):
        return "Reply{{RN(0x{o.rn:04X}),CRC(0x{o.crc:04X})}}".format(o=self)
class ReadReply(Reply):
    """Tag reply to Read: header bit, memory bytes, handle RN and CRC-16."""
    def __init__(self, data="", rn=0x0000, crc=0x0000, header=False):
        super().__init__(ReplyType.READ_REPLY)
        self.rn = rn
        self.crc = crc
        self.header = header
        self._data = to_bytes(data)  # memory content as a list of byte values
    @property
    def memory(self):
        return self._data
    def get_memory_string(self, byte_separator=""):
        """Memory as upper-case hex, bytes joined by `byte_separator`."""
        return byte_separator.join("{:02X}".format(b) for b in self._data)
    @property
    def bitlen(self):
        # 1 header bit + 16-bit RN + 16-bit CRC + 8 bits per memory byte
        return 33 + len(self.memory) * 8
    def __str__(self):
        return "Reply{{Header({header}),Memory({data}),RN(0x{o.rn:04X})," \
               "CRC(0x{o.crc:04X})}}".format(
                header=int(self.header), data=self.get_memory_string(), o=self)
#
#######################################################################
# Preambles and frames
#######################################################################
#
class ReaderSync:
DELIM | |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, empress development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
def remove_empty_samples_and_features(table, sample_metadata, ordination=None):
    """Removes empty samples and features from the table and sample metadata.
    This should be called *after* matching the table with the sample metadata
    and other input artifacts: we assume that the columns of the table
    DataFrame are equivalent to the indices of the sample metadata DataFrame.
    Parameters
    ----------
    table: biom.Table
        Representation of a feature table.
    sample_metadata: pd.DataFrame
        Sample metadata. The index should describe sample IDs, and the columns
        should describe sample metadata fields (e.g. "body site").
    ordination: skbio.OrdinationResults, optional
        Ordination information to show in Emperor alongside Empress. If this is
        passed, this function will check to see if any of the empty samples
        or features to be removed from the table are included in the
        ordination; if so, this will raise an error (because these empty
        items shouldn't be in the ordination in the first place).
    Returns
    -------
    filtered_table: biom.Table
        Copy of the input feature table with empty samples and features
        removed.
    filtered_sample_metadata: pd.DataFrame
        Copy of the input sample metadata with empty samples removed.
    Raises
    ------
    ValueError
        - If the input table is completely empty (i.e. all zeroes).
        - If ordination is not None, and the ordination contains empty samples
          or features.
    References
    ----------
    - Adapted from qurro._df_utils.remove_empty_samples_and_features().
    """
    orig_tbl_samples = set(table.ids())
    orig_tbl_features = set(table.ids(axis='observation'))
    # this code is equivalent to the PR below, we should update once that gets
    # merged and a newer BIOM release is publicly available
    # https://github.com/biocore/biom-format/pull/847
    filtered_table = table.copy()
    # NOTE: both keep-masks are computed from the *original* table, so the
    # (unordered) iteration over the two axes cannot affect the result:
    # dropping an all-zero sample never changes a feature's total, and
    # vice versa.
    for ax in {'observation', 'sample'}:
        filtered_table = filtered_table.filter(
            table.ids(axis=ax)[table.sum(axis=ax) > 0], axis=ax,
            inplace=False)
    if filtered_table.is_empty():
        raise ValueError("All samples / features in matched table are empty.")
    # Let user know about which samples/features may have been dropped, if any.
    # Also, if we dropped any empty samples, update the sample metadata.
    filtered_sample_metadata = sample_metadata
    sample_diff = orig_tbl_samples - set(filtered_table.ids())
    if sample_diff:
        if ordination is not None:
            empty_samples_in_ord = sample_diff & set(ordination.samples.index)
            if empty_samples_in_ord:
                raise ValueError(
                    (
                        "The ordination contains samples that are empty (i.e. "
                        "all 0s) in the table. Problematic sample IDs: {}"
                    ).format(", ".join(sorted(empty_samples_in_ord)))
                )
        # Keep metadata rows aligned with (and ordered like) the surviving
        # table samples.
        filtered_sample_metadata = filtered_sample_metadata.loc[
            filtered_table.ids()
        ]
        print("Removed {} empty sample(s).".format(len(sample_diff)))
    feature_diff = orig_tbl_features - \
        set(filtered_table.ids(axis='observation'))
    if feature_diff:
        if ordination is not None and ordination.features is not None:
            empty_feats_in_ord = feature_diff & set(ordination.features.index)
            if empty_feats_in_ord:
                raise ValueError(
                    (
                        "The ordination contains features that are empty "
                        "(i.e. all 0s) in the table. Problematic feature IDs: "
                        "{}"
                    ).format(", ".join(sorted(empty_feats_in_ord)))
                )
        print("Removed {} empty feature(s).".format(len(feature_diff)))
    return filtered_table, filtered_sample_metadata
def compress_table(table):
    """Converts a feature table to a space-saving format.
    Parameters
    ----------
    table: biom.Table
        Representation of a feature table. It is assumed that empty samples /
        features have already been removed from the table.
    Returns
    -------
    (s_ids, f_ids, s_ids_to_indices, f_ids_to_indices, compressed_table)
        s_ids: list
            List of the sample IDs in the table.
        f_ids: list
            List of the feature IDs in the table, analogous to s_ids.
        s_ids_to_indices: dict
            Inverse of s_ids: this maps sample IDs to their indices in s_ids.
            "Indices" refers to a feature or sample's 0-based position in f_ids
            or s_ids, respectively.
        f_ids_to_indices: dict
            Inverse of f_ids: this maps feature IDs to their indices in f_ids,
            analogous to s_ids_to_indices.
        compressed_table: list
            Two-dimensional list. The "outer list" is of length len(s_ids).
            Each position i within this outer list holds an "inner list" of
            arbitrary (but within the range [1, len(f_ids)]) length.
            The i-th inner list contains the feature indices of the
            features present (i.e. at any abundance > 0) within the
            sample with index i. Each inner list is sorted in ascending order.
    References
    ----------
    - Inspired by redbiom and Qurro's JSON data models.
    """
    feature_ids = table.ids(axis='observation')
    sample_ids = table.ids()
    f_ids_to_indices = {fid: idx for idx, fid in enumerate(feature_ids)}
    s_ids_to_indices = {sid: idx for idx, sid in enumerate(sample_ids)}
    compressed_table = []
    for vec in table.iter_data(axis='sample', dense=False):
        # Sort explicitly: sparse vectors do not guarantee that .indices is
        # in ascending order, but the documented contract above promises
        # sorted inner lists (a no-op when indices are already sorted).
        compressed_table.append(sorted(int(i) for i in vec.indices))
    return (
        list(sample_ids), list(feature_ids), s_ids_to_indices,
        f_ids_to_indices, compressed_table
    )
def compress_sample_metadata(s_ids_to_indices, metadata):
    """Converts a sample metadata DataFrame to a space-saving format.
    Parameters
    ----------
    s_ids_to_indices: dict
        Maps sample IDs (strings) to 0-based indices in an existing list of
        sample IDs; in practice, the "s_ids_to_indices" output of
        compress_table().
    metadata: pd.DataFrame
        Sample metadata, indexed by sample ID.  The index must match the
        keys of s_ids_to_indices one-to-one.
    Returns
    -------
    (metadata_columns, metadata_vals)
        metadata_columns: list
            Sample metadata column names, converted to strings.
        metadata_vals: list
            Two-dimensional list whose i-th inner list holds the
            (stringified) metadata values for the sample with index i,
            ordered as in metadata_columns.
    Raises
    ------
    ValueError
        - If the metadata index and the s_ids_to_indices keys are not the
          exact same set of IDs.
        - If the s_ids_to_indices values are not exactly 0..n-1.
    References
    ----------
    - Inspired by redbiom and Qurro's JSON data models.
    """
    n_samples = len(s_ids_to_indices)
    # NOTE: identically-named samples or metadata columns would confound this
    # check, but input sanity is a responsibility of earlier stages.
    if set(s_ids_to_indices) != set(metadata.index):
        raise ValueError(
            "The sample IDs in the metadata's index and s_ids_to_indices are "
            "not identical."
        )
    if sorted(s_ids_to_indices.values()) != list(range(n_samples)):
        raise ValueError("Indices (values) of s_ids_to_indices are invalid.")
    # Re-key rows by sample index, order them 0..n-1, then stringify every
    # value so the output is JSON-friendly.
    stringified = (
        metadata.rename(index=s_ids_to_indices)
                .sort_index(axis="index", ascending=True)
                .astype(str)
    )
    column_names = [str(c) for c in stringified.columns]
    # 2-D list of row values, one inner list per sample index.
    return column_names, stringified.values.tolist()
def compress_feature_metadata(tip_metadata, int_metadata):
"""Converts tip/internal node metadata DataFrames to dicts to save space.
This is a pretty early optimization -- ideally we would use 2-D lists as
our final metadata structure, similar to the table / sample metadata
compression. This should be revisited when the tree data node-name
revamping has been merged in.
Parameters
----------
tip_metadata: pd.DataFrame or None
Metadata for tip nodes. If not None, the index should describe node
names, and the columns should describe feature metadata fields.
int_metadata: pd.DataFrame or None
Metadata for internal nodes. If not None, the index should describe
node names, and the columns should describe feature metadata fields.
Note that the columns of tip_metadata and int_metadata should be identical,
even if the feature metadata only describes tip or internal nodes. (In that
case, then the other feature metadata parameter should still be a DataFrame
-- albeit an empty one, with no feature names in its index.) The only case
in which the parameters should be None is if there was no feature metadata
at all.
Returns
-------
(metadata_columns, compressed_tip_metadata, compressed_int_metadata)
metadata_columns: list
List of the feature metadata column names, all converted to
strings. If both input DFs are None, this will be {}.
compressed_tip_metadata: dict
Maps node names in tip_metadata to | |
"""
Utility functions to support entity data testing
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2014, <NAME>"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import os
from utils.py3porting import urljoin
from django.conf import settings
from django.http import QueryDict
from django.utils.http import urlquote, urlunquote
from django.core.urlresolvers import resolve, reverse
from annalist.util import valid_id, extract_entity_id
from annalist.identifiers import RDF, RDFS, ANNAL
from annalist import layout
from annalist import message
from annalist.views.uri_builder import uri_base, uri_params, uri_with_params
from annalist.views.fields.render_placement import (
get_placement_classes
)
from .tests import (
TestHost, TestHostUri, TestBasePath, TestBaseUri, TestBaseDir
)
from .entity_testutils import (
collection_dir,
entitydata_list_url_query,
site_view_url,
collection_view_url,
collection_edit_url,
collection_entity_view_url,
site_title,
context_field_row
)
from .entity_testfielddesc import get_field_description, get_bound_field
# -----------------------------------------------------------------------------
#
# Helper functions
#
# -----------------------------------------------------------------------------
def value_or_default(value, default):
    """
    Return `value` if it is not None, otherwise return `default`.
    """
    return default if value is None else value
# -----------------------------------------------------------------------------
#
# Directory generating functions
#
# -----------------------------------------------------------------------------
# Each entity type has its own data directory within a collection:
def recorddata_dir(coll_id="testcoll", type_id="testtype"):
    """Return the file-system directory holding a type's entity data records."""
    return collection_dir(coll_id) + layout.COLL_TYPEDATA_PATH%{'id': type_id} + "/"
def entitydata_dir(coll_id="testcoll", type_id="testtype", entity_id="testentity"):
    """Return the file-system directory holding a single entity's data."""
    return recorddata_dir(coll_id, type_id) + layout.TYPEDATA_ENTITY_PATH%{'id': entity_id} + "/"
# -----------------------------------------------------------------------------
#
# URI generating functions
#
# -----------------------------------------------------------------------------
# These all use the Django `reverse` function so they correspond to
# the declared URI patterns.
def entity_uriref(coll_id="testcoll", type_id="testtype", entity_id="entity_id"):
    """
    URI for entity data with trailing slash removed.  Used as entity identifier in data.
    """
    uri = entity_url(coll_id=coll_id, type_id=type_id, entity_id=entity_id)
    if uri.endswith("/"):
        uri = uri[0:-1]         # Strip trailing "/"
    return uri
def entity_url(coll_id="testcoll", type_id="testtype", entity_id="entity_id"):
    """
    URI for entity data; also view using default entity view
    """
    # Invalid entity ids are mapped to the "___" placeholder segment.
    if not valid_id(entity_id):
        entity_id = "___"
    url = collection_entity_view_url(coll_id=coll_id, type_id=type_id, entity_id=entity_id)
    return url
def entity_resource_url(
    coll_id="testcoll", type_id="testtype", entity_id="entity_id",
    resource_ref=layout.ENTITY_DATA_FILE
    ):
    """
    URI for entity resource data, resolving `resource_ref` (default: the
    entity data file) relative to the entity's base URL.
    """
    base = entity_url(coll_id, type_id, entity_id)
    return urljoin(base, resource_ref)
def entitydata_edit_url(action=None, coll_id="testcoll", type_id=None, entity_id=None, view_id=None):
    """
    Return the URL for an entity new/edit/view form.

    The Django view name is selected from the action ("new" vs other) and
    whether an explicit view id is supplied; only the keyword arguments
    actually provided are passed to `reverse`.
    """
    if action == "new":
        viewname = 'AnnalistEntityNewView'
    elif action and view_id:
        viewname = 'AnnalistEntityEditView'
    elif view_id:
        viewname = 'AnnalistEntityDataView'
    else:
        viewname = 'AnnalistEntityDefaultDataView'
    kwargs = {'coll_id': coll_id}
    if action:
        kwargs['action'] = action
    if view_id:
        kwargs['view_id'] = view_id
    if type_id:
        kwargs['type_id'] = type_id
    if entity_id:
        kwargs['entity_id'] = entity_id
    return reverse(viewname, kwargs=kwargs)
def entitydata_list_all_url(
    coll_id="testcoll", list_id=None,
    scope=None, continuation_url=None, query_params=None):
    """
    Return URL for listing entities of all types in a collection, using the
    supplied list id if given, otherwise the default list-all view.  Scope
    and continuation URL are passed through as query parameters.
    """
    if list_id:
        viewname = "AnnalistEntityGenericList"
        kwargs = {'list_id': list_id, 'coll_id': coll_id}
    else:
        viewname = "AnnalistEntityDefaultListAll"
        kwargs = {'coll_id': coll_id}
    return entitydata_list_url_query(viewname, kwargs,
        query_params,
        { 'scope': scope
        , 'continuation_url': continuation_url
        })
def entitydata_list_type_url(
    coll_id="testcoll", type_id="testtype", list_id=None,
    scope=None, continuation_url=None, query_params=None):
    """
    Return URL for listing entities of a single type in a collection, using
    the supplied list id if given, otherwise the default per-type list view.
    Scope and continuation URL are passed through as query parameters.
    """
    if list_id:
        viewname = "AnnalistEntityGenericList"
        kwargs = {'list_id': list_id, 'coll_id': coll_id, 'type_id': type_id}
    else:
        viewname = "AnnalistEntityDefaultListType"
        kwargs = {'coll_id': coll_id, 'type_id': type_id}
    return entitydata_list_url_query(viewname, kwargs,
        query_params,
        { 'scope': scope
        , 'continuation_url': continuation_url
        })
def entitydata_delete_confirm_url(coll_id="testcoll", type_id="testtype"):
    """Return URL used to confirm deletion of entity data of a given type."""
    kwargs = {'coll_id': coll_id, 'type_id': type_id}
    return reverse("AnnalistEntityDataDeleteView", kwargs=kwargs)
# -----------------------------------------------------------------------------
#
# ----- Entity data
#
# -----------------------------------------------------------------------------
# The following all return some arbitrary entity data, but corresponding to
# the same entity at different stages of the processing (initial values,
# stored values, Django view context data and Django form data).
def entitydata_type(type_id):
    """
    Returns type URI/CURIE/ref for indicated type id.

    Built-in annalist types map to their specific CURIEs; anything else is
    generic entity data.
    """
    builtin_type_curies = (
        { "_type":  "annal:Type"
        , "_list":  "annal:List"
        , "_view":  "annal:View"
        , "_field": "annal:Field"
        })
    return builtin_type_curies.get(type_id, "annal:EntityData")
def entitydata_value_keys(entity_uri=False):
    """
    Keys in default view entity data.

    entity_uri  if True, the result additionally includes 'annal:uri'.
    """
    keys = (
        [ '@type'
        , 'annal:id', 'annal:type_id'
        , 'annal:type'
        , 'annal:url'
        , 'rdfs:label', 'rdfs:comment'
        ])
    if entity_uri:
        # Was `keys.add(...)`: lists have no .add, so entity_uri=True always
        # raised AttributeError.  Use append to keep the list result type.
        keys.append('annal:uri')
    return keys
def entitydata_load_keys(entity_uri=False):
    """
    Keys in entity data as loaded from storage: value keys plus the JSON-LD
    framing keys.

    NOTE(review): this calls `recordview_value_keys`, which is not defined or
    imported in the code visible here -- possibly a copy/paste from the
    record-view test module; `entitydata_value_keys` may have been intended.
    Confirm against the module imports before changing.
    """
    return (
        recordview_value_keys(entity_uri=entity_uri) |
        {"@id", '@type', '@context'}
        )
def entitydata_create_values(
    entity_id, update="Entity", coll_id="testcoll", type_id="testtype",
    entity_uri=None, type_uri=None, supertype_uris=None,
    hosturi=TestHostUri,
    extra_fields=None):
    """
    Data used when creating entity test data
    """
    # Build the '@type' list: an explicit type URI (plus any supertypes)
    # if given, otherwise the collection's type record URL; the generic
    # type CURIE is always appended.
    if type_uri is not None:
        types = [type_uri, entitydata_type(type_id)]
        if supertype_uris is not None:
            types = types + supertype_uris
    else:
        types = [entity_url(coll_id, "_type", type_id), entitydata_type(type_id)]
    # log.info('entitydata_create_values: types %r'%(types,))
    # NOTE(review): the label/comment strings hard-code "testcoll" rather
    # than interpolating coll_id -- presumably intentional for this test
    # fixture, but confirm if reusing with other collection ids.
    d = (
        { '@type': types
        , 'annal:type': types[0]
        , 'rdfs:label': '%s testcoll/%s/%s'%(update, type_id, entity_id)
        , 'rdfs:comment': '%s coll testcoll, type %s, entity %s'%(update, type_id, entity_id)
        })
    if entity_uri:
        d['annal:uri'] = entity_uri
    if extra_fields:
        d.update(extra_fields)
    return d
def entitydata_values_add_field(data, property_uri, dup_index, value):
    """
    Add a field value to an entity value dictionary, de-duplicating the key.

    If property_uri is already present, the value is stored under
    "<property_uri>__<dup_index>" instead.  The supplied dictionary is
    updated in place and also returned.
    """
    key = property_uri
    if key in data:
        key = "%s__%d" % (property_uri, dup_index)
    data[key] = value
    return data
def entitydata_values(
    entity_id, update="Entity",
    coll_id="testcoll", type_id="testtype",
    entity_uri=None,
    type_uri=None, supertype_uris=None,
    hosturi=TestHostUri
    ):
    """
    Returns entity values as stored: the create-values augmented with the
    JSON-LD framing keys and storage-assigned id/type/url keys.
    """
    # type_uri = entity_url(coll_id, "_type", type_id)
    dataurl = entity_url(coll_id, type_id, entity_id)
    d = entitydata_create_values(
        entity_id, update=update, coll_id=coll_id, type_id=type_id,
        entity_uri=entity_uri, type_uri=type_uri, supertype_uris=supertype_uris,
        hosturi=hosturi
        ).copy() #@@ copy needed here?
    d.update(
        { '@id': "%s/%s"%(type_id, entity_id)
        , '@context': [{"@base": "../../"}, "../../coll_context.jsonld"]
        , 'annal:id': entity_id
        , 'annal:type_id': type_id
        , 'annal:url': dataurl
        })
    # log.info("entitydata_values %r"%(d,))
    return d
# -----------------------------------------------------------------------------
#
# ----- Entity in specified view context data
#
# -----------------------------------------------------------------------------
def specified_view_context_data(
    coll_id="testcoll", type_id="testtype",
    view_id="Default_view", view_heading="Default record view",
    entity_id=None, orig_id=None,
    type_ref=None, type_choices=None, type_ids=[],
    entity_label=None, entity_descr=None,
    record_type="annal:EntityData",
    view_fields=None,
    update="Entity",
    action=None,
    continuation_url=None
    ):
    """
    Returns view context test data for entity presented using specified view
    """
    # Default label/description depend on whether an entity id is supplied.
    if entity_id:
        entity_label = value_or_default(entity_label,
            '%s (%s/%s/%s)'%(update, coll_id, type_id, entity_id)
            )
        entity_descr = value_or_default(entity_descr,
            '%s coll %s, type %s, entity %s'%(update, coll_id, type_id, entity_id)
            )
    else:
        entity_label = value_or_default(entity_label,
            '%s (%s/%s/)'%(update, coll_id, type_id)
            )
        entity_descr = value_or_default(entity_descr,
            '%s coll %s, type %s, no entity id'%(update, coll_id, type_id)
            )
    continuation_url = value_or_default(continuation_url,
        entitydata_list_type_url(coll_id, type_id)
        )
    view_title = (
        "%s - %s - Collection %s"%(entity_label, view_heading, coll_id) if entity_label
        else
        "%s - Collection %s"%(view_heading, coll_id)
        )
    # Default field layout: id/type on one row, then label, then comment.
    if view_fields is None:
        view_fields = (
            [ context_field_row(
                get_bound_field("Entity_id", entity_id),  # 0 (0,0)
                get_bound_field("Entity_type", type_ref, options=type_choices)  # 1 (0,1)
                )
            , context_field_row(
                get_bound_field("Entity_label", entity_label)  # 2 (1,0)
                )
            , context_field_row(
                get_bound_field("Entity_comment", entity_descr)  # 3 (2,0)
                )
            ])
    context_dict = (
        { 'title': view_title
        , 'heading': view_heading
        , 'coll_id': coll_id
        , 'type_id': type_id
        , 'view_id': view_id
        , 'entity_id': entity_id
        , 'orig_type': type_id
        , 'record_type': record_type
        , 'fields': view_fields
        , 'continuation_url': continuation_url
        })
    # 'orig_id' defaults to the entity id except when creating a new entity.
    if orig_id is not None:
        context_dict['orig_id'] = orig_id
    elif entity_id and action != "new":
        context_dict['orig_id'] = entity_id
    if action:
        context_dict['action'] = action
    return context_dict
# -----------------------------------------------------------------------------
#
# ----- Entity data in Default_view
#
# -----------------------------------------------------------------------------
def default_view_context_data(
    entity_id=None, orig_id=None,
    coll_id="testcoll", type_id="testtype",
    type_ref=None, type_choices=None, type_ids=[],
    entity_label=None, entity_descr=None,
    view_label="Default record view",
    record_type="annal:EntityData",
    update="Entity",
    action=None,
    continuation_url=None
    ):
    """
    Returns view context test data for entity presented using default view
    """
    # Default type reference, label and description depend on whether an
    # entity id is supplied.
    if entity_id:
        type_ref = value_or_default(type_ref,
            "_type/"+type_id if valid_id(type_id) else None
            )
        entity_label = value_or_default(entity_label,
            '%s %s/%s/%s'%(update, coll_id, type_id, entity_id)
            )
        entity_descr = value_or_default(entity_descr,
            '%s coll %s, type %s, entity %s'%(update, coll_id, type_id, entity_id)
            )
    else:
        type_ref = type_ref or ""
        entity_label = value_or_default(entity_label,
            '%s data ... (%s/%s)'%(update, coll_id, type_id)
            )
        entity_descr = value_or_default(entity_descr,
            '%s description ... (%s/%s)'%(update, coll_id, type_id)
            )
    continuation_url = value_or_default(continuation_url,
        entitydata_list_type_url("testcoll", type_id)
        )
    view_title = (
        "%s - %s - Collection %s"%(entity_label, view_label, coll_id) if entity_label
        else
        "%s - Collection %s"%(view_label, coll_id)
        )
    view_fields = (
        [ context_field_row(
            get_bound_field("Entity_id", entity_id),  # 0 (0,0)
            get_bound_field("Entity_type", type_ref, options=type_choices)  # 1 (0,1)
            )
        , context_field_row(
            get_bound_field("Entity_label", entity_label)  # 2 (1,0)
            )
        , context_field_row(
            get_bound_field("Entity_comment", entity_descr)  # 3 (2,0)
            )
        ])
    context_dict = (
        { 'title': view_title
        , 'heading': view_label
        , 'coll_id': coll_id
        , 'type_id': type_id
        # NOTE(review): 'orig_id' is seeded unconditionally here (possibly as
        # None), unlike sibling specified_view_context_data which omits the
        # key; the `if orig_id:` below then repeats the assignment.  Confirm
        # which behaviour downstream assertions rely on before unifying.
        , 'orig_id': orig_id
        , 'record_type': record_type
        , 'fields': view_fields
        , 'continuation_url': continuation_url
        })
    if orig_id:
        context_dict['orig_id'] = orig_id
    elif entity_id and action != "new":
        context_dict['orig_id'] = entity_id
    if action:
        context_dict['action'] = action
    return context_dict
def entitydata_context_add_field(
        context_dict, field_id, dup_index, field_value,
        field_name='Entity_comment',
        field_label='Comment',
        field_render_type='Markdown',
        field_value_mode='Value_direct',
        field_value_type='annal:Richtext',
        field_placement='small:0,12',
        field_options=None
        ):
    """
    Add field value to context; if duplicate then reformat appropriately.
    Field details default to Entity_comment.

    When `field_id` already appears in the context, the field name is
    suffixed with "__<dup_index>" so duplicated fields remain distinct.

    Updates and returns the supplied context dictionary.
    """
    #@@TODO: use field context from entity_testfielddesc
    # Use a fresh list per call: the previous mutable default argument ([])
    # was a single shared object, stored by reference into every context.
    if field_options is None:
        field_options = []
    field_ids = [ f['field_id'] for f in context_dict['fields'] ]
    if field_id in field_ids:
        suffix = "__%d"%dup_index
    else:
        suffix = ""
    context_dict['fields'].append(
        { 'field_id':          field_id
        , 'field_value':       field_value
        , 'field_name':        field_name+suffix
        , 'field_label':       field_label
        , 'field_render_type': field_render_type
        , 'field_value_mode':  field_value_mode
        , 'field_value_type':  field_value_type
        , 'field_placement':   get_placement_classes(field_placement)
        , 'options':           field_options
        })
    return context_dict
def default_view_form_data(
coll_id="testcoll", orig_coll=None,
type_id="testtype", orig_type=None,
entity_id=None, orig_id=None,
action=None, cancel=None, close=None, | |
atom.residue
chain_id = residue.OBResidue.GetChain()
fea_dict['chain_id'] = chain_id
residue_name = residue.name
fea_dict['residue_name'] = residue_name
residue_num = residue.OBResidue.GetNum()
fea_dict['residue_num'] = residue_num
fea_dict['weight'] = 1.0
mba_coor += [fea_dict]
return mba_coor
def draw_ligand(self, m_ligand, PF_dict, output_name, size):
    """Render a grid image of the ligand, one panel per pharmacophoric
    feature type, highlighting the atoms matched by each feature."""
    feature_names = self.feature_type_list
    highlight_lists = list()
    for feature in feature_names:
        highlighted = list()
        for match in PF_dict[feature]:
            # match[0] holds 1-based atom indices; RDKit expects 0-based.
            for atom_idx in match[0]:
                highlighted.append(atom_idx - 1)
        highlight_lists.append(highlighted)
    mol_flat = Chem.RemoveHs(m_ligand)
    # Drop the 3D conformer so the grid is drawn from 2D coordinates.
    mol_flat.RemoveConformer(0)
    grid = Draw.MolsToGridImage([mol_flat] * 6, legends=feature_names,
                                highlightAtomLists=highlight_lists,
                                subImgSize=size, molsPerRow=2)
    grid.save(output_name)
def box_protein(self, PF_coor_protein, cmin, cmax):
    """
    select PF of PF_coor_protein in the box
    input:
        PF_coor_protein: dict
        cmin : np.array, lower corner of the box
        cmax : np.array, upper corner of the box
    output:
        PF_coor_box: dict with only the features whose pseudo-atom
        coordinate lies inside [cmin, cmax] on every axis
    """
    PF_coor_box = dict()
    for feature in self.feature_type_list + ['Metal']:
        if feature not in PF_coor_protein:
            continue
        # Key is created even if every candidate is filtered out below.
        kept = list()
        for candidate in PF_coor_protein[feature]:
            coor = candidate['pseudo_atom_coor']
            inside = not ((coor > cmax).any() or (coor < cmin).any())
            if inside:
                kept.append(candidate)
        PF_coor_box[feature] = kept
    return PF_coor_box
def find_template(self, PF_coor_protein, template_ligand_file, include_hydrophobic=False):
    """Count, per receptor feature type and atom group, how many times that
    receptor group interacts with the template ligand (first model only).
    Hydrophobic receptor features are skipped unless include_hydrophobic."""
    receptor_types = self.feature_type_list + ['Metal']
    template_dict = dict()
    for rtype in receptor_types:
        if rtype == 'Hydrophobic' and not include_hydrophobic:
            continue
        template_dict[rtype] = dict()
    ext = template_ligand_file.split('.')[-1]
    models = list(pybel.readfile(ext, template_ligand_file))
    first_model = models[0]
    bond_dict = self.get_bond_info(first_model)
    PF_dict_ligand = self.find_PF(first_model, bond_dict)
    num_model = len(models)
    if num_model > 1:
        num_model = num_model - 1
    PF_coor_ligand_dict = dict()
    interaction_dict = dict()
    model_idx = 0
    mol = models[model_idx]
    PF_coor_ligand = self.find_PF_coor(mol, PF_dict_ligand, bond_dict)
    # Metal-binding-atom pseudo-features are added alongside the regular ones.
    PF_coor_ligand['MBA'] = self.find_mba(mol, is_protein=False)
    PF_coor_ligand_dict[model_idx] = PF_coor_ligand
    interaction_dict[model_idx] = self.find_interaction(
        PF_coor_protein, PF_coor_ligand, bond_dict)
    rec_pf_dict = self.count_interaction_receptor(interaction_dict)[0]
    for rtype in rec_pf_dict.keys():
        if rtype == 'Hydrophobic' and not include_hydrophobic:
            continue
        counts = template_dict[rtype]
        for atom_idx_group in rec_pf_dict[rtype].keys():
            counts[atom_idx_group] = counts.get(atom_idx_group, 0) + 1
    return template_dict
def select_template_protein(self, PF_coor_protein, template_dict,
                            cut_num=0):
    """
    select PF of PF_coor_protein in template
    input:
        PF_coor_protein: dict
        template_dict: dict, feature type -> {atom_idx_group: count}
        cut_num: minimum template count for a group to be kept
    output:
        PF_coor_info: dict
    """
    PF_coor_info = dict()
    for feature in self.feature_type_list + ['Metal']:
        # Only feature types present in both inputs produce an entry.
        if feature not in PF_coor_protein or feature not in template_dict:
            continue
        group_counts = template_dict[feature]
        kept = list()
        for candidate in PF_coor_protein[feature]:
            group = candidate['atom_idx_group']
            if group not in group_counts:
                continue
            if group_counts[group] < cut_num:
                continue
            kept.append(candidate)
        PF_coor_info[feature] = kept
    return PF_coor_info
def find_interaction(self, PF_coor_protein, PF_coor_ligand,
                     ligand_bond_dict):
    """
    Find pharmacophoric interactions between protein and ligand features.

    input:
        PF_coor_protein: dict, feature type -> list of feature dicts
            (each with 'atom_idx_group', 'pattern_idx', 'pseudo_atom_coor',
            chain/residue info and 'weight')
        PF_coor_ligand: dict, same layout for the ligand, plus 'MBA'
            (metal-binding atom) entries
        ligand_bond_dict: dict, ligand atom index -> bonded atom indices
            (used to prune redundant metal contacts)
    output:
        interaction: dict, ligand feature type -> protein feature type ->
            list of interaction record dicts
    """
    type_list = self.feature_type_list + ['MBA']
    # Collect every ligand pseudo-atom coordinate to build a bounding box
    # around the ligand.
    coor_ligand_list = list()
    for fea_ligand in type_list:
        if fea_ligand not in PF_coor_ligand:
            continue
        fea_dict_list_ligand = PF_coor_ligand[fea_ligand]
        for fea_dict_ligand in fea_dict_list_ligand:
            coor_ligand = fea_dict_ligand['pseudo_atom_coor']
            coor_ligand_list.append(coor_ligand)
    coor_ligand_list = np.array(coor_ligand_list)
    # Box padded by the interaction cutoff on every side; protein features
    # outside it can never pair with any ligand feature.
    cmin = coor_ligand_list.min(axis=0) - self.cutoff
    cmax = coor_ligand_list.max(axis=0) + self.cutoff
    PF_coor_protein_box = self.box_protein(PF_coor_protein, cmin, cmax)
    interaction = dict()
    for fea_ligand in type_list:
        if fea_ligand not in interaction:
            interaction[fea_ligand] = dict()
        if fea_ligand not in PF_coor_ligand:
            continue
        fea_dict_list_ligand = PF_coor_ligand[fea_ligand]
        # Complementary protein feature types and their pair-specific
        # distance cutoffs for this ligand feature type.
        cfea_list = self.complementary_feature[fea_ligand]
        for fea_dict_ligand in fea_dict_list_ligand:
            idx_ligand = fea_dict_ligand['atom_idx_group']
            pattern_ligand = fea_dict_ligand['pattern_idx']
            coor_ligand = fea_dict_ligand['pseudo_atom_coor']
            for cfea_t in cfea_list:
                fea_protein = cfea_t[0]
                if fea_protein not in interaction[fea_ligand]:
                    interaction[fea_ligand][fea_protein] = list()
                pair_cutoff = cfea_t[1]
                if fea_protein not in PF_coor_protein_box:
                    continue
                fea_dict_list_protein = PF_coor_protein_box[fea_protein]
                for fea_dict_protein in fea_dict_list_protein:
                    idx_protein = fea_dict_protein['atom_idx_group']
                    pattern_protein = fea_dict_protein['pattern_idx']
                    coor_protein = fea_dict_protein['pseudo_atom_coor']
                    # Ligand-to-protein vector and distance.
                    r_lp = coor_protein - coor_ligand
                    dist_lp = np.linalg.norm(r_lp)
                    # Distance filter uses the pair-specific cutoff, not the
                    # global self.cutoff used for the bounding box.
                    if dist_lp > pair_cutoff:
                        continue
                    interact_ij = dict()
                    interact_ij['idx_ligand'] = idx_ligand
                    interact_ij['idx_protein'] = idx_protein
                    interact_ij['pattern_ligand'] = pattern_ligand
                    interact_ij['pattern_protein'] = pattern_protein
                    interact_ij['dist_lp'] = dist_lp
                    interact_ij['chain_id'] = fea_dict_protein['chain_id']
                    interact_ij['residue_name'] = fea_dict_protein['residue_name']
                    interact_ij['residue_num'] = fea_dict_protein['residue_num']
                    interact_ij['weight'] = fea_dict_protein['weight']
                    if fea_ligand == 'HBD' and fea_protein == 'HBA':
                        # Hydrogen bond, ligand as donor: angle check via the
                        # donor hydrogen closest to the acceptor.
                        num_neighbor_atom = fea_dict_ligand['num_neighbor_atom']
                        donor_hydrogen_list = fea_dict_ligand['h_atoms']
                        if num_neighbor_atom <= 1:
                            theta = 0
                            interact_ij['theta'] = theta
                        else:
                            dist_ah = 100.0
                            for donor_hydrogen in donor_hydrogen_list:
                                hydrogen_coor0 = donor_hydrogen[1]
                                dist_ah0 = np.linalg.norm(
                                    hydrogen_coor0 - coor_protein)
                                if dist_ah0 < dist_ah:
                                    dist_ah = dist_ah0
                                    hydrogen_coor = hydrogen_coor0
                            # NOTE(review): if 'h_atoms' were empty here,
                            # hydrogen_coor would be unbound (NameError);
                            # presumably num_neighbor_atom > 1 guarantees at
                            # least one hydrogen — confirm upstream.
                            dist_dh = np.linalg.norm(
                                hydrogen_coor - coor_ligand)
                            theta = cal_angle_from_points(
                                dist_lp, dist_dh, dist_ah)
                            # Require a reasonably linear D-H...A geometry
                            # (theta >= 0.6*pi, i.e. >= 108 degrees).
                            if theta < 0.6*np.pi:
                                continue
                            interact_ij['theta'] = theta
                    elif fea_ligand == 'HBA' and fea_protein == 'HBD':
                        # Hydrogen bond, protein as donor: mirror of the
                        # branch above with donor/acceptor roles swapped.
                        num_neighbor_atom = fea_dict_protein['num_neighbor_atom']
                        donor_hydrogen_list = fea_dict_protein['h_atoms']
                        if num_neighbor_atom <= 1:
                            theta = 0
                            interact_ij['theta'] = theta
                        else:
                            dist_ah = 100.0
                            for donor_hydrogen in donor_hydrogen_list:
                                hydrogen_coor0 = donor_hydrogen[1]
                                dist_ah0 = np.linalg.norm(
                                    hydrogen_coor0 - coor_ligand)
                                if dist_ah0 < dist_ah:
                                    dist_ah = dist_ah0
                                    hydrogen_coor = hydrogen_coor0
                            dist_dh = np.linalg.norm(
                                hydrogen_coor - coor_protein)
                            theta = cal_angle_from_points(
                                dist_lp, dist_dh, dist_ah)
                            if theta < 0.6*np.pi:
                                continue
                            interact_ij['theta'] = theta
                    elif fea_ligand == 'Aromatic' and fea_protein == 'Cation':
                        # Cation-pi: the angle between the ring normal and the
                        # ring-to-cation direction must be <= pi/4 (folded
                        # into [0, pi/2] first).
                        n_vector_ligand = fea_dict_ligand['v_n']
                        n_vector_lp = r_lp/dist_lp
                        theta = np.abs(cal_angle_from_vectors(
                            n_vector_ligand, n_vector_lp))
                        if theta > np.pi/2:
                            theta = np.pi-theta
                        if theta > 0.5*np.pi/2:
                            continue
                        interact_ij['theta'] = theta
                    elif fea_ligand == 'Cation' and fea_protein == 'Aromatic':
                        # Same cation-pi test with the ring on the protein side.
                        n_vector_protein = fea_dict_protein['v_n']
                        n_vector_lp = r_lp/dist_lp
                        theta = np.abs(cal_angle_from_vectors(
                            n_vector_protein, n_vector_lp))
                        if theta > np.pi/2:
                            theta = np.pi-theta
                        if theta > 0.5*np.pi/2:
                            continue
                        interact_ij['theta'] = theta
                    elif fea_ligand == 'Aromatic' and fea_protein == 'Aromatic':
                        # Ring-ring stacking: keep near-parallel (< 0.2*pi) or
                        # near-perpendicular (> 0.4*pi) orientations, reject
                        # the intermediate range.
                        n_vector_ligand = fea_dict_ligand['v_n']
                        n_vector_protein = fea_dict_protein['v_n']
                        n_vector_lp = r_lp/dist_lp
                        theta = cal_angle_from_vectors(
                            n_vector_ligand, n_vector_protein)
                        if theta > np.pi/2:
                            theta = np.pi-theta
                        interact_ij['theta'] = theta
                        # alpha: tilt of the ring-ring axis vs the ligand ring
                        # normal; stored but not used as a filter here.
                        alpha = np.abs(cal_angle_from_vectors(
                            n_vector_ligand, n_vector_lp))
                        if theta > 0.4*np.pi/2 and theta < 0.8*np.pi/2:
                            continue
                        interact_ij['alpha'] = alpha
                    elif fea_ligand == 'MBA' and fea_protein == 'Metal':
                        # Metal coordination uses per-pattern distance cutoffs;
                        # unknown pattern pairs are rejected outright.
                        if pattern_ligand not in self.metal_interaction_cutoff:
                            continue
                        if pattern_protein not in self.metal_interaction_cutoff[pattern_ligand]:
                            continue
                        metal_cutoff = self.metal_interaction_cutoff[
                            pattern_ligand][pattern_protein]
                        if dist_lp > metal_cutoff:
                            continue
                    interaction[fea_ligand][fea_protein] += [interact_ij]
    # Prune metal contacts: walk them nearest-first and keep at most one
    # binding atom per bonded neighbourhood of the ligand.
    if 'MBA' in interaction:
        if 'Metal' in interaction['MBA']:
            metal_interaction = interaction['MBA']['Metal']
            d_list = list()
            d_list = np.array([x['dist_lp'] for x in metal_interaction])
            idx_sorted = np.argsort(d_list)
            metal_interaction_new = list()
            batom_idx = list()
            for idx in idx_sorted:
                interact_ij = metal_interaction[idx]
                idx_ligand = interact_ij['idx_ligand'][0]
                neighbor_atoms_idx = ligand_bond_dict[idx_ligand]
                # Drop this contact if any bonded neighbour was already kept.
                neighbor_check = False
                for neighbor_atom_idx in neighbor_atoms_idx:
                    if neighbor_atom_idx in batom_idx:
                        neighbor_check = True
                if not neighbor_check:
                    metal_interaction_new += [interact_ij]
                    batom_idx += [idx_ligand]
            interaction['MBA']['Metal'] = metal_interaction_new
    return interaction
def write_PF(self, PF_coor_dict, out_file, is_protein=False):
    """
    write pharmacophoric feature
    input:
        PF_coor_dict: dict, model index -> {feature type: [feature dicts]}
        out_file: filename, str
        is_protein: bool; adds Metal features and residue/weight columns
    """
    type_order = self.feature_type_list + (['Metal'] if is_protein else ['MBA'])
    header = 'feature_type:atom_idx_group:pattern_idx:pseudo_atom_coor'
    header += ':atom_type:etc'
    if is_protein:
        header += ':chain_id:residue_name:residue_num:weight'
    with open(out_file, 'w') as handle:
        handle.write(header + '\n')
        model_ids = PF_coor_dict.keys()
        multi_model = len(model_ids) > 1
        for model_id in model_ids:
            if multi_model:
                handle.write('MODEL %d\n' % (model_id + 1))
            features = PF_coor_dict[model_id]
            for ftype in type_order:
                if ftype not in features:
                    continue
                for fea in features[ftype]:
                    parts = ['%s' % ftype]
                    parts.append('(%s)' % ','.join(
                        '%d' % i for i in fea['atom_idx_group']))
                    parts.append('%s' % (fea['pattern_idx'],))
                    coor = fea['pseudo_atom_coor']
                    parts.append('(%.3f,%.3f,%.3f)' % (
                        coor[0], coor[1], coor[2]))
                    parts.append('%s' % (fea['atom_type'],))
                    # The "etc" column is a ';'-joined hydrogen list, a ring
                    # normal vector, or empty.
                    if 'h_atoms' in fea:
                        hparts = []
                        if 'num_neighbor_atom' in fea:
                            hparts.append('%d' % fea['num_neighbor_atom'])
                        for hatom in fea['h_atoms']:
                            hparts.append('%d=(%.3f,%.3f,%.3f)' % (
                                hatom[0], hatom[1][0],
                                hatom[1][1], hatom[1][2]))
                        parts.append(';'.join(hparts))
                    elif 'v_n' in fea:
                        normal = fea['v_n']
                        parts.append('(%.3f,%.3f,%.3f)' % (
                            normal[0], normal[1], normal[2]))
                    else:
                        parts.append('')
                    if is_protein:
                        parts.append('%s' % (fea['chain_id'],))
                        parts.append('%s' % (fea['residue_name'],))
                        parts.append('%d' % fea['residue_num'])
                        parts.append('%.3f' % fea['weight'])
                    handle.write(':'.join(parts) + '\n')
def read_PF(self, feature_file, is_protein=False):
PF_coor_dict = dict()
fp = open(feature_file)
lines = fp.readlines()
fp.close()
model_idx = 0
PF_coor_dict[model_idx] = dict()
for line in lines:
if line.startswith('feature_type'):
# title = line.strip().split(':')
continue
if line.startswith('MODEL'):
model_idx = int(line.strip().split()[1]) - 1
fea_dict = dict()
if model_idx not in PF_coor_dict:
PF_coor_dict[model_idx] = dict()
continue
lis = line.strip().split(':')
if len(lis) < 1:
continue
fea_dict = dict()
feature_type = lis[0]
atom_idx_group = lis[1]
pattern_idx = lis[2]
pseudo_atom_coor = lis[3]
atom_type = lis[4]
etc = lis[5]
if feature_type not in PF_coor_dict[model_idx]:
PF_coor_dict[model_idx][feature_type] = list()
fea_dict['feature_type'] = feature_type
aline = atom_idx_group.lstrip('(').rstrip(')')
fea_dict['atom_idx_group'] = np.array(aline.split(','), dtype=int)
fea_dict['pattern_idx'] = int(pattern_idx)
coor = pseudo_atom_coor.lstrip('(').rstrip(')').split(',')
fea_dict['pseudo_atom_coor'] = np.array(coor, dtype=np.float32)
fea_dict['atom_type'] = | |
CAN_DECREASE_CPU_WHEN_RUNNING = 'CAN_DECREASE_CPU_WHEN_RUNNING'
CAN_SNAPSHOT_WHEN_RUNNING = 'CAN_SNAPSHOT_WHEN_RUNNING'
CAN_DETACH_ISO_WHEN_RUNNING = 'CAN_DETACH_ISO_WHEN_RUNNING'
CAN_ATTACH_NIC_WHEN_RUNNING = 'CAN_ATTACH_NIC_WHEN_RUNNING'
CAN_ATTACH_DISK_WHEN_RUNNING = 'CAN_ATTACH_DISK_WHEN_RUNNING'
CAN_RESIZE_DISK_WHEN_RUNNING = 'CAN_RESIZE_DISK_WHEN_RUNNING'
CAN_ATTACH_ISO_WHEN_RUNNING = 'CAN_ATTACH_ISO_WHEN_RUNNING'
CAN_INCREASE_RAM_WHEN_RUNNING = 'CAN_INCREASE_RAM_WHEN_RUNNING'
CAN_DECREASE_RAM_WHEN_RUNNING = 'CAN_DECREASE_RAM_WHEN_RUNNING'
CAN_INCREASE_CPU_WHEN_RUNNING = 'CAN_INCREASE_CPU_WHEN_RUNNING'
CAN_DETACH_DISK_WHEN_RUNNING = 'CAN_DETACH_DISK_WHEN_RUNNING'
CAN_REVERT_WHEN_RUNNING = 'CAN_REVERT_WHEN_RUNNING'
class Snapshotting(PrintableEnum):
    """FCO REST API Snapshotting enum.

    The SystemCapability Snapshotting enum enumerates the different
    snapshotting capabilities of a system or cluster.

    DISK: Disk snapshotting is permitted
    SERVER: Server snapshotting is permitted
    """
    DISK = 'DISK'
    SERVER = 'SERVER'
class TransactionType(PrintableEnum):
    """FCO REST API TransactionType enum.

    The TransactionType enum enumerates the available types of
    transaction.

    CREDIT: A credit transaction
    DEBIT: A debit transaction
    """
    CREDIT = 'CREDIT'
    DEBIT = 'DEBIT'
class EmulatedDevices(PrintableEnum):
    """FCO REST API EmulatedDevices enum.

    The SystemCapability EmulatedDevices enum indicates whether the
    underlying cluster allows users to enable/disable emulated devices
    on server start-up.

    ALLOW_DISABLED: Cluster emulated devices are set to be disabled
    ALLOW_ENABLED: Cluster emulated devices are set to be enabled
    ALLOW_ANY: Cluster allows emulated devices to be either enabled or
        disabled
    """
    ALLOW_DISABLED = 'ALLOW_DISABLED'
    ALLOW_ENABLED = 'ALLOW_ENABLED'
    ALLOW_ANY = 'ALLOW_ANY'
class Condition(PrintableEnum):
    """FCO REST API Condition enum.

    The Condition enum specifies a condition used within a SearchFilter.
    String conditions parse the FQL field concerned as a string.

    NOT_ENDS_WITH: true if the field does not end with the supplied value
    IS_EQUAL_TO: true if the field equals one of the values supplied as
        an array
    LATER_THAN: true if the field is later than the supplied value
    CONTAINS: true if the field contains the supplied value
    IS_GREATER_THAN: true if the field is greater than the supplied value
    IS_LESS_THAN_OR_EQUAL_TO: true if the field is less than or equal to
        the supplied value
    NOT_BETWEEN: true if the field does not lie between the two supplied
        values (i.e. is less than the first or greater than the second)
    NOT_CONTAINS: true if the field does not contain the supplied value
    IS_LESS_THAN: true if the field is less than the supplied value
    IS_NOT_EQUAL_TO: true if the field is not equal to any of the values
        supplied as an array
    BETWEEN: true if the field lies between the two supplied values
        (i.e. is greater than or equal to the first and less than or
        equal to the second)
    IS_GREATER_THAN_OR_EQUAL_TO: true if the field is greater than or
        equal to the supplied value
    NOT_STARTS_WITH: true if the field does not start with the supplied
        value
    ENDS_WITH: true if the field ends with the supplied value
    STARTS_WITH: true if the field starts with the supplied value
    EARLIER_THAN: true if the field is earlier than the supplied value
    """
    NOT_ENDS_WITH = 'NOT_ENDS_WITH'
    IS_EQUAL_TO = 'IS_EQUAL_TO'
    LATER_THAN = 'LATER_THAN'
    CONTAINS = 'CONTAINS'
    IS_GREATER_THAN = 'IS_GREATER_THAN'
    IS_LESS_THAN_OR_EQUAL_TO = 'IS_LESS_THAN_OR_EQUAL_TO'
    NOT_BETWEEN = 'NOT_BETWEEN'
    NOT_CONTAINS = 'NOT_CONTAINS'
    IS_LESS_THAN = 'IS_LESS_THAN'
    IS_NOT_EQUAL_TO = 'IS_NOT_EQUAL_TO'
    BETWEEN = 'BETWEEN'
    IS_GREATER_THAN_OR_EQUAL_TO = 'IS_GREATER_THAN_OR_EQUAL_TO'
    NOT_STARTS_WITH = 'NOT_STARTS_WITH'
    ENDS_WITH = 'ENDS_WITH'
    STARTS_WITH = 'STARTS_WITH'
    EARLIER_THAN = 'EARLIER_THAN'
class InvocationLevel(PrintableEnum):
    """FCO REST API InvocationLevel enum.

    The InvocationLevel enum value represents the type of authentication
    that is required to invoke a FDL function.

    CUSTOMER: the function can only be invoked through the User API
    BE: the function can only be invoked through the Admin API
    OPEN: the function can only be invoked through the Open API
    MBE: the function can only be invoked through the Admin API by the MBE
    """
    CUSTOMER = 'CUSTOMER'
    BE = 'BE'
    OPEN = 'OPEN'
    MBE = 'MBE'
class ReportChartType(PrintableEnum):
    """FCO REST API ReportChartType enum.

    An enum value representing the type of report chart.

    COLUMN: A column chart
    LINE: A line chart
    BAR: A bar chart
    PIE: A pie chart
    AREA: An area chart
    """
    COLUMN = 'COLUMN'
    LINE = 'LINE'
    BAR = 'BAR'
    PIE = 'PIE'
    AREA = 'AREA'
class Limits(PrintableEnum):
    """FCO REST API Limits enum.

    The Limits enum enumerates the resource limits set on a customer.

    REFUND: boolean flag which controls refund generation
    MAX_IPv6_SUBNETS: maximum allowed IPv6 subnets for the customer
    NO_3DS_28_DAY_SPEND_LIMIT: maximum amount billed permissible in a
        28 day period without 3DS security checks
    MAX_SUBNETS: maximum number of subnets permitted
    CUTOFF_LIMIT: currency value at which currency customers are cut off
    OVERALL_28_DAY_SPEND_LIMIT: maximum amount billed permissible in a
        28 day period overall
    MAX_IPv4_ADDRESSES: maximum allowed IPv4 addresses for the customer
    MAX_DISKS: maximum number of disks permitted
    MAX_IMAGES: maximum number of images permitted
    MAX_VDCS: maximum number of VDCs permitted
    MAX_NETWORK_PRIVATE: maximum allowed private networks for the customer
    CREDIT_LIMIT_DUE_DAYS: credit invoice due days limit
    CUTOFF_BALANCE: unit balance level at which customers should be cut off
    MAX_SNAPSHOTS: maximum number of snapshots permitted
    CREDIT_LIMIT: maximum outstanding credit balance permitted
    MAX_SERVERS: maximum number of servers permitted
    MAX_CPUS: maximum number of CPUs permitted across all servers
    MAX_BLOBS: maximum number of blobs allowed
    MAX_CUSTOMER_USERS: maximum number of contacts/users permitted
    MAX_VLANS: maximum number of VLANs permitted per VDC
    CUTOFF_DUE_DAYS: cut-off invoice due days limit
    MAX_BLOB_SIZE: maximum size of blobs allowed
    MAX_STORAGEGB: maximum amount of storage (in GB) permitted across all
        servers
    MAX_RAM: maximum amount of RAM permitted across all servers
    MAX_NETWORK_PUBLIC: maximum allowed public networks for the customer
    """
    REFUND = 'REFUND'
    MAX_IPv6_SUBNETS = 'MAX_IPv6_SUBNETS'
    NO_3DS_28_DAY_SPEND_LIMIT = 'NO_3DS_28_DAY_SPEND_LIMIT'
    MAX_SUBNETS = 'MAX_SUBNETS'
    CUTOFF_LIMIT = 'CUTOFF_LIMIT'
    OVERALL_28_DAY_SPEND_LIMIT = 'OVERALL_28_DAY_SPEND_LIMIT'
    MAX_IPv4_ADDRESSES = 'MAX_IPv4_ADDRESSES'
    MAX_DISKS = 'MAX_DISKS'
    MAX_IMAGES = 'MAX_IMAGES'
    MAX_VDCS = 'MAX_VDCS'
    MAX_NETWORK_PRIVATE = 'MAX_NETWORK_PRIVATE'
    CREDIT_LIMIT_DUE_DAYS = 'CREDIT_LIMIT_DUE_DAYS'
    CUTOFF_BALANCE = 'CUTOFF_BALANCE'
    MAX_SNAPSHOTS = 'MAX_SNAPSHOTS'
    CREDIT_LIMIT = 'CREDIT_LIMIT'
    MAX_SERVERS = 'MAX_SERVERS'
    MAX_CPUS = 'MAX_CPUS'
    MAX_BLOBS = 'MAX_BLOBS'
    MAX_CUSTOMER_USERS = 'MAX_CUSTOMER_USERS'
    MAX_VLANS = 'MAX_VLANS'
    CUTOFF_DUE_DAYS = 'CUTOFF_DUE_DAYS'
    MAX_BLOB_SIZE = 'MAX_BLOB_SIZE'
    MAX_STORAGEGB = 'MAX_STORAGEGB'
    MAX_RAM = 'MAX_RAM'
    MAX_NETWORK_PUBLIC = 'MAX_NETWORK_PUBLIC'
class VirtualizationType(PrintableEnum):
    """FCO REST API VirtualizationType enum.

    The VirtualizationType enum enumerates the different types of
    virtualization (container or vm).

    VIRTUAL_MACHINE: Virtual machine
    CONTAINER: Linux container
    """
    VIRTUAL_MACHINE = 'VIRTUAL_MACHINE'
    CONTAINER = 'CONTAINER'
class EmailVAR(PrintableEnum):
"""FCO REST API EmailVAR enum.
The EmailVAR enumerates the configurable variables for emails.
CARD_NAME: The card name used within the email concerned
CUST_NAME: The customer name used within the email concerned
EMAIL_BODY: The general email body
FAILURE_REASON: The failure reason used within the email
concerned
CURRENCY: The currency used within the email concerned
COMPANY_NAME: The Company name for the email concerned
UNIT_BALANCE: The unit balance used within the email concerned
DATE: The date used within the email concerned
CARD_CHARGE: The credit card charge used within the email
concerned
INVITER: The inviting user used within the email concerned
UNIT_CHARGE: The number of units charged for used within the
email concerned
URL_PARAMS: The parameters to a URL used within the email
concerned
FROM_ADDRESS: The From: address for the email concerned
SUPPORT_URL: The support URL used within the email concerned
USER_LOGIN: The user login used within the email concerned
BCC_ADDRESS: The BCC: address for the email concerned
CONTROL_PANEL_URL: The Control Panel URL for the email concerned
INVOICE_NUMBER: The invoice number used within the email
concerned
EMAIL_FOOTER: The email footer for the email concerned
TOTAL: The total currency value used within the email
concerned
CARD_NUMBER: The card number used within the email concerned
ACTIVATION_KEY: The activation key sent within the email
concerned
REPLY_TO: The Reply-To: address for the email concerned
PASSWORD: The password sent within the email concerned
UNITS: The number of units used within the email
concerned
CUST_UUID: The customer uuid used within the email concerned
CC_ADDRESS: The CC: address for the email concerned
EMAIL_SUBJECT: The general email subject
"""
CARD_NAME = 'CARD_NAME'
CUST_NAME = 'CUST_NAME'
EMAIL_BODY = 'EMAIL_BODY'
FAILURE_REASON = 'FAILURE_REASON'
CURRENCY = 'CURRENCY'
COMPANY_NAME = 'COMPANY_NAME'
UNIT_BALANCE = 'UNIT_BALANCE'
DATE = 'DATE'
CARD_CHARGE = 'CARD_CHARGE'
INVITER = 'INVITER'
UNIT_CHARGE = 'UNIT_CHARGE'
URL_PARAMS = | |
: [u's'] ,
u'楊' : [u'y'] ,
u'㹐' : [u'c', u'z'] ,
u'擗' : [u'p'] ,
u'蝖' : [u'x'] ,
u'偤' : [u'y'] ,
u'糧' : [u'l'] ,
u'齦' : [u'y', u'q', u'k'] ,
u'俱' : [u'j'] ,
u'諳' : [u'a', u't'] ,
u'桴' : [u'f'] ,
u'蚀' : [u's'] ,
u'愅' : [u'g'] ,
u'㘋' : [u'x'] ,
u'厎' : [u'z'] ,
u'麐' : [u'l'] ,
u'礕' : [u'p'] ,
u'䠟' : [u's', u'd'] ,
u'殞' : [u'y'] ,
u'霡' : [u'm'] ,
u'怯' : [u'q'] ,
u'劸' : [u'w'] ,
u'醺' : [u'x'] ,
u'䭉' : [u's'] ,
u'櫈' : [u'd'] ,
u'陋' : [u'l'] ,
u'捙' : [u'y'] ,
u'嗢' : [u'w'] ,
u'郤' : [u'x'] ,
u'筩' : [u'y', u't', u'd'] ,
u'䩳' : [u's'] ,
u'淲' : [u'p'] ,
u'襵' : [u'z'] ,
u'踆' : [u'q', u'c'] ,
u'㒍' : [u'l'] ,
u'唌' : [u'y', u'x', u'd'] ,
u'熏' : [u'x'] ,
u'辛' : [u'x'] ,
u'㠪' : [u'j'] ,
u'䈮' : [u'j', u'w'] ,
u'溱' : [u'q', u'z'] ,
u'錸' : [u'l'] ,
u'㦿' : [u'h'] ,
u'娾' : [u'a'] ,
u'䏃' : [u'm'] ,
u'铍' : [u'p'] ,
u'㕌' : [u'a'] ,
u'牎' : [u'c'] ,
u'寓' : [u'y'] ,
u'聚' : [u'j'] ,
u'坠' : [u'z'] ,
u'珣' : [u'x'] ,
u'顪' : [u'h'] ,
u'臯' : [u'g'] ,
u'潰' : [u'x', u'k'] ,
u'䣵' : [u'e'] ,
u'駿' : [u'j'] ,
u'㩾' : [u'c'] ,
u'䒂' : [u'x', u'j'] ,
u'樉' : [u's'] ,
u'閌' : [u'k'] ,
u'岒' : [u'q'] ,
u'伛' : [u'y'] ,
u'㞠' : [u'l'] ,
u'璢' : [u'l'] ,
u'逥' : [u'h'] ,
u'末' : [u'm'] ,
u'芮' : [u'r'] ,
u'䦴' : [u'y'] ,
u'缻' : [u'f'] ,
u'骾' : [u'g'] ,
u'懄' : [u'q'] ,
u'赇' : [u'q'] ,
u'呍' : [u'h'] ,
u'㳒' : [u'b', u'f'] ,
u'秔' : [u'j', u'g'] ,
u'䛖' : [u'y', u'e'] ,
u'汝' : [u'r'] ,
u'韠' : [u'b'] ,
u'廦' : [u'b'] ,
u'䅯' : [u't'] ,
u'盶' : [u'r'] ,
u'鉹' : [u'c'] ,
u'奿' : [u'f'] ,
u'䐈' : [u'z'] ,
u'碋' : [u'h'] ,
u'蜊' : [u'l'] ,
u'誗' : [u'c'] ,
u'氘' : [u'd'] ,
u'徥' : [u'c'] ,
u'銧' : [u'g'] ,
u'琨' : [u'k'] ,
u'枵' : [u'x'] ,
u'蘴' : [u'f'] ,
u'见' : [u'x', u'j'] ,
u'歂' : [u'c'] ,
u'廏' : [u'j'] ,
u'金' : [u'j'] ,
u'獒' : [u'a'] ,
u'䉜' : [u'z'] ,
u'曟' : [u'c'] ,
u'蕞' : [u'z', u'j'] ,
u'㿥' : [u'h'] ,
u'裫' : [u'y'] ,
u'橬' : [u'c'] ,
u'巹' : [u'j'] ,
u'郻' : [u'q'] ,
u'牼' : [u'k'] ,
u'蒈' : [u'k'] ,
u'笍' : [u'z'] ,
u'䨗' : [u'f'] ,
u'榖' : [u'g'] ,
u'贙' : [u'x'] ,
u'刧' : [u'j'] ,
u'熦' : [u'j'] ,
u'锩' : [u'j'] ,
u'䂰' : [u'l'] ,
u'莲' : [u'l'] ,
u'稷' : [u'j', u'z'] ,
u'䥁' : [u'h'] ,
u'检' : [u'j'] ,
u'豃' : [u'h'] ,
u'兑' : [u'y', u'r', u'd'] ,
u'烐' : [u'z'] ,
u'俚' : [u'l'] ,
u'苜' : [u'm'] ,
u'祡' : [u'c'] ,
u'䡫' : [u'p'] ,
u'埪' : [u'k'] ,
u'譭' : [u'h'] ,
u'偻' : [u'l'] ,
u'翺' : [u'a'] ,
u'鍽' : [u'b'] ,
u'伄' : [u'd'] ,
u'掇' : [u'd'] ,
u'鰎' : [u'j'] ,
u'㚕' : [u'f'] ,
u'圔' : [u'e'] ,
u'䲙' : [u'j'] ,
u'鶣' : [u'p'] ,
u'缤' : [u'b'] ,
u'咩' : [u'm'] ,
u'脰' : [u'd'] ,
u'堶' : [u't'] ,
u'粹' : [u'c', u's'] ,
u'恆' : [u'h'] ,
u'始' : [u's'] ,
u'雕' : [u'd'] ,
u'㝔' : [u'y'] ,
u'䵘' : [u's'] ,
u'懛' : [u'd'] ,
u'驢' : [u'l'] ,
u'啨' : [u'q'] ,
u'䫭' : [u'h'] ,
u'鯷' : [u's', u't'] ,
u'㡶' : [u'j', u'z'] ,
u'絸' : [u'j'] ,
u'勽' : [u'b'] ,
u'栁' : [u'l'] ,
u'辄' : [u'z'] ,
u'嗀' : [u'h'] ,
u'㼏' : [u'n'] ,
u'瀑' : [u'p', u'b'] ,
u'鞔' : [u'm', u'w'] ,
u'溚' : [u'd'] ,
u'舝' : [u'x'] ,
u'崣' : [u'w'] ,
u'㖨' : [u'l'] ,
u'皪' : [u'l'] ,
u'䮬' : [u'm'] ,
u'攳' : [u'x'] ,
u'颶' : [u'j'] ,
u'久' : [u'j'] ,
u'篌' : [u'h'] ,
u'齏' : [u'j'] ,
u'噕' : [u'h'] ,
u'跘' : [u'p'] ,
u'䓞' : [u'l'] ,
u'㵣' : [u'k', u'l'] ,
u'繥' : [u'x'] ,
u'门' : [u'm'] ,
u'泮' : [u'p'] ,
u'聱' : [u'a', u'y'] ,
u'孷' : [u'l'] ,
u'瓾' : [u'm'] ,
u'䨀' : [u'd'] ,
u'溃' : [u'k'] ,
u'褂' : [u'g'] ,
u'刐' : [u'd'] ,
u'皓' : [u'h'] ,
u'鄒' : [u'z', u'j'] ,
u'䆝' : [u's'] ,
u'蒟' : [u'j'] ,
u'稠' : [u'c', u't', u'd'] ,
u'榭' : [u'x'] ,
u'蠬' : [u'l'] ,
u'崺' : [u'y'] ,
u'熽' : [u'x'] ,
u'逼' : [u'b'] ,
u'䃇' : [u'm'] ,
u'蟉' : [u'l'] ,
u'敊' : [u'c'] ,
u'棗' : [u'z'] ,
u'譖' : [u'z', u'j'] ,
u'層' : [u'c'] ,
u'烧' : [u's'] ,
u'鍦' : [u's'] ,
u'䏱' : [u'j'] ,
u'蛳' : [u's'] ,
u'摴' : [u's'] ,
u'㵺' : [u'p'] ,
u'誀' : [u'e'] ,
u'洅' : [u'z'] ,
u'㨋' : [u'c', u'z', u'l'] ,
u'徎' : [u'c'] ,
u'銐' : [u'c'] ,
u'甕' : [u'w'] ,
u'䐟' : [u'x'] ,
u'枞' : [u'c', u'z'] ,
u'鬡' : [u'n'] ,
u'氯' : [u'l'] ,
u'庸' : [u'y'] ,
u'鶺' : [u'j'] ,
u'琿' : [u'h'] ,
u'曈' : [u't'] ,
u'驋' : [u'b'] ,
u'㿎' : [u'f'] ,
u'潙' : [u'w', u'g'] ,
u'姢' : [u'j'] ,
u'鳤' : [u'g'] ,
u'睩' : [u'l'] ,
u'䙳' : [u'e'] ,
u'懲' : [u'c'] ,
u'蕵' : [u's'] ,
u'㻸' : [u'x', u'c', u'j'] ,
u'舆' : [u'y'] ,
u'㢍' : [u'y'] ,
u'夌' : [u'l'] ,
u'綏' : [u's', u'r', u't'] ,
u'莛' : [u't'] ,
u'愜' : [u'q'] ,
u'媡' : [u'l'] ,
u'丮' : [u'j'] ,
u'抱' : [u'b'] ,
u'鼸' : [u'x'] ,
u'㖿' : [u'x'] ,
u'嘾' : [u't'] ,
u'促' : [u'c'] ,
u'飍' : [u'x', u'b'] ,
u'㥌' : [u'j'] ,
u'繎' : [u'r'] ,
u'埓' : [u'l'] ,
u'豚' : [u't', u'd'] ,
u'孠' : [u's'] ,
u'翣' : [u's'] ,
u'鑪' : [u'l'] ,
u'路' : [u'l'] ,
u'捰' : [u'w'] ,
u'闿' : [u'k'] ,
u'㙾' : [u'x'] ,
u'昉' : [u'f'] ,
u'馌' : [u'y'] ,
u'傒' : [u'x'] ,
u'䌛' : [u'y'] ,
u'碢' : [u't'] ,
u'鰥' : [u'y', u'k', u'g'] ,
u'欫' : [u'c'] ,
u'䖴' : [u'y'] ,
u'㸹' : [u'l'] ,
u'猻' : [u's'] ,
u'难' : [u'n'] ,
u'淄' : [u'z'] ,
u'腇' : [u'n'] ,
u'塍' : [u'c'] ,
u'痔' : [u'z'] ,
u'䫖' : [u'c', u'h', u'n', u's', u't', u'y', u'z'] ,
u'恝' : [u'j'] ,
u'鯠' : [u'l'] ,
u'勦' : [u'c', u'j'] ,
u'䵯' : [u't'] ,
u'競' : [u'j'] ,
u'鹹' : [u'x'] ,
u'啿' : [u'd'] ,
u'䀈' : [u'q'] ,
u'粋' : [u'c'] ,
u'茊' : [u'c', u'z'] ,
u'㖑' : [u'x'] ,
u'躗' : [u'w'] ,
u'栘' : [u'y'] ,
u'宥' : [u'y'] ,
u'隧' : [u's', u'z'] ,
u'瀨' : [u'l'] ,
u'䜲' : [u'l'] ,
u'舴' : [u'z'] ,
u'㒻' : [u'm'] ,
u'跁' : [u'p'] ,
u'潂' : [u'h'] ,
u'嫏' : [u'l'] ,
u'闑' : [u'n'] ,
u'睒' : [u's'] ,
u'䙜' : [u'f'] ,
u'拟' : [u'n'] ,
u'腞' : [u'z', u'd', u't'] ,
u'㯥' : [u'c', u'z'] ,
u'賫' : [u'q', u'j'] ,
u'湬' : [u'j'] ,
u'姹' : [u'c'] ,
u'铻' : [u'y', u'w'] ,
u'發' : [u'b', u'f'] ,
u'䖆' : [u'n'] ,
u'肈' : [u'z'] ,
u'缍' : [u'd'] ,
u'丗' : [u's'] ,
u'涖' : [u'l'] ,
u'褙' : [u'b'] ,
u'嘧' : [u'm'] ,
u'疦' : [u'j'] ,
u'鄩' : [u'x'] ,
u'螲' : [u'z', u'd'] ,
u'縷' : [u'l'] ,
u'㜽' : [u'z'] ,
u'䵁' : [u'q'] ,
u'泀' : [u's'] ,
u'衃' : [u'p'] ,
u'啑' : [u's', u'd'] ,
u'瓐' : [u'l'] ,
u'道' : [u'd'] ,
u'䯚' : [u'y', u'o'] ,
u'蛜' : [u'y'] ,
u'絡' : [u'l'] ,
u'䱫' : [u'l'] ,
u'只' : [u'z'] ,
u'轭' : [u'e'] ,
u'呻' : [u's'] ,
u'篺' : [u'p'] ,
u'靽' : [u'b'] ,
u'䬄' : [u'y'] ,
u'枇' : [u'p', u'b'] ,
u'頎' : [u'q', u'k'] ,
u'匔' : [u'q'] ,
u'䢙' : [u'b'] ,
u'馣' : [u'a'] ,
u'㸢' : [u'b'] ,
u'笤' : [u's', u't'] ,
u'傩' : [u'n'] ,
u'蔰' : [u'h'] ,
u'尶' : [u'g'] ,
u'碹' : [u'x'] ,
u'諅' : [u'j'] ,
u'摆' : [u'b'] ,
u'巋' : [u'k'] ,
u'鋕' : [u'z'] ,
u'䥘' : [u'c'] ,
u'旛' : [u'f'] ,
u'鹢' : [u'y', u'n'] ,
u'全' : [u'q'] ,
u'仭' : [u'r'] ,
u'㱶' : [u'q', u'p', u'r', u'b', u'j'] ,
u'祸' : [u'h'] ,
u'国' : [u'g'] ,
u'氁' : [u'm'] ,
u'讄' : [u'l'] ,
u'䊊' : [u'm'] ,
u'㬏' : [u'l'] ,
u'鎔' : [u'r'] ,
u'檚' : [u'c'] ,
u'蘝' : [u'l'] ,
u'夣' : [u'm'] ,
u'犪' : [u'k'] ,
u'侬' : [u'n'] ,
u'愳' : [u'j'] ,
u'鲶' : [u'n'] ,
u'垼' : [u'y'] ,
u'䩅' : [u'z'] ,
u'翌' : [u'y'] ,
u'魏' : [u'w'] ,
u'剕' : [u'f'] ,
u'觘' : [u'c'] ,
u'䃞' : [u'y', u's'] ,
u'㥣' : [u'h'] ,
u'穥' : [u'y'] ,
u'釨' : [u'z'] ,
u'森' : [u's'] ,
u'葱' : [u'c'] ,
u'彷' : [u'p', u'f'] ,
u'㟼' : [u'a'] ,
u'烾' : [u'c'] ,
u'一' : [u'y'] ,
u'檃' : [u'y'] ,
u'贂' : [u'c'] ,
u'嘐' : [u'x', u'j'] ,
u'犓' : [u'c'] ,
u'锒' : [u'l'] ,
u'䖝' : [u'c'] ,
u'肟' : [u'w'] ,
u'縠' : [u'h'] ,
u'㜦' : [u'h', u's', u'x', u'n'] ,
u'涭' : [u's'] ,
u'谬' : [u'm'] ,
u'夺' : [u'd'] ,
u'疽' : [u'j'] ,
u'鐼' | |
<filename>testlib/test_data.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import io
import os
import six
import json
import shutil
import unittest
import gramex.data
import gramex.cache
import pandas as pd
import sqlalchemy as sa
from orderedattrdict import AttrDict
from nose.plugins.skip import SkipTest
from nose.tools import eq_, ok_, assert_raises
from pandas.util.testing import assert_frame_equal as afe
from pandas.util.testing import assert_series_equal as ase
import dbutils
from . import folder, sales_file
# Database hosts used by the SQL-backed tests; overridable via environment
# variables so CI can point at non-local servers.
server = AttrDict(
    mysql=os.environ.get('MYSQL_SERVER', 'localhost'),
    postgres=os.environ.get('POSTGRES_SERVER', 'localhost'),
)
def eqframe(actual, expected, **kwargs):
    '''Assert two DataFrames are equal while ignoring the index: the
    expected frame's index is aligned to the actual one before delegating
    to assert_frame_equal (afe).'''
    expected.index = actual.index
    return afe(actual, expected, **kwargs)
class TestFilter(unittest.TestCase):
sales = gramex.cache.open(sales_file, 'xlsx')
db = set()
def test_get_engine(self):
    """gramex.data.get_engine must classify data-source URLs correctly."""
    check = gramex.data.get_engine
    # DataFrames are recognised directly, not via a URL scheme
    eq_(check(pd.DataFrame()), 'dataframe')
    # Explicit schemes and SQLAlchemy connection strings
    url_cases = {
        'dir:///d:/temp/data.txt': 'dir',
        'dir:////root/path': 'dir',
        'file:///d:/temp/data.txt': 'file',
        'file:////root/path': 'file',
        'postgresql://scott:tiger@localhost:5432/mydatabase': 'sqlalchemy',
        'mysql://scott:tiger@localhost/foo': 'sqlalchemy',
        'oracle://scott:tiger@127.0.0.1:1521/sidname': 'sqlalchemy',
        'sqlite:///foo.db': 'sqlalchemy',
    }
    for url, engine in url_cases.items():
        eq_(check(url), engine)
    # Plain paths: existing dirs are 'dir'; everything else defaults to 'file'
    eq_(check(folder), 'dir')
    eq_(check(os.path.join(folder, 'test_data.py')), 'file')
    eq_(check('/root/nonexistent/'), 'file')
    eq_(check('/root/nonexistent.txt'), 'file')
def test_filter_col(self):
    """_filter_col must split a key into (column, aggregation, operator)."""
    cols = ['sales', 'growth', 'special ~!@#$%^&*()_+[]\\{}|;\':",./<>?高']
    operators = [''] + gramex.data.operators
    aggregations = ['SUM', 'min', 'Max', 'AVG', 'AnYthiNG']
    for name in cols:
        for op in operators:
            # Bare column + operator: no aggregation part
            eq_(gramex.data._filter_col(name + op, cols), (name, None, op))
            # 'col|AGG' + operator: aggregation is case-insensitive free text
            for agg in aggregations:
                eq_(gramex.data._filter_col(name + '|' + agg + op, cols), (name, agg, op))
def test_dirstat(self):
    """dirstat must list every file and directory under a path with stats."""
    for name in ('test_cache', 'test_config', '.'):
        path = os.path.normpath(os.path.join(folder, name))
        # Count every entry (files AND subdirectories) under path ourselves
        entries = []
        for root, dirs, files in os.walk(path):
            entries.extend(dirs)
            entries.extend(files)
        result = gramex.data.dirstat(path)
        eq_(len(entries), len(result))
        eq_({'path', 'name', 'dir', 'type', 'size', 'mtime', 'level'}, set(result.columns))
        # path must be the concatenation of dir (OS-native separators) + name
        ase(result['path'], path + result['dir'].str.replace('/', os.sep) + result['name'],
            check_names=False)
def flatten_sort(self, expected, by, sum_na, *columns):
    """
    Flatten a groupby().agg() result in place for comparison with filter().
    :param expected: DataFrame produced by a groupby().agg() call
    :param by: grouping column(s) to sort the flattened result by
    :param sum_na: if True, convert SUM() results of 0.0 to NA — databases
        return NULL for a SUM over zero rows where pandas returns 0.0
    :param columns: flat column names to assign, e.g. 'sales|sum'
    :return: None (``expected`` is modified in place)
    """
    expected.columns = columns
    expected.reset_index(inplace=True)
    if sum_na:
        for col in columns:
            if col.lower().endswith('|sum'):
                # FIX: `pd.np` was deprecated in pandas 1.0 and later removed.
                # A plain float NaN is stored identically to numpy.nan.
                expected[col].replace({0.0: float('nan')}, inplace=True)
    expected.sort_values(by, inplace=True)
def check_filter(self, df=None, na_position='last', sum_na=False, **kwargs):
    '''
    Tests a filter method. The filter method filters the sales dataset using
    an "args" dict as argument. This is used to test filter with frame, file
    and sqlalchemy URLs
    - ``na_position`` indicates whether NA are moved to the end or not. Can
      be 'first' or 'last'
    - ``sum_na`` indicates whether SUM() over zero elements results in NA
      (instead of 0)
    '''
    def eq(args, expected, **eqkwargs):
        # Run gramex.data.filter with ``args`` plus the outer **kwargs
        # (url/table/query/...), assert the result equals ``expected``,
        # and return the populated meta dict for further inspection.
        meta = {}
        actual = gramex.data.filter(meta=meta, args=args, **kwargs)
        eqframe(actual, expected, **eqkwargs)
        return meta
    sales = self.sales if df is None else df
    # No args: everything is returned and meta holds the defaults
    meta = eq({}, sales)
    eq_(meta['filters'], [])
    eq_(meta['ignored'], [])
    eq_(meta['sort'], [])
    eq_(meta['offset'], 0)
    eq_(meta['limit'], None)
    # Equality filters, including Unicode column names and values
    m = eq({'देश': ['भारत']},
           sales[sales['देश'] == 'भारत'])
    eq_(m['filters'], [('देश', '', ('भारत',))])
    # Multiple values for one key behave as an IN (...) filter
    m = eq({'city': ['Hyderabad', 'Coimbatore']},
           sales[sales['city'].isin(['Hyderabad', 'Coimbatore'])])
    eq_(m['filters'], [('city', '', ('Hyderabad', 'Coimbatore'))])
    # ?col= is treated as non-null col values
    m = eq({'sales': []}, sales[pd.notnull(sales['sales'])])
    eq_(m['filters'], [('sales', '', ())])
    m = eq({'sales': ['']}, sales[pd.notnull(sales['sales'])])
    eq_(m['filters'], [('sales', '', ())])
    # ?col!= is treated as null col values
    # Don't check dtype. Database may return NULL as an object, not float
    m = eq({'sales!': []}, sales[pd.isnull(sales['sales'])], check_dtype=False)
    eq_(m['filters'], [('sales', '!', ())])
    m = eq({'sales!': ['']}, sales[pd.isnull(sales['sales'])], check_dtype=False)
    eq_(m['filters'], [('sales', '!', ())])
    # ! with values means NOT IN (...)
    m = eq({'product!': ['Biscuit', 'Crème']},
           sales[~sales['product'].isin(['Biscuit', 'Crème'])])
    eq_(m['filters'], [('product', '!', ('Biscuit', 'Crème'))])
    # Comparison operators work lexically on string columns
    m = eq({'city>': ['Bangalore'], 'city<': ['Singapore']},
           sales[(sales['city'] > 'Bangalore') & (sales['city'] < 'Singapore')])
    eq_(set(m['filters']), {('city', '>', ('Bangalore',)), ('city', '<', ('Singapore',))})
    # Ignore empty columns
    m = eq({'city': ['Hyderabad', 'Coimbatore', ''], 'c1': [''], 'c2>': [''], 'city~': ['']},
           sales[sales['city'].isin(['Hyderabad', 'Coimbatore'])])
    # >~ and <~ are >= and <=
    m = eq({'city>~': ['Bangalore'], 'city<~': ['Singapore']},
           sales[(sales['city'] >= 'Bangalore') & (sales['city'] <= 'Singapore')])
    eq_(set(m['filters']), {('city', '>~', ('Bangalore',)), ('city', '<~', ('Singapore',))})
    # ~ is a substring (contains) match
    m = eq({'city~': ['ore']},
           sales[sales['city'].str.contains('ore')])
    eq_(m['filters'], [('city', '~', ('ore',))])
    # Multiple keys are ANDed together
    m = eq({'product': ['Biscuit'], 'city': ['Bangalore'], 'देश': ['भारत']},
           sales[(sales['product'] == 'Biscuit') & (sales['city'] == 'Bangalore') &
                 (sales['देश'] == 'भारत')])
    eq_(set(m['filters']), {('product', '', ('Biscuit',)), ('city', '', ('Bangalore',)),
                            ('देश', '', ('भारत',))})
    # !~ is a negated substring match
    m = eq({'city!~': ['ore']},
           sales[~sales['city'].str.contains('ore')])
    eq_(m['filters'], [('city', '!~', ('ore',))])
    # Numeric strings are coerced to numbers before comparing
    m = eq({'sales>': ['100'], 'sales<': ['1000']},
           sales[(sales['sales'] > 100) & (sales['sales'] < 1000)])
    eq_(set(m['filters']), {('sales', '>', (100,)), ('sales', '<', (1000,))})
    m = eq({'growth<': [0.5]},
           sales[sales['growth'] < 0.5])
    m = eq({'sales>': ['100'], 'sales<': ['1000'], 'growth<': ['0.5']},
           sales[(sales['sales'] > 100) & (sales['sales'] < 1000) & (sales['growth'] < 0.5)])
    # _sort: ascending by default; '-col' sorts descending
    m = eq({'देश': ['भारत'], '_sort': ['sales']},
           sales[sales['देश'] == 'भारत'].sort_values('sales', na_position=na_position))
    eq_(m['sort'], [('sales', True)])
    m = eq({'product<~': ['Biscuit'], '_sort': ['-देश', '-growth']},
           sales[sales['product'] == 'Biscuit'].sort_values(
               ['देश', 'growth'], ascending=[False, False], na_position=na_position))
    eq_(m['filters'], [('product', '<~', ('Biscuit',))])
    eq_(m['sort'], [('देश', False), ('growth', False)])
    # _offset / _limit paginate the filtered result
    m = eq({'देश': ['भारत'], '_offset': ['4'], '_limit': ['8']},
           sales[sales['देश'] == 'भारत'].iloc[4:12])
    eq_(m['filters'], [('देश', '', ('भारत',))])
    eq_(m['offset'], 4)
    eq_(m['limit'], 8)
    # _c selects columns; '-col' entries exclude columns instead
    cols = ['product', 'city', 'sales']
    m = eq({'देश': ['भारत'], '_c': cols},
           sales[sales['देश'] == 'भारत'][cols])
    eq_(m['filters'], [('देश', '', ('भारत',))])
    ignore_cols = ['product', 'city']
    m = eq({'देश': ['भारत'], '_c': ['-' + c for c in ignore_cols]},
           sales[sales['देश'] == 'भारत'][[c for c in sales.columns if c not in ignore_cols]])
    eq_(m['filters'], [('देश', '', ('भारत',))])
    # Non-existent column does not raise an error for any operation
    # NOTE(review): '>' appears twice in this list and '!~' is absent —
    # possibly a typo; confirm the intended operator coverage.
    for op in ['', '~', '!', '>', '<', '<~', '>', '>~']:
        m = eq({'nonexistent' + op: ['']}, sales)
        eq_(m['ignored'], [('nonexistent' + op, [''])])
    # Non-existent sorts do not raise an error
    m = eq({'_sort': ['nonexistent', 'sales']},
           sales.sort_values('sales', na_position=na_position))
    eq_(m['ignored'], [('_sort', ['nonexistent'])])
    eq_(m['sort'], [('sales', True)])
    # Non-existent _c does not raise an error
    m = eq({'_c': ['nonexistent', 'sales']}, sales[['sales']])
    eq_(m['ignored'], [('_c', ['nonexistent'])])
    for by in [['देश'], ['देश', 'city', 'product']]:
        # _by= groups by column(s) and sums all numeric columns
        # and ignores non-existing columns
        expected = sales.groupby(by).agg(AttrDict([
            ['sales', 'sum'],
            ['growth', 'sum'],
        ]))
        self.flatten_sort(expected, by, sum_na, 'sales|sum', 'growth|sum')
        m = eq({'_by': by + ['na'], '_sort': by}, expected)
        eq_(m['by'], by)
        eq_(m['ignored'], [('_by', 'na')])
        # _by allows custom aggregation
        aggs = [
            'city|count',
            'product|MAX', 'product|MIN',
            'sales|count', 'sales|SUM',
            'growth|sum', 'growth|AvG',
        ]
        agg_pd = AttrDict([
            ['city', 'count'],
            ['product', ['max', 'min']],
            ['sales', ['count', 'sum']],
            ['growth', ['sum', 'mean']],
        ])
        expected = sales.groupby(by).agg(agg_pd)
        self.flatten_sort(expected, by, sum_na, *aggs)
        eq({'_by': by, '_sort': by, '_c': aggs}, expected)
        # _by with HAVING as well as WHERE filters
        filters = [
            (None, (None, None)),
            ('city == "Singapore"', ('city', 'Singapore')),
            ('sales > 100', ('sales>', '100')),
        ]
        for query, (key, val) in filters:
            # Filter by city. Then group by product and aggregate by sales & growth
            filtered = sales if query is None else sales.query(query)
            expected = filtered.groupby(by).agg(agg_pd)
            self.flatten_sort(expected, by, sum_na, *aggs)
            # Make sure there's enough data. Sometimes, I goof up above
            # and pick a scenario that returns no data in the first place.
            ok_(len(expected) > 0)
            for having in aggs:
                # Apply HAVING at the mid-point of aggregated data.
                # Cannot use .median() since data may not be numeric.
                midpoint = expected[having].sort_values().iloc[len(expected) // 2]
                # Floating point bugs surface unless we round it off
                if isinstance(midpoint, float):
                    midpoint = round(midpoint, 2)
                subset = expected[expected[having] > midpoint]
                args = {'_by': by, '_sort': by, '_c': aggs,
                        having + '>': [six.text_type(midpoint)]}
                if query is not None:
                    args[key] = [val]
                # When subset is empty, the SQL returned types may not match.
                # Don't check_dtype in that case.
                eq(args, subset, check_dtype=len(subset) > 0)
    # Invalid values raise errors
    with assert_raises(ValueError):
        eq({'_limit': ['abc']}, sales)
    with assert_raises(ValueError):
        eq({'_offset': ['abc']}, sales)
def test_frame(self):
    """Run the full filter suite against an in-memory DataFrame source."""
    self.check_filter(url=self.sales)
def test_file(self):
    """Filter on file URLs: transforms are honoured and bad URLs raise."""
    self.check_filter(url=sales_file)
    # A string transform is forwarded to gramex.cache.open unchanged
    via_filter = gramex.data.filter(url=sales_file, transform='2.1', sheet_name='dummy')
    via_cache = gramex.cache.open(sales_file, 'excel', transform='2.2', sheet_name='dummy')
    afe(via_filter, via_cache)
    # A callable transform pre-filters the frame before args are applied
    self.check_filter(
        df=self.sales[self.sales['sales'] > 100],
        url=sales_file,
        transform=lambda d: d[d['sales'] > 100],
    )
    # Invalid engine, missing file and unsupported file type all raise
    with assert_raises(ValueError):
        gramex.data.filter(url='', engine='nonexistent')
    with assert_raises(OSError):
        gramex.data.filter(url='nonexistent')
    with assert_raises(TypeError):
        gramex.data.filter(url=os.path.join(folder, 'test_cache_module.py'))
def check_filter_db(self, dbname, url, na_position, sum_na=True):
self.db.add(dbname)
df = self.sales[self.sales['sales'] > 100]
kwargs = {'na_position': na_position, 'sum_na': sum_na}
self.check_filter(url=url, table='sales', **kwargs)
self.check_filter(url=url, table='sales',
transform=lambda d: d[d['sales'] > 100], df=df, **kwargs)
self.check_filter(url=url, table='sales',
query='SELECT * FROM sales WHERE sales > 100', df=df, **kwargs)
self.check_filter(url=url, table='sales',
query='SELECT * FROM sales WHERE sales > 999999',
queryfile=os.path.join(folder, 'sales-query.sql'), df=df, **kwargs)
self.check_filter(url=url, table=['sales', 'sales'],
query='SELECT * FROM sales WHERE sales > 100',
transform=lambda d: d[d['growth'] < 0.5],
df=df[df['growth'] < 0.5], **kwargs)
self.check_filter(url=url,
query='SELECT * FROM sales WHERE sales > 100',
transform=lambda d: d[d['growth'] < 0.5],
df=df[df['growth'] < 0.5], **kwargs)
self.check_filter(url=url, table='sales',
query='SELECT * FROM sales WHERE sales > 100',
transform=lambda d: d[d['growth'] < 0.5],
df=df[df['growth'] < 0.5], **kwargs)
# Check both parameter substitutions -- {} formatting and : substitution
afe(gramex.data.filter(url=url, table='{x}', args={'x': ['sales']}), self.sales)
actual = gramex.data.filter(
url=url, table='{兴}', args={
'兴': ['sales'],
'col': ['growth'],
'val': [0],
'city': ['South Plainfield'],
},
query='SELECT * FROM {兴} WHERE {col} > :val AND city = :city',
)
expected = self.sales[(self.sales['growth'] > 0) &
(self.sales['city'] == 'South Plainfield')]
eqframe(actual, expected)
# _by= _sort= _c=agg(s)
by = ['product']
aggs = ['growth|sum', 'sales|sum']
| |
καλαβρέζικα
καλαθάκι καλαθάρα καλαθάς καλαθιά καλαθοπλεκτική καλαθοπλεχτική καλαθοποιία
καλαθοσφαίριση καλαθοσφαιρίστρια καλαθοσφαιριστής καλαθούνα καλαθόσφαιρα
καλαμάκι καλαμάρι καλαμάτης καλαμήθρα καλαμίδι καλαμίθρα καλαμίνη καλαμαράκι
καλαμαριά καλαματιανός καλαματιανός καλαμιά καλαμιώνας καλαμοκάνα καλαμοκάνης
καλαμοσάκχαρο καλαμοσάκχαρον καλαμοσίταρο καλαμπακιώτης καλαμπακιώτισσα
καλαμποκάλευρο καλαμποκιά καλαμποκόσουπα καλαμπουρτζής καλαμπούρι καλαμπόκι
καλαμόσπιτο καλαμόσχοινο καλαντάρι καλαντίστρια καλαντιστής καλαποδάς καλαπόδι
καλαφάτισμα καλαφατιστήρι καλαϊτζής καλβινίστρια καλβινισμός καλβινιστής
καλειδοσκόπιο καλειδοσκόπιον καλεμκερί καλενδάριον καλεντάρι καλεσμένος
καλημέντο καλημέρα καλημέρισμα καλημαύκι καλημαύχι καληνυχτάκιας καληνύχτα
καληνώρισμα καλησπέρα καλησπέρισμα καλιά καλιακούδα καλιαρντά καλιαρντή
καλικάντζαρος καλικαντζάρι καλικαντζαράκι καλικαντζαρίνα καλικαντζαρούδι
καλιμέντο καλιομαγνήσιο καλιτσούνι καλιφόρνιο καλκάνι καλλιέπεια καλλιέργεια
καλλιγράφος καλλιγραφία καλλιεργήτρια καλλιεργητής καλλιθεάτης καλλιλογία
καλλιστεία καλλιτέχνημα καλλιτέχνης καλλιτέχνιδα καλλιτέχνις καλλιτεχνία
καλλιφωνία καλλονή καλλουργιά καλλυντικό καλλυντικός καλλωπίστρια καλλωπισμός
καλλωπιστής καλλώπισμα καλμάρισμα καλντέρα καλντερίμι καλντεριμιτζού καλοήθεια
καλοβατικά καλοβολιά καλοβουλία καλογεράκι καλογερική καλογεροπαίδι
καλογιάννος καλογιαννοπούλα καλογνωμιά καλογρίτσα καλογραία καλογριά
καλοζωία καλοζωιστής καλοθανατιά καλοθελήτρα καλοθελητής καλοκάρδισμα
καλοκαίριασμα καλοκαγαθία καλοκαιράκι καλοκαιρία καλοκαιριά καλοκαρδισμός
καλολογία καλομεταχείριση καλομεταχείρισμα καλομοιριά καλονάρχημα καλονάρχος
καλοπέρασμα καλοπαντρειά καλοπερασάκιας καλοπισμός καλοπιστία καλοπληρωτής
καλορί καλορίζικα καλορίμετρο καλοριζικιά καλοριφέρ καλοσύνεμα καλοσύνη
καλοτυχιά καλοτύχισμα καλουπατζής καλουπιτζής καλοφάνερος καλοφαγάς καλοφαγία
καλοχειμωνιά καλοχρονιά καλοχρόνισμα καλοψυχία καλοψυχιά καλούδι καλούμα
καλούπι καλούπωμα καλπάκι καλπάκι καλπασμός καλπιά καλπονοθεία καλπονόθευση
καλπουζάνης καλπουζάνος καλπουζανιά καλσόν καλτσάκι καλτσοβελόνα καλτσοδέτα
καλτσόν καλυβάκι καλυκοποιείο καλυμμαύκι καλυμμαύχι καλυτέρευση καλυτέρευσις
καλφαλίκι καλφόπουλο καλωδίωση καλωδιάκι καλωσόρισμα καλωσύνη καλό καλόγερος
καλόγρια καλόν καλόπιασμα καλός καλότα καλύβα καλύβη καλύβι καλύκι καλύμνια
καλύπτρα καλώδιο καμάκι καμάκωμα καμάρα καμάρι καμέλια καμέραμαν καμήλα
καμίνευση καμίνευσις καμίνι καμαράκι καμαρίλα καμαρίλλα καμαρίνι καμαρίτσα
καμαριέρα καμαριέρης καμαροφρυδούσα καμαροφρύδα καμαροφρύδης καμαρούλα
καμαρότος καμασούτρα καματάρης καματάρισσα καματερό καμβάς καμελιέλαιο
καμηλαύκι καμηλαύχι καμηλιέρης καμηλιέρισσα καμηλοπάρδαλη καμηλωτή καμηλό
καμηλόμαλλο καμηλόσουπα καμηλότριχα καμιζόλα καμικάζι καμιλαύκι καμιλαύχι
καμινάρης καμινάς καμινέτο καμινεία καμινετάκι καμινευτήρας καμινευτήριο
καμινεύτρια καμινοβίγλι καμιτσίκι καμιόνι καμουτσί καμουτσίκι καμουτσικιά
καμουφλάρισμα καμπάγι καμπάνα καμπάνια καμπάνισμα καμπέρω καμπή καμπίλε
καμπαέτι καμπανάκι καμπανάρης καμπανίτης καμπαναριό καμπανιά καμπανοπιπεριά
καμπαρέ καμπαρετζού καμπαρντίνα καμπιαδόρος καμπινέ καμπινές καμποτάζ
καμποτζιανά καμποτινισμός καμπούκι καμπούλι καμπούνι καμπούρα καμπούρης
καμπτήρας καμπυλότης καμπυλότητα καμπύλη καμπύλωση καμτσίκι καμτσικιά καμφορά
καμφορόδεντρο καμωματού καμόρα κανάγιας κανάκεμα κανάκι κανάκια κανάλι κανάρα
κανάστα κανάτα κανάτας κανάτι κανέλα κανί κανίβαλος κανίς κανίσκι καναβάτσο
καναδέζα καναδέζος καναδή καναδός κανακάρης κανακάρισσα καναλάκι καναλάρα
καναλισμός καναντέρ καναπές καναπεδάκι καναρίνι καναρινάκι καναρινί κανατάκι
κανατίτσα κανατούλα κανδήλα κανδήλι κανδαυλισμός κανελί κανελόνι κανθαρίδα
κανθαριδίνη κανθός κανιβαλισμός κανκάν καννάβι κανναβάτσα κανναβάτσο
κανναβίς κανναβούρι κανναβόσκοινο κανναβόσπορος κανναβόσχοινο κανναβόχαρτο
κανοκιάλι κανονάκι κανονάρχημα κανονάρχης κανονάρχος κανονίδι κανονιά
κανονικοποίηση κανονικότης κανονικότητα κανονιοβολισμός κανονιοθυρίδα
κανονιοστοιχία κανονιοφόρος κανονισμός κανονιστής κανονιστική καντάδα καντάρι
καντέμης καντήλα καντήλι καντίνα καντίνι κανταΐφι κανταδίτσα κανταδόρικος
κανταρτζής κανταφισμός κανταφιστής καντεμιά καντζελλαρία καντηλέρι καντηλήθρα
καντηλανάφτης καντηλανάφτισσα καντηλιέρι καντιανισμός καντιλέτο καντούνι
καντρίλια καντρόνι καντσονέτα καντόν καντόνι κανό κανόνας κανόνι κανόνισμα
καολίνης καουμπόης καουμπόι καουτσουκόδεντρο καουτσούκ καούνι καούρα καπάκι
καπάρο καπάρος καπάρωμα καπάτσα καπάτσος καπέλλο καπέλο καπέλωμα καπήλευση
καπίστρι καπίστρωμα καπαμάς καπαμπάγκαν καπανταής καπαντατζού καπαρόκουμπο
καπατσοσύνη καπελάδικο καπελάκι καπελάρισμα καπελάς καπελίνα καπελίνο
καπελειό καπελιέρα καπελού καπετάν καπετάνιος καπετάνισσα καπεταν-ψωμάς
καπετανλίκι καπηλεία καπηλειό καπηλευτής καπινός καπιστράνα καπιτάλα καπιτάλας
καπιταλίστας καπιταλίστης καπιταλίστρια καπιταλισμός καπιταλιστής
καπιτονέ καπλάνι καπλάντισμα καπλαμάς καπλαματζής καπλαντοβελόνα καπλοσυκιά
καπνάς καπνέλαιο καπνέμπορας καπνέμπορος καπνίλα καπνίστρια καπναγωγός
καπνεμπορείο καπνεμπορικός καπνεμπόριο καπνεμπόρισσα καπνεργάτης καπνεργάτισσα
καπνεργατικά καπνεργοστάσιο καπνιά καπνικόν καπνιστήρι καπνιστήριο καπνιστής
καπνοβιομηχανία καπνοδοχοκαθαριστής καπνοδόχος καπνοθάλαμος καπνοθήκη
καπνοκαλλιεργητής καπνοκοπτήριο καπνομάγαζο καπνομίχλη καπνομαντεία
καπνοπωλείο καπνοπώλης καπνοπώλις καπνοπώλισσα καπνοσακούλα καπνοσυλλέκτης
καπνοσωλήνας καπνοσύριγγα καπνοσύριγξ καπνοτόπι καπνοφυτεία καπνούρα καπνός
καπνόφυλλο καποτάστο καπουδάν καπουτσίνο καπουτσίνος καπούλι καππαριά
καπρίτσο καπό καπόνι καπότα καράβι καράβλακας καράγιαλης καράμπα καράολος
καράς καράτε καράτι καράφα καράφλα καράφλας καρέ καρέγλα καρέκλα καρένα καρέτα
καρίνα καραβάκι καραβάν-σεράι καραβάνα καραβάνι καραβάρα καραβέλα καραβίδα
καραβιά καραβοκύρης καραβοκύρισσα καραβολίδα καραβομαραγκός καραβοστάσι
καραβοφάναρο καραβόπανο καραβόσκαρο καραβόσκοινο καραβόσκυλο καραβόσκυλος
καραγάτσι καραγκιοζιλίκι καραγκιοζλίκι καραγκιοζοπαίχτης καραγκιόζης
καραγκούνα καραγκούνης καραγκούνισσα καραγωγέας καρακάξα καρακαηδόνα
καρακόλι καραμέλα καραμέλωση καραμελόχρωμα καραμούζα καραμπίνα καραμπινιέρος
καραμπογιά καραμπουζουκλής καραμπόλα καραντί καραντίνα καραντουζένι καραούλι
καραπουτανάρα καραπουτσακλάρα καραπούτανος καρασεβντάς καρατέκα καρατερίστα
καρατζόβας καρατσάι μπαλκάρ καρατόμηση καραφάκι καρβέλι καρβελάκι καρβελούτσα
καρβουνάκι καρβουνέμπορος καρβουναποθήκη καρβουναριό καρβουνιάρης
καρβουνιέρα καρβουνόσκονη καρβούνιασμα καρβύνιο καργιόλα καρδάμωμα καρδάρα
καρδίτιδα καρδίτις καρδερίνα καρδιά καρδιαγγειογραφία καρδιακός καρδιαλγία
καρδιοαγγειογραφία καρδιογνώστης καρδιογνώστρα καρδιογνώστρια καρδιογράφημα
καρδιογραφία καρδιοδυναμική καρδιοκατακτητής καρδιοκλέφτης καρδιοκλέφτρα
καρδιολόγος καρδιομεγαλία καρδιοπάθεια καρδιοπαθής καρδιοσωμός καρδιοσωσμός
καρδιοτοκογράφος καρδιοτομία καρδιοφυσιολογία καρδιοχειρουργική
καρδιοχτύπι καρδιτσαίος καρδιτσιώτης καρδούλα καρεδάκι καρεκλάδικο καρεκλάκι
καρεκλί καρεκλίτσα καρεκλοκένταυρος καρεκλοκενταυρισμός καρεκλολαγνεία
καρενάγιο καρηβαρία καρθαμέλαιο καριέρα καριερίστας καριερισμός
καρικατούρα καριμπού καριοφίλι καριόλα καριόλης καριόφιλο καρκάδι καρκίνος
καρκίνωση καρκίνωσις καρκινοβασία καρκινογένεση καρκινολογία καρκινολόγος
καρκινοποίησις καρκινοφιλία καρκινοφοβία καρκινόλυση καρκόλα καρλίνο καρμίνι
καρμίρης καρμανιόλα καρμιριά καρμπαπενέμες καρμπιλατέρ καρμπιρατέρ
καρμπονάρα καρμπονάρος καρμπυρατέρ καρμπόν καρνάβαλος καρνάγιο καρνέ καρναβάλι
καρναβαλικά καρναβαλιστής καρναγιάρισμα καρντάσαινα καρντάσης καρντάσι
καροσερί καροτέλαιο καροτί καροτίλα καροτίνη καροτοπουρές καροτοσαλάτα
καροτσάκι καροτσέρης καροτσιέρης καροτόζουμο καροτόσουπα καρουζέλ καρούλα
καρούμπαλο καρούμπαλος καρπάζωμα καρπάτσιο καρπέτα καρπαζιά
καρπαθιώτης καρποκάψα καρπολογία καρπολόγημα καρπολόγος καρπουζιά καρποφαγία
καρπούζι καρπωτής καρπόδεση καρπός καρπόσωμα καρπώτρια καρσιλαμάς καρστ
καρτ καρτ ποστάλ καρτάλι καρτέλ καρτέλα καρτέρεμα καρτέρι καρταναγνώστης
καρτελοθήκη καρτερία καρτερικότης καρτερικότητα καρτεροψυχία καρτεσιανισμός
καρτούν καρτούτσο καρτούχος καρτσόνι καρτόνι καρτόφ καρυάτιδα καρυδάκι
καρυδιά καρυδόξυλο καρυδόπιτα καρυδότσουφλο καρυδόφλουδα καρυδόφυλλο
καρυδόψιχα καρυοθραύστης καρυοφύλλι καρυστινός καρυότυπος καρυόφυλλο
καρφάκι καρφί καρφίς καρφίτσα καρφίτσωμα καρφιτσοθήκη καρφοβελόνα καρφωτής
καρχηδόνιος καρωτίδα καρό καρότο καρότσα καρότσι καρύδα καρύδι καρύδωμα
καρύκευση καρώτο κασέ κασέλα κασέρι κασέτα κασίδα κασίδης κασαβέτι κασαμπάς
κασελάκι κασεράκι κασερόπιτα κασετάδικο κασετίνα κασετινούλα κασετοπειρατεία
κασετόφωνο κασιέρα κασιδιάρης κασιώτης κασκέτο κασκαβάλι κασκαντέρ κασκαρίκα
κασκορσές κασκορσεδάκι κασκόλ κασμάς κασμίρ κασμίρι κασμιρικά κασονάκι
κασουβιανά κασπό κασπώ κασσίτερος κασσιτέρωμα κασσιτέρωση κασσιτεροκόλληση
κασσιτερωτής καστάνια καστέλα καστέλι καστέλο καστανάς καστανιά καστανιέτα
καστανοπώλης καστανόμελο καστανόσουπα καστανόχρωμα καστανόχωμα καστελάνος
καστοριανός καστράκι καστρί καστροφύλακας καστρούπολη καστρόπορτα καστρόπυργος
καστόρ καστόρι κασόνα κασόνι κατάβαση κατάβασις κατάβρεγμα κατάβρεξη κατάδειξη
κατάδικος κατάδοση κατάδοσις κατάδυση κατάδυσις κατάθεση κατάθλιψη κατάθλιψις
κατάκλαση κατάκλασις κατάκλιση κατάκλισις κατάκριση κατάκρισις κατάκτηση
κατάληξη κατάληξις κατάληψη κατάληψις κατάλογος κατάλοιπο κατάλοιπον κατάλυμα
κατάλυσις κατάμπαρο κατάνα κατάνευση κατάνευσις κατάντη κατάντημα κατάντια
κατάνυξις κατάπαυση κατάπαυσις κατάπιομα κατάπλασμα κατάπληξη κατάπληξις
κατάπνιξη κατάποση κατάποσις κατάπτωση κατάπτωσις κατάρα κατάραχο κατάργηση
κατάρρευση κατάρρευσις κατάρριψη κατάρριψις κατάρρους κατάρτι κατάρτιση
κατάσβεση κατάσβεσις κατάσκοπος κατάσταση κατάστασις κατάστασις κατάστημα
κατάστιξις κατάστιχο κατάστρωμα κατάστρωμα αποπροσνήωσης κατάστρωση
κατάσχεση κατάταξη κατάταξις κατάτμηση κατάτμησις κατάφαση κατάφασις κατάφυση
κατάχτηση κατάχωση κατάψυξη κατέβασμα κατήγορος κατής κατήφεια κατήφορος
κατήχησις κατίκι κατίσχυση κατίσχυσις καταβαράθρωση καταβαράθρωσις καταβασία
καταβολή καταβολισμός καταβρεγμός καταβρεχτήρας καταβρεχτήρι καταβρόχθιση
καταβυθιστής καταβόδιο καταβόθρα καταβύθιση καταγγελία καταγοήτευση
καταγραφή καταγραφεύς καταγωγή καταγώγιο καταδίκη καταδίωξη καταδίωξις
καταδεχτικότητα καταδημαγώγηση καταδολίευση καταδολίευσις καταδρομέας
καταδρομεύς καταδρομικό καταδρομικόν καταδυνάστευση καταδυνάστευσις καταδότης
καταδότρια καταζήτηση καταζήτησις καταθέτης καταθέτρια καταιγίδα καταιγίς
καταιονίζομαι καταιονίζω καταιονητήρ καταιονητήρας καταιονισμός καταιονιστήρας
καταισχύνη καταιόνηση καταιόνησις κατακάθι κατακάθισμα κατακαλόκαιρο
κατακερματισμός κατακεφαλιά κατακλείδα κατακλείς κατακλυσμός κατακράτηση
κατακρήμνιση κατακρήμνισις κατακρήμνισμα κατακραυγή κατακρεούργηση
κατακτήτρια κατακτητής κατακυρίευση κατακόμβη κατακόρυφος κατακύρωση
καταλάγιασμα καταλήστευση καταλαλήτρα καταλαλητής καταλαλητό καταλαλιά
καταληπτικός καταληψία καταλληλότης καταλληλότητα καταλογή καταλογισιμότητα
καταλογιστό καταλογιστόν καταλογογράφηση καταλυτής καταλύτης καταλύτρα
καταμέρισις καταμέτρηση καταμέτρησις καταμήνυση καταμήνυσις καταμαράν
καταμερισμός καταμεσήμερο καταμετρητής καταμόσχευση καταμόσχευσις κατανάγκη
κατανάλωσις κατανίκηση κατανίκησις καταναγκασμός καταναλωτής καταναλωτισμός
καταναυμάχηση κατανεμητής κατανομή κατανόημα κατανόηση κατανόησις καταξίωση
καταξεριάς καταπάτηση καταπάτησις καταπάτι καταπέλτης καταπέτασμα καταπίεση
καταπίστευμα καταπίστευση καταπίστευσις καταπακτή καταπατητής καταπατώ
καταπιά καταπιεστής καταπιστευματοδόχος καταπιόνας καταπληξία καταπολέμηση
καταποτήρας καταπράυνση καταπτόηση καταπτόησις καταπόνηση καταπόνησις
καταπόντισις καταπόπλους καταπότης καταπότι καταράχι καταρίθμηση καταρίθμησις
καταρράκτης καταρράκωση καταρράκωσις καταρράχτης καταρρίχηση καταρροή
κατασάρκιο κατασήμανση κατασίγαση κατασβεστήρ κατασβεστήρας κατασκήνωση
κατασκευάστρια κατασκευή κατασκευαστής κατασκεύασμα κατασκηνωτής κατασκηνώτρια
κατασκοπεία κατασκόπευση κατασκόπευσις κατασπάραξη κατασπίλωση κατασπίλωσις
κατασπατάλησις καταστάλαγμα κατασταλαχτή καταστατικό καταστατό καταστηματάρχης
καταστιχογράφος καταστιχογραφία καταστολέας καταστολή καταστρατήγηση
καταστρεπτικότητα καταστροφέας καταστροφή καταστροφεύς καταστροφισμός
καταστροφολόγος κατασυκοφάντηση κατασυκοφάντησις κατασχέτης κατασχέτις
κατασώτευση κατασώτευσις κατατεμαχισμός κατατομή κατατονία κατατοπισμός
κατατριβή κατατρόπωση κατατρόπωσις κατατόπι κατατόπιση κατατόπισις καταυγασμός
καταυλισμός καταφερτζής καταφερτζού καταφορά καταφρονήτρα καταφρονήτρια
καταφρονητής καταφρόνεση καταφρόνηση καταφρόνια καταφυγή καταφύγιο καταχανάς
καταχεριά καταχθονιότητα καταχνιά καταχράστρια καταχραστής καταχτητής
καταχώρηση καταχώριση καταψήφιση καταψιά καταψύκτης καταϊφι καταύγαση
κατεβατό κατεδάφιση κατεδάφισις κατεξουσιασμός κατεπάνω κατεργάρης
κατεργαριά κατεργασία κατερινιώτης κατεστημένο κατευθυντικότητα κατευνασμός
κατευόδωση κατευόδωσις κατεχόμενα κατεύθυνση κατεύθυνσις κατζέλο κατζίο
κατηγορηματικότης κατηγορηματικότητα κατηγορητήριο κατηγοριοποίηση
κατηγορούμενη κατηγορούμενο κατηγορούμενος κατηγόρημα κατηγόρια κατηφοριά
κατηφόρισμα κατηχήτρια κατηχητής κατιδεασμός κατιμάς κατιμέρι κατινιά κατιούσα
κατιφές κατιόν κατιόντες κατμάς κατοίκηση κατοίκησις κατοίκιση κατοικία
κατολίσθηση κατολίσθησις κατονομασία κατονόμαση κατοπτρισμός κατοστάευρο
κατοστάρι κατοστάρικο κατουρλής κατουρλιά κατουρλιό κατουρλού κατοχή
κατοχρονίτισσα κατοχύρωση κατοχύρωσις κατούρημα κατράμι κατράμωμα κατρακύλα
κατρακύλι κατρακύλισμα κατραμόκολος κατραμόκωλος κατραμόπανο κατραμόχαρτο
κατρουλιάρης κατρουλιό κατς κατσάβραχο κατσάδα κατσάδιασμα κατσάρωμα κατσί
κατσίκα κατσίκι κατσαβίδι κατσαμάκι κατσαμπρόκος κατσαπλιάς κατσαρίδα
κατσαριδοκτόνο κατσαρολάκι κατσαρολικό κατσαρόλα κατσαρόλι κατσιαπλιάς
κατσιβελιά κατσικάκι κατσικάς κατσικοκλέφτης κατσικοκλέφτρα κατσικοπρόβατα
κατσικούλα κατσικόδρομος κατσιποδιά κατσιφάρα κατσουλιέρης κατσουφιά κατσούλα
κατσούνα κατσούφιασμα κατωμεριά κατωμυλόπετρα κατωσάγονο κατωσέντονο
κατωτερότης κατωτερότητα κατωφέρεια κατόπτευση κατόπτευσις κατόρθωμα | |
:return: features set, target set as arrays
"""
return self._generate_data_by_pointers()
def get_cv_data(self) -> (list, list):
    """
    Generate cross-validation set from defined features and target
    :return: features set, target set as arrays
    """
    # Delegates to the shared pointer-based extractor with the CV range.
    return self._generate_data_by_pointers(mode="cv")
def get_testing_data(self) -> (list, list):
    """
    Generate testing set from defined features and target
    :return: features set, target set as arrays
    """
    # Delegates to the shared pointer-based extractor with the testing range.
    return self._generate_data_by_pointers(mode="testing")
def get_labels(self) -> (str, str):
    """
    Return the feature labels (same order as the training set) and the
    target column name.

    Prints a hint and returns None when features/target are not defined yet.
    """
    if not self.features or not self.target:
        print("Define training set first. Add features and target")
        return
    labels = [name for name in self.features]
    return labels, self._get_target_name()
def get_column_data(self, column_name: str) -> list:
    """
    Get the raw data of one table column.
    :param column_name: name from data table
    :return: list of data; empty list when the column is unknown
    """
    if column_name in self.head:
        return self.table[column_name].data
    return []
def plot(self, parameter1=None, parameter2=None,
         features2target=False, all2target=False,
         classifier=None) -> None:
    """
    Plot 2D pictures.
    :param classifier: mark each dot as its class with a color
    :param parameter1: axis 1 column name
    :param parameter2: axis 2 column name
    :param features2target: plot all features to target
    :param all2target: plot all to target
    :return: figures 2D
    """
    if all2target is True:
        # Every column (except the target itself) against the target.
        if not self.target:
            print("There is no defined target. Please, select one")
            return
        target_name = self._get_target_name()
        for column_name in self.head:
            if column_name in self.target:
                continue
            self._plot2d_helper(column_name, target_name, "blue")
    elif features2target is True:
        # Only the selected features against the target.
        if not self.target:
            print("There is no defined target. Please, select one")
            return
        target_name = self._get_target_name()
        for feature_name in self.features:
            if feature_name in self.target:
                continue
            self._plot2d_helper(feature_name, target_name, "red")
    elif parameter1 is not None and parameter2 is not None:
        # One explicit pair of columns; coloured by class when a
        # classifier column is supplied. Unknown columns are silently ignored.
        if parameter1 in self.table and parameter2 in self.table:
            if classifier is None:
                self._plot2d_helper(parameter1, parameter2, "green")
            else:
                self._plot2d_helper_with_classifier(parameter1, parameter2, classifier)
    # if additional_to_draw is not None:
    #     for function in additional_to_draw:
    #         function()
def _plot2d_helper_with_classifier(self, parameter1, parameter2, classifier):
    """Scatter-plot two columns, colouring each point by its target class.

    Classes 0/1 are blue/red; anything else is green. Each class gets a
    legend label exactly once (on its first plotted point).
    """
    def _class_legend(col_name, owner):
        # Build 'index - class name' legend entries for an encoded column;
        # returns '' when the column has no class mapping.
        entries = []
        if col_name in owner.class_dict:
            idx2class = owner.class_dict[col_name][1]
            for idx in idx2class:
                entries.append("{} - {}".format(idx, idx2class[idx]))
        return entries if len(entries) > 0 else ''
    import matplotlib.pyplot as plt
    xs = self.table[parameter1].data
    ys = self.table[parameter2].data
    target_name = classifier
    targets = self.table[target_name].data
    target_class_map = self.class_dict[target_name][1]
    labeled = [False, False, False]  # one flag per class bucket
    for row in range(len(xs)):
        t = targets[row]
        if t == 0:
            colour, label_, bucket = 'b', 'Class 0 as ' + str(target_class_map[t]), 0
        elif t == 1:
            colour, label_, bucket = 'r', 'Class 1 as ' + str(target_class_map[t]), 1
        else:
            colour, label_, bucket = 'g', 'Class is undefined', 2
        # Attach the legend label only to the first point of each bucket.
        if not labeled[bucket]:
            plt.scatter(xs[row], ys[row], color=colour, label=label_)
            labeled[bucket] = True
        else:
            plt.scatter(xs[row], ys[row], color=colour)
    plt.title(target_name + ' vs ' + parameter1 + ' & ' + parameter2)
    plt.legend(loc='best')
    plt.xlabel(parameter1 + ' ' + str(_class_legend(parameter1, self)))
    plt.ylabel(parameter2 + ' ' + str(_class_legend(parameter2, self)))
    plt.show()
def _get_target_name(self) -> str:
    """Return the name of the (single) defined target column."""
    # Iterating a dict yields its keys; take the first one.
    return list(self.target)[0]
def split_data(self, training_size: float, cv_size=None, shuffle=False) -> None:
    """
    Split data according to user's preferences.
    :param training_size: 0.3 - 0.9 desired part of data to use for AI training
    :param cv_size: cross-validation data part to test different algorithms
    :param shuffle: do we want to shuffle first? True/False
    :return: assigned pointers is self._split_pointers which shows how the data
             is split on training/cv/testing sets. This is nice to do instead of
             copying data
    """
    # Reject out-of-range sizes; train + cv must leave at least 5% for testing.
    if not 0.3 <= training_size <= 0.9 or ((cv_size is not None) and (training_size + cv_size) >= 0.95):
        print("Wrong train-test-cv attitude")
        return None
    if shuffle is True:
        self.shuffle()
    m = len(self)
    # Each pointer entry is [[start, end], enabled]: the sets are stored as
    # index ranges into the table instead of copied data.
    # NOTE(review): whether [start, end] is inclusive depends on
    # _generate_data_by_pointers (not visible here); training ends at
    # tr_p_end while the next set starts at tr_p_end + 1 — confirm no row
    # is dropped or duplicated at the boundaries.
    tr_p_st = 0
    tr_p_end = int(m * training_size)
    self._split_pointers[self._TRAINING] = [[tr_p_st, tr_p_end], True]
    ts_p_st = tr_p_end + 1
    ts_p_end = m - 1
    if cv_size is not None:
        # The CV range is carved out of what would otherwise be testing data.
        cv_part = int(cv_size * m)
        cv_p_st = tr_p_end + 1
        cv_p_end = cv_p_st + cv_part
        self._split_pointers[self._CV] = [[cv_p_st, cv_p_end], True]
        ts_p_st = cv_p_end + 1
    self._split_pointers[self._TESTING] = [[ts_p_st, ts_p_end], True]
    if cv_size is not None:
        print("Data was split as follows: {} training set, {} cross-validation set and {} testing set".
              format(training_size, cv_size, (1 - training_size - cv_size)))
    else:
        print("Data was split as follows: {} training set and {} testing set".
              format(training_size, 1 - training_size))
def shuffle(self) -> None:
    """
    Shuffle the table rows in place, uniformly at random.

    One permutation is drawn and applied to every column so rows stay
    aligned across columns.

    Fixes two defects of the previous random-swap implementation:
    * with exactly one row the `while` loop re-rolling the second index
      could never terminate (both indices were always 0);
    * repeated random transpositions do not yield a uniform shuffle —
      random.shuffle (Fisher-Yates) does.
    :return: None (data is shuffled in place)
    """
    import random
    order = list(range(len(self)))
    random.shuffle(order)
    for column_name in self.head:
        data = self.table[column_name].data
        # Replace contents in place so external references to the list stay valid.
        data[:] = [data[i] for i in order]
    print("Shuffle was done")
def add_new_feature(self, features, power=None) -> None:
    """
    Add new desired feature to use.
    :param features: feature str or list of features from main table, which user might
                     want to combine to create a new feature like x3 = x1 * x2,
                     where x3 - new feature, x1 and x2 - features from main table
    :param power: specific command to perform:
                  if None -> new_feature = features[0] * features[1] * ...
                  if 0.5 -> new_feature = sqrt(features[0]) for feature[0] >= 0
                  if positive int -> new_feature = pow(features[0], command)
    :return: new column of added feature as native one
    """
    def _validate_feature_name(_name: str, _head: dict):
        # True when the name is an existing column of the main table.
        return _name in _head
    if type(features) == str:
        features = [features]
    if features is None:
        print("Type features' names in a list format")
        return
    new_feature_name = ''
    new_column_obj = None
    if power is None:
        _validation_check = False
        for feature_name in features:
            if not _validate_feature_name(feature_name, self.table):
                # Unknown column: suggest the closest existing name and ask the user.
                proposed_feature_name = helper.check_spelling_helper(feature_name, self.head)
                user_input = self.__user_confirmation(feature_name, proposed_feature_name)
                if user_input[0].lower() == self._YES:
                    feature_name = proposed_feature_name
                else:
                    print("Skip {} feature".format(feature_name))
                    continue
            if new_column_obj is None:
                # First valid feature seeds the new column (copied, then reset).
                new_column_obj = self.table[feature_name].copy()
                new_column_obj.reset()
            else:
                _validation_check = True
                new_data = self.table[feature_name]
                # NOTE(review): the docstring and the '*'-joined name promise a
                # product (x1 * x2), but the values are combined with '+'.
                # Whether '+' is correct depends on what Column.reset() does to
                # the seeded data (not visible here) — confirm before changing.
                for idx in range(len(new_column_obj)):
                    new_column_obj.data[idx] = round((new_column_obj.data[idx] + new_data.data[idx]),
                                                     self.ROUND_AFTER_COMA)
            # Build the display name, joining constituent features with '*'.
            new_feature_name += "*" + feature_name if len(new_feature_name) > 0 else feature_name
        if new_column_obj is not None and _validation_check is True:
            self._add_feature_helper(new_feature_name, new_column_obj)
        else:
            if _validation_check is False:
                print("We cannot create same feature as we have in our main table")
            else:
                print("Please, write write power input")
    else:
        if power <= 0:
            print("Set write power as a positive number")
            return
        # Power path applies pow() to the first feature only.
        feature_name = features[0]
        new_feature_name = feature_name + '^' + "({})".format(power)
        new_column_obj = self.table[feature_name].copy()
        # NOTE(review): pow() with a negative base and fractional exponent
        # returns a complex number, and round() would then raise TypeError;
        # the docstring's "feature[0] >= 0" precondition is not enforced here.
        for idx in range(len(new_column_obj)):
            new_column_obj.data[idx] = round(pow(new_column_obj.data[idx], power),
                                             self.ROUND_AFTER_COMA)
        self._add_feature_helper(new_feature_name, new_column_obj)
def max_scaling(self, column_name=None) -> None:
"""
Min-Max scaling of assigned column or all table.
:param column_name: string column name which we want to scale
:return: None
"""
if column_name is not None:
column = self.table[column_name]
column.scaling()
print("Column {} was scaled".format(column_name))
else:
if self._data_is_scaled is True:
return
self._data_is_scaled = True
for column_name in self.table:
column = self.table[column_name]
column.scaling()
print("Column {} was scaled".format(column_name))
def deactivate_feature(self, feature_name):
"""
Remove feature from the training set.
:param feature_name: feature name as a string or list of strings
:return: None
"""
def _validate_feature_name(_name: str, _head: dict):
return _name in _head
if type(feature_name) == list:
for internal_feature_name in feature_name:
self.deactivate_feature(internal_feature_name)
return
if _validate_feature_name(feature_name, self.table):
if feature_name in self.features:
del self.features[feature_name]
print("Feature {} was disabled from the training set".format(feature_name))
else:
proposed_name = helper.check_spelling_helper(feature_name, self.head)
if proposed_name is not None:
print("You made a typo mistake. Did you mean {}?".format(proposed_name))
print("Type y/n")
user_input = input()
if user_input[0].lower() == self._YES:
self.deactivate_feature(proposed_name)
else:
print("Nothing was done")
def select_target(self, target_name: str):
"""
Select target to be used from self.table for AI.
:param target_name: target name per table as string
:return: None
"""
if not len(self.target):
self.activate_features(target_name, is_target=True)
else:
if not target_name == self._get_target_name():
print("Do you want to replace existed {} target? Enter y/n".format(self.target))
user_input = input()
if user_input[0].lower() == self._YES:
self.activate_features(target_name, is_target=True)
def activate_features(self, feature_name, is_target=False) -> None:
"""
Select feature to | |
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
"""Tests for the reportlab.platypus.paragraphs module.
"""
__version__=''' $Id: test_platypus_paragraphs.py 3959 2012-09-27 14:39:39Z robin $ '''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import sys, os, unittest
from string import split, strip, join, whitespace
from operator import truth
from types import StringType, ListType
from reportlab.pdfbase.pdfmetrics import stringWidth, registerFont, registerFontFamily
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus.paraparser import ParaParser
from reportlab.platypus.flowables import Flowable, DocAssert
from reportlab.lib.colors import Color
from reportlab.lib.units import cm
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.lib.utils import _className
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.xpreformatted import XPreformatted
from reportlab.platypus.frames import Frame, ShowBoundaryValue
from reportlab.platypus.doctemplate import PageTemplate, BaseDocTemplate, PageBreak, NextPageTemplate
from reportlab.platypus import tableofcontents
from reportlab.platypus.tableofcontents import TableOfContents
from reportlab.platypus.tables import TableStyle, Table
from reportlab.platypus.paragraph import *
from reportlab.platypus.paragraph import _getFragWords
def myMainPageFrame(canvas, doc):
    "Standard page decoration: frame rectangle plus a page number at the bottom."
    canvas.saveState()
    canvas.rect(2.5*cm, 2.5*cm, 15*cm, 25*cm)
    canvas.setFont('Times-Roman', 12)
    canvas.drawString(10*cm, cm, str(canvas.getPageNumber()))
    canvas.restoreState()
class MyDocTemplate(BaseDocTemplate):
    "Two-template document: a full-width 'normal' frame and a narrow 'special' one."
    _invalidInitArgs = ('pageTemplates',)
    def __init__(self, filename, **kw):
        self.allowSplitting = 0
        BaseDocTemplate.__init__(self, filename, **kw)
        wide = Frame(2.5*cm, 2.5*cm, 15*cm, 25*cm, id='F1')
        narrow = Frame(2.5*cm, 2.5*cm, 310, 25*cm, id='F2')
        self.addPageTemplates([
            PageTemplate('normal', [wide], myMainPageFrame),
            PageTemplate('special', [narrow], myMainPageFrame),
            ])
class ParagraphCorners(unittest.TestCase):
    "some corner cases which should parse"
    def check(self,text,bt = getSampleStyleSheet()['BodyText']):
        # Construction is the whole test: any parse failure becomes an
        # AssertionError naming the offending markup.
        try:
            P = Paragraph(text,style=bt)
        except Exception:
            raise AssertionError("'%s' should parse"%text)
    def test0(self):
        # Degenerate <para> markup that must still parse.
        self.check('<para />')
        self.check('<para/>')
        self.check('\t\t\t\n\n\n<para />')
        self.check('\t\t\t\n\n\n<para/>')
        self.check('<para\t\t\t\t/>')
        self.check('<para></para>')
        self.check('<para> </para>')
        self.check('\t\t\n\t\t\t <para> </para>')
    def test1(self):
        "This makes several special paragraphs."
        # Exercise the four textTransform modes, plain and multi-frag.
        story = []
        styleSheet = getSampleStyleSheet()
        bt = styleSheet['BodyText']
        btN = ParagraphStyle('BodyTextTTNone',parent=bt,textTransform='none')
        btL = ParagraphStyle('BodyTextTTLower',parent=bt,textTransform='lowercase')
        btU = ParagraphStyle('BodyTextTTUpper',parent=bt,textTransform='uppercase')
        btC = ParagraphStyle('BodyTextTTCapitalize',parent=bt,textTransform='capitalize')
        story.append(Paragraph('''This should be ORDINARY text.''',style=bt))
        story.append(Paragraph('''This should be ORDINARY text.''',style=btN))
        story.append(Paragraph('''This should be LOWER text.''',style=btL))
        story.append(Paragraph('''This should be upper text.''',style=btU))
        story.append(Paragraph('''This should be cAPITALIZED text.''',style=btC))
        story.append(Paragraph('''T<i>hi</i>s shoul<font color="red">d b</font>e <b>ORDINARY</b> text.''',style=bt))
        story.append(Paragraph('''T<i>hi</i>s shoul<font color="red">d b</font>e <b>ORDINARY</b> text.''',style=btN))
        story.append(Paragraph('''T<i>hi</i>s shoul<font color="red">d b</font>e <b>LOWER</b> text.''',style=btL))
        story.append(Paragraph('''T<i>hi</i>s shoul<font color="red">d b</font>e <b>upper</b> text.''',style=btU))
        story.append(Paragraph('''T<i>hi</i>s shoul<font color="red">d b</font>e <b>cAPITALIZED</b> text.''',style=btC))
        doc = MyDocTemplate(outputfile('test_platypus_specialparagraphs.pdf'))
        doc.multiBuild(story)
    def test2(self):
        '''CJK splitting in multi-frag case'''
        style = ParagraphStyle('test', wordWrap = 'CJK')
        p = Paragraph('bla <i>blub</i> '*130 , style)
        aW,aH=439.275590551,121.88976378
        w,h=p.wrap(aW,aH)
        S=p.split(aW,aH)
        assert len(S)==2, 'Multi frag CJK splitting failed'
        w0,h0=S[0].wrap(aW,aH)
        # BUGFIX: message previously referenced undefined name H0, raising
        # NameError instead of the intended AssertionError text.
        assert h0<=aH,'Multi-frag CJK split[0] has wrong height %s >= available %s' % (h0,aH)
        w1,h1=S[1].wrap(aW,aH)
        assert h0+h1==h, 'Multi-frag-CJK split[0].height(%s)+split[1].height(%s) don\'t add to original %s' % (h0,h1,h)
    def test3(self):
        '''compare CJK splitting in some edge cases'''
        from reportlab.pdfgen.canvas import Canvas
        from reportlab.platypus.paragraph import Paragraph
        from reportlab.lib.styles import ParagraphStyle
        from reportlab.pdfbase import pdfmetrics
        from reportlab.lib.enums import TA_LEFT
        sty = ParagraphStyle('A')
        sty.fontSize = 15
        sty.leading = sty.fontSize*1.2
        sty.fontName = 'Courier'
        sty.alignment = TA_LEFT
        sty.wordWrap = 'CJK'
        p0=Paragraph('ABCDEFGHIJKL]N',sty)
        p1=Paragraph('AB<font color="red">C</font>DEFGHIJKL]N',sty)
        canv = Canvas('test_platypus_paragraph_cjk3.pdf')
        ix = len(canv._code)
        aW = pdfmetrics.stringWidth('ABCD','Courier',15)
        # Draw each paragraph at full width, then at ~1 char width, and
        # compare the generated content stream against the known-good one.
        w,h=p0.wrap(aW,1000000)
        y = canv._pagesize[1]-72-h
        p0.drawOn(canv,72,y)
        w,h=p1.wrap(aW,1000000)
        y -= h+10
        p1.drawOn(canv,72,y)
        w,h=p0.wrap(aW*0.25-2,1000000)
        y -= h+10
        p0.drawOn(canv,72,y)
        w,h=p1.wrap(aW/4.-2,1000000)
        y -= h+10
        p1.drawOn(canv,72,y)
        assert canv._code[ix:]==['q', '1 0 0 1 72 697.8898 cm', 'q', '0 0 0 rg', 'BT 1 0 0 1 0 57 Tm /F2 15 Tf 18 TL (ABCD) Tj T* (EFGH) Tj T* (IJKL]) Tj T* (N) Tj T* ET', 'Q', 'Q', 'q', '1 0 0 1 72 615.8898 cm', 'q', 'BT 1 0 0 1 0 57 Tm 18 TL /F2 15 Tf 0 0 0 rg (AB) Tj 1 0 0 rg (C) Tj 0 0 0 rg (D) Tj T* (EFGH) Tj T* (IJKL]) Tj T* (N) Tj T* ET', 'Q', 'Q', 'q', '1 0 0 1 72 353.8898 cm', 'q', '0 0 0 rg', 'BT 1 0 0 1 0 237 Tm /F2 15 Tf 18 TL (A) Tj T* (B) Tj T* (C) Tj T* (D) Tj T* (E) Tj T* (F) Tj T* (G) Tj T* (H) Tj T* (I) Tj T* (J) Tj T* (K) Tj T* (L) Tj T* (]) Tj T* (N) Tj T* ET', 'Q', 'Q', 'q', '1 0 0 1 72 91.88976 cm', 'q', 'BT 1 0 0 1 0 237 Tm 18 TL /F2 15 Tf 0 0 0 rg (A) Tj T* (B) Tj T* 1 0 0 rg (C) Tj T* 0 0 0 rg (D) Tj T* (E) Tj T* (F) Tj T* (G) Tj T* (H) Tj T* (I) Tj T* (J) Tj T* (K) Tj T* (L) Tj T* (]) Tj T* (N) Tj T* ET', 'Q', 'Q']
        canv.showPage()
        canv.save()
class ParagraphSplitTestCase(unittest.TestCase):
"Test multi-page splitting of paragraphs (eyeball-test)."
    def test0(self):
        "Flow one paragraph around an image (ParagraphAndImage), eyeball-checked."
        # Build story: first a short wrapped paragraph, then one long enough
        # to span several pages, with the image on the left.
        story = []
        styleSheet = getSampleStyleSheet()
        bt = styleSheet['BodyText']
        text = '''If you imagine that the box of X's tothe left is
        an image, what I want to be able to do is flow a
        series of paragraphs around the image
        so that once the bottom of the image is reached, then text will flow back to the
        left margin. I know that it would be possible to something like this
        using tables, but I can't see how to have a generic solution.
        There are two examples of this in the demonstration section of the reportlab
        site.
        If you look at the "minimal" euro python conference brochure, at the end of the
        timetable section (page 8), there are adverts for "AdSu" and "O'Reilly". I can
        see how the AdSu one might be done generically, but the O'Reilly, unsure...
        I guess I'm hoping that I've missed something, and that
        it's actually easy to do using platypus.
        '''
        from reportlab.platypus.flowables import ParagraphAndImage, Image
        from reportlab.lib.testutils import testsFolder
        gif = os.path.join(testsFolder,'pythonpowered.gif')
        story.append(ParagraphAndImage(Paragraph(text,bt),Image(gif)))
        phrase = 'This should be a paragraph spanning at least three pages. '
        description = ''.join([('%d: '%i)+phrase for i in xrange(250)])
        story.append(ParagraphAndImage(Paragraph(description, bt),Image(gif),side='left'))
        # Output goes to a PDF for manual inspection.
        doc = MyDocTemplate(outputfile('test_platypus_paragraphandimage.pdf'))
        doc.multiBuild(story)
def test1(self):
"This makes one long multi-page paragraph."
# Build story.
story = []
styleSheet = getSampleStyleSheet()
h3 = styleSheet['Heading3']
bt = styleSheet['BodyText']
text = '''If you imagine that the box of X's tothe left is
an image, what I want to be able to do is flow a
series of paragraphs around the image
so that once the bottom of the image is reached, then text will flow back to the
left margin. I know that it would be possible to something like this
using tables, but I can't see how to have a generic solution.
There are two examples of this in the demonstration section of the reportlab
site.
If you look at the "minimal" euro python conference brochure, at the end of the
timetable section (page 8), there are adverts for "AdSu" and "O'Reilly". I can
see how the AdSu one might be done generically, but the O'Reilly, unsure...
I guess I'm hoping that I've missed something, and that
it's actually easy to do using platypus.We can do greek letters <greek>mDngG</greek>. This should be a
u with a dieresis on top <unichar code=0xfc/>="<unichar code="0xfc"/>" and this &#xfc;="ü" and this \\xc3\\xbc="\xc3\xbc". On the other hand this
should be a pound sign &pound;="£" and this an alpha &alpha;="α". You can have links in the page <link href="http://www.reportlab.com" color="blue">ReportLab</link> & <a href="http://www.reportlab.org" color="green">ReportLab.org</a>.
Use scheme "pdf:" to indicate an external PDF link, "http:", "https:" to indicate an external link eg something to open in
your browser. If an internal link begins with something that looks like a scheme, precede with "document:". <strike>This text should have a strike through it.</strike>
'''
from reportlab.platypus.flowables import ImageAndFlowables, Image
from reportlab.lib.testutils import testsFolder
gif = os.path.join(testsFolder,'pythonpowered.gif')
heading = Paragraph('This is a heading',h3)
story.append(ImageAndFlowables(Image(gif),[heading,Paragraph(text,bt)]))
phrase = 'This should be a paragraph spanning at least three pages. '
description = ''.join([('%d: '%i)+phrase for i in xrange(250)])
story.append(ImageAndFlowables(Image(gif),[heading,Paragraph(description, bt)],imageSide='left'))
story.append(NextPageTemplate('special'))
story.append(PageBreak())
VERA = ('Vera','VeraBd','VeraIt','VeraBI')
for v in VERA:
registerFont(TTFont(v,v+'.ttf'))
registerFontFamily(*(VERA[:1]+VERA))
story.append(ImageAndFlowables(
Image(gif,width=280,height=120),
Paragraph('''<font name="Vera">The <b>concept</b> of an <i>integrated</i> one <b><i>box</i></b> solution for <i><b>advanced</b></i> voice and
data applications began with the introduction of the IMACS. The
IMACS 200 carries on that tradition with an integrated solution
optimized for smaller port size applications that the IMACS could not
economically address. An array of the most popular interfaces and
features from the IMACS has been bundled into a small 2U chassis
providing the ultimate in ease of installation.</font>''',
style=ParagraphStyle(
name="base",
fontName="Helvetica",
leading=12,
leftIndent=0,
firstLineIndent=0,
spaceBefore = 9.5,
fontSize=9.5,
)
),
imageSide='left',
)
)
story.append(ImageAndFlowables(
Image(gif,width=240,height=120),
Paragraph('''The concept of an integrated one box | |
the value of the `ssh_public_keys` property.
"""
self._ssh_public_keys = value
    @property
    def groups(self):
        """
        Returns the value of the `groups` property.
        """
        return self._groups
    @groups.setter
    def groups(self, value):
        """
        Sets the value of the `groups` property.
        The value is stored as-is; no runtime type check is applied
        (unlike typed setters such as `domain`).
        """
        self._groups = value
    @property
    def tags(self):
        """
        Returns the value of the `tags` property.
        """
        return self._tags
    @tags.setter
    def tags(self, value):
        """
        Sets the value of the `tags` property.
        The value is stored as-is; no runtime type check is applied.
        """
        self._tags = value
    @property
    def domain(self):
        """
        Returns the value of the `domain` property.
        """
        return self._domain
    @domain.setter
    def domain(self, value):
        """
        Sets the value of the `domain` property.
        The value must be a `Domain` instance (or None); enforced by
        `Struct._check_type`.
        """
        Struct._check_type('domain', value, Domain)
        self._domain = value
    @property
    def permissions(self):
        """
        Returns the value of the `permissions` property.
        """
        return self._permissions
    @permissions.setter
    def permissions(self, value):
        """
        Sets the value of the `permissions` property.
        The value is stored as-is; no runtime type check is applied.
        """
        self._permissions = value
    @property
    def principal(self):
        """
        Returns the value of the `principal` property.
        """
        return self._principal
    @principal.setter
    def principal(self, value):
        """
        Sets the value of the `principal` property.
        The value is stored as-is; no runtime type check is applied.
        """
        self._principal = value
    @property
    def password(self):
        """
        Returns the value of the `password` property.
        """
        return self._password
    @password.setter
    def password(self, value):
        """
        Sets the value of the `password` property.
        The value is stored as-is; no runtime type check is applied.
        """
        self._password = value
    @property
    def namespace(self):
        """
        Returns the value of the `namespace` property.
        """
        return self._namespace
    @namespace.setter
    def namespace(self, value):
        """
        Sets the value of the `namespace` property.
        The value is stored as-is; no runtime type check is applied.
        """
        self._namespace = value
    @property
    def email(self):
        """
        Returns the value of the `email` property.
        """
        return self._email
    @email.setter
    def email(self, value):
        """
        Sets the value of the `email` property.
        The value is stored as-is; no runtime type check is applied.
        """
        self._email = value
    @property
    def user_options(self):
        """
        Returns the value of the `user_options` property.
        """
        return self._user_options
    @user_options.setter
    def user_options(self, value):
        """
        Sets the value of the `user_options` property.
        The value is stored as-is; no runtime type check is applied.
        """
        self._user_options = value
    @property
    def user_name(self):
        """
        Returns the value of the `user_name` property.
        """
        return self._user_name
    @user_name.setter
    def user_name(self, value):
        """
        Sets the value of the `user_name` property.
        The value is stored as-is; no runtime type check is applied.
        """
        self._user_name = value
    @property
    def options(self):
        """
        Returns the value of the `options` property.
        """
        return self._options
    @options.setter
    def options(self, value):
        """
        Sets the value of the `options` property.
        The value is stored as-is; no runtime type check is applied.
        """
        self._options = value
    @property
    def domain_entry_id(self):
        """
        Returns the value of the `domain_entry_id` property.
        """
        return self._domain_entry_id
    @domain_entry_id.setter
    def domain_entry_id(self, value):
        """
        Sets the value of the `domain_entry_id` property.
        The value is stored as-is; no runtime type check is applied.
        """
        self._domain_entry_id = value
    @property
    def department(self):
        """
        Returns the value of the `department` property.
        """
        return self._department
    @department.setter
    def department(self, value):
        """
        Sets the value of the `department` property.
        The value is stored as-is; no runtime type check is applied.
        """
        self._department = value
class UserOption(Identified):
    """An option attached to a user: a named `content` payload plus its `user`."""
    def __init__(self, comment=None, content=None, description=None,
                 id=None, name=None, user=None):
        super(UserOption, self).__init__(
            comment=comment,
            description=description,
            id=id,
            name=name,
        )
        self.content = content
        self.user = user
    @property
    def content(self):
        """The option's payload; stored as-is, no validation."""
        return self._content
    @content.setter
    def content(self, value):
        """Store the option's payload."""
        self._content = value
    @property
    def user(self):
        """The `User` this option belongs to."""
        return self._user
    @user.setter
    def user(self, value):
        """Attach the owning user; must be a `User` (checked via `Struct._check_type`)."""
        Struct._check_type('user', value, User)
        self._user = value
class Value(Struct):
    """Simple pair of a `datum` and an accompanying `detail`; both unvalidated."""
    def __init__(self, datum=None, detail=None):
        super(Value, self).__init__()
        self.datum = datum
        self.detail = detail
    @property
    def datum(self):
        """The stored datum."""
        return self._datum
    @datum.setter
    def datum(self, value):
        """Store a new datum; no type check."""
        self._datum = value
    @property
    def detail(self):
        """The stored detail."""
        return self._detail
    @detail.setter
    def detail(self, value):
        """Store a new detail; no type check."""
        self._detail = value
class VcpuPin(Struct):
    """Association of a virtual CPU (`vcpu`) with a CPU set (`cpu_set`)."""
    def __init__(self, cpu_set=None, vcpu=None):
        super(VcpuPin, self).__init__()
        self.cpu_set = cpu_set
        self.vcpu = vcpu
    @property
    def cpu_set(self):
        """The CPU set of this pin."""
        return self._cpu_set
    @cpu_set.setter
    def cpu_set(self, value):
        """Store the CPU set; no type check."""
        self._cpu_set = value
    @property
    def vcpu(self):
        """The pinned virtual CPU."""
        return self._vcpu
    @vcpu.setter
    def vcpu(self, value):
        """Store the virtual CPU; no type check."""
        self._vcpu = value
class Vendor(Identified):
    """Vendor entity; adds no fields beyond the `Identified` base."""
    def __init__(self, comment=None, description=None, id=None, name=None):
        super(Vendor, self).__init__(
            comment=comment,
            description=description,
            id=id,
            name=name,
        )
class Version(Identified):
    """Version split into build/major/minor/revision parts plus a full string."""
    def __init__(self, build=None, comment=None, description=None,
                 full_version=None, id=None, major=None, minor=None,
                 name=None, revision=None):
        super(Version, self).__init__(
            comment=comment,
            description=description,
            id=id,
            name=name,
        )
        self.build = build
        self.full_version = full_version
        self.major = major
        self.minor = minor
        self.revision = revision
    @property
    def build(self):
        """Build component; stored as-is."""
        return self._build
    @build.setter
    def build(self, value):
        self._build = value
    @property
    def full_version(self):
        """Complete version string; stored as-is."""
        return self._full_version
    @full_version.setter
    def full_version(self, value):
        self._full_version = value
    @property
    def major(self):
        """Major component; stored as-is."""
        return self._major
    @major.setter
    def major(self, value):
        self._major = value
    @property
    def minor(self):
        """Minor component; stored as-is."""
        return self._minor
    @minor.setter
    def minor(self, value):
        self._minor = value
    @property
    def revision(self):
        """Revision component; stored as-is."""
        return self._revision
    @revision.setter
    def revision(self, value):
        self._revision = value
class VirtioScsi(Struct):
    """Wrapper around a single `enabled` flag."""
    def __init__(self, enabled=None):
        super(VirtioScsi, self).__init__()
        self.enabled = enabled
    @property
    def enabled(self):
        """The `enabled` flag; stored as-is, no validation."""
        return self._enabled
    @enabled.setter
    def enabled(self, value):
        """Store the `enabled` flag."""
        self._enabled = value
class VirtualNumaNode(NumaNode):
    """A `NumaNode` extended with pin list, tune mode and its owning `Vm`."""
    def __init__(self, comment=None, cpu=None, description=None, host=None,
                 id=None, index=None, memory=None, name=None,
                 node_distance=None, numa_node_pins=None, numa_tune_mode=None,
                 statistics=None, vm=None):
        super(VirtualNumaNode, self).__init__(
            comment=comment,
            cpu=cpu,
            description=description,
            host=host,
            id=id,
            index=index,
            memory=memory,
            name=name,
            node_distance=node_distance,
            statistics=statistics,
        )
        self.numa_node_pins = numa_node_pins
        self.numa_tune_mode = numa_tune_mode
        self.vm = vm
    @property
    def numa_node_pins(self):
        """The node's pin list; stored as-is, no validation."""
        return self._numa_node_pins
    @numa_node_pins.setter
    def numa_node_pins(self, value):
        self._numa_node_pins = value
    @property
    def numa_tune_mode(self):
        """The node's `NumaTuneMode`."""
        return self._numa_tune_mode
    @numa_tune_mode.setter
    def numa_tune_mode(self, value):
        """Must be a `NumaTuneMode` (checked via `Struct._check_type`)."""
        Struct._check_type('numa_tune_mode', value, NumaTuneMode)
        self._numa_tune_mode = value
    @property
    def vm(self):
        """The `Vm` this node belongs to."""
        return self._vm
    @vm.setter
    def vm(self, value):
        """Must be a `Vm` (checked via `Struct._check_type`)."""
        Struct._check_type('vm', value, Vm)
        self._vm = value
class Vlan(Struct):
    """VLAN described solely by its `id`; the value is stored unvalidated."""
    def __init__(self, id=None):
        super(Vlan, self).__init__()
        self.id = id
    @property
    def id(self):
        """The VLAN id."""
        return self._id
    @id.setter
    def id(self, value):
        """Store the VLAN id; no type check."""
        self._id = value
class VmBase(Identified):
    def __init__(
        self,
        auto_pinning_policy=None,
        bios=None,
        cluster=None,
        comment=None,
        console=None,
        cpu=None,
        cpu_pinning_policy=None,
        cpu_profile=None,
        cpu_shares=None,
        creation_time=None,
        custom_compatibility_version=None,
        custom_cpu_model=None,
        custom_emulated_machine=None,
        custom_properties=None,
        delete_protected=None,
        description=None,
        display=None,
        domain=None,
        high_availability=None,
        id=None,
        initialization=None,
        io=None,
        large_icon=None,
        lease=None,
        memory=None,
        memory_policy=None,
        migration=None,
        migration_downtime=None,
        multi_queues_enabled=None,
        name=None,
        origin=None,
        os=None,
        placement_policy=None,
        quota=None,
        rng_device=None,
        serial_number=None,
        small_icon=None,
        soundcard_enabled=None,
        sso=None,
        start_paused=None,
        stateless=None,
        storage_domain=None,
        storage_error_resume_behaviour=None,
        time_zone=None,
        tpm_enabled=None,
        tunnel_migration=None,
        type=None,
        usb=None,
        virtio_scsi=None,
        virtio_scsi_multi_queues=None,
        virtio_scsi_multi_queues_enabled=None,
    ):
        # Generated initializer for the fields common to VM-like types.
        # comment/description/id/name go to the `Identified` base; every
        # other argument is assigned through its property setter, so any
        # per-property type validation defined on this class applies here.
        super(VmBase, self).__init__(
            comment=comment,
            description=description,
            id=id,
            name=name,
        )
        self.auto_pinning_policy = auto_pinning_policy
        self.bios = bios
        self.cluster = cluster
        self.console = console
        self.cpu = cpu
        self.cpu_pinning_policy = cpu_pinning_policy
        self.cpu_profile = cpu_profile
        self.cpu_shares = cpu_shares
        self.creation_time = creation_time
        self.custom_compatibility_version = custom_compatibility_version
        self.custom_cpu_model = custom_cpu_model
        self.custom_emulated_machine = custom_emulated_machine
        self.custom_properties = custom_properties
        self.delete_protected = delete_protected
        self.display = display
        self.domain = domain
        self.high_availability = high_availability
        self.initialization = initialization
        self.io = io
        self.large_icon = large_icon
        self.lease = lease
        self.memory = memory
        self.memory_policy = memory_policy
        self.migration = migration
        self.migration_downtime = migration_downtime
        self.multi_queues_enabled = multi_queues_enabled
        self.origin = origin
        self.os = os
        self.placement_policy = placement_policy
        self.quota = quota
        self.rng_device = rng_device
        self.serial_number = serial_number
        self.small_icon = small_icon
        self.soundcard_enabled = soundcard_enabled
        self.sso = sso
        self.start_paused = start_paused
        self.stateless = stateless
        self.storage_domain = storage_domain
        self.storage_error_resume_behaviour = storage_error_resume_behaviour
        self.time_zone = time_zone
        self.tpm_enabled = tpm_enabled
        self.tunnel_migration = tunnel_migration
        self.type = type
        self.usb = usb
        self.virtio_scsi = virtio_scsi
        self.virtio_scsi_multi_queues = virtio_scsi_multi_queues
        self.virtio_scsi_multi_queues_enabled = virtio_scsi_multi_queues_enabled
@property
def delete_protected(self):
"""
Returns the value of the `delete_protected` property.
| |
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2015-2021 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""QWidget displaying a 2D image with histograms on its sides.
The :class:`ImageView` implements this widget, and
:class:`ImageViewMainWindow` provides a main window with additional toolbar
and status bar.
Basic usage of :class:`ImageView` is through the following methods:
- :meth:`ImageView.getColormap`, :meth:`ImageView.setColormap` to update the
default colormap to use and update the currently displayed image.
- :meth:`ImageView.setImage` to update the displayed image.
For an example of use, see `imageview.py` in :ref:`sample-code`.
"""
from __future__ import division
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "26/04/2018"
import logging
import numpy
import collections
import silx
from .. import qt
from .. import colors
from . import items, PlotWindow, PlotWidget, actions
from ..colors import Colormap
from ..colors import cursorColorForColormap
from .tools import LimitsToolBar
from .Profile import ProfileToolBar
from ...utils.proxy import docstring
from .tools.RadarView import RadarView
from .utils.axis import SyncAxes
from ..utils import blockSignals
from . import _utils
from .tools.profile import manager
from .tools.profile import rois
_logger = logging.getLogger(__name__)
# Bundle of everything needed to (re)draw the side histograms: the clipped
# data subranges, the summed histograms with their min/max, and the
# step-curve coordinates ready for PlotWidget.addCurve.
ProfileSumResult = collections.namedtuple("ProfileResult",
                                          ["dataXRange", "dataYRange",
                                           'histoH', 'histoHRange',
                                           'histoV', 'histoVRange',
                                           "xCoords", "xData",
                                           "yCoords", "yData"])
def computeProfileSumOnRange(imageItem, xRange, yRange, cache=None):
    """
    Compute a full vertical and horizontal profile on an image item using a
    range in the plot referential.

    Optionally takes a previously computed result to be able to skip the
    computation when the visible data subset is unchanged.

    :param imageItem: plot image item providing value data, origin and scale
    :param tuple xRange: (min, max) visible x range in plot coordinates
    :param tuple yRange: (min, max) visible y range in plot coordinates
    :param ProfileSumResult cache: previous result, returned unchanged when
        the clipped data ranges match
    :rtype: ProfileSumResult, or None when the range misses the image
    """
    data = imageItem.getValueData(copy=False)
    origin = imageItem.getOrigin()
    scale = imageItem.getScale()
    height, width = data.shape
    xMin, xMax = xRange
    yMin, yMax = yRange
    # Convert plot area limits to image coordinates
    # and work in image coordinates (i.e., in pixels)
    xMin = int((xMin - origin[0]) / scale[0])
    xMax = int((xMax - origin[0]) / scale[0])
    yMin = int((yMin - origin[1]) / scale[1])
    yMax = int((yMax - origin[1]) / scale[1])
    if (xMin >= width or xMax < 0 or
            yMin >= height or yMax < 0):
        return None
    # The image is at least partly in the plot area
    # Get the visible bounds in image coords (i.e., in pixels),
    # clamped to the image extent; the max bound is made exclusive (+1).
    subsetXMin = 0 if xMin < 0 else xMin
    subsetXMax = (width if xMax >= width else xMax) + 1
    subsetYMin = 0 if yMin < 0 else yMin
    subsetYMax = (height if yMax >= height else yMax) + 1
    if cache is not None:
        if ((subsetXMin, subsetXMax) == cache.dataXRange and
                (subsetYMin, subsetYMax) == cache.dataYRange):
            # The visible area of data is the same
            return cache
    # Rebuild histograms for visible area (NaNs are ignored by nansum)
    visibleData = data[subsetYMin:subsetYMax,
                       subsetXMin:subsetXMax]
    histoHVisibleData = numpy.nansum(visibleData, axis=0)
    histoVVisibleData = numpy.nansum(visibleData, axis=1)
    histoHMin = numpy.nanmin(histoHVisibleData)
    histoHMax = numpy.nanmax(histoHVisibleData)
    histoVMin = numpy.nanmin(histoVVisibleData)
    histoVMax = numpy.nanmax(histoVVisibleData)
    # Convert to histogram curve and update plots
    # Taking into account origin and scale.
    # Each bin is duplicated ((coords + 1) // 2 / coords // 2) so the
    # curve draws as steps rather than a line through bin centers.
    coords = numpy.arange(2 * histoHVisibleData.size)
    xCoords = (coords + 1) // 2 + subsetXMin
    xCoords = origin[0] + scale[0] * xCoords
    xData = numpy.take(histoHVisibleData, coords // 2)
    coords = numpy.arange(2 * histoVVisibleData.size)
    yCoords = (coords + 1) // 2 + subsetYMin
    yCoords = origin[1] + scale[1] * yCoords
    yData = numpy.take(histoVVisibleData, coords // 2)
    result = ProfileSumResult(
        dataXRange=(subsetXMin, subsetXMax),
        dataYRange=(subsetYMin, subsetYMax),
        histoH=histoHVisibleData,
        histoHRange=(histoHMin, histoHMax),
        histoV=histoVVisibleData,
        histoVRange=(histoVMin, histoVMax),
        xCoords=xCoords,
        xData=xData,
        yCoords=yCoords,
        yData=yData)
    return result
class _SideHistogram(PlotWidget):
    """
    Widget displaying one of the side profile of the ImageView.

    Shows either the horizontal or the vertical profile, depending on
    ``direction``.  Two curves can be displayed: the sum over the visible
    image area ("profilesum") and an explicit ROI profile ("profile");
    when a ROI profile is set it takes precedence over the sum.

    Implement ProfileWindow
    """
    sigClose = qt.Signal()
    # Emitted with the mouse position in data coordinates (x, y)
    sigMouseMoved = qt.Signal(float, float)
    def __init__(self, parent=None, backend=None, direction=qt.Qt.Horizontal):
        super(_SideHistogram, self).__init__(parent=parent, backend=backend)
        self._direction = direction
        self.sigPlotSignal.connect(self._plotEvents)
        self._color = "blue"
        # Current ROI profile; takes precedence over the sum when not None
        self.__profile = None
        # Current profile sum of the visible image area
        self.__profileSum = None
    def _plotEvents(self, eventDict):
        """Callback for horizontal histogram plot events."""
        if eventDict['event'] == 'mouseMoved':
            self.sigMouseMoved.emit(eventDict['x'], eventDict['y'])
    def setProfileColor(self, color):
        # Color used for the "profilesum" curve
        self._color = color
    def setProfileSum(self, result):
        # Store the new sum; only redraw when no ROI profile is displayed,
        # since the ROI profile takes precedence.
        self.__profileSum = result
        if self.__profile is None:
            self.__drawProfileSum()
    def prepareWidget(self, roi):
        """Implements `ProfileWindow`"""
        pass
    def setRoiProfile(self, roi):
        """Implements `ProfileWindow`"""
        if roi is None:
            return
        # Remember the ROI color for drawing the "profile" curve
        self._roiColor = colors.rgba(roi.getColor())
    def getProfile(self):
        """Implements `ProfileWindow`"""
        return self.__profile
    def setProfile(self, data):
        """Implements `ProfileWindow`"""
        self.__profile = data
        if data is None:
            # No ROI profile anymore: fall back to the profile sum
            self.__drawProfileSum()
        else:
            self.__drawProfile()
    def __drawProfileSum(self):
        """Only draw the profile sum on the plot.
        Other elements are removed
        """
        profileSum = self.__profileSum
        # Remove the ROI profile curve if present
        try:
            self.removeCurve('profile')
        except Exception:
            pass
        if profileSum is None:
            try:
                self.removeCurve('profilesum')
            except Exception:
                pass
            return
        # Pick the data orientation matching this histogram's direction
        if self._direction == qt.Qt.Horizontal:
            xx, yy = profileSum.xCoords, profileSum.xData
        elif self._direction == qt.Qt.Vertical:
            xx, yy = profileSum.yData, profileSum.yCoords
        else:
            assert False
        self.addCurve(xx, yy,
                      xlabel='', ylabel='',
                      legend="profilesum",
                      color=self._color,
                      linestyle='-',
                      selectable=False,
                      resetzoom=False)
        self.__updateLimits()
    def __drawProfile(self):
        """Only draw the profile on the plot.
        Other elements are removed
        """
        profile = self.__profile
        try:
            self.removeCurve('profilesum')
        except Exception:
            pass
        if profile is None:
            try:
                self.removeCurve('profile')
            except Exception:
                pass
            # Redisplay the profile sum instead
            self.setProfileSum(self.__profileSum)
            return
        if self._direction == qt.Qt.Horizontal:
            xx, yy = profile.coords, profile.profile
        elif self._direction == qt.Qt.Vertical:
            xx, yy = profile.profile, profile.coords
        else:
            assert False
        self.addCurve(xx,
                      yy,
                      legend="profile",
                      color=self._roiColor,
                      resetzoom=False)
        self.__updateLimits()
    def __updateLimits(self):
        # Compute the value range of whatever curve is displayed and apply it
        # to the data axis (Y for horizontal, X for vertical histograms).
        if self.__profile:
            data = self.__profile.profile
            vMin = numpy.nanmin(data)
            vMax = numpy.nanmax(data)
        elif self.__profileSum is not None:
            if self._direction == qt.Qt.Horizontal:
                vMin, vMax = self.__profileSum.histoHRange
            elif self._direction == qt.Qt.Vertical:
                vMin, vMax = self.__profileSum.histoVRange
            else:
                assert False
        else:
            # Nothing is displayed
            vMin, vMax = 0, 0
        # Tune the result using the data margins
        margins = self.getDataMargins()
        if self._direction == qt.Qt.Horizontal:
            _, _, vMin, vMax = _utils.addMarginsToLimits(margins, False, False, 0, 0, vMin, vMax)
        elif self._direction == qt.Qt.Vertical:
            vMin, vMax, _, _ = _utils.addMarginsToLimits(margins, False, False, vMin, vMax, 0, 0)
        else:
            assert False
        if self._direction == qt.Qt.Horizontal:
            dataAxis = self.getYAxis()
        elif self._direction == qt.Qt.Vertical:
            dataAxis = self.getXAxis()
        else:
            assert False
        # Avoid triggering limit-changed callbacks while updating programmatically
        with blockSignals(dataAxis):
            dataAxis.setLimits(vMin, vMax)
class _CustomProfileManager(manager.ProfileManager):
    """This custom profile manager uses a single predefined profile window
    if it is specified. Else the behavior is the same as the default
    ProfileManager """

    # Class-level defaults so createProfileWindow/clearProfileWindow cannot
    # raise AttributeError when setProfileWindow/setSideWidgets were never
    # called on this instance.
    __profileWindow = None
    _horizontalWidget = None
    _verticalWidget = None

    def setProfileWindow(self, profileWindow):
        """Set the single predefined profile window (may be None)."""
        self.__profileWindow = profileWindow

    def setSideWidgets(self, horizontalWidget, verticalWidget):
        """Set the side histogram widgets used for line ROI profiles."""
        self._horizontalWidget = horizontalWidget
        self._verticalWidget = verticalWidget

    def createProfileWindow(self, plot, roi):
        """Route line ROIs to the side histograms; other ROIs go to the
        predefined window if set, else to the default implementation."""
        if isinstance(roi, rois.ProfileImageVerticalLineROI):
            return self._verticalWidget
        if isinstance(roi, rois.ProfileImageHorizontalLineROI):
            return self._horizontalWidget
        if self.__profileWindow is not None:
            return self.__profileWindow
        else:
            return super(_CustomProfileManager, self).createProfileWindow(plot, roi)

    def clearProfileWindow(self, profileWindow):
        """Clear the profile from our windows, or defer to the default."""
        if profileWindow is self._horizontalWidget:
            profileWindow.setProfile(None)
        elif profileWindow is self._verticalWidget:
            profileWindow.setProfile(None)
        elif self.__profileWindow is not None:
            self.__profileWindow.setProfile(None)
        else:
            return super(_CustomProfileManager, self).clearProfileWindow(profileWindow)
class _ProfileToolBar(ProfileToolBar):
    """Profile toolbar variant that plugs in :class:`_CustomProfileManager`."""
    def createProfileManager(self, parent, plot):
        """Return the custom profile manager used by this toolbar."""
        profileManager = _CustomProfileManager(parent, plot)
        return profileManager
class ImageView(PlotWindow):
"""Display a single image with horizontal and vertical histograms.
Use :meth:`setImage` to control the displayed image.
This class also provides the :class:`silx.gui.plot.Plot` API.
The :class:`ImageView` inherits from :class:`.PlotWindow` (which provides
the toolbars) and also exposes :class:`.PlotWidget` API for further
plot control (plot title, axes labels, aspect ratio, ...).
:param parent: The parent of this widget or None.
:param backend: The backend to use for the plot (default: matplotlib).
See :class:`.PlotWidget` for the list of supported backend.
:type backend: str or :class:`BackendBase.BackendBase`
"""
HISTOGRAMS_COLOR = 'blue'
"""Color to use for the side histograms."""
HISTOGRAMS_HEIGHT = 200
"""Height in pixels of the side histograms."""
IMAGE_MIN_SIZE = 200
"""Minimum size in pixels of the image area."""
# Qt signals
valueChanged = qt.Signal(float, float, float)
"""Signals that the data value under the cursor has changed.
It provides: row, column, data value.
When the cursor is over an histogram, either row or column is Nan
and the provided data value is the histogram value
(i.e., the sum along the corresponding row/column).
Row and columns are either Nan or integer values.
"""
def __init__(self, parent=None, backend=None):
self._imageLegend = '__ImageView__image' + str(id(self))
| |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 15 10:08:59 2017
@author: Lennart
This class exists to handle all the writing of the character to the PDF.
"""
from Wolke import Wolke
import PdfSerializer
import Definitionen
import Objekte
import Talentbox
from subprocess import check_output
import os
import math
import logging
import tempfile
from Charakter import KampfstilMod
from Hilfsmethoden import Hilfsmethoden, WaffeneigenschaftException
import sys
import traceback
from EventBus import EventBus
from CheatsheetGenerator import CheatsheetGenerator
from CharakterPrintUtility import CharakterPrintUtility
import yaml
from shutil import which
import platform
from PyQt5 import QtWidgets, QtCore, QtGui
import copy
class PdfExporter(object):
def __init__(self):
self.CharakterBogen = None
self.ExtraPage = os.path.join("Data", "ExtraSpells.pdf")
self.PrintRules = True
self.RulesPage = os.path.join("Data", "Regeln.pdf")
self.Energie = ""
self.CheatsheetGenerator = CheatsheetGenerator()
self.MergePerLineCount = 3
    def setCharakterbogen(self, charakterbogen):
        # Select the character-sheet template used by pdfErstellen; it provides
        # the template path (filePath) and layout limits such as maxVorteile.
        self.CharakterBogen = charakterbogen
def pdfErstellen(self, filename, printRules, progressCallback):
progressCallback(0)
'''
This entire subblock is responsible for filling all the fields of the
Charakterbogen. It has been broken down into seven subroutines for
testing purposes,
'''
fields = {}
self.pdfErsterBlock(fields)
self.pdfZweiterBlock(fields)
extraVorteile = self.pdfDritterBlock(fields)
self.pdfVierterBlock(fields)
self.pdfFünfterBlock(fields)
(extraUeber, extraTalente) = self.pdfSechsterBlock(fields)
self.pdfSiebterBlock(fields)
self.pdfAchterBlock(fields)
# Plugins die felder filtern lassen
fields = EventBus.applyFilter("pdf_export", fields)
progressCallback(10)
# PDF erstellen - Felder bleiben bearbeitbar
handle, out_file = tempfile.mkstemp()
os.close(handle)
allPages = [out_file]
PdfSerializer.write_pdf(self.CharakterBogen.filePath, fields, out_file, False)
progressCallback(20)
# Extraseiten
extraPageAdded = False
if len(extraVorteile) > 0 or len(extraUeber) > 0 or len(extraTalente):
if not os.path.isfile(self.ExtraPage):
raise Exception()
extraPageAdded = True
pageCount = 0
while len(extraVorteile) > 0 or len(extraUeber) > 0 or len(extraTalente) > 0:
pageCount += 1
fieldsNew = {}
self.createExtra(fieldsNew, extraVorteile, extraUeber, extraTalente)
fieldsNew = EventBus.applyFilter("pdf_export_extrapage", fieldsNew)
handle, out_file = tempfile.mkstemp()
os.close(handle)
allPages.append(out_file)
PdfSerializer.write_pdf(self.ExtraPage, fieldsNew, out_file, False)
progressCallback(20 + min(35, 20 + 3*pageCount))
progressCallback(35)
#Entferne Seite 3, falls keine übernatürlichen Fertigkeiten
if not ('Uebervorteil1' in fields) and \
not ('Ueberfer1NA' in fields) and \
not ('Uebertal1NA' in fields) and not extraPageAdded:
handle, out_file = tempfile.mkstemp()
os.close(handle)
PdfSerializer.shrink(allPages[0], 1, self.CharakterBogen.seitenProfan, out_file)
os.remove(allPages[0])
allPages[0] = out_file
progressCallback(40)
if printRules:
rules, ruleLineCounts = self.CheatsheetGenerator.prepareRules()
startIndex = 0
pageCount = 0
rulesFields = {}
while startIndex != -1:
pageCount += 1
rulesFields["Seite"] = pageCount
str, startIndex = self.CheatsheetGenerator.writeRules(rules, ruleLineCounts, startIndex)
rulesFields["Regeln1"] = str
rulesFields["Regeln2"] = ""
if startIndex != -1:
str, startIndex = self.CheatsheetGenerator.writeRules(rules, ruleLineCounts, startIndex)
rulesFields["Regeln2"] = str
handle, out_file = tempfile.mkstemp()
os.close(handle)
PdfSerializer.write_pdf(self.RulesPage, rulesFields, out_file, False)
allPages.append(out_file)
progressCallback(min(70, 40 + 3*pageCount))
progressCallback(70)
allPages = self.concatImage(allPages)
allPages = EventBus.applyFilter("pdf_concat", allPages)
progressCallback(75)
PdfSerializer.concat(allPages, filename)
progressCallback(90)
for page in allPages:
os.remove(page)
progressCallback(95)
EventBus.doAction("pdf_geschrieben", { "filename" : filename })
progressCallback(100)
#Open PDF with default application:
if Wolke.Settings['PDF-Open']:
os.startfile(filename, 'open')
def pdfErsterBlock(self, fields):
logging.debug("PDF Block 1")
fields['Name'] = Wolke.Char.name
fields['Rasse'] = Wolke.Char.rasse
fields['Kultur'] = Wolke.Char.heimat
fields['Statu'] = Definitionen.Statusse[Wolke.Char.status]
fields['Finanzen'] = Definitionen.Finanzen[Wolke.Char.finanzen]
fields['Kurzb'] = Wolke.Char.kurzbeschreibung
fields['Schip'] = Wolke.Char.schipsMax
if Wolke.Char.finanzenAnzeigen:
fields['Schipm'] = Wolke.Char.schips
# Erste Acht Eigenheiten
for i in range(0, min(8, len(Wolke.Char.eigenheiten))):
fields['Eigen' + str(i+1)] = Wolke.Char.eigenheiten[i]
    def pdfZweiterBlock(self, fields):
        """Fill attributes, derived values (WS/MR/GS/INI) and energy fields."""
        logging.debug("PDF Block 2")
        for key in Definitionen.Attribute:
            fields[key] = Wolke.Char.attribute[key].wert
            # Both the '2' and '3' fields receive the probenwert
            fields[key + '2'] = Wolke.Char.attribute[key].probenwert
            fields[key + '3'] = Wolke.Char.attribute[key].probenwert
        fields['WundschwelleBasis'] = Wolke.Char.wsBasis
        fields['Wundschwelle'] = Wolke.Char.ws
        fields['WS'] = Wolke.Char.ws
        # Value used to tick the checkbox fields below
        checked = 'Yes'
        if "Unverwüstlich" in Wolke.Char.vorteile:
            fields['ModUnverwuestlich'] = checked
        fields['MagieresistenzBasis'] = Wolke.Char.mrBasis
        fields['Magieresistenz'] = Wolke.Char.mr
        if "Willensstark I" in Wolke.Char.vorteile:
            fields['ModWillensstark1'] = checked
        if "Willensstark II" in Wolke.Char.vorteile:
            fields['ModWillensstark2'] = checked
        if "Unbeugsamkeit" in Wolke.Char.vorteile:
            fields['ModUnbeugsam'] = checked
        fields['GeschwindigkeitBasis'] = Wolke.Char.gsBasis
        fields['Geschwindigkeit'] = Wolke.Char.gs
        if "Flink I" in Wolke.Char.vorteile:
            fields['ModFlink1'] = checked
        if "Flink II" in Wolke.Char.vorteile:
            fields['ModFlink2'] = checked
        fields['SchadensbonusBasis'] = Wolke.Char.schadensbonusBasis
        fields['Schadensbonus'] = Wolke.Char.schadensbonus
        fields['InitiativeBasis'] = Wolke.Char.iniBasis
        fields['Initiative'] = Wolke.Char.ini
        fields['INIm'] = Wolke.Char.ini
        if "Kampfreflexe" in Wolke.Char.vorteile:
            fields['ModKampfreflexe'] = checked
        if "Gefäß der Sterne" in Wolke.Char.vorteile:
            fields['ModGefaess'] = checked
        # A character is a spellcaster/blessed one if the respective base
        # energy plus modifier is positive
        isZauberer = Wolke.Char.aspBasis + Wolke.Char.aspMod > 0
        isGeweiht = Wolke.Char.kapBasis + Wolke.Char.kapMod > 0
        if isZauberer:
            fields['AstralenergieBasis'] = Wolke.Char.aspBasis
            fields['Astralenergie'] = Wolke.Char.asp.wert + Wolke.Char.aspBasis + Wolke.Char.aspMod
            fields['ModAstralenergie'] = Wolke.Char.asp.wert
        if isGeweiht:
            fields['KarmaenergieBasis'] = Wolke.Char.kapBasis
            fields['Karmaenergie'] = Wolke.Char.kap.wert + Wolke.Char.kapBasis + Wolke.Char.kapMod
            fields['ModKarmaenergie'] = Wolke.Char.kap.wert
        # Combined energy display, e.g. "12 / 8" when both energies exist;
        # also kept on self for later use by other blocks
        self.Energie = ""
        if isZauberer:
            self.Energie = str(Wolke.Char.asp.wert + Wolke.Char.aspBasis + Wolke.Char.aspMod)
            if isGeweiht:
                self.Energie += " / "
        if isGeweiht:
            self.Energie += str(Wolke.Char.kap.wert + Wolke.Char.kapBasis + Wolke.Char.kapMod)
        fields['EN'] = self.Energie
        fields['DHm'] = Wolke.Char.dhStern
        fields['GSm'] = Wolke.Char.gsStern
        fields['WSm'] = Wolke.Char.wsStern
@staticmethod
def getCellIndex(numElements, maxCells):
# This method maps elements to cell indices. It is used to keep alphabetical order in case of overflow
perCell = numElements // maxCells
extraCounter = numElements % maxCells
cellCounter = 1
cell = 0
cellIndex = [0]*numElements
for i in range(0, numElements):
cellIndex[i] = cell
pc = perCell
if extraCounter > 0:
pc += 1
if cellCounter < pc:
cellCounter += 1
else:
cellCounter = 1
cell += 1
extraCounter -= 1
return cellIndex
def printVorteile(self, fields, vorteileAllgemein, vorteileKampf, vorteileUeber):
# Fill fields
cellIndex = PdfExporter.getCellIndex(len(vorteileAllgemein), self.CharakterBogen.maxVorteile)
for i in range(0, len(vorteileAllgemein)):
field = 'Vorteil' + str(cellIndex[i]+1)
if not field in fields:
fields[field] = vorteileAllgemein[i]
else:
fields[field] += " | " + vorteileAllgemein[i]
cellIndex = PdfExporter.getCellIndex(len(vorteileKampf), self.CharakterBogen.maxKampfVorteile)
for i in range(0, len(vorteileKampf)):
field = 'Kampfvorteil' + str(cellIndex[i]+1)
if not field in fields:
fields[field] = vorteileKampf[i]
else:
fields[field] += " | " + vorteileKampf[i]
ueberLen = min(self.CharakterBogen.maxÜberVorteile * self.MergePerLineCount, len(vorteileUeber))
cellIndex = PdfExporter.getCellIndex(ueberLen, self.CharakterBogen.maxÜberVorteile)
for i in range(0, ueberLen):
field = 'Uebervorteil' + str(cellIndex[i]+1)
if not field in fields:
fields[field] = vorteileUeber[i]
else:
fields[field] += " | " + vorteileUeber[i]
del vorteileUeber[:ueberLen]
def pdfDritterBlock(self, fields):
logging.debug("PDF Block 3")
vorteile = CharakterPrintUtility.getVorteile(Wolke.Char)
(vorteileAllgemein, vorteileKampf, vorteileUeber) = CharakterPrintUtility.groupVorteile(Wolke.Char, vorteile, link = True)
# Move vorteile to the next category if there is overflow
maxVort = self.CharakterBogen.maxVorteile * self.MergePerLineCount
if len(vorteileAllgemein) > maxVort:
vorteileKampf.extend(vorteileAllgemein[maxVort:])
del vorteileAllgemein[maxVort:]
maxVort = self.CharakterBogen.maxKampfVorteile * self.MergePerLineCount
if len(vorteileKampf) > maxVort:
vorteileUeber.extend(vorteileKampf[maxVort:])
del vorteileKampf[maxVort:]
self.printVorteile(fields, vorteileAllgemein, vorteileKampf, vorteileUeber)
# return uebervorteile - they need to go on extra page if any are left
return vorteileUeber
    def printFertigkeiten(self, fields, baseStr, fertigkeitenNames):
        """
        Fill the skill fields for the given skill names.

        If ``baseStr`` is set, fields are numbered ('<baseStr>1NA', ...) and
        also receive name, factor and attribute columns.  If it is falsy, the
        field prefix is derived from the first five characters of the skill
        name (with special cases for umlauts).  ``fertigkeitenNames`` is
        cleared afterwards.
        """
        count = 1
        for el in fertigkeitenNames:
            # Skip names the character does not actually have
            if el not in Wolke.Char.fertigkeiten:
                continue
            fertigkeit = Wolke.Char.fertigkeiten[el]
            if baseStr:
                base = baseStr + str(count)
                fields[base + "NA"] = fertigkeit.name
                fields[base + "FA"] = fertigkeit.steigerungsfaktor
                fields[base + "AT"] = \
                    fertigkeit.attribute[0] + '/' + \
                    fertigkeit.attribute[1] + '/' + \
                    fertigkeit.attribute[2]
            else:
                # Field names on the sheet are the first five letters
                base = el[0:5]
                # Fix Umlaute (sheet field names are ASCII-only)
                if el == "Gebräuche":
                    base = "Gebra"
                elif el == "Überleben":
                    base = "Ueber"
            # A '*' marks a modified base value
            if fertigkeit.basiswertMod == 0:
                fields[base + "BA"] = fertigkeit.basiswert
            else:
                fields[base + "BA"] = str(fertigkeit.basiswert + fertigkeit.basiswertMod) + "*"
            fields[base + "FW"] = fertigkeit.wert
            fields[base + "TA"] = ", ".join([t.anzeigeName for t in CharakterPrintUtility.getTalente(Wolke.Char, fertigkeit)])
            fields[base + "PW"] = fertigkeit.probenwert + fertigkeit.basiswertMod
            fields[base + "PWT"] = fertigkeit.probenwertTalent + fertigkeit.basiswertMod
            count += 1
        fertigkeitenNames.clear()
    def pdfVierterBlock(self, fields):
        """Fill the free skills and the regular skill fields."""
        logging.debug("PDF Block 4")
        # Freie Fertigkeiten (free skills)
        freieFerts = CharakterPrintUtility.getFreieFertigkeiten(Wolke.Char)
        count = 1
        for fert in freieFerts:
            # Once all slots are used, wrap around and append to slot 1, 2, ...
            if ('Frei' + str(count)) in fields:
                fields['Frei' + str(count)] += ", " + fert
            else:
                fields['Frei' + str(count)] = fert
            count += 1
            if count > self.CharakterBogen.maxFreie:
                count = 1
        fertigkeiten = CharakterPrintUtility.getFertigkeiten(Wolke.Char)
        if self.CharakterBogen.kurzbogenHack:
            # The short sheet has fixed fields for the standard skills and
            # numbered 'Indi' fields for everything else
            standardFerts = ["Handgemenge", "Hiebwaffen", "Klingenwaffen", "Stangenwaffen",
                            "Schusswaffen", "Wurfwaffen", "Athletik", "Heimlichkeit",
                            "Selbstbeherrschung", "Wahrnehmung", "Autorität",
                            "Beeinflussung", "Gebräuche", "Derekunde", "Magiekunde",
                            "Mythenkunde", "Überleben", "Alchemie", "Handwerk",
                            "Heilkunde", "Verschlagenheit"]
            self.printFertigkeiten(fields, None, copy.copy(standardFerts))
            self.printFertigkeiten(fields, "Indi", [f for f in fertigkeiten if not f in standardFerts])
        else:
            self.printFertigkeiten(fields, "Fertigkeit", fertigkeiten)
def pdfFünfterBlock(self, fields):
logging.debug("PDF Block 5")
# Fill three rows of Rüstung
for i in range(0, min(3, len(Wolke.Char.rüstung))):
el = Wolke.Char.rüstung[i]
if not el.name:
base = 'Ruest' + str(i+1)
fields[base + 'NA'] = ""
fields[base + 'RS'] = ""
fields[base + 'BE'] = ""
fields[base + 'WS'] = ""
base += 'RS'
fields[base + 'Bein'] = ""
fields[base + 'lArm'] = ""
fields[base + 'rArm'] = ""
fields[base + 'Bauch'] = ""
fields[base + 'Brust'] = ""
fields[base + 'Kopf'] = ""
else:
base = 'Ruest' + str(i+1)
fields[base + 'NA'] = el.name
fields[base + 'RS'] = el.getRSGesamtInt() + Wolke.Char.rsmod
fields[base + 'BE'] = max(el.be - Wolke.Char.rüstungsgewöhnung, 0)
fields[base + 'WS'] = int(el.getRSGesamtInt() + Wolke.Char.rsmod + Wolke.Char.ws)
base += 'RS'
fields[base + 'Bein'] = el.rs[0]+Wolke.Char.rsmod+Wolke.Char.ws
fields[base + 'lArm'] = el.rs[1]+Wolke.Char.rsmod+Wolke.Char.ws
fields[base + 'rArm'] = el.rs[2]+Wolke.Char.rsmod+Wolke.Char.ws
fields[base + 'Bauch'] = el.rs[3]+Wolke.Char.rsmod+Wolke.Char.ws
fields[base + 'Brust'] = el.rs[4]+Wolke.Char.rsmod+Wolke.Char.ws
fields[base + 'Kopf'] | |
import ast
import functools
import inspect
from abc import ABCMeta, abstractmethod
from typing import Type
from friendly_states.exceptions import IncorrectSummary, InheritedFromState, CannotInferOutputState, \
DuplicateStateNames, DuplicateOutputStates, UnknownOutputState, ReturnedInvalidState, GetStateDidNotReturnState
from .exceptions import StateChangedElsewhere, IncorrectInitialState, MultipleMachineAncestors
from .utils import snake
class StateMeta(ABCMeta):
    """
    Metaclass of all state classes.  Tracks which states belong to which
    machine and, once complete() is called on the machine root, rewrites
    annotated transition methods so that calling them performs the state
    change.
    """
    # Set of state subclasses declared under a machine root (on the root only)
    subclasses = None
    # Class name -> state class (filled in by complete())
    name_to_state = None
    # Slug -> state class (filled in by complete())
    slug_to_state = None
    # Frozenset of concrete (non-abstract) states (filled in by complete())
    states = None
    # Frozenset of transition wrappers declared directly on this class
    direct_transitions = None
    # True once complete() has run on the machine root
    is_complete = False
    # The machine root this class belongs to, or None
    machine = None
    def __new__(mcs, name, bases, attrs):
        """
        Called when a new class is declared with this metaclass.
        In particular, called when subclassing BaseState.
        Just keeps track of their machines and their subclasses,
        the real work happens in complete()
        """
        cls: StateMeta = super().__new__(mcs, name, bases, attrs)
        if cls.is_complete:
            raise ValueError(
                "This machine is already complete, you cannot add more subclasses.",
            )
        machine_classes = [
            ancestor
            for ancestor in cls.__mro__
            if isinstance(ancestor, StateMeta)
            if ancestor.is_machine
        ]
        if not machine_classes:
            # This is not part of any machine
            return cls
        if len(machine_classes) > 1:
            raise MultipleMachineAncestors(
                "Multiple machine classes found in ancestors of {cls}: {machine_classes}",
                cls=cls,
                machine_classes=machine_classes,
            )
        machine: StateMeta = machine_classes[0]
        assert issubclass(machine, BaseState)
        cls.machine = machine
        # This class is a machine root
        # It gets fresh collections for its states
        if cls is machine:
            cls.subclasses = set()
            return cls
        # Check that abstract classes have been declared correctly
        for ancestor in cls.__mro__[1:]:
            if ancestor is machine:
                break
            if machine not in ancestor.__mro__:
                # This ancestor is unrelated to state machines
                continue
            if not ancestor.is_abstract:
                raise InheritedFromState(
                    "{cls} inherits from {ancestor} and both are part of the machine {machine}, "
                    "but {ancestor} is not abstract. If it should be, mark it with is_abstract = True. "
                    "You cannot inherit from actual state classes.",
                    cls=cls,
                    ancestor=ancestor,
                    machine=machine,
                )
        machine.subclasses.add(cls)
        return cls
    def complete(cls):
        """
        Must be called on the machine after all subclasses have been declared.
        Replaces the transitions with wrappers that do the state change magic,
        sets many of the metadata attributes, and checks validity and the summary.
        """
        if not cls.is_machine:
            raise ValueError(
                "complete() can only be called on state machine roots, i.e. "
                "classes marked with is_machine = True.",
            )
        cls.states = frozenset(
            sub for sub in cls.subclasses
            if not sub.is_abstract
        )
        cls.name_to_state = {state.__name__: state for state in cls.states}
        if len(cls.states) != len(cls.name_to_state):
            raise DuplicateStateNames(
                "Some of the states {states} in this machine have the same name.",
                states=cls.states,
            )
        slug_to_state = [(state.slug, state) for state in cls.states]
        cls.slug_to_state = dict(slug_to_state)
        if len(cls.states) != len(cls.slug_to_state):
            raise DuplicateStateNames(
                "Some of the states in this machine have the same slug: {slug_to_state}",
                slug_to_state=sorted(slug_to_state),
            )
        for sub in cls.subclasses:
            transitions = []
            for method_name, func in list(sub.__dict__.items()):
                # Find functions with a return annotation like
                # -> [OutputState, ...]
                if not inspect.isfunction(func):
                    continue
                annotation = func.__annotations__.get("return")
                if not annotation:
                    continue
                output_names = extract_state_names(annotation)
                if not output_names:
                    continue
                transition = sub._make_transition_wrapper(func, output_names)
                transitions.append(transition)
                # Replace the function
                setattr(sub, method_name, transition)
            sub.direct_transitions = frozenset(transitions)
        summary = cls.__dict__.get("Summary")
        if summary:
            cls.check_summary(summary)
        cls.is_complete = True
    def _make_transition_wrapper(cls, func, output_names):
        """
        Returns a function which wraps a transition to replace it.
        The wrapper does the state change after calling the original function.
        """
        if len(set(output_names)) != len(output_names):
            raise DuplicateOutputStates(
                "The transition function {func} in the class {cls} "
                "declares some output states more than once: {output_names}",
                func=func,
                cls=cls,
                output_names=output_names,
            )
        try:
            output_states = frozenset(
                cls.name_to_state[name]
                for name in output_names
            )
        except KeyError as e:
            raise UnknownOutputState(
                "The transition function {func} in the class {cls} "
                "declares an output state {name} which doesn't exist "
                "in the state machine. The available states are {states}. "
                "Did you forget to inherit from the machine?",
                func=func,
                cls=cls,
                states=cls.states,
                name=e.args[0],
            ) from e
        @functools.wraps(func)
        def wrapper(self: BaseState, *args, **kwargs):
            # The wrapped transition may return the next state class, or None
            # to have it inferred from a single-output annotation.
            result: 'Type[BaseState]' = func(self, *args, **kwargs)
            if result is None:
                # Infer the next state based on the annotation
                if len(output_states) > 1:
                    raise CannotInferOutputState(
                        "This transition {func} has multiple output states {output_states}, "
                        "you must return one.",
                        output_states=sorted(output_states),
                        func=func,
                    )
                (result,) = output_states
            if result not in output_states:
                raise ReturnedInvalidState(
                    "The transition {func} returned {result}, "
                    "which is not in the declared output states {output_states}",
                    output_states=sorted(output_states),
                    func=func,
                    result=result,
                )
            current = self._get_and_check_state(
                StateChangedElsewhere,
                "The state of {obj} has changed to {state} since instantiating {desired}. "
                "Did you change the state inside a transition method? Don't."
            )
            # The wrapper returns None; the state change is its effect.
            self.set_state(current, result)
        wrapper.output_states = output_states
        return wrapper
    def __repr__(cls):
        if cls.machine:
            return cls.__name__
        return super().__repr__()
    def __lt__(cls, other):
        # Order states alphabetically, e.g. for stable error messages
        if not isinstance(other, StateMeta):
            return NotImplemented
        return cls.__name__ < other.__name__
    @property
    def is_machine(cls):
        # Only True when set directly on this class, not inherited
        return cls.__dict__.get("is_machine", False)
    @property
    def is_abstract(cls):
        # Only True when set directly on this class, not inherited
        return cls.__dict__.get("is_abstract", False)
    @property
    def slug(cls):
        """
        The state in string form so that it can be stored in databases etc.
        If a state is renamed, this should be set explicitly as a class attribute
        to the original value to avoid data problems.
        """
        return cls.__dict__.get("slug", cls.__name__)
    @property
    def label(cls):
        """
        Display name of state for forms etc.
        """
        result = cls.__dict__.get("label")
        if result is not None:
            return result
        else:
            return snake(cls.slug).replace("_", " ").title()
    @property
    def is_state(cls):
        # Concrete state of a completed machine (not abstract, not the root)
        return cls in (cls.states or ())
    @property
    def transitions(cls):
        # All transition wrappers declared on this class or any ancestor
        return frozenset().union(*[
            getattr(sub, "direct_transitions", ()) or ()
            for sub in cls.__mro__
        ])
    @property
    def output_states(cls):
        """
        Set of states which can be reached directly from this state.
        """
        if not cls.is_state:
            raise AttributeError("This is not a state class")
        return frozenset().union(*[
            getattr(func, "output_states", [])
            for func in cls.transitions
        ])
    def check_summary(cls, graph):
        """
        Checks that the summary graph matches the state classes.
        """
        missing_classes = []
        wrong_outputs = []
        for state_name, annotation in graph.__annotations__.items():
            output_names = extract_state_names(annotation)
            assert output_names is not None
            state = cls.name_to_state.get(state_name)
            if state:
                actual_output_names = {
                    out.__name__
                    for out in state.output_states
                }
                if set(output_names) != actual_output_names:
                    wrong_outputs.append((state, output_names, actual_output_names))
            else:
                missing_classes.append((state_name, output_names))
        if not (missing_classes or wrong_outputs):
            return
        # Build a helpful message with ready-to-paste class skeletons
        message = "\n"
        if missing_classes:
            message += "Missing states:"
            for state_name, output_names in missing_classes:
                message += f"\n\nclass {state_name}({cls.__name__}):"
                if output_names:
                    for output in output_names:
                        message += f"""
    def to_{snake(output)}(self) -> [{output}]:
        pass\n"""
                else:
                    message += "\n    pass\n\n\n"
        if wrong_outputs:
            message += "Wrong outputs:\n\n"
            for state, output_names, actual_output_names in wrong_outputs:
                message += f"Outputs of {state.__name__}:\n"
                message += f"According to summary       : {', '.join(sorted(output_names))}\n"
                message += f"According to actual classes: {', '.join(sorted(actual_output_names))}\n\n"
        raise IncorrectSummary(message)
class BaseState(metaclass=StateMeta):
    """
    Abstract base class of all states.
    To make state machines you will need a concrete implementation
    with get_state and set_state, usually AttributeState.
    """
    def __init__(self, obj):
        if not type(self).is_complete:
            raise ValueError(
                f"This machine is not complete, call {self.machine.__name__}.complete() "
                f"after declaring all states (subclasses).",
            )
        self.obj = obj
        # Specialize this instance to the object's actual current state class
        # (after verifying it matches the class being instantiated).
        self.__class__ = self._get_and_check_state(
            IncorrectInitialState,
            "{obj} should be in state {desired} but is actually in state {state}"
        )
    def _get_and_check_state(self, exception_class, message_format):
        """Return the object's current state class, raising ``exception_class``
        if it is not (a subclass of) the state this instance represents."""
        state = self.get_state()
        if not (isinstance(state, type) and issubclass(state, BaseState)):
            raise GetStateDidNotReturnState(
                f"get_state is supposed to return a subclass of {BaseState.__name__}, "
                "but it returned {returned}",
                returned=state,
            )
        desired = type(self)
        if not issubclass(state, desired):
            raise exception_class(
                message_format,
                obj=self.obj,
                desired=desired,
                state=state,
            )
        return state
    @abstractmethod
    def get_state(self) -> 'Type[BaseState]':
        # Concrete subclasses return the current state class of self.obj
        pass
    @abstractmethod
    def set_state(self, previous_state: 'Type[BaseState]', new_state: 'Type[BaseState]'):
        # Concrete subclasses store new_state on self.obj
        pass
    def __repr__(self):
        return f"{type(self).__name__}(obj={repr(self.obj)})"
class AttributeState(BaseState):
    """
    Keep the machine state in a plain attribute of the wrapped object.

    This is the most common concrete base class for machines.  The attribute
    is named 'state' by default; override ``attr_name`` to change it.
    """
    attr_name = "state"
    def get_state(self):
        """Return the state class currently stored on the object."""
        name = self.attr_name
        return getattr(self.obj, name)
    def set_state(self, previous_state, new_state):
        """Store ``new_state`` on the object's state attribute."""
        name = self.attr_name
        setattr(self.obj, name, new_state)
class MappingKeyState(BaseState):
    """
    Keep the machine state under a key of a mapping-like object, accessed
    with square brackets, e.g. ``obj['state']``.

    The key is the string 'state' by default; override ``key_name`` to store
    the state under a different key.
    """
    key_name = "state"
    def get_state(self):
        """Return the state class stored under ``key_name``."""
        key = self.key_name
        return self.obj[key]
    def set_state(self, previous_state, new_state):
        """Replace the stored state with ``new_state``."""
        key = self.key_name
        self.obj[key] = new_state
def extract_state_names(annotation):
if not isinstance(annotation, str):
raise ValueError(
"Found non-string annotation. Remember to add:\n\n"
"from __future__ import annotations\n\n"
"at the top of your file."
)
try:
tree = ast.parse(annotation)
except SyntaxError:
return None
if len(tree.body) != 1:
return None
lst = tree.body[0].value
if not | |
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved.
# pragma pylint: disable=unused-argument, no-self-use
"""
Function utilities_excel_query receives attachment information and two additional fields -
list of excel style ranges and comma separated list of names for defined ranges. It then
retrieves the attachment and extracts the data from it.
It uses Openpyxl library (http://openpyxl.readthedocs.io/en/stable/).
Logic for extracting data and parsing inputs is in the WorksheetData class.
"""
import logging
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
import openpyxl
import re # to extract ranges from user input
import io # to pass attachment to openpyxl
import datetime # to deal with sheet's date fields
import json
class FunctionComponent(ResilientComponent):
    """Component that implements Resilient function 'utilities_excel_query'"""
    def __init__(self, opts):
        """constructor provides access to the configuration options"""
        super(FunctionComponent, self).__init__(opts)
        self.options = opts.get("fn_utilities", {})
    @handler("reload")
    def _reload(self, event, opts):
        """Configuration options have changed, save new values"""
        self.options = opts.get("fn_utilities", {})
    @function("utilities_excel_query")
    def _utilities_excel_query_function(self, event, *args, **kwargs):
        """Function: Extract ranges of data or named ranges specified by the user from an excel document."""
        # Create the logger before entering the try block: previously it was
        # created inside, so a failure in the first statements raised a
        # NameError in the except handler instead of being logged.
        log = logging.getLogger(__name__)
        try:
            # Get the function parameters:
            # Attachment information
            yield StatusMessage("Starting the function")
            attachment_id = kwargs.get("attachment_id", None)  # number
            incident_id = kwargs.get("incident_id", None)  # number
            task_id = kwargs.get("task_id", None)  # number
            # What data to get
            excel_ranges = kwargs.get("excel_ranges", None)  # text
            excel_defined_names = kwargs.get("excel_defined_names", None)  # text
            log.info("task_id: {0}".format(task_id))
            log.info("incident_id: {0}".format(incident_id))
            log.info("attachment_id: {0}".format(attachment_id))
            log.info("excel_ranges: {0}".format(excel_ranges))
            log.info("excel_defined_names: {0}".format(excel_defined_names))
            # Normalize missing inputs to empty strings for the parsers below
            if excel_defined_names is None:
                excel_defined_names = ""
            if excel_ranges is None:
                excel_ranges = ""
            yield StatusMessage("Reading the attachment.")
            attachment_data = self._get_attachment_binary(task_id, incident_id, attachment_id)
            yield StatusMessage("Processing attachment.")
            worksheet_data = WorksheetData(io.BytesIO(attachment_data), {
                "ranges": WorksheetData.parse_excel_notation(excel_ranges),
                "named_ranges": WorksheetData.parse_defined_names_notation(excel_defined_names)
            })
            worksheet_data.parse()  # extracts all the data to result
            worksheet_data.serialize()
            result = worksheet_data.result
            # Produce a FunctionResult with the results
            yield StatusMessage("Done.")
            yield FunctionResult(result)
        except Exception as e:
            log.error(str(e))
            yield FunctionError("A function error occurred.")
    def _get_attachment_binary(self, task_id, incident_id, attachment_id):
        """
        Gets the attachment with the rest client as binary.
        :param task_id: Number
            task id provided by user
        :param incident_id: Number
            incident id provided by user
        :param attachment_id: Number
            attachment id provided by user
        :return: Binary object
        """
        if incident_id is None and task_id is None:
            raise FunctionError("Either incident id or the task id has to be specified.")
        if attachment_id is None:
            raise FunctionError("The attachment id has to be specified.")
        # Task attachments live under a different REST endpoint
        if task_id:
            data_uri = "/tasks/{}/attachments/{}/contents".format(task_id, attachment_id)
        else:
            data_uri = "/incidents/{}/attachments/{}/contents".format(incident_id, attachment_id)
        client = self.rest_client()
        data = client.get_content(data_uri)  # type: object
        return data
class WorksheetData(object):
"""
Facade pattern with openpyxl library that will parse the given worksheet into
the json of requested content.
"""
# This reg exp captures the form "Name"!A1:B2 or 'Name'!A1
# Has 4 capturing groups - name of the sheet, top_left and bottom_right (if range given None if not), and cell
EXCEL_RANGE_REG_EXP = r"(?:(?:\"|\')([\w\ ,;.\"\']+)(?:\"|\')!" \
r"(?:(?:([a-zA-Z]+[\d]+)[:]([a-zA-Z]+[\d]+))|([a-zA-Z]+[\d]+)))+"
# These constant adjust the return JSON namings
SHEETS_TITLES = "titles"
PARSED_SHEETS = "sheets"
PARSED_SHEETS_LIST = "parsed_sheets"
NAMED_RANGES = "named_ranges"
def __init__(self, path, opts={}):
"""
:param path : String
an absolute path to the file
:param opts: Dict
contains options with the following options:
named_ranges: List of String names of the defined_names
ranges: List of Objects that have name, top-left, and bottom-right
coordinates of the range to be grabbed.
"""
super(WorksheetData, self).__init__()
self._file_path = path
try:
self.wb = openpyxl.load_workbook(self._file_path, read_only=True)
except IOError as e:
log = logging.getLogger(__name__)
log.error(str(e))
raise FunctionError("Error opening the provided file.")
# options
self.opts = opts
# the eventual return value
self.result = {}
@staticmethod
def serializer(obj):
"""
when JSON passes a type it cannot serialize, check if it's one of the
types that can come from a worksheet and serialize it.
:return: String
serialized field
"""
if isinstance(obj, datetime.datetime):
# the standard of Resilient is in microseconds
return (obj - datetime.datetime(1970, 1, 1)).total_seconds()*1000
return str(obj)
@staticmethod
def parse_excel_notation(ranges):
"""
Takes in a string that has comma separated excel notation ranges
:param ranges: String
string
:return: List[object]
List of objects of a form {"name": "", "top_left": "", "bottom_right": ""}
"""
if ranges is None:
ranges = ""
result = []
range_matches = re.finditer(WorksheetData.EXCEL_RANGE_REG_EXP, ranges)
for match in range_matches:
name = match.group(1)
# check if 2 coordinates were provided, or 1
if match.group(2):
top_left = match.group(2)
bottom_right = match.group(3)
# if the top left isn't provided, then a single coordinate in group 4 is given
if not match.group(2):
top_left = bottom_right = match.group(4)
result.append({
"name": name,
"top_left":top_left,
"bottom_right": bottom_right
})
return result
@staticmethod
def parse_defined_names_notation(defined_names):
"""
:param defined_names: Is a list that gets passed to function, with comma separated
names of defined ranges to be extracted
:return: List[String]
the list of split names
"""
if defined_names is None:
defined_names = ""
split_names = [x.strip() for x in defined_names.split(",") if len(x.strip()) > 0]
return split_names
def parse(self):
"""
Goes through the options and fills our self.result accordingly.
"""
self.result = {self.SHEETS_TITLES: self.wb.sheetnames}
# check if "named_ranges" is in the opts, and is not falsy
if "named_ranges" in self.opts and self.opts["named_ranges"]:
self.parse_named_ranges(self.opts["named_ranges"])
if "ranges" in self.opts and self.opts["ranges"]:
self.parse_sheet_ranges(self.opts["ranges"])
self.add_keys()
def serialize(self):
self.result = json.loads(json.dumps(self.result, default=self.serializer))
def parse_named_ranges(self, named_ranges):
"""
Gets a list or a string of named ranges from options
and calls parse_named_range for each of them
"""
self.result[self.NAMED_RANGES] = {}
# check if a list of named ranges is requested or a single named range
if isinstance(named_ranges, list):
for name in named_ranges:
self.parse_named_range(name)
elif isinstance(named_ranges, str):
self.parse_named_range(named_ranges)
elif isinstance(named_ranges, bool) and named_ranges:
for range in self.wb.defined_names.definedName: # defined_names is a list of DefinedName objects
self.parse_named_range(range.name)
def parse_named_range(self, name):
"""
:param name: String
name of the named range to add to the return.
It contains multiple ranges itself.
"""
result = {}
# check if the named range is really in the workbook
if name not in self.wb.defined_names:
raise FunctionError("The defined range {0} doesn't exist".format(name))
ranges = self.wb.defined_names[name] # get the data of the named range
destinations = ranges.destinations # returns a generator of tuples (ws title, range)
for sheet_name, rng in destinations:
if sheet_name not in result:
result[sheet_name] = {}
# worksheet
ws = self.wb[sheet_name]
try:
result[sheet_name][rng] = ([[cell.value for cell in row] for row in ws[rng]])
except ValueError as e:
log = logging.getLogger(__name__)
log.error(str(e))
raise FunctionError("Requested range {0} is not correct.".format(rng.coord))
self.result[self.NAMED_RANGES][name] = result
def parse_sheet_ranges(self, ranges):
"""
Gets the list of ranges provided by user from the options and processes it.
:param ranges: List[object] - list of range objects
"""
if self.PARSED_SHEETS not in self.result:
self.result[self.PARSED_SHEETS] = {}
# rng is for range, since range is a builtin name
for rng in ranges:
self.parse_sheet_range(rng)
    def parse_sheet_range(self, range):
        """
        Parse a particular range - extract the data from the sheet and store
        it under self.result[PARSED_SHEETS][sheet name][top_left:bottom_right].

        :param range: object
            stores information about the range - sheet ("name"), top_left,
            bottom_right
        :raises FunctionError: if the sheet or the coordinates are invalid
        """
        # NOTE(review): the parameter name shadows the builtin range();
        # renaming it would change the keyword-call interface, so it stays.
        # make sure the worksheet actually exists
        try:
            ws = self.wb[range["name"]]
        except KeyError as e:
            log = logging.getLogger(__name__)
            log.error(str(e))
            raise FunctionError("The sheet {} provided by user doesn't exist".format(range["name"]))
        # additional thing to do for read only sheets to make sure only necessary data is read
        ws.calculate_dimension(force=True)
        try:
            # slicing a worksheet returns a tuple of rows of cells
            data = ws[range["top_left"]:range["bottom_right"]]
        except ValueError as e:
            log = logging.getLogger(__name__)
            log.error(str(e))
            raise FunctionError("The range coordinates {0},{1} provided by user are incorrect".
                                format(range["top_left"], range["bottom_right"]))
        if range["name"] not in self.result[self.PARSED_SHEETS]:
            self.result[self.PARSED_SHEETS][range["name"]] = {}
        # unwrap the cells to their raw values
        result = [[cell.value for cell in row] for row in data]
        range_name = "{0}:{1}".format(range["top_left"], range["bottom_right"])
        self.result[self.PARSED_SHEETS][range["name"]][range_name] = result
def add_keys(self):
"""
Goes through self.result and adds _keys list to every dictionary as
a workaround for dictionary iteration in the post processing.
So adds _keys for all the sheets, and all the ranges in "sheets"
and keys for all named_ranges, their sheets, and their ranges in "defined_names"
"""
if self.PARSED_SHEETS in self.result:
self.result[self.PARSED_SHEETS]["_keys"] = \
[sheet for sheet in self.result[self.PARSED_SHEETS].keys() if sheet != "_keys"]
for sheet_name, sheet in self.result[self.PARSED_SHEETS].items():
# _keys is not a sheet so don't iterate over it, would need it even if the top line was moved down
# | |
# Source: Honestpuck/python-jamf
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#pylint: disable=no-member, missing-class-docstring, signature-differs, missing-function-docstring
"""
records
A class for each type of record/object in Jamf
"""
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__copyright__ = 'Copyright (c) 2020 <NAME>'
__license__ = 'MIT'
__date__ = '2020-09-21'
__version__ = "0.3.6"
import json
import re
import sys
from os import path, _exit
from pprint import pprint
#pylint: disable=relative-beyond-top-level
from . import convert
from .api import API
__all__ = (
'AdvancedComputerSearches',
'AdvancedMobileDeviceSearches',
'AdvancedUserSearches',
'Buildings',
'BYOProfiles',
'Categories',
'Classes',
'ComputerConfigurations',
'ComputerExtensionAttributes',
'ComputerGroups',
'ComputerReports',
'Computers',
'Departments',
'DirectoryBindings', # produces an error when getting by name
'DiskEncryptionConfigurations',
'DistributionPoints',
'DockItems',
'Ebooks',
'Ibeacons',
'JSONWebTokenConfigurations',
'LDAPServers',
'LicensedSoftware',
'MacApplications',
'ManagedPreferenceProfiles',
'MobileDeviceApplications',
'MobileDeviceCommands',
'MobileDeviceConfigurationProfiles',
'MobileDeviceEnrollmentProfiles',
'MobileDeviceExtensionAttributes',
'MobileDeviceInvitations',
'MobileDeviceProvisioningProfiles',
'MobileDevices',
'NetbootServers',
'NetworkSegments',
'OSXConfigurationProfiles', # produces an error when getting by name
'Packages',
'PatchExternalSources',
'PatchInternalSources',
'PatchPolicies',
'PatchSoftwareTitles',
'Peripherals',
'PeripheralTypes',
'Policies',
'Printers',
'RemovableMACAddresses',
'RestrictedSoftware',
'Scripts',
'Sites',
'SoftwareUpdateServers',
'UserExtensionAttributes',
'UserGroups',
'Users',
'VPPAccounts',
'VPPAssignments',
'VPPInvitations',
'WebHooks',
# Add all non-Jamf Record classes to valid_records below
'JamfError')
def valid_records():
    """Return every name in __all__ that is a Jamf Record class."""
    non_records = (
        # Add all non-Jamf Record classes here
        'JamfError',
    )
    return tuple(name for name in __all__ if name not in non_records)
def class_name(name, case_sensitive=True):
    """Map a record-type name to its class object.

    Args:
        name (str): record-type name, e.g. "Policies"
        case_sensitive (bool): when False, match the name ignoring case

    Returns:
        type: the matching record class

    Raises:
        JamfError: if the name matches no valid record class
    """
    # All record classes are module-level, so a globals() lookup replaces
    # the previous eval() (same result, no code evaluation).
    if case_sensitive and name in valid_records():
        return globals()[name]
    for temp in valid_records():
        if name.lower() == temp.lower():
            return globals()[temp]
    raise JamfError(f"{name} is not a valid record.")
#pylint: disable=super-init-not-called
class JamfError(Exception):
    # NOTE(review): this "exception" never propagates. Constructing it prints
    # the message to stderr and immediately hard-exits the process via
    # os._exit(1), which skips atexit handlers, finally blocks, and any
    # surrounding except clauses. Raising JamfError therefore aborts the
    # program; changing this would alter control flow for every caller.
    def __init__(self, message):
        print(f"jctl: error: {message}", file=sys.stderr)
        _exit(1)
class Record():
    """
    A class for an object or list of objects on Jamf Pro

    NOTE: For reasons known only to itself Jamf uses 'wordstogether' for the
    endpoint in the URL but 'underscore_between' for the XML tags in some
    endpoints and there are cases where the endpoint and object tag are more
    different than that.
    This means we need to know 3 strings for each object type, the endpoint,
    the top of the list, and the top of the object. We pass them in to __new__
    as a dict.
    Just in case that's not confusing enough the id tag is not always 'id'.
    """
    # Swagger (OpenAPI) description of the Jamf classic API, loaded once per
    # process at class-definition time.
    _swagger = json.load(open(path.dirname(__file__)+'/records.json', 'r'))
    # Endpoints that exist in the swagger file but are known not to work.
    _broken_api = [
        '/directorybindings/name/{name}',
        '/osxconfigurationprofiles/name/{name}',
    ]
    def __new__(cls, args, kwargs):
        """
        See the class docstring for an explanation of the parameters
        """
        rec = super().__new__(cls)
        # The treasure chest
        rec._data = None
        rec.session = API()
        # iteration cursor used by __next__; -1 means "before the first item"
        rec._index = -1
        # The endpoint url, e.g. "Policies" class becomes "policies" endpoint
        if not hasattr(cls, '_swagger_path_name'):
            rec._swagger_path_name = cls.__name__.lower()
        # Get the definition name, which almost always is the plural name
        # exceptions: LicensedSoftware
        if not hasattr(cls, '_swagger_def_name'):
            rec._swagger_def_name = rec.get_schema(rec._swagger_path_name)
            # If there's an xml entry, use it for the definition name
            temp2 = cls._swagger['definitions'][rec._swagger_def_name]
            if ('xml' in temp2 and 'name' in temp2['xml']):
                rec._swagger_def_name = temp2['xml']['name']
        # The id tag is not always literally "id"; subclasses may override.
        if not hasattr(cls, '_id_text'):
            rec._id_text = "id"
        if not hasattr(cls, '_id_text2'):
            rec._id_text2 = "id"
        if not hasattr(cls, '_swagger_plural_key'):
            # Get the schema, which almost always is the singular name
            temp1 = rec._swagger_path_name+'/'+rec._id_text+"/{"+rec._id_text2+"}"
            rec._swagger_plural_key = rec.get_schema(temp1)
        #getPlural and below
        if not hasattr(cls, '_swagger_singular_key'):
            rec._swagger_singular_key = rec._swagger_plural_key
        if not hasattr(cls, '_list_to_dict_key'):
            rec._list_to_dict_key = 'id'
        return rec
    def __init__(self, query='', python_data=None, json_file=None, json_data=None):
        # Exactly one data source is honored, in this priority order:
        # json_data stream, json_file path, python_data dict, else a GET.
        if json_data:
            python_data = json.load(json_data)
            self.post(python_data)
        elif json_file is not None:
            if path.exists(json_file):
                python_data = json.load(open(json_file, 'r'))
                self.post(python_data)
            else:
                raise JamfError(f"File does not exist: {json_file}.")
        elif python_data is not None:
            self.post(python_data)
        else:
            self.get(query)
    def get(self, record=''):
        """Fetch all records (empty query) or one record by id/name."""
        if record == '':
            self.getPlural()
        else:
            self.getSingle(record)
    def getPlural(self):
        """Fetch the full record list for this endpoint into self._data."""
        lst = self.session.get(self._swagger_path_name)
        if self._swagger_def_name in lst:
            lst2 = lst[self._swagger_def_name]
            # An empty list (or an explicit size of '0') yields no data.
            if not lst2 or 'size' in lst2 and lst2['size'] == '0':
                return {}
            if self._swagger_plural_key in lst2:
                # A single-element result is not wrapped in a list by the
                # XML-to-dict conversion, so re-wrap it before converting.
                if 'size' in lst2 and lst2['size'] == '1':
                    self._data = self.list_to_dict([lst2[self._swagger_plural_key]])
                else:
                    self._data = self.list_to_dict(lst2[self._swagger_plural_key])
                self.plural = True
            else:
                raise JamfError(f"Endpoint {self._swagger_path_name} - "
                                f"{self._swagger_def_name} has no member named "
                                f"{self._swagger_plural_key} (_swagger_plural_key).")
        else:
            raise JamfError(f"Endpoint {self._swagger_path_name} has no "
                            f"member named {self._swagger_def_name}. Check "
                            f"the swagger definition file for the name of "
                            f"{self._swagger_path_name} and set the property "
                            f"_swagger_def_name for class ({self._swagger_path_name}).")
    def getSingle(self, record, key_text=None):
        """Fetch one record; `record` may be an id (int) or a name (str)."""
        if key_text:
            self._key = record
            self._key_text = key_text
        else:
            try:
                # This wont work if the name is actually a number...
                self._key = int(record)
                self._key_text = self._id_text
            except ValueError:
                self._key = record
                self._key_text = 'name'
        end = f'{self._swagger_path_name}/{self._key_text}/{self._key}'
        if self.is_action_valid('get', self._key_text):
            results = self.session.get(end)
            if self._swagger_singular_key in results:
                if results[self._swagger_singular_key]:
                    self._data = results[self._swagger_singular_key]
                else:
                    self._data = {}
                self.plural = False
            else:
                print("-------------------------------------"
                      "-------------------------------------\n"
                      "Data dump\n")
                pprint(results)
                raise JamfError(f"Endpoint {end} has no member named "
                                f"{self._swagger_singular_key}"
                                f"(_swagger_singular_key).")
        else:
            # Name lookup is broken for some endpoints (_broken_api); fall
            # back to resolving the name to an id and retrying by id.
            if self._key_text == "name":
                # print(f'Converting {record} to id, hope for the best')
                # Warning! Infinite regression if not careful!
                self.getSingle(self.convert_name_to_id(record), self._id_text)
            else:
                raise JamfError(f'{end}[get] is an invalid action')
    def put(self, data=None, raw=False):
        """Update the current record on the server (XML PUT)."""
        if not hasattr(self, '_key'):
            raise JamfError('Record has no id or name.')
        end = f'{self._swagger_path_name}/{self._key_text}/{self._key}'
        if not self.is_action_valid('put', self._key_text):
            raise JamfError(f'{end} is an invalid endpoint')
        # Data
        if data:
            out = {self._swagger_singular_key: data}
        else:
            out = {self._swagger_singular_key: self._data}
        out = convert.dict_to_xml(out)
        return self.session.put(end, out, raw)
    def post(self, python_data, raw=False):
        """Create a new record on the server; only valid when no data is loaded."""
        if not self._data:
            # Jamf's convention: POST to id 0 creates a new record.
            end = f'{self._swagger_path_name}/{self._id_text}/0'
            if not self.is_action_valid('post', self._id_text):
                raise JamfError(f'{end} is an invalid endpoint')
            out = {self._swagger_singular_key: python_data}
            out = convert.dict_to_xml(out)
            return self.session.post(end, out, raw)
        else:
            raise JamfError("Can't post a record, use put")
    def delete(self, raw=False):
        """Delete the single record currently loaded (never a whole list)."""
        if not self.plural:
            if not hasattr(self, '_key'):
                raise JamfError('Record has no id or name.')
            end = f'{self._swagger_path_name}/{self._key_text}/{self._key}'
            if not self.is_action_valid('delete', self._key_text):
                raise JamfError(f'{end} is an invalid endpoint')
            return self.session.delete(end, raw)
        else:
            raise JamfError("Can't delete a list of records (too dangerous)")
    def list_to_dict(self, lst):
        """
        convert list returned by get() into a dict. In most cases it will
        be keyed on the ID and only have the name but some record types
        call them something different and some record types have more than
        name and id. For those it is keyed on ID still but that contains
        a further dict with the remaining keys
        """
        dct = {}
        keys = list(lst[0].keys())
        if len(keys) == 2:
            # simple case: {id: name}
            for elem in lst:
                dct.update({elem[self._list_to_dict_key]: elem[keys[1]]})
        else:
            # rich case: {id: {remaining keys}}
            for elem in lst:
                keys = elem.pop(self._list_to_dict_key)
                dct.update({keys: elem})
        return dct
    def get_schema(self, swagger_path):
        """Return the definition name referenced by a swagger GET response."""
        temp1 = self._swagger['paths']['/'+swagger_path]['get']
        schema = temp1['responses']['200']['schema']['$ref']
        # strip the "#/definitions/" prefix to get the bare definition name
        if schema.startswith("#/definitions/"):
            schema = schema[14:]
        return schema
    def is_action_valid(self, a, key_text):
        """True if action `a` (get/put/post/delete) exists and is not broken."""
        p = f'/{self._swagger_path_name}/{key_text}/{{{key_text}}}'
        if p in self._broken_api:
            return False
        return p in self._swagger['paths'] and a in self._swagger['paths'][p]
    def convert_name_to_id(self, record_name):
        """Resolve a record name to its numeric id via a full list fetch."""
        self.getPlural()
        try:
            return int(self.id(record_name))
        except ValueError:
            raise JamfError(f"Couldn't convert {record_name} to id")
    def data(self):
        """Return the raw record data (dict)."""
        return self._data
    def list(self, regexes=None, exactMatches=None, ids=None, returnIds=False):
        """Filter loaded record names by regexes, exact matches, and/or ids.

        With no filters at all, every record is returned. Results are
        sorted with None first, then empty strings, then by value.
        """
        results_ = []
        if not self._data or not self.plural:
            return results_
        for recordId, recordName in self._data.items():
            append_ = False
            if recordName:
                # some results are id:name, some are id:{name:name}
                if not isinstance(recordName, str) and 'name' in recordName:
                    recordName = recordName['name']
                if ids:
                    for id_ in ids:
                        if id_ and recordId == id_:
                            append_ = True
                if regexes:
                    for rr in regexes:
                        if not append_ and re.search(rr, recordName):
                            append_ = True
                if exactMatches:
                    for em in exactMatches:
                        if not append_ and recordName == em:
                            append_ = True
                if not regexes and not exactMatches and not ids:
                    append_ = True
                if append_:
                    if returnIds:
                        results_.append([recordName,recordId])
                    else:
                        results_.append(recordName)
        return sorted(results_, key=lambda k: (k is None, k == "", k))
    def get_path(self, path):
        """Walk a comma separated key path into the record data.

        Returns the value at the path, or None if any key is missing.
        Only valid for single records (no-op for plural data).
        """
        if self._data:
            if self.plural:
                pass
            else:
                temp = path.split(',')
                placeholder = self._data
                for jj in temp:
                    if placeholder:
                        if jj in placeholder:
                            placeholder = placeholder[jj]
                        else:
                            return None
                    else:
                        return None
                return placeholder
    def set_path(self, path, value):
        """Set the value at a comma separated key path; True on success.

        Only valid for single records; every key but the last must already
        exist in the data.
        """
        if self._data:
            if self.plural:
                pass
            else:
                temp = path.split(',')
                key = temp.pop()
                placeholder = self._data
                for jj in temp:
                    if placeholder:
                        if jj in placeholder:
                            placeholder = placeholder[jj]
                        else:
                            return False
                    else:
                        return False
                placeholder[key] = value
                return True
    def records_by_name(self):
        """Invert the {id: name} mapping to {name: id}."""
        objs = {}
        for ii in self._data:
            jj = self._data[ii]
            if isinstance(jj, str):
                objs[jj] = ii
            elif 'name' in self._data[ii]:
                objs[self._data[ii]['name']] = ii
            else:
                pprint(self._data)
                raise JamfError("Couldn't flip names and id's because"
                                "name is missing.")
        return objs
    def id(self, name=None):
        """Return the id for `name` (plural data) or this record's own id."""
        if self.plural and name is not None:
            objs = self.records_by_name()
            return objs[name]
        else:
            return self._data['id']
    def __iter__(self):
        # NOTE(review): returns None (not an iterator) for single records,
        # which makes `for x in record` raise TypeError in that case.
        if self.plural:
            return self
        else:
            return None
    def __next__(self):
        if self.plural:
            self._index+=1
            if not self._data or self._index+1 > len(self._data):
                raise StopIteration
            return list(self._data.keys())[self._index]
        else:
            return None
class AdvancedComputerSearches(Record):
def __new__(cls, *args, **kwargs):
| |
(tuple[float, int, array, Variable]): operation parameters
Keyword Args:
wires (Sequence[int]): Subsystems it acts on. If not given, args[-1]
is interpreted as wires.
do_queue (bool): Indicates whether the operation should be
immediately pushed into a :class:`BaseQNode` circuit queue.
This flag is useful if there is some reason to run an Operation
outside of a BaseQNode context.
"""
# pylint: disable=abstract-method
@classmethod
def _eigvals(cls, *params):
"""Eigenvalues of the operator.
The order of the eigenvalues needs to match the order of
the computational basis vectors.
This is a *class method* that must be defined for all
new diagonal operations, that returns the eigenvalues
of the operator in the computational basis.
This private method allows eigenvalues to be computed
directly without instantiating the operators first.
To return the eigenvalues of *instantiated* operators,
please use the :attr:`~.Operator.eigvals` property instead.
**Example:**
>>> qml.RZ._eigvals(0.5)
>>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])
Returns:
array: eigenvalue representation
"""
raise NotImplementedError
@property
def eigvals(self):
r"""Eigenvalues of an instantiated diagonal operation.
The order of the eigenvalues needs to match the order of
the computational basis vectors.
**Example:**
>>> U = qml.RZ(0.5, wires=1)
>>> U.eigvals
>>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])
Returns:
array: eigvals representation
"""
return super().eigvals
@classmethod
def _matrix(cls, *params):
return np.diag(cls._eigvals(*params))
# =============================================================================
# Base Observable class
# =============================================================================
class Observable(Operator):
    """Base class for observables supported by a device.

    :class:`Observable` is used to describe Hermitian quantum observables.
    As with :class:`~.Operator`, every observable must define the class
    attributes :attr:`~.Operator.num_params`, :attr:`~.Operator.num_wires`,
    and :attr:`~.Operator.par_domain`.

    Args:
        params (tuple[float, int, array, Variable]): observable parameters

    Keyword Args:
        wires (Sequence[int]): subsystems it acts on.
            Currently, only one subsystem is supported.
        do_queue (bool): Indicates whether the operation should be
            immediately pushed into the Operator queue.
    """
    # pylint: disable=abstract-method
    return_type = None

    @classmethod
    def _eigvals(cls, *params):
        """Eigenvalues of the observable in the computational basis.

        The ordering must match the computational basis vectors when the
        observable is diagonalized with :attr:`diagonalizing_gates`. This
        *class method* computes eigenvalues without instantiating the
        operator; instantiated operators should use the
        :attr:`~.Operator.eigvals` property instead.

        **Example:**

        >>> qml.PauliZ._eigvals()
        >>> array([1, -1])

        Returns:
            array: eigenvalue representation
        """
        raise NotImplementedError

    @property
    def eigvals(self):
        r"""Eigenvalues of this instantiated observable, ordered to match
        the computational basis vectors produced by
        :attr:`diagonalizing_gates` — a requirement for using qubit
        observables in quantum functions.

        **Example:**

        >>> U = qml.PauliZ(wires=1)
        >>> U.eigvals
        >>> array([1, -1])

        Returns:
            array: eigvals representation
        """
        return super().eigvals

    def __init__(self, *params, wires=None, do_queue=True):
        # When wires are not passed by keyword, the last positional
        # argument is interpreted as the wires.
        if wires is None:
            wires = params[-1]
            params = params[:-1]
        super().__init__(*params, wires=wires, do_queue=do_queue)

    def __repr__(self):
        """Constructor-call-like representation."""
        base = super().__repr__()
        rt = self.return_type
        if rt is None:
            return base
        if rt is Probability:
            return repr(rt) + "(wires={})".format(self.wires)
        return repr(rt) + "(" + base + ")"

    def __matmul__(self, other):
        # A Tensor partner absorbs this observable; a plain Observable
        # partner creates a new two-factor Tensor.
        if isinstance(other, Tensor):
            return other.__rmatmul__(self)
        if isinstance(other, Observable):
            return Tensor(self, other)
        raise ValueError("Can only perform tensor products between observables.")

    def diagonalizing_gates(self):
        r"""Operations that diagonalize this observable in the
        computational basis.

        Returns:
            list(qml.Operation): A list of gates that diagonalize
            the observable in the computational basis.
        """
        raise NotImplementedError
class Tensor(Observable):
"""Container class representing tensor products of observables.
To create a tensor, simply initiate it like so:
>>> T = Tensor(qml.PauliX(0), qml.Hermitian(A, [1, 2]))
You can also create a tensor from other Tensors:
>>> T = Tensor(T, qml.PauliZ(4))
The ``@`` symbol can be used as a tensor product operation:
>>> T = qml.PauliX(0) @ qml.Hadamard(2)
"""
# pylint: disable=abstract-method
return_type = None
tensor = True
par_domain = None
def __init__(self, *args): # pylint: disable=super-init-not-called
self._eigvals_cache = None
self.obs = []
for o in args:
if isinstance(o, Tensor):
self.obs.extend(o.obs)
elif isinstance(o, Observable):
self.obs.append(o)
else:
raise ValueError("Can only perform tensor products between observables.")
def __str__(self):
"""Print the tensor product and some information."""
return "Tensor product {}: {} params, wires {}".format(
[i.name for i in self.obs], len(self.params), self.wires
)
def __repr__(self):
"""Constructor-call-like representation."""
return "Tensor(" + ", ".join([repr(o) for o in self.obs]) + ")"
@property
def name(self):
"""All constituent observable names making up the tensor product.
Returns:
list[str]: list containing all observable names
"""
return [o.name for o in self.obs]
@property
def num_wires(self):
"""Number of wires the tensor product acts on.
Returns:
int: number of wires
"""
return len(self.wires)
@property
def wires(self):
"""All wires in the system the tensor product acts on.
Returns:
list[int]: wires addressed by the observables in the tensor product
"""
return [w for o in self.obs for w in o.wires]
@property
def params(self):
"""Raw parameters of all constituent observables in the tensor product.
Returns:
list[Any]: flattened list containing all dependent parameters
"""
return [p for sublist in [o.params for o in self.obs] for p in sublist]
    @property
    def num_params(self):
        """Total number of raw parameters across all constituent observables.

        Returns:
            int: number of dependent parameters in the tensor product
        """
        return len(self.params)
@property
def parameters(self):
"""Evaluated parameter values of all constituent observables in the tensor product.
Returns:
list[list[Any]]: nested list containing the parameters per observable
in the tensor product
"""
return [o.parameters for o in self.obs]
@property
def non_identity_obs(self):
"""Returns the non-identity observables contained in the tensor product.
Returns:
list[:class:`~.Observable`]: list containing the non-identity observables
in the tensor product
"""
return [obs for obs in self.obs if not isinstance(obs, qml.Identity)]
def __matmul__(self, other):
if isinstance(other, Tensor):
self.obs.extend(other.obs)
return self
if isinstance(other, Observable):
self.obs.append(other)
return self
raise ValueError("Can only perform tensor products between observables.")
def __rmatmul__(self, other):
if isinstance(other, Observable):
self.obs[:0] = [other]
return self
raise ValueError("Can only perform tensor products between observables.")
__imatmul__ = __matmul__
    @property
    def eigvals(self):
        """Return the eigenvalues of the specified tensor product observable.

        This method uses pre-stored eigenvalues for standard observables where
        possible, and memoizes the result in ``self._eigvals_cache``.

        Returns:
            array[float]: array containing the eigenvalues of the tensor product
            observable
        """
        # Return the memoized value if it was computed before.
        if self._eigvals_cache is not None:
            return self._eigvals_cache
        standard_observables = {"PauliX", "PauliY", "PauliZ", "Hadamard"}
        # observable should be Z^{\otimes n}
        self._eigvals_cache = pauli_eigs(len(self.wires))
        # TODO: check for edge cases of the sorting, e.g. Tensor(Hermitian(obs, wires=[0, 2]),
        # Hermitian(obs, wires=[1, 3, 4])
        # Sorting the observables based on wires, so that the order of
        # the eigenvalues is correct
        obs_sorted = sorted(self.obs, key=lambda x: x.wires)
        # check if there are any non-standard observables (such as Identity)
        if set(self.name) - standard_observables:
            # Tensor product of observables contains a mixture
            # of standard and non-standard observables
            self._eigvals_cache = np.array([1])
            # groupby collapses consecutive runs of standard/non-standard
            # factors; each run contributes one Kronecker factor.
            for k, g in itertools.groupby(obs_sorted, lambda x: x.name in standard_observables):
                if k:
                    # Subgroup g contains only standard observables.
                    self._eigvals_cache = np.kron(self._eigvals_cache, pauli_eigs(len(list(g))))
                else:
                    # Subgroup g contains only non-standard observables.
                    for ns_ob in g:
                        # loop through all non-standard observables
                        self._eigvals_cache = np.kron(self._eigvals_cache, ns_ob.eigvals)
        return self._eigvals_cache
def diagonalizing_gates(self):
"""Return the gate set that diagonalizes a circuit according to the
specified tensor observable.
This method uses pre-stored eigenvalues for standard observables where
possible and stores the corresponding eigenvectors from the eigendecomposition.
Returns:
list: list containing the gates diagonalizing the tensor observable
"""
diag_gates = []
for o in self.obs:
diag_gates.extend(o.diagonalizing_gates())
return diag_gates
@property
def matrix(self):
r"""Matrix representation of the tensor operator
in the computational basis.
**Example:**
Note that the returned matrix *only includes explicitly
declared observables* making up the tensor product;
that is, it only returns the matrix for the specified
subsystem it is defined for.
>>> O = qml.PauliZ(0) @ qml.PauliZ(2)
>>> O.matrix
array([[ 1, 0, 0, 0],
[ 0, -1, 0, 0],
[ 0, 0, -1, 0],
[ 0, 0, 0, 1]])
To get the full :math:`2^3\times 2^3` Hermitian matrix
acting on the 3-qubit system, the identity on wire 1
must be explicitly included:
>>> O = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)
>>> O.matrix
array([[ 1., 0., 0., 0., 0., 0., 0., 0.],
[ 0., -1., | |
self.assertEqual(x.__self__, B)
# invalid super cases
try:
x = super(B, 'abc')
self.assertUnreachable()
except TypeError:
pass
try:
super(B,A)
self.assertUnreachable()
except TypeError:
pass
class A(object):
def __init__(self, name):
self.__name__ = name
def meth(self):
return self.__name__
classmeth = classmethod(meth)
class B(A): pass
b = B('derived')
self.assertEqual(super(B,b).__thisclass__.__name__, 'B')
self.assertEqual(super(B,b).__self__.__name__, 'derived')
self.assertEqual(super(B,b).__self_class__.__name__, 'B')
self.assertEqual(super(B,b).classmeth(), 'B')
# descriptor supper
class A(object):
def meth(self): return 'A'
class B(A):
def meth(self):
return 'B' + self.__super.meth()
B._B__super = super(B)
b = B()
self.assertEqual(b.meth(), 'BA')
def test_class_method_calls(self):
"""class method should get correct meta class."""
class D(object):
@classmethod
def classmeth(cls): pass
self.assertEqual(D.classmeth.__class__, MethodType)
class MetaType(type): pass
class D(object, metaclass = MetaType):
@classmethod
def classmeth(cls): pass
self.assertEqual(D.classmeth.__class__, MethodType)
    def test_cases(self):
        """Subclassing builtins: the object already equals the expected base
        value inside __new__ and __init__, and type(instance) matches the
        subclass for every base except `type` itself."""
        def runTest(testCase):
            # foo subclasses the builtin under test; the assertions run
            # while the object is being constructed.
            class foo(testCase.subtype):
                def __new__(cls, param):
                    ret = testCase.subtype.__new__(cls, param)
                    self.assertTrue(ret == testCase.newEq)
                    # also check the != operator agrees with ==
                    self.assertTrue((ret != testCase.newEq) != True)
                    return ret
                def __init__(self, param):
                    testCase.subtype.__init__(self, param)
                    self.assertTrue(self == testCase.initEq)
                    self.assertTrue((self != testCase.initEq) != True)
            a = foo(testCase.param)
            self.assertTrue((type(a) == foo) == testCase.match)
        # Plain value-holder for one scenario.
        class TestCase(object):
            __slots__ = ['subtype', 'newEq', 'initEq', 'match', 'param']
            def __init__(self, subtype, newEq, initEq, match, param):
                self.match = match
                self.subtype = subtype
                self.newEq = newEq
                self.initEq = initEq
                self.param = param
        # NOTE(review): `long` and `deque` are assumed to come from this test
        # module's compatibility imports — confirm against the file header.
        cases = [TestCase(int, 2, 2, True, 2),
                 TestCase(list, [], [2,3,4], True, (2,3,4)),
                 TestCase(deque, deque(), deque((2,3,4)), True, (2,3,4)),
                 TestCase(set, set(), set((2,3,4)), True, (2,3,4)),
                 TestCase(frozenset, frozenset((2,3,4)), frozenset((2,3,4)), True, (2,3,4)),
                 TestCase(tuple, (2,3,4), (2,3,4), True, (2,3,4)),
                 TestCase(str, 'abc', 'abc', True, 'abc'),
                 TestCase(float, 2.3, 2.3, True, 2.3),
                 TestCase(type, type(object), type(object), False, object),
                 TestCase(long, long(10000000000), long(10000000000), True, long(10000000000)),
                 #TestCase(complex, complex(2.0, 0), complex(2.0, 0), True, 2.0), # complex is currently a struct w/ no extensible base, we fail here
                 # TestCase(file, 'abc', True), # ???
                ]
        for case in cases:
            runTest(case)
    @unittest.skipIf(is_posix or is_netcoreapp, 'missing System.Windows.Forms support')
    @skipUnlessIronPython()
    def test_call_base_init(self):
        """verify we can call the base init directly"""
        # IronPython-only: subclass a .NET type (WinForms Form), invoke the
        # CLR base __init__ explicitly (not via super()), then check that
        # instance state set afterwards stuck.
        import clr
        clr.AddReferenceByPartialName('System.Windows.Forms')
        from System.Windows.Forms import Form
        class MyForm(Form):
            def __init__(self, title):
                Form.__init__(self)
                self.Text = title
        a = MyForm('abc')
        self.assertEqual(a.Text, 'abc')
        #TestCase(bool, True, True), # not an acceptable base type (stray leftover from the cases list in test_cases)
def test_func_flags(self):
def foo0(): pass
def foo1(*args): pass
def foo2(**args): pass
def foo3(*args, **kwargs): pass
def foo4(a): pass
def foo5(a, *args): pass
def foo6(a, **args): pass
def foo7(a, *args, **kwargs): pass
def foo8(a,b,c,d,e,f): pass
def foo9(a,b): pass
self.assertEqual(foo0.__code__.co_flags & 12, 0)
self.assertEqual(foo1.__code__.co_flags & 12, 4)
self.assertEqual(foo2.__code__.co_flags & 12, 8)
self.assertEqual(foo3.__code__.co_flags & 12, 12)
self.assertEqual(foo4.__code__.co_flags & 12, 0)
self.assertEqual(foo5.__code__.co_flags & 12, 4)
self.assertEqual(foo6.__code__.co_flags & 12, 8)
self.assertEqual(foo7.__code__.co_flags & 12, 12)
self.assertEqual(foo8.__code__.co_flags & 12, 0)
self.assertEqual(foo9.__code__.co_flags & 12, 0)
self.assertEqual(foo0.__code__.co_argcount, 0)
self.assertEqual(foo1.__code__.co_argcount, 0)
self.assertEqual(foo2.__code__.co_argcount, 0)
self.assertEqual(foo3.__code__.co_argcount, 0)
self.assertEqual(foo4.__code__.co_argcount, 1)
self.assertEqual(foo5.__code__.co_argcount, 1)
self.assertEqual(foo6.__code__.co_argcount, 1)
self.assertEqual(foo7.__code__.co_argcount, 1)
self.assertEqual(foo8.__code__.co_argcount, 6)
self.assertEqual(foo9.__code__.co_argcount, 2)
    def test_big_calls(self):
        """Function defs/calls across argument-count boundaries (powers of two, 255)."""
        # NOTE(review): CPython >= 3.7 no longer limits defs/calls to 255
        # arguments; the SyntaxError branch below presumably only fires on
        # older runtimes -- confirm against the interpreters this suite targets.
        # check various function call sizes and boundaries
        sizes = [3, 4, 5, 7, 8, 9, 13, 15, 16, 17, 23, 24, 25, 31, 32, 33, 47, 48, 49, 63, 64, 65, 127, 128, 129, 254, 255, 256, 257, 258, 511, 512, 513]
        # mono has a limitation of < 1023
        if not is_mono:
            sizes.extend([1023, 1024, 1025, 2047, 2048, 2049])
        for size in sizes:
            d = {}
            # w/o defaults
            if size <= 255 or is_cli:
                exec('def f(' + ','.join(['a' + str(i) for i in range(size)]) + '): return ' + ','.join(['a' + str(i) for i in range(size)]), d)
            else:
                with self.assertRaises(SyntaxError):
                    exec('def f(' + ','.join(['a' + str(i) for i in range(size)]) + '): return ' + ','.join(['a' + str(i) for i in range(size)]), d)
                continue
            # w/ defaults
            exec('def g(' + ','.join(['a' + str(i) + '=' + str(i) for i in range(size)]) + '): return ' + ','.join(['a' + str(i) for i in range(size)]), d)
            if size <= 255 or is_cli:
                # CPython allows function definitions > 255, but not calls w/ > 255 params.
                exec('a = f(' + ', '.join([str(x) for x in range(size)]) + ')', d)
                self.assertEqual(d["a"], tuple(range(size)))
                exec('a = g()', d)
                self.assertEqual(d["a"], tuple(range(size)))
                exec('a = g(' + ', '.join([str(x) for x in range(size)]) + ')', d)
                self.assertEqual(d["a"], tuple(range(size)))
                exec('a = f(*(' + ', '.join([str(x) for x in range(size)]) + '))', d)
                self.assertEqual(d["a"], tuple(range(size)))
    def test_compile(self):
        """compile() with flag 8192 and co_filename round-tripping."""
        # 8192 is presumably the legacy CO_FUTURE_DIVISION flag -- the eval
        # below confirms 2/3 is true division. IronPython reports the bit
        # unset on the produced code object; CPython echoes it back.
        x = compile("print(2/3)", "<string>", "exec", 8192)
        if is_cli:
            self.assertEqual(x.co_flags & 8192, 0)
        else:
            self.assertEqual(x.co_flags & 8192, 8192)
        x = compile("2/3", "<string>", "eval", 8192)
        self.assertEqual(eval(x), 2.0 / 3.0)
        # co_filename should round-trip arbitrary strings, including odd ones
        names = [ "", ".", "1", "\n", " ", "@", "%^",
                  "a", "A", "Abc", "aBC", "filename.py",
                  "longlonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglong",
                  """
                  stuff
                  more stuff
                  last stuff
                  """
                  ]
        for name in names:
            self.assertEqual(compile("print(2/3)", name, "exec", 8192).co_filename,
                name)
def test_filename(self):
c = compile("x = 2", "test", "exec")
self.assertEqual(c.co_filename, 'test')
def test_name(self):
def f(): pass
f.__name__ = 'g'
self.assertEqual(f.__name__, 'g')
if is_cli:
self.assertTrue(repr(f).startswith('<function g'))
else:
self.assertTrue(repr(f).startswith('<function FunctionTest.test_name.<locals>.f'))
f.__qualname__ = 'x'
self.assertEqual(f.__qualname__, 'x')
if is_cli:
self.assertTrue(repr(f).startswith('<function g'))
else:
self.assertTrue(repr(f).startswith('<function x'))
def test_argcount(self):
def foo0(): pass
def foo1(*args): pass
def foo2(**args): pass
def foo3(*args, **kwargs): pass
def foo4(a): pass
def foo5(a, *args): pass
def foo6(a, **args): pass
def foo7(a, *args, **kwargs): pass
def foo8(a,b,c,d,e,f): pass
def foo9(a,b): pass
self.assertEqual(foo0.__code__.co_argcount, 0)
self.assertEqual(foo1.__code__.co_argcount, 0)
self.assertEqual(foo2.__code__.co_argcount, 0)
self.assertEqual(foo3.__code__.co_argcount, 0)
self.assertEqual(foo4.__code__.co_argcount, 1)
self.assertEqual(foo5.__code__.co_argcount, 1)
self.assertEqual(foo6.__code__.co_argcount, 1)
self.assertEqual(foo7.__code__.co_argcount, 1)
self.assertEqual(foo8.__code__.co_argcount, 6)
self.assertEqual(foo9.__code__.co_argcount, 2)
def test_defaults(self):
defaults = [None, object, int, [], 3.14, [3.14], (None,), "a string"]
for default in defaults:
def helperFunc(): pass
self.assertEqual(helperFunc.__defaults__, None)
self.assertEqual(helperFunc.__defaults__, None)
def helperFunc1(a): pass
self.assertEqual(helperFunc1.__defaults__, None)
self.assertEqual(helperFunc1.__defaults__, None)
def helperFunc2(a=default): pass
self.assertEqual(helperFunc2.__defaults__, (default,))
helperFunc2(a=7)
self.assertEqual(helperFunc2.__defaults__, (default,))
def helperFunc3(a, b=default, c=[42]): c.append(b)
self.assertEqual(helperFunc3.__defaults__, (default, [42]))
helperFunc3("stuff")
self.assertEqual(helperFunc3.__defaults__, (default, [42, default]))
def test_splat_defaults(self):
def g(a, b, x=None):
return a, b, x
def f(x, *args):
return g(x, *args)
self.assertEqual(f(1, *(2,)), (1,2,None))
def test_argument_eval_order(self):
"""Check order of evaluation of function arguments"""
x = [1]
def noop(a, b, c):
pass
noop(x.append(2), x.append(3), x.append(4))
self.assertEqual(x, [1,2,3,4])
def test_method_attr_access(self):
class foo(object):
def f(self): pass
abc = 3
self.assertEqual(MethodType(foo, 'abc').abc, 3)
#TODO: @skip("interpreted") # we don't have FuncEnv's in interpret modes so this always returns None
def test_function_closure_negative(self):
def f(): pass
for assignment_val in [None, 1, "a string"]:
with self.assertRaises(AttributeError):
f.__closure__ = assignment_val
def test_paramless_function_call_error(self):
def f(): pass
try:
f(*(1, ))
self.assertUnreachable()
except TypeError: pass
try:
f(**{'abc':'def'})
self.assertUnreachable()
except TypeError: pass
    def test_function_closure(self):
        """__closure__: cell creation, contents, identity, equality and repr."""
        # a function with no free variables has no closure
        def f(): pass
        self.assertEqual(f.__closure__, None)
        def f():
            def g(): pass
            return g
        self.assertEqual(f().__closure__, None)
        # one captured variable -> one cell
        def f():
            x = 4
            def g(): return x
            return g
        self.assertEqual(sorted([x.cell_contents for x in f().__closure__]), [4])
        # cells propagate through nested scopes
        def f():
            x = 4
            def g():
                y = 5
                def h(): return x,y
                return h
            return g()
        self.assertEqual(sorted([x.cell_contents for x in f().__closure__]), [4, 5])
        # don't use z
        def f():
            x = 4
            def g():
                y = 5
                z = 7
                def h(): return x,y
                return h
            return g()
        self.assertEqual(sorted([x.cell_contents for x in f().__closure__]), [4, 5])
        def f():
            x = 4
            def g():
                y = 5
                z = 7
                def h(): return x,y,z
                return h
            return g()
        self.assertEqual(sorted([x.cell_contents for x in f().__closure__]), [4, 5, 7])
        # `a` is unused by h, so it gets no cell
        def f():
            x = 4
            a = 9
            def g():
                y = 5
                z = 7
                def h(): return x,y
                return h
            return g()
        self.assertEqual(sorted([x.cell_contents for x in f().__closure__]), [4, 5])
        # closure cells are not recreated
        callRes = f()
        a = sorted([id(x) for x in callRes.__closure__])
        b = sorted([id(x) for x in callRes.__closure__])
        self.assertEqual(a, b)
        def f():
            x = 4
            a = 9
            def g():
                y = 5
                z = 7
                def h(): return x,y,a,z
                return h
            return g()
        self.assertEqual(sorted([x.cell_contents for x in f().__closure__]), [4, 5, 7, 9])
        # cells are unhashable
        self.assertRaises(TypeError, hash, f().__closure__[0])
        # cells compare by their contents: equal values -> equal cells
        def f():
            x = 5
            def g():
                return x
            return g
        def h():
            x = 5
            def g():
                return x
            return g
        def j():
            x = 6
            def g():
                return x
            return g
        self.assertEqual(f().__closure__[0], h().__closure__[0])
        self.assertTrue(f().__closure__[0] != j().__closure__[0])
        # <cell at 45: int object at 44>
        self.assertTrue(repr(f().__closure__[0]).startswith('<cell at '))
        self.assertTrue(repr(f().__closure__[0]).find(': int object at ') != -1)
def test_func_code(self):
def foo(): pass
def assign(): foo.__code__ = None
self.assertRaises(TypeError, assign)
    def def_func_doc(self):
        # NOTE(review): deliberately named def_* (not test_*) so the runner
        # never executes it.  It references a free variable `foo` that is not
        # defined in this scope, and `func_doc` is the Python 2 alias for
        # __doc__ -- both would fail under Python 3.  Presumably kept for
        # historical reference; confirm before re-enabling.
        foo.func_doc = 'abc'
        self.assertEqual(foo.__doc__, 'abc')
        foo.__doc__ = 'def'
        self.assertEqual(foo.func_doc, 'def')
        foo.func_doc = None
        self.assertEqual(foo.__doc__, None)
        self.assertEqual(foo.func_doc, None)
def test_func_defaults(self):
def f(a, b): return (a, b)
f.__defaults__ = (1,2)
self.assertEqual(f(), (1,2))
f.__defaults__ = (1,2,3,4)
self.assertEqual(f(), (3,4))
f.__defaults__ = None
self.assertRaises(TypeError, f)
f.__defaults__ = (1,2)
self.assertEqual(f.__defaults__, (1,2))
del f.__defaults__
| |
'id': attribId_4002951160133423716,
'dir': attribDir_4297072167429554704,
}
_name = u'kbd'
# something user would type
class Var(pycopia.XML.POM.ElementNode):
    """POM node for the XHTML ``var`` (variable) inline element; appears machine-generated."""
    # XML attribute name -> shared attribute-descriptor object
    ATTRIBUTES = {
        u'lang': attribLang_267608473188383376,
        u'style': attribStyle_733285237156411536,
        u'onmousedown': attribOnmousedown_312304592206311721,
        u'onmouseup': attribOnmouseup_162556595998286400,
        u'onmouseout': attribOnmouseout_55467262469652544,
        u'title': attribTitle_1178737426446382009,
        u'onkeypress': attribOnkeypress_532917457362969849,
        u'onkeydown': attribOnkeydown_1257884844152169025,
        u'class': attribClass_1166814720137472289,
        u'xml:lang': attribXml_lang_1645670971257252241,
        u'onmousemove': attribOnmousemove_1463303904047580100,
        u'onmouseover': attribOnmouseover_741809317326693841,
        u'onclick': attribOnclick_1389815037327772224,
        u'onkeyup': attribOnkeyup_4105996191008517796,
        u'ondblclick': attribOndblclick_923980074842425329,
        u'id': attribId_4002951160133423716,
        u'dir': attribDir_4297072167429554704,
    }
    # built with (True,); semantics defined by pycopia.XML.POM.ContentModel
    CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
    # Python-keyword-safe aliases: 'class' -> 'class_', 'xml:lang' -> 'xml_lang'
    KWATTRIBUTES = {
        'lang': attribLang_267608473188383376,
        'style': attribStyle_733285237156411536,
        'onmouseup': attribOnmouseup_162556595998286400,
        'onmouseout': attribOnmouseout_55467262469652544,
        'title': attribTitle_1178737426446382009,
        'onkeypress': attribOnkeypress_532917457362969849,
        'xml_lang': attribXml_lang_1645670971257252241,
        'onmousedown': attribOnmousedown_312304592206311721,
        'class_': attribClass_1166814720137472289,
        'onkeydown': attribOnkeydown_1257884844152169025,
        'onmousemove': attribOnmousemove_1463303904047580100,
        'onmouseover': attribOnmouseover_741809317326693841,
        'onclick': attribOnclick_1389815037327772224,
        'onkeyup': attribOnkeyup_4105996191008517796,
        'ondblclick': attribOndblclick_923980074842425329,
        'id': attribId_4002951160133423716,
        'dir': attribDir_4297072167429554704,
    }
    _name = u'var'
# variable
class Cite(pycopia.XML.POM.ElementNode):
    """POM node for the XHTML ``cite`` (citation) inline element; appears machine-generated."""
    # XML attribute name -> shared attribute-descriptor object
    ATTRIBUTES = {
        u'lang': attribLang_267608473188383376,
        u'style': attribStyle_733285237156411536,
        u'onmousedown': attribOnmousedown_312304592206311721,
        u'onmouseup': attribOnmouseup_162556595998286400,
        u'onmouseout': attribOnmouseout_55467262469652544,
        u'title': attribTitle_1178737426446382009,
        u'onkeypress': attribOnkeypress_532917457362969849,
        u'onkeydown': attribOnkeydown_1257884844152169025,
        u'class': attribClass_1166814720137472289,
        u'xml:lang': attribXml_lang_1645670971257252241,
        u'onmousemove': attribOnmousemove_1463303904047580100,
        u'onmouseover': attribOnmouseover_741809317326693841,
        u'onclick': attribOnclick_1389815037327772224,
        u'onkeyup': attribOnkeyup_4105996191008517796,
        u'ondblclick': attribOndblclick_923980074842425329,
        u'id': attribId_4002951160133423716,
        u'dir': attribDir_4297072167429554704,
    }
    # built with (True,); semantics defined by pycopia.XML.POM.ContentModel
    CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
    # Python-keyword-safe aliases: 'class' -> 'class_', 'xml:lang' -> 'xml_lang'
    KWATTRIBUTES = {
        'lang': attribLang_267608473188383376,
        'style': attribStyle_733285237156411536,
        'onmouseup': attribOnmouseup_162556595998286400,
        'onmouseout': attribOnmouseout_55467262469652544,
        'title': attribTitle_1178737426446382009,
        'onkeypress': attribOnkeypress_532917457362969849,
        'xml_lang': attribXml_lang_1645670971257252241,
        'onmousedown': attribOnmousedown_312304592206311721,
        'class_': attribClass_1166814720137472289,
        'onkeydown': attribOnkeydown_1257884844152169025,
        'onmousemove': attribOnmousemove_1463303904047580100,
        'onmouseover': attribOnmouseover_741809317326693841,
        'onclick': attribOnclick_1389815037327772224,
        'onkeyup': attribOnkeyup_4105996191008517796,
        'ondblclick': attribOndblclick_923980074842425329,
        'id': attribId_4002951160133423716,
        'dir': attribDir_4297072167429554704,
    }
    _name = u'cite'
# citation
class Abbr(pycopia.XML.POM.ElementNode):
    """POM node for the XHTML ``abbr`` (abbreviation) inline element; appears machine-generated."""
    # XML attribute name -> shared attribute-descriptor object
    ATTRIBUTES = {
        u'lang': attribLang_267608473188383376,
        u'style': attribStyle_733285237156411536,
        u'onmousedown': attribOnmousedown_312304592206311721,
        u'onmouseup': attribOnmouseup_162556595998286400,
        u'onmouseout': attribOnmouseout_55467262469652544,
        u'title': attribTitle_1178737426446382009,
        u'onkeypress': attribOnkeypress_532917457362969849,
        u'onkeydown': attribOnkeydown_1257884844152169025,
        u'class': attribClass_1166814720137472289,
        u'xml:lang': attribXml_lang_1645670971257252241,
        u'onmousemove': attribOnmousemove_1463303904047580100,
        u'onmouseover': attribOnmouseover_741809317326693841,
        u'onclick': attribOnclick_1389815037327772224,
        u'onkeyup': attribOnkeyup_4105996191008517796,
        u'ondblclick': attribOndblclick_923980074842425329,
        u'id': attribId_4002951160133423716,
        u'dir': attribDir_4297072167429554704,
    }
    # built with (True,); semantics defined by pycopia.XML.POM.ContentModel
    CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
    # Python-keyword-safe aliases: 'class' -> 'class_', 'xml:lang' -> 'xml_lang'
    KWATTRIBUTES = {
        'lang': attribLang_267608473188383376,
        'style': attribStyle_733285237156411536,
        'onmouseup': attribOnmouseup_162556595998286400,
        'onmouseout': attribOnmouseout_55467262469652544,
        'title': attribTitle_1178737426446382009,
        'onkeypress': attribOnkeypress_532917457362969849,
        'xml_lang': attribXml_lang_1645670971257252241,
        'onmousedown': attribOnmousedown_312304592206311721,
        'class_': attribClass_1166814720137472289,
        'onkeydown': attribOnkeydown_1257884844152169025,
        'onmousemove': attribOnmousemove_1463303904047580100,
        'onmouseover': attribOnmouseover_741809317326693841,
        'onclick': attribOnclick_1389815037327772224,
        'onkeyup': attribOnkeyup_4105996191008517796,
        'ondblclick': attribOndblclick_923980074842425329,
        'id': attribId_4002951160133423716,
        'dir': attribDir_4297072167429554704,
    }
    _name = u'abbr'
# abbreviation
class Q(pycopia.XML.POM.ElementNode):
    """POM node for the XHTML ``q`` (inlined quote) element; also carries a ``cite`` attribute."""
    # XML attribute name -> shared attribute-descriptor object
    ATTRIBUTES = {
        u'lang': attribLang_267608473188383376,
        u'style': attribStyle_733285237156411536,
        u'onmousedown': attribOnmousedown_312304592206311721,
        u'onmouseup': attribOnmouseup_162556595998286400,
        u'onmouseout': attribOnmouseout_55467262469652544,
        u'title': attribTitle_1178737426446382009,
        u'onkeypress': attribOnkeypress_532917457362969849,
        u'onkeydown': attribOnkeydown_1257884844152169025,
        u'class': attribClass_1166814720137472289,
        u'xml:lang': attribXml_lang_1645670971257252241,
        u'onmousemove': attribOnmousemove_1463303904047580100,
        u'onmouseover': attribOnmouseover_741809317326693841,
        u'onclick': attribOnclick_1389815037327772224,
        u'onkeyup': attribOnkeyup_4105996191008517796,
        u'ondblclick': attribOndblclick_923980074842425329,
        u'cite': attribCite_319597539676272225,
        u'id': attribId_4002951160133423716,
        u'dir': attribDir_4297072167429554704,
    }
    # built with (True,); semantics defined by pycopia.XML.POM.ContentModel
    CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
    # Python-keyword-safe aliases: 'class' -> 'class_', 'xml:lang' -> 'xml_lang'
    KWATTRIBUTES = {
        'lang': attribLang_267608473188383376,
        'style': attribStyle_733285237156411536,
        'onmouseup': attribOnmouseup_162556595998286400,
        'onmouseout': attribOnmouseout_55467262469652544,
        'title': attribTitle_1178737426446382009,
        'onkeypress': attribOnkeypress_532917457362969849,
        'xml_lang': attribXml_lang_1645670971257252241,
        'onmousedown': attribOnmousedown_312304592206311721,
        'class_': attribClass_1166814720137472289,
        'onkeydown': attribOnkeydown_1257884844152169025,
        'onmousemove': attribOnmousemove_1463303904047580100,
        'onmouseover': attribOnmouseover_741809317326693841,
        'onclick': attribOnclick_1389815037327772224,
        'onkeyup': attribOnkeyup_4105996191008517796,
        'ondblclick': attribOndblclick_923980074842425329,
        'cite': attribCite_319597539676272225,
        'id': attribId_4002951160133423716,
        'dir': attribDir_4297072167429554704,
    }
    _name = u'q'
# inlined quote
class Sub(pycopia.XML.POM.ElementNode):
    """POM node for the XHTML ``sub`` (subscript) inline element; appears machine-generated."""
    # XML attribute name -> shared attribute-descriptor object
    ATTRIBUTES = {
        u'lang': attribLang_267608473188383376,
        u'style': attribStyle_733285237156411536,
        u'onmousedown': attribOnmousedown_312304592206311721,
        u'onmouseup': attribOnmouseup_162556595998286400,
        u'onmouseout': attribOnmouseout_55467262469652544,
        u'title': attribTitle_1178737426446382009,
        u'onkeypress': attribOnkeypress_532917457362969849,
        u'onkeydown': attribOnkeydown_1257884844152169025,
        u'class': attribClass_1166814720137472289,
        u'xml:lang': attribXml_lang_1645670971257252241,
        u'onmousemove': attribOnmousemove_1463303904047580100,
        u'onmouseover': attribOnmouseover_741809317326693841,
        u'onclick': attribOnclick_1389815037327772224,
        u'onkeyup': attribOnkeyup_4105996191008517796,
        u'ondblclick': attribOndblclick_923980074842425329,
        u'id': attribId_4002951160133423716,
        u'dir': attribDir_4297072167429554704,
    }
    # built with (True,); semantics defined by pycopia.XML.POM.ContentModel
    CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
    # Python-keyword-safe aliases: 'class' -> 'class_', 'xml:lang' -> 'xml_lang'
    KWATTRIBUTES = {
        'lang': attribLang_267608473188383376,
        'style': attribStyle_733285237156411536,
        'onmouseup': attribOnmouseup_162556595998286400,
        'onmouseout': attribOnmouseout_55467262469652544,
        'title': attribTitle_1178737426446382009,
        'onkeypress': attribOnkeypress_532917457362969849,
        'xml_lang': attribXml_lang_1645670971257252241,
        'onmousedown': attribOnmousedown_312304592206311721,
        'class_': attribClass_1166814720137472289,
        'onkeydown': attribOnkeydown_1257884844152169025,
        'onmousemove': attribOnmousemove_1463303904047580100,
        'onmouseover': attribOnmouseover_741809317326693841,
        'onclick': attribOnclick_1389815037327772224,
        'onkeyup': attribOnkeyup_4105996191008517796,
        'ondblclick': attribOndblclick_923980074842425329,
        'id': attribId_4002951160133423716,
        'dir': attribDir_4297072167429554704,
    }
    _name = u'sub'
# subscript
class Sup(pycopia.XML.POM.ElementNode):
    """POM node for the XHTML ``sup`` (superscript) inline element; appears machine-generated."""
    # XML attribute name -> shared attribute-descriptor object
    ATTRIBUTES = {
        u'lang': attribLang_267608473188383376,
        u'style': attribStyle_733285237156411536,
        u'onmousedown': attribOnmousedown_312304592206311721,
        u'onmouseup': attribOnmouseup_162556595998286400,
        u'onmouseout': attribOnmouseout_55467262469652544,
        u'title': attribTitle_1178737426446382009,
        u'onkeypress': attribOnkeypress_532917457362969849,
        u'onkeydown': attribOnkeydown_1257884844152169025,
        u'class': attribClass_1166814720137472289,
        u'xml:lang': attribXml_lang_1645670971257252241,
        u'onmousemove': attribOnmousemove_1463303904047580100,
        u'onmouseover': attribOnmouseover_741809317326693841,
        u'onclick': attribOnclick_1389815037327772224,
        u'onkeyup': attribOnkeyup_4105996191008517796,
        u'ondblclick': attribOndblclick_923980074842425329,
        u'id': attribId_4002951160133423716,
        u'dir': attribDir_4297072167429554704,
    }
    # built with (True,); semantics defined by pycopia.XML.POM.ContentModel
    CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
    # Python-keyword-safe aliases: 'class' -> 'class_', 'xml:lang' -> 'xml_lang'
    KWATTRIBUTES = {
        'lang': attribLang_267608473188383376,
        'style': attribStyle_733285237156411536,
        'onmouseup': attribOnmouseup_162556595998286400,
        'onmouseout': attribOnmouseout_55467262469652544,
        'title': attribTitle_1178737426446382009,
        'onkeypress': attribOnkeypress_532917457362969849,
        'xml_lang': attribXml_lang_1645670971257252241,
        'onmousedown': attribOnmousedown_312304592206311721,
        'class_': attribClass_1166814720137472289,
        'onkeydown': attribOnkeydown_1257884844152169025,
        'onmousemove': attribOnmousemove_1463303904047580100,
        'onmouseover': attribOnmouseover_741809317326693841,
        'onclick': attribOnclick_1389815037327772224,
        'onkeyup': attribOnkeyup_4105996191008517796,
        'ondblclick': attribOndblclick_923980074842425329,
        'id': attribId_4002951160133423716,
        'dir': attribDir_4297072167429554704,
    }
    _name = u'sup'
# superscript
# fixed pitch font
class I(pycopia.XML.POM.ElementNode):
    """POM node for the XHTML ``i`` (italic font) inline element; appears machine-generated."""
    # XML attribute name -> shared attribute-descriptor object
    ATTRIBUTES = {
        u'lang': attribLang_267608473188383376,
        u'style': attribStyle_733285237156411536,
        u'onmousedown': attribOnmousedown_312304592206311721,
        u'onmouseup': attribOnmouseup_162556595998286400,
        u'onmouseout': attribOnmouseout_55467262469652544,
        u'title': attribTitle_1178737426446382009,
        u'onkeypress': attribOnkeypress_532917457362969849,
        u'onkeydown': attribOnkeydown_1257884844152169025,
        u'class': attribClass_1166814720137472289,
        u'xml:lang': attribXml_lang_1645670971257252241,
        u'onmousemove': attribOnmousemove_1463303904047580100,
        u'onmouseover': attribOnmouseover_741809317326693841,
        u'onclick': attribOnclick_1389815037327772224,
        u'onkeyup': attribOnkeyup_4105996191008517796,
        u'ondblclick': attribOndblclick_923980074842425329,
        u'id': attribId_4002951160133423716,
        u'dir': attribDir_4297072167429554704,
    }
    # built with (True,); semantics defined by pycopia.XML.POM.ContentModel
    CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
    # Python-keyword-safe aliases: 'class' -> 'class_', 'xml:lang' -> 'xml_lang'
    KWATTRIBUTES = {
        'lang': attribLang_267608473188383376,
        'style': attribStyle_733285237156411536,
        'onmouseup': attribOnmouseup_162556595998286400,
        'onmouseout': attribOnmouseout_55467262469652544,
        'title': attribTitle_1178737426446382009,
        'onkeypress': attribOnkeypress_532917457362969849,
        'xml_lang': attribXml_lang_1645670971257252241,
        'onmousedown': attribOnmousedown_312304592206311721,
        'class_': attribClass_1166814720137472289,
        'onkeydown': attribOnkeydown_1257884844152169025,
        'onmousemove': attribOnmousemove_1463303904047580100,
        'onmouseover': attribOnmouseover_741809317326693841,
        'onclick': attribOnclick_1389815037327772224,
        'onkeyup': attribOnkeyup_4105996191008517796,
        'ondblclick': attribOndblclick_923980074842425329,
        'id': attribId_4002951160133423716,
        'dir': attribDir_4297072167429554704,
    }
    _name = u'i'
# italic font
class B(pycopia.XML.POM.ElementNode):
    """POM node for the XHTML ``b`` (bold font) inline element; appears machine-generated."""
    # XML attribute name -> shared attribute-descriptor object
    ATTRIBUTES = {
        u'lang': attribLang_267608473188383376,
        u'style': attribStyle_733285237156411536,
        u'onmousedown': attribOnmousedown_312304592206311721,
        u'onmouseup': attribOnmouseup_162556595998286400,
        u'onmouseout': attribOnmouseout_55467262469652544,
        u'title': attribTitle_1178737426446382009,
        u'onkeypress': attribOnkeypress_532917457362969849,
        u'onkeydown': attribOnkeydown_1257884844152169025,
        u'class': attribClass_1166814720137472289,
        u'xml:lang': attribXml_lang_1645670971257252241,
        u'onmousemove': attribOnmousemove_1463303904047580100,
        u'onmouseover': attribOnmouseover_741809317326693841,
        u'onclick': attribOnclick_1389815037327772224,
        u'onkeyup': attribOnkeyup_4105996191008517796,
        u'ondblclick': attribOndblclick_923980074842425329,
        u'id': attribId_4002951160133423716,
        u'dir': attribDir_4297072167429554704,
    }
    # built with (True,); semantics defined by pycopia.XML.POM.ContentModel
    CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
    # Python-keyword-safe aliases: 'class' -> 'class_', 'xml:lang' -> 'xml_lang'
    KWATTRIBUTES = {
        'lang': attribLang_267608473188383376,
        'style': attribStyle_733285237156411536,
        'onmouseup': attribOnmouseup_162556595998286400,
        'onmouseout': attribOnmouseout_55467262469652544,
        'title': attribTitle_1178737426446382009,
        'onkeypress': attribOnkeypress_532917457362969849,
        'xml_lang': attribXml_lang_1645670971257252241,
        'onmousedown': attribOnmousedown_312304592206311721,
        'class_': attribClass_1166814720137472289,
        'onkeydown': attribOnkeydown_1257884844152169025,
        'onmousemove': attribOnmousemove_1463303904047580100,
        'onmouseover': attribOnmouseover_741809317326693841,
        'onclick': attribOnclick_1389815037327772224,
        'onkeyup': attribOnkeyup_4105996191008517796,
        'ondblclick': attribOndblclick_923980074842425329,
        'id': attribId_4002951160133423716,
        'dir': attribDir_4297072167429554704,
    }
    _name = u'b'
# bold font
class Small(pycopia.XML.POM.ElementNode):
    """POM node for the XHTML ``small`` (smaller font) inline element; appears machine-generated."""
    # XML attribute name -> shared attribute-descriptor object
    ATTRIBUTES = {
        u'lang': attribLang_267608473188383376,
        u'style': attribStyle_733285237156411536,
        u'onmousedown': attribOnmousedown_312304592206311721,
        u'onmouseup': attribOnmouseup_162556595998286400,
        u'onmouseout': attribOnmouseout_55467262469652544,
        u'title': attribTitle_1178737426446382009,
        u'onkeypress': attribOnkeypress_532917457362969849,
        u'onkeydown': attribOnkeydown_1257884844152169025,
        u'class': attribClass_1166814720137472289,
        u'xml:lang': attribXml_lang_1645670971257252241,
        u'onmousemove': attribOnmousemove_1463303904047580100,
        u'onmouseover': attribOnmouseover_741809317326693841,
        u'onclick': attribOnclick_1389815037327772224,
        u'onkeyup': attribOnkeyup_4105996191008517796,
        u'ondblclick': attribOndblclick_923980074842425329,
        u'id': attribId_4002951160133423716,
        u'dir': attribDir_4297072167429554704,
    }
    # built with (True,); semantics defined by pycopia.XML.POM.ContentModel
    CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
    # Python-keyword-safe aliases: 'class' -> 'class_', 'xml:lang' -> 'xml_lang'
    KWATTRIBUTES = {
        'lang': attribLang_267608473188383376,
        'style': attribStyle_733285237156411536,
        'onmouseup': attribOnmouseup_162556595998286400,
        'onmouseout': attribOnmouseout_55467262469652544,
        'title': attribTitle_1178737426446382009,
        'onkeypress': attribOnkeypress_532917457362969849,
        'xml_lang': attribXml_lang_1645670971257252241,
        'onmousedown': attribOnmousedown_312304592206311721,
        'class_': attribClass_1166814720137472289,
        'onkeydown': attribOnkeydown_1257884844152169025,
        'onmousemove': attribOnmousemove_1463303904047580100,
        'onmouseover': attribOnmouseover_741809317326693841,
        'onclick': attribOnclick_1389815037327772224,
        'onkeyup': attribOnkeyup_4105996191008517796,
        'ondblclick': attribOndblclick_923980074842425329,
        'id': attribId_4002951160133423716,
        'dir': attribDir_4297072167429554704,
    }
    _name = u'small'
# smaller font
# ==================== Object ======================================
#
# object is used to embed objects as part of HTML pages.
# param elements should precede other content. Parameters
# can also be expressed as attribute/value pairs on the
# object element itself when brevity is desired.
#
class Object(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'classid': attribClassid_1987276650902832004,
u'usemap': attribUsemap_209539994327519769,
u'onmousedown': attribOnmousedown_312304592206311721,
u'codetype': attribCodetype_1518623320855463449,
u'border': attribBorder_4105672098752522596,
u'id': attribId_4002951160133423716,
u'style': attribStyle_733285237156411536,
u'title': attribTitle_1178737426446382009,
u'hspace': attribHspace_511202655064171876,
u'archive': attribArchive_809163088491035041,
u'width': attribWidth_936277652245334569,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseup': attribOnmouseup_162556595998286400,
u'type': attribType_2839642281990897124,
u'onclick': attribOnclick_1389815037327772224,
u'onmouseout': attribOnmouseout_55467262469652544,
u'onkeypress': attribOnkeypress_532917457362969849,
u'onkeydown': attribOnkeydown_1257884844152169025,
u'codebase': attribCodebase_2013075475851788100,
u'onmouseover': attribOnmouseover_741809317326693841,
u'height': attribHeight_3964235387625190441,
u'data': attribData_8570479821456996,
u'class': attribClass_1166814720137472289,
u'lang': attribLang_267608473188383376,
u'name': attribName_1939937075622105121,
u'standby': attribStandby_1257958614369601764,
u'align': attribAlign_242187980190960400,
u'vspace': attribVspace_3197471081211222544,
u'xml:lang': attribXml_lang_1645670971257252241,
u'onkeyup': attribOnkeyup_4105996191008517796,
u'ondblclick': attribOndblclick_923980074842425329,
u'declare': attribDeclare_2910115601546336836,
u'dir': attribDir_4297072167429554704,
u'tabindex': attribTabindex_1133897031401996169,
}
CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
KWATTRIBUTES = {
'classid': attribClassid_1987276650902832004,
'usemap': attribUsemap_209539994327519769,
'onmousedown': attribOnmousedown_312304592206311721,
'codetype': attribCodetype_1518623320855463449,
'border': attribBorder_4105672098752522596,
'id': attribId_4002951160133423716,
'style': attribStyle_733285237156411536,
'title': attribTitle_1178737426446382009,
'hspace': attribHspace_511202655064171876,
'archive': attribArchive_809163088491035041,
'width': attribWidth_936277652245334569,
'onmousemove': attribOnmousemove_1463303904047580100,
'onmouseup': attribOnmouseup_162556595998286400,
'type': attribType_2839642281990897124,
'xml_lang': attribXml_lang_1645670971257252241,
'onclick': attribOnclick_1389815037327772224,
'onmouseout': attribOnmouseout_55467262469652544,
'onkeypress': attribOnkeypress_532917457362969849,
'onkeydown': attribOnkeydown_1257884844152169025,
'codebase': attribCodebase_2013075475851788100,
'class_': attribClass_1166814720137472289,
'onmouseover': attribOnmouseover_741809317326693841,
'height': attribHeight_3964235387625190441, | |
= []
shape = self._get_attr(tfnode, 'shape')
if shape is None:
size = [1, 224, 224, 3]
else:
for dimen in shape.dim:
size.append(dimen.size)
if size[0] < 0:
shape = [1]
else:
shape = [size[0]]
for i in range(1, len(size)):
shape.append(size[i])
if self.start_length == 0:
self.start_length = len(shape)
nnef_node = node.External(shape=shape,
_uid=self.gen_node_name(tfnode.name),
_output_shape=shape)
inputs = {}
attrs = {'shape': None}
return nnef_node, inputs, attrs
def import_PlaceholderWithDefault(self, tfnode):
self.name_convs[self.gen_node_name(tfnode.name)] = self.gen_node_name(tfnode.input[0])
return None, None, None
def import_Pow(self, tfnode):
tf_inputs = {'x':0, 'y':1}
attrs = {}
nnef_node_x = self.get_node_from_pool(tfnode, tf_inputs['x'])
nnef_node_y = self.get_node_from_pool(tfnode, tf_inputs['y'])
output_shape = nnef_node_x.output_shape[:]
nnef_node = node.Pow(x=nnef_node_x,
y=nnef_node_y,
_uid=self.gen_node_name(tfnode.name),
_output_shape=output_shape)
return nnef_node, tf_inputs, attrs
def import_RealDiv(self, tfnode):
tf_inputs = {'x':0, 'y':1}
attrs = {}
nnef_node_x = self.get_node_from_pool(tfnode, tf_inputs['x'])
nnef_node_y = self.get_node_from_pool(tfnode, tf_inputs['y'])
output_shape = self.define_elementwise_binary_output_shape(nnef_node_x, nnef_node_y)
nnef_node = node.Div(x=nnef_node_x,
y=nnef_node_y,
_uid=self.gen_node_name(tfnode.name),
_output_shape=output_shape)
return nnef_node, tf_inputs, attrs
def import_Relu(self, tfnode):
tf_inputs = {'x': 0}
attrs = {}
nnef_node_x = self.get_node_from_pool(tfnode, tf_inputs['x'])
output_shape = nnef_node_x.output_shape[:]
nnef_node = node.Relu(x=nnef_node_x,
_uid=self.gen_node_name(tfnode.name),
_output_shape=output_shape,)
return nnef_node, tf_inputs, attrs
def import_Relu6(self, tfnode):
tf_inputs = {'x': 0}
attrs = {}
nnef_node_x = self.get_node_from_pool(tfnode, tf_inputs['x'])
output_shape = nnef_node_x.output_shape[:]
nnef_node_relu = node.Relu(x=nnef_node_x,
_uid=self.gen_node_name(tfnode.name)+'_relu',
_output_shape=output_shape)
self.node_pool[nnef_node_relu.name] = nnef_node_relu
nnef_node = node.Min(x=nnef_node_relu,
y=6.0,
_uid=self.gen_node_name(tfnode.name),
_output_shape=output_shape)
return nnef_node, tf_inputs, attrs
def import_Reshape(self, tfnode):
tf_inputs = {'input': 0, 'shape': 1}
attrs = {}
nnef_node_input = self.get_node_from_pool(tfnode, tf_inputs['input'])
nnef_node_shape = self.get_node_from_pool(tfnode, tf_inputs['shape'])
self.name_convs[nnef_node_input.name] = self.gen_node_name(tfnode.name)
shape = None
if nnef_node_shape.op == 'variable':
shape = list(nnef_node_shape.get_tensordatafile().get_data().get_array()[0][0])
self.remove_node_from_pool(nnef_node_shape)
elif nnef_node_shape.op == 'shape_of':
shape = nnef_node_shape.output_shape[:]
else:
shape = np.reshape(np.asarray(nnef_node_shape.get_value(), dtype=np.int32), [-1])
self.remove_node_from_pool(nnef_node_shape)
if shape == [-1, 10, 768] and tfnode.name == 'Reshape_4':
shape = [1, 1, 768]
if shape == [-1, 10, 768] and tfnode.name == 'Reshape_4':
shape = [1, 1, 768]
in_shape = nnef_node_input.output_shape[:]
output_shape = []
for i in shape:
output_shape.append(i)
if -1 in output_shape:
in_size = 1
for i in in_shape:
in_size *= i
neg_index = -1
for i in range(len(output_shape)):
if output_shape[i] == -1:
neg_index = i
else:
in_size = in_size/output_shape[i]
output_shape[neg_index] = int(in_size)
output_shape = [int(v) for v in output_shape]
nnef_node = node.Reshape(input=nnef_node_input,
shape=output_shape,
_uid=self.gen_node_name(tfnode.name),
_output_shape=output_shape,
_maintain_format=False)
return nnef_node, tf_inputs, attrs
def import_ResizeArea(self, tfnode):
    """Import TF ResizeArea as an NNEF area_downsample node (NHWC)."""
    if self.start_format is None:
        self.start_format = 'NHWC'
    tf_inputs = {'input': 0, 'factor': 1}
    attrs = {}
    nnef_node_input = self.get_node_from_pool(tfnode, tf_inputs['input'])
    nnef_node_factor = self.get_node_from_pool(tfnode, tf_inputs['factor'])
    input_shape = nnef_node_input.output_shape[:]
    if nnef_node_factor.op == 'variable':
        output_size = nnef_node_factor.get_tensordatafile().get_data().get_array()[0][0]
        self.remove_node_from_pool(nnef_node_factor)
    else:
        print(nnef_node_factor.op)
        assert False, "Not currently handled"
    # Per-spatial-dim downsample factors; sizes must divide evenly.
    factor = []
    output_shape = [input_shape[0]]
    for i, in_dim in enumerate(input_shape[1:-1]):
        assert in_dim % output_size[i] == 0, "Unable to convert, ResizeArea uses non-integer factors"
        factor.append(int(in_dim / output_size[i]))
        output_shape.append(int(output_size[i]))
    output_shape.append(input_shape[-1])
    nnef_node = node.AreaDownsample(input=nnef_node_input,
                                    factor=factor,
                                    _uid=self.gen_node_name(tfnode.name),
                                    _output_shape=output_shape,
                                    _data_format='NHWC')
    return nnef_node, tf_inputs, attrs
def import_ResizeBilinear(self, tfnode):
    """Import TF ResizeBilinear as an NNEF multilinear_upsample node (NHWC)."""
    if self.start_format is None:
        self.start_format = 'NHWC'
    tf_inputs = {'input': 0, 'factor': 1}
    attrs = {}
    nnef_node_input = self.get_node_from_pool(tfnode, tf_inputs['input'])
    nnef_node_factor = self.get_node_from_pool(tfnode, tf_inputs['factor'])
    input_shape = nnef_node_input.output_shape[:]
    if nnef_node_factor.op == 'variable':
        output_size = nnef_node_factor.get_tensordatafile().get_data().get_array()[0][0]
        self.remove_node_from_pool(nnef_node_factor)
    else:
        print(nnef_node_factor.op)
        assert False, "Not currently handled"
    # Per-spatial-dim upsample factors; target sizes must be integer multiples.
    factor = []
    output_shape = [input_shape[0]]
    for i, in_dim in enumerate(input_shape[1:-1]):
        assert output_size[i] % in_dim == 0, "Unable to convert, ResizeBilinear uses non-integer factors"
        factor.append(int(output_size[i] / in_dim))
        output_shape.append(int(output_size[i]))
    output_shape.append(input_shape[-1])
    nnef_node = node.MultilinearUpsample(input=nnef_node_input,
                                         factor=factor,
                                         _uid=self.gen_node_name(tfnode.name),
                                         _output_shape=output_shape,
                                         _data_format='NHWC')
    return nnef_node, tf_inputs, attrs
def import_ResizeNearestNeighbor(self, tfnode):
    """Import TF ResizeNearestNeighbor as NNEF nearest up- or downsample (NHWC).

    The direction is chosen by comparing the first spatial input dim with
    the requested output size.
    """
    if self.start_format is None:
        self.start_format = 'NHWC'
    tf_inputs = {'input': 0, 'factor': 1}
    attrs = {}
    nnef_node_input = self.get_node_from_pool(tfnode, tf_inputs['input'])
    nnef_node_factor = self.get_node_from_pool(tfnode, tf_inputs['factor'])
    input_shape = nnef_node_input.output_shape[:]
    if nnef_node_factor.op == 'variable':
        output_size = nnef_node_factor.get_tensordatafile().get_data().get_array()[0][0]
        self.remove_node_from_pool(nnef_node_factor)
    else:
        print(nnef_node_factor.op)
        assert False, "Not currently handled"
    factor = []
    output_shape = [input_shape[0]]
    upsampling = input_shape[1] < output_size[0]
    for i, in_dim in enumerate(input_shape[1:-1]):
        if upsampling:
            assert output_size[i] % in_dim == 0, "Unable to convert, ResizeNearestNeighbor uses non-integer factors"
            factor.append(int(output_size[i] / in_dim))
        else:
            assert in_dim % output_size[i] == 0, "Unable to convert, ResizeNearestNeighbor uses non-integer factors"
            factor.append(int(in_dim / output_size[i]))
        output_shape.append(int(output_size[i]))
    output_shape.append(input_shape[-1])
    op_cls = node.NearestUpsample if upsampling else node.NearestDownsample
    nnef_node = op_cls(input=nnef_node_input,
                       factor=factor,
                       _uid=self.gen_node_name(tfnode.name),
                       _output_shape=output_shape,
                       _data_format='NHWC')
    return nnef_node, tf_inputs, attrs
def import_Round(self, tfnode):
    """Import TF Round as an elementwise NNEF round node (shape unchanged)."""
    tf_inputs = {'x': 0}
    attrs = {}
    source = self.get_node_from_pool(tfnode, tf_inputs['x'])
    nnef_node = node.Round(x=source,
                           _uid=self.gen_node_name(tfnode.name),
                           _output_shape=source.output_shape[:])
    return nnef_node, tf_inputs, attrs
def import_Rsqrt(self, tfnode):
    """Import TF Rsqrt as an elementwise NNEF rsqrt node (shape unchanged)."""
    tf_inputs = {'x': 0}
    attrs = {}
    source = self.get_node_from_pool(tfnode, tf_inputs['x'])
    nnef_node = node.Rsqrt(x=source,
                           _uid=self.gen_node_name(tfnode.name),
                           _output_shape=source.output_shape[:])
    return nnef_node, tf_inputs, attrs
def import_Select(self, tfnode):
    """Import TF Select as an NNEF select node."""
    tf_inputs = {'condition': 0, 'true_value': 1, 'false_value': 2}
    attrs = {}
    cond = self.get_node_from_pool(tfnode, tf_inputs['condition'])
    val_true = self.get_node_from_pool(tfnode, tf_inputs['true_value'])
    val_false = self.get_node_from_pool(tfnode, tf_inputs['false_value'])
    # Output shape follows elementwise broadcasting of the two value branches.
    out_shape = self.define_elementwise_binary_output_shape(val_true, val_false)
    nnef_node = node.Select(condition=cond,
                            true_value=val_true,
                            false_value=val_false,
                            _uid=self.gen_node_name(tfnode.name),
                            _output_shape=out_shape)
    return nnef_node, tf_inputs, attrs
def import_Shape(self, tfnode):
    """Import TF Shape as an NNEF shape_of node."""
    tf_inputs = {'x': 0}
    attrs = {}
    source = self.get_node_from_pool(tfnode, tf_inputs['x'])
    # NOTE(review): _output_shape is set to the *input's* shape, not to the
    # 1-D shape vector itself; consumers (e.g. the shape_of branch in
    # import_Reshape) read it that way — confirm before changing.
    nnef_node = node.ShapeOf(x=source,
                             _uid=self.gen_node_name(tfnode.name),
                             _output_shape=source.output_shape[:])
    return nnef_node, tf_inputs, attrs
def import_Sigmoid(self, tfnode):
    """Import TF Sigmoid as an elementwise NNEF sigmoid node (shape unchanged)."""
    tf_inputs = {'x': 0}
    attrs = {}
    source = self.get_node_from_pool(tfnode, tf_inputs['x'])
    nnef_node = node.Sigmoid(x=source,
                             _uid=self.gen_node_name(tfnode.name),
                             _output_shape=source.output_shape[:])
    return nnef_node, tf_inputs, attrs
def import_Sign(self, tfnode):
    """Import TF Sign as an elementwise NNEF sign node (shape unchanged)."""
    tf_inputs = {'x': 0}
    attrs = {}
    source = self.get_node_from_pool(tfnode, tf_inputs['x'])
    nnef_node = node.Sign(x=source,
                          _uid=self.gen_node_name(tfnode.name),
                          _output_shape=source.output_shape[:])
    return nnef_node, tf_inputs, attrs
def import_Slice(self, tfnode):
    """Import a TF Slice op as an NNEF slice over all axes.

    `begin` and `end` may each come from a variable, a constant, or a
    value-carrying node.  The output shape is end - begin per axis.
    """
    tf_inputs = {'input': 0, 'begin': 1, 'end': 2}
    attrs = {}
    nnef_node_input = self.get_node_from_pool(tfnode, tf_inputs['input'])
    nnef_node_begin = self.get_node_from_pool(tfnode, tf_inputs['begin'])
    nnef_node_end = self.get_node_from_pool(tfnode, tf_inputs['end'])
    if nnef_node_begin.op == 'variable':
        begin = nnef_node_begin.get_tensordatafile().get_data().get_array()[0][0]
    elif nnef_node_begin.op == 'constant':
        begin = np.reshape(np.asarray(nnef_node_begin.parameters['value'], dtype=np.int32), nnef_node_begin.parameters['shape'])
    else:
        begin = nnef_node_begin.get_value()
    if nnef_node_end.op == 'variable':
        end = nnef_node_end.get_tensordatafile().get_data().get_array()[0][0]
    elif nnef_node_end.op == 'constant':
        # BUG FIX: the original compared the node object itself to the string
        # 'constant' (always False), so constant `end` inputs fell through to
        # get_value(); compare the .op attribute, as done for `begin` above.
        end = np.reshape(np.asarray(nnef_node_end.parameters['value'], dtype=np.int32), nnef_node_end.parameters['shape'])
    else:
        end = nnef_node_end.get_value()
    # Slice every axis; per-axis extent is end - begin.
    axes = list(range(len(begin)))
    output_shape = [int(end[i] - begin[i]) for i in axes]
    nnef_node = node.Slice(input=nnef_node_input,
                           axes=axes,
                           begin=list(begin),
                           end=list(end),
                           _uid=self.gen_node_name(tfnode.name),
                           _output_shape=output_shape)
    return nnef_node, tf_inputs, attrs
def import_Softmax(self, tfnode):
    """Import TF Softmax as an NNEF softmax node (shape unchanged)."""
    tf_inputs = {'x': 0}
    attrs = {}
    source = self.get_node_from_pool(tfnode, tf_inputs['x'])
    nnef_node = node.Softmax(x=source,
                             _uid=self.gen_node_name(tfnode.name),
                             _output_shape=source.output_shape[:])
    return nnef_node, tf_inputs, attrs
def import_Softplus(self, tfnode):
    """Import TF Softplus as an elementwise NNEF softplus node (shape unchanged)."""
    tf_inputs = {'x': 0}
    attrs = {}
    source = self.get_node_from_pool(tfnode, tf_inputs['x'])
    nnef_node = node.Softplus(x=source,
                              _uid=self.gen_node_name(tfnode.name),
                              _output_shape=source.output_shape[:])
    return nnef_node, tf_inputs, attrs
def import_Softsign(self, tfnode):
    """Import TF Softsign as an elementwise NNEF softsign node (shape unchanged)."""
    tf_inputs = {'x': 0}
    attrs = {}
    source = self.get_node_from_pool(tfnode, tf_inputs['x'])
    nnef_node = node.Softsign(x=source,
                              _uid=self.gen_node_name(tfnode.name),
                              _output_shape=source.output_shape[:])
    return nnef_node, tf_inputs, attrs
def import_Split(self, tfnode):
    """Import a TF Split op as one NNEF split node plus one OutputVal per part.

    NOTE(review): assumes the axis input is a constant scalar held in
    parameters['value'][0]; the axis node is removed from the pool.
    """
    tf_inputs = {'value': 1, 'axis': 0}
    attrs = {'num_split': None}
    nnef_node_value = self.get_node_from_pool(tfnode, tf_inputs['value'])
    nnef_node_axis = self.get_node_from_pool(tfnode, tf_inputs['axis'])
    self.remove_node_from_pool(nnef_node_axis)
    split_axis = int(nnef_node_axis.parameters['value'][0])
    num_split = tfnode.attr['num_split'].i
    names = []
    if num_split >= 1:
        # Build the bracketed multi-output uid "[name, name:1, ...]" while
        # collecting the individual per-output names in `names`.
        new_name = '['
        for i in range(num_split):
            if i == 0:
                new_name = new_name + self.gen_node_name(tfnode.name) + ', '
                names.append(self.gen_node_name(tfnode.name))
            else:
                new_name = new_name + self.gen_node_name(tfnode.name + ':' + str(i)) + ', '
                names.append(self.gen_node_name(tfnode.name + ':' + str(i)))
        # Drop the trailing ', ' and close the bracket.
        new_name = new_name[:-2] + ']'
    input_shape = nnef_node_value.output_shape[:]
    # Distribute the split dimension as evenly as possible: when it does not
    # divide exactly, the first `modu` parts get one extra element.
    ratio = math.floor(input_shape[split_axis]/num_split)
    modu = input_shape[split_axis]%num_split
    ratios = []
    for i in range(len(names)):
        rat_val = ratio
        if modu != 0:
            rat_val += 1
            modu -= 1
        ratios.append(int(rat_val))
    nnef_node_split = node.Split(value=nnef_node_value,
                                 axis=split_axis,
                                 ratios=ratios,
                                 _uid=new_name,
                                 _output_shape=input_shape)
    # Register one OutputVal node per split part; each carries the sliced
    # shape along the split axis.
    for i in range(len(names)):
        out_shape = input_shape[:]
        out_shape[split_axis] = ratios[i]
        nnef_node = node.OutputVal(base_node=nnef_node_split,
                                   base_index=i,
                                   _uid=names[i],
                                   _output_shape=out_shape)
        self.node_pool[nnef_node.name] = nnef_node
    return nnef_node_split, tf_inputs, attrs
def import_Sqrt(self, tfnode):
    """Import TF Sqrt as an elementwise NNEF sqrt node (shape unchanged)."""
    tf_inputs = {'x': 0}
    attrs = {}
    source = self.get_node_from_pool(tfnode, tf_inputs['x'])
    nnef_node = node.Sqrt(x=source,
                          _uid=self.gen_node_name(tfnode.name),
                          _output_shape=source.output_shape[:])
    return nnef_node, tf_inputs, attrs
def import_Square(self, tfnode):
    """Import TF Square as an elementwise NNEF sqr node (shape unchanged)."""
    tf_inputs = {'x': 0}
    attrs = {}
    source = self.get_node_from_pool(tfnode, tf_inputs['x'])
    nnef_node = node.Sqr(x=source,
                         _uid=self.gen_node_name(tfnode.name),
                         _output_shape=source.output_shape[:])
    return nnef_node, tf_inputs, attrs
def import_Squeeze(self, tfnode):
    """Import TF Squeeze as an NNEF reshape that drops size-1 dimensions."""
    tf_inputs = {'input': 0}
    attrs = {}
    nnef_node_input = self.get_node_from_pool(tfnode, tf_inputs['input'])
    input_shape = nnef_node_input.output_shape
    squeeze_dims = tfnode.attr['squeeze_dims'].list.i
    if squeeze_dims:
        # Drop exactly the listed axes.
        output_shape = [int(d) for i, d in enumerate(input_shape)
                        if i not in squeeze_dims]
    else:
        # No explicit axes: drop every size-1 dimension.
        output_shape = [int(d) for d in input_shape if d != 1]
    nnef_node = node.Reshape(input=nnef_node_input,
                             shape=output_shape,
                             _uid=self.gen_node_name(tfnode.name),
                             _output_shape=output_shape,
                             _maintain_format=False)
    return nnef_node, tf_inputs, attrs
def import_StridedSlice(self, tfnode):
tf_inputs = {'input': 0, 'begin':1, 'end':2, 'strides':3}
attrs = {}
nnef_node_input = self.get_node_from_pool(tfnode, tf_inputs['input'])
nnef_node_begin = self.get_node_from_pool(tfnode, tf_inputs['begin'])
nnef_node_end = self.get_node_from_pool(tfnode, tf_inputs['end'])
nnef_node_strides = self.get_node_from_pool(tfnode, tf_inputs['strides'])
if nnef_node_begin.op == 'variable':
begin = nnef_node_begin.get_tensordatafile().get_data().get_array()[0][0]
elif nnef_node_begin.op == 'constant':
begin = np.reshape(np.asarray(nnef_node_begin.parameters['value'], dtype=np.int32), nnef_node_begin.parameters['shape'])
else:
begin = nnef_node_begin.get_value()
if nnef_node_end.op == 'variable':
end = nnef_node_end.get_tensordatafile().get_data().get_array()[0][0]
elif nnef_node_end.op == 'constant':
end = np.reshape(np.asarray(nnef_node_end.parameters['value'], dtype=np.int32), nnef_node_end.parameters['shape'])
else:
end = nnef_node_end.get_value()
if nnef_node_strides.op == 'variable':
strides = nnef_node_strides.get_tensordatafile().get_data().get_array()[0][0]
elif nnef_node_strides.op == 'constant':
strides = np.reshape(np.asarray(nnef_node_strides.parameters['value'], dtype=np.int32), nnef_node_strides.parameters['shape'])
else:
strides = nnef_node_strides.get_value()
for stride in strides:
assert stride == 1, "Slice operation uses | |
"""\
Run the benchmark multiple times with a range of settings,
and produce tables / graphs with these results
"""
import argparse
import sys
import logging
import subprocess
import datetime
import os
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import numpy as np
import math
import time
import pandas as pd
# Home dir should be continuum/
# NOTE(review): importing this module has a side effect — it changes the
# process working directory one level up; all relative paths below
# ('./logs', 'configuration/...') depend on it.
os.chdir('../')
def enable_logging(verbose):
    """Configure the root logger for the benchmark sweep.

    Args:
        verbose (bool): If True log at DEBUG level, otherwise INFO.
    """
    level = logging.DEBUG if verbose else logging.INFO
    # Renamed from `format`: that name shadows the builtin of the same name.
    log_format = "[%(asctime)s %(filename)20s:%(lineno)4s - %(funcName)25s() ] %(message)s"
    logging.basicConfig(format=log_format,
                        level=level,
                        datefmt='%Y-%m-%d %H:%M:%S')
    logging.info('Logging has been enabled')
class Experiment():
    """Experiment template / super class.

    Subclasses populate self.runs with one dict per benchmark run
    (settings, command, and later the run's output lines).
    """
    def __init__(self, resume):
        # `resume` is either None or a datetime; see check_resume().
        self.resume = resume
        self.runs = []
    def check_resume(self):
        """If the resume argument is given, get the first x log files >= the resume date,
        and use their output instead of re-running the experiment.
        """
        if self.resume == None:
            return
        log_location = './logs'
        logs = [f for f in os.listdir(log_location) if f.endswith('.log')]
        # Filenames start with a '%Y-%m-%d_%H:%M:%S' timestamp, so a plain
        # lexicographic sort is chronological.
        logs.sort()
        exp_i = 0
        for log in logs:
            splits = log.split('_')
            dt = splits[0] + '_' + splits[1]
            dt = datetime.datetime.strptime(dt, '%Y-%m-%d_%H:%M:%S')
            if dt >= self.resume:
                path = log_location + '/' + log
                logging.info('File %s for experiment run %i' % (path, exp_i))
                f = open(path, 'r')
                output = [line for line in f.readlines()]
                f.close()
                # NOTE(review): logs are matched to runs purely by order —
                # assumes the saved logs were produced in generate() order.
                self.runs[exp_i]['output'] = output
                exp_i += 1
                # We have all logs needed
                if exp_i == len(self.runs):
                    break
    def run_commands(self):
        """Execute all generated commands
        """
        for run in self.runs:
            # An empty command marks a combination that cannot be run.
            if run['command'] == []:
                continue
            # Skip runs where we got output with --resume
            if run['output'] != None:
                logging.info('Skip command: %s' % (' '.join(run['command'])))
                continue
            output, error = self.execute(run['command'])
            logging.debug('------------------------------------')
            logging.debug('OUTPUT')
            logging.debug('------------------------------------')
            logging.debug('\n' + ''.join(output))
            # Any stderr output is treated as fatal: log it and abort the sweep.
            if error != []:
                logging.debug('------------------------------------')
                logging.debug('ERROR')
                logging.debug('------------------------------------')
                logging.debug('\n' + ''.join(error))
                sys.exit()
            logging.debug('------------------------------------')
            # Get output from log file
            # NOTE(review): assumes the first stdout line ends with
            # 'and file at <path>' — verify against main.py's output format.
            # Also note this file handle is never closed.
            logpath = output[0].rstrip().split('and file at ')[-1]
            f = open(logpath, 'r')
            output = [line for line in f.readlines()]
            run['output'] = output
    def execute(self, command):
        """Execute a process using the subprocess library, and return the output/error or the process

        Args:
            command (list(str)): Command to be executed.

        Returns:
            (list(str), list(str)): Return the output and error generated by this process.
        """
        logging.info(' '.join(command))
        process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output = [line.decode('utf-8') for line in process.stdout.readlines()]
        error = [line.decode('utf-8') for line in process.stderr.readlines()]
        return output, error
class Figure4(Experiment):
    """Replicate figure 4:
    System load with an increasing number of endpoints connected to a single worker

    So:
    - Run with all 3 deployment modes
    - Vary number of endpoints connected to a single worker
    """
    def __init__(self, resume):
        Experiment.__init__(self, resume)
        self.modes = ['cloud', 'edge', 'endpoint']
        # Cores per worker, aligned with self.modes (cloud=4, edge=2, endpoint=1).
        self.cores = [4, 2, 1]
        # Endpoints-per-worker values to sweep.
        self.endpoints = [1, 2, 4, 8]
        # Set by plot(): flat list of system-load percentages, endpoint-major.
        self.y = None
    def __repr__(self):
        """Returns this string when called as print(object)
        """
        return '''
APP image-classification
MODES %s
WORKERS 1
CLOUD_CORES %i
EDGE_CORES %i
ENDPOINT_CORES %i
ENDPOINTS/WORKER %s''' % (
        ','.join(self.modes), self.cores[0], self.cores[1], self.cores[2],
        ','.join([str(endpoint) for endpoint in self.endpoints]))
    def generate(self):
        """Generate commands to run the benchmark based on the current settings
        """
        # Differ in deployment modes
        for mode in self.modes:
            if mode == 'cloud':
                config = 'cloud_endpoint'
                cores = self.cores[0]
            elif mode == 'edge':
                config = 'edge_endpoint'
                cores = self.cores[1]
            else:
                config = 'endpoint'
                cores = self.cores[2]
            # Differ in #endpoints per worker
            for endpoint in self.endpoints:
                # No sense to use more than 1 endpoint in endpoint-only deployment mode
                if mode == 'endpoint' and endpoint > 1:
                    continue
                # Config file name encodes mode and endpoint count, e.g.
                # configuration/fig4/cloud_endpoint4.cfg
                command = ['python3', 'main.py', 'configuration/fig4/' + config + str(endpoint) + '.cfg']
                command = [str(c) for c in command]
                run = {'mode': mode,
                       'cores': cores,
                       'endpoints': endpoint,
                       'command': command,
                       'output': None,
                       'worker_time': None}
                self.runs.append(run)
    def parse_output(self):
        """For all runs, get the worker runtime
        """
        # Get the line containing the metrics
        for run in self.runs:
            # The csv blob normally sits 7 lines from the end; shift by one
            # when the 'Output in csv' banner occupies that slot.
            # NOTE(review): `input` shadows the builtin of the same name.
            input = run['output'][-7][1:-2]
            if 'Output in csv' in input:
                input = run['output'][-6][1:-2]
            # Split string into list
            l = [x.split(',') for x in input.split('\\n')]
            l = l[:-1]
            # Drop the per-row index column.
            l = [sub[1:] for sub in l]
            # Split into header and data
            header = l[0]
            data = l[1:]
            # Convert into dataframe
            df = pd.DataFrame(data, columns=header)
            df['proc/data (ms)'] = pd.to_numeric(df['proc/data (ms)'], downcast='float')
            # Calculate num. of images processed per second by the cloud/edge/endpoint
            processed_rate = df['proc/data (ms)'].mean()
            processed_rate = 1000.0 / processed_rate
            processed_rate *= run['cores']
            # Calculate number of images generated per second
            # NOTE(review): assumes each endpoint generates 5 images/second —
            # must match the frequency configured in the .cfg files.
            frequency = 5
            requested_rate = float(frequency * run['endpoints'])
            # Calculate usage of processors
            run['usage'] = int((requested_rate / processed_rate) * 100)
    def plot(self):
        """Draw the grouped bar chart of system load and save it under ./logs/."""
        # set width of bar
        plt.rcParams.update({'font.size': 22})
        fig = plt.subplots(figsize =(12, 6))
        barWidth = 0.2
        bars = np.arange(len(self.modes))
        colors = ['dimgray', 'gray', 'darkgray', 'lightgray']
        y_total = []
        for endpoint, color in zip(self.endpoints, colors):
            # Get the x and y data
            y = [run['usage'] for run in self.runs if run['endpoints'] == endpoint]
            # log2(endpoint) places the 1/2/4/8-endpoint bars side by side per group.
            x = [x + math.log2(endpoint) * barWidth for x in bars]
            # mode=endpoint only uses 1 endpoint
            if endpoint > 1:
                x = x[:-1]
            # Plot the bar
            plt.bar(x, y, color=color, width=barWidth*0.9, label='Endpoints: %s' % (endpoint))
            y_total += y
        # Add horizontal lines every 100 percent
        plt.axhline(y=100, color='k', linestyle='-', linewidth=3)
        plt.axhline(y=200, color='k', linestyle='-', linewidth=1, alpha=0.5)
        plt.axhline(y=300, color='k', linestyle='-', linewidth=1, alpha=0.5)
        # Adding titles
        plt.xlabel('Deployment Mode')
        plt.ylabel('System Load')
        # Adding Xticks
        label_ticks = [r + (len(self.modes) / 2) * barWidth for r in range(len(self.modes))]
        # The last group (endpoint mode) has fewer bars, so re-center its tick.
        label_ticks[-1] -= (len([endpoint for endpoint in self.endpoints if endpoint > 1]) / 2) * barWidth
        plt.xticks(label_ticks, [mode.capitalize() for mode in self.modes])
        plt.legend(loc='upper left', framealpha=1.0)
        plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter())
        plt.ylim(0, 400)
        plt.yticks(np.arange(0, 500, 100))
        t = time.strftime("%Y-%m-%d_%H:%M:%S", time.gmtime())
        plt.savefig('./logs/fig4_%s.png' % (t), bbox_inches='tight')
        # Keep the flattened usage values (endpoint-major) for print_result().
        self.y = y_total
    def print_result(self):
        """Log the system load for every (endpoint, mode) combination."""
        i = 0
        # Iterates endpoint-major, mode-minor — the same order plot() built self.y.
        for endpoint in self.endpoints:
            for mode in self.modes:
                if mode == 'endpoint' and endpoint > 1:
                    break
                logging.info('Mode: %10s | Endpoints: %3s | System Load: %i%%' % (mode, endpoint, self.y[i]))
                i += 1
class Figure5(Experiment):
"""Replicate figure 5:
System load with an increasing number of CPU cores per worker
So:
- Run with all 3 deployment modes
- Vary number of CPU cores per worker
"""
def __init__(self, resume):
    """Set up the figure-5 sweep: three deployment modes, varying core counts."""
    super().__init__(resume)
    self.modes = ['cloud', 'edge', 'endpoint']
    self.cores = [1, 2, 4]
    # Filled by plot(): flat list of system-load percentages.
    self.y = None
def __repr__(self):
    """Returns this string when called as print(object)"""
    # BUG FIX: corrected the app-name typo 'image-classificatiom' so the
    # printed settings match Figure4's output ('image-classification').
    return '''
APP image-classification
MODES %s
WORKERS 1
CORES %s
ENDPOINTS/WORKER 1''' % (
    ','.join(self.modes),
    ','.join([str(x) for x in self.cores]))
def generate(self):
    """Generate one benchmark command per (mode, cores) combination."""
    for mode in self.modes:
        for core in self.cores:
            # Cloud mode cannot run with a single core (see parse_output),
            # so that combination keeps an empty command and is skipped later.
            if mode == 'cloud' and core == 1:
                command = []
            else:
                command = ['python3', 'main.py',
                           'configuration/fig5/%s_cores%i.cfg' % (mode, core)]
                command = [str(c) for c in command]
            self.runs.append({'mode': mode,
                              'cores': core,
                              'endpoints': 1,
                              'command': command,
                              'output': None,
                              'worker_time': None})
def parse_output(self):
    """For all runs, get the worker runtime
    """
    # Get the line containing the metrics
    for run in self.runs:
        # Kubernetes does not work with only 1 core
        if run['mode'] == 'cloud' and run['cores'] == 1:
            run['usage'] = 0
            continue
        # The csv blob normally sits 7 lines from the end; shift by one when
        # the 'Output in csv' banner occupies that slot.
        # NOTE(review): `input` shadows the builtin of the same name.
        input = run['output'][-7][1:-2]
        if 'Output in csv' in input:
            input = run['output'][-6][1:-2]
        # Split string into list
        l = [x.split(',') for x in input.split('\\n')]
        l = l[:-1]
        # Drop the per-row index column.
        l = [sub[1:] for sub in l]
        # Split into header and data
        header = l[0]
        data = l[1:]
        # Convert into dataframe
        df = pd.DataFrame(data, columns=header)
        df['proc/data (ms)'] = pd.to_numeric(df['proc/data (ms)'], downcast='float')
        # Calculate num. of images processed per second by the cloud/edge/endpoint
        processed_rate = df['proc/data (ms)'].mean()
        processed_rate = 1000.0 / processed_rate
        processed_rate *= run['cores']
        # Calculate number of images generated per second
        # NOTE(review): assumes each endpoint generates 5 images/second —
        # must match the frequency configured in the .cfg files.
        frequency = 5
        requested_rate = float(frequency * run['endpoints'])
        # Calculate usage of processors
        run['usage'] = int((requested_rate / processed_rate) * 100)
def plot(self):
# set width of bar
plt.rcParams.update({'font.size': 22})
fig = plt.subplots(figsize =(12, 6))
barWidth = 0.2
bars = np.arange(len(self.modes))
colors = ['dimgray', 'gray', 'darkgray']
y_total = []
for core, color in zip(self.cores, colors):
# Get the x and y data
y = [run['usage'] for run in self.runs if run['cores'] == core]
x = [x + math.log2(core) * barWidth * 1.2 for x | |
# Source: johncollinsai/post-high-frequency-data — venv/lib/python3.8/site-packages/fontTools/pens/freetypePen.py
# -*- coding: utf-8 -*-
"""Pen to rasterize paths with FreeType."""
__all__ = ['FreeTypePen']
import os
import ctypes
import platform
import subprocess
import collections
import math
import freetype
from freetype.raw import FT_Outline_Get_Bitmap, FT_Outline_Get_BBox, FT_Outline_Get_CBox
from freetype.ft_types import FT_Pos
from freetype.ft_structs import FT_Vector, FT_BBox, FT_Bitmap, FT_Outline
from freetype.ft_enums import FT_OUTLINE_NONE, FT_OUTLINE_EVEN_ODD_FILL, FT_PIXEL_MODE_GRAY
from freetype.ft_errors import FT_Exception
from fontTools.pens.basePen import BasePen
from fontTools.misc.roundTools import otRound
from fontTools.misc.transform import Transform
Contour = collections.namedtuple('Contour', ('points', 'tags'))
LINE = 0b00000001
CURVE = 0b00000011
OFFCURVE = 0b00000010
QCURVE = 0b00000001
QOFFCURVE = 0b00000000
class FreeTypePen(BasePen):
"""Pen to rasterize paths with FreeType. Requires `freetype-py` module.
Constructs ``FT_Outline`` from the paths, and renders it within a bitmap
buffer.
For ``array()`` and ``show()``, `numpy` and `matplotlib` must be installed.
For ``image()``, `Pillow` is required. Each module is lazily loaded when the
corresponding method is called.
Args:
glyphSet: a dictionary of drawable glyph objects keyed by name
used to resolve component references in composite glyphs.
:Examples:
If `numpy` and `matplotlib` is available, the following code will
show the glyph image of `fi` in a new window::
from fontTools.ttLib import TTFont
from fontTools.pens.freetypePen import FreeTypePen
from fontTools.misc.transform import Offset
pen = FreeTypePen(None)
font = TTFont('SourceSansPro-Regular.otf')
glyph = font.getGlyphSet()['fi']
glyph.draw(pen)
width, ascender, descender = glyph.width, font['OS/2'].usWinAscent, -font['OS/2'].usWinDescent
height = ascender - descender
pen.show(width=width, height=height, transform=Offset(0, -descender))
Combining with `uharfbuzz`, you can typeset a chunk of glyphs in a pen::
import uharfbuzz as hb
from fontTools.pens.freetypePen import FreeTypePen
from fontTools.pens.transformPen import TransformPen
from fontTools.misc.transform import Offset
en1, en2, ar, ja = 'Typesetting', 'Jeff', 'صف الحروف', 'たいぷせっと'
for text, font_path, direction, typo_ascender, typo_descender, vhea_ascender, vhea_descender, contain, features in (
(en1, 'NotoSans-Regular.ttf', 'ltr', 2189, -600, None, None, False, {"kern": True, "liga": True}),
(en2, 'NotoSans-Regular.ttf', 'ltr', 2189, -600, None, None, True, {"kern": True, "liga": True}),
(ar, 'NotoSansArabic-Regular.ttf', 'rtl', 1374, -738, None, None, False, {"kern": True, "liga": True}),
(ja, 'NotoSansJP-Regular.otf', 'ltr', 880, -120, 500, -500, False, {"palt": True, "kern": True}),
(ja, 'NotoSansJP-Regular.otf', 'ttb', 880, -120, 500, -500, False, {"vert": True, "vpal": True, "vkrn": True})
):
blob = hb.Blob.from_file_path(font_path)
face = hb.Face(blob)
font = hb.Font(face)
buf = hb.Buffer()
buf.direction = direction
buf.add_str(text)
buf.guess_segment_properties()
hb.shape(font, buf, features)
x, y = 0, 0
pen = FreeTypePen(None)
for info, pos in zip(buf.glyph_infos, buf.glyph_positions):
gid = info.codepoint
transformed = TransformPen(pen, Offset(x + pos.x_offset, y + pos.y_offset))
font.draw_glyph_with_pen(gid, transformed)
x += pos.x_advance
y += pos.y_advance
offset, width, height = None, None, None
if direction in ('ltr', 'rtl'):
offset = (0, -typo_descender)
width = x
height = typo_ascender - typo_descender
else:
offset = (-vhea_descender, -y)
width = vhea_ascender - vhea_descender
height = -y
pen.show(width=width, height=height, transform=Offset(*offset), contain=contain)
For Jupyter Notebook, the rendered image will be displayed in a cell if
you replace ``show()`` with ``image()`` in the examples.
"""
def __init__(self, glyphSet):
    """Create a pen with an empty contour list.

    Args:
        glyphSet: dictionary of drawable glyph objects keyed by name, used
            to resolve component references in composite glyphs.
    """
    super().__init__(glyphSet)
    self.contours = []
def outline(self, transform=None, evenOdd=False):
    """Converts the current contours to ``FT_Outline``.

    Args:
        transform: An optional 6-tuple containing an affine transformation,
            or a ``Transform`` object from the ``fontTools.misc.transform``
            module.
        evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
    """
    transform = transform or Transform()
    if not hasattr(transform, 'transformPoint'):
        transform = Transform(*transform)
    n_contours = len(self.contours)
    n_points = sum(len(c.points) for c in self.contours)
    # FreeType outline coordinates are 26.6 fixed-point: scale by 64 and round.
    points = []
    for c in self.contours:
        for p in c.points:
            px, py = transform.transformPoint(p)
            points.append(FT_Vector(FT_Pos(otRound(px * 64)), FT_Pos(otRound(py * 64))))
    tags = [t for c in self.contours for t in c.tags]
    # `ends` holds the index of each contour's last point, cumulatively.
    ends = []
    running = 0
    for c in self.contours:
        running += len(c.points)
        ends.append(running - 1)
    fill_flag = FT_OUTLINE_EVEN_ODD_FILL if evenOdd else FT_OUTLINE_NONE
    return FT_Outline(
        (ctypes.c_short)(n_contours),
        (ctypes.c_short)(n_points),
        (FT_Vector * n_points)(*points),
        (ctypes.c_ubyte * n_points)(*tags),
        (ctypes.c_short * n_contours)(*ends),
        (ctypes.c_int)(fill_flag)
    )
def buffer(self, width=None, height=None, transform=None, contain=False, evenOdd=False):
    """Renders the current contours within a bitmap buffer.

    Args:
        width: Image width of the bitmap in pixels. If omitted, it
            automatically fits to the bounding box of the contours.
        height: Image height of the bitmap in pixels. If omitted, it
            automatically fits to the bounding box of the contours.
        transform: A optional 6-tuple containing an affine transformation,
            or a ``Transform`` object from the ``fontTools.misc.transform``
            module. The bitmap size is not affected by this matrix.
        contain: If ``True``, the image size will be automatically expanded
            so that it fits to the bounding box of the paths. Useful for
            rendering glyphs with negative sidebearings without clipping.
        evenOdd: Pass ``True`` for even-odd fill instead of non-zero.

    Returns:
        A tuple of ``(buffer, size)``, where ``buffer`` is a ``bytes``
        object of the resulted bitmap and ``size`` is a 2-tuple of its
        dimension.

    :Example:
        .. code-block::

            >> pen = FreeTypePen(None)
            >> glyph.draw(pen)
            >> buf, size = pen.buffer(width=500, height=1000)
            >> type(buf), len(buf), size
            (<class 'bytes'>, 500000, (500, 1000))
    """
    transform = transform or Transform()
    if not hasattr(transform, 'transformPoint'):
        transform = Transform(*transform)
    # Auto-size an axis when its dimension was omitted, or expand both when
    # contain=True.
    contain_x, contain_y = contain or width is None, contain or height is None
    width, height = width or 0, height or 0
    if contain_x or contain_y:
        # Transform the contour bbox, then grow the canvas and shift the
        # translation so the transformed paths fall inside the bitmap.
        bbox = self.bbox
        bbox = transform.transformPoints((bbox[0:2], bbox[2:4]))
        bbox = (*bbox[0], *bbox[1])
        bbox_size = bbox[2] - bbox[0], bbox[3] - bbox[1]
        dx, dy = transform.dx, transform.dy
        if contain_x:
            dx = min(-dx, bbox[0]) * -1.0
            width = max(width, bbox_size[0])
        if contain_y:
            dy = min(-dy, bbox[1]) * -1.0
            height = max(height, bbox_size[1])
        transform = Transform(*transform[:4], dx, dy)
    width, height = math.ceil(width), math.ceil(height)
    buf = ctypes.create_string_buffer(width * height)
    # 8-bit grayscale bitmap: one byte per pixel, pitch equal to width.
    bitmap = FT_Bitmap(
        (ctypes.c_int)(height),
        (ctypes.c_int)(width),
        (ctypes.c_int)(width),
        (ctypes.POINTER(ctypes.c_ubyte))(buf),
        (ctypes.c_short)(256),
        (ctypes.c_ubyte)(FT_PIXEL_MODE_GRAY),
        (ctypes.c_char)(0),
        (ctypes.c_void_p)(None)
    )
    outline = self.outline(transform=transform, evenOdd=evenOdd)
    err = FT_Outline_Get_Bitmap(freetype.get_handle(), ctypes.byref(outline), ctypes.byref(bitmap))
    if err != 0:
        raise FT_Exception(err)
    return buf.raw, (width, height)
def array(self, width=None, height=None, transform=None, contain=False, evenOdd=False):
    """Returns the rendered contours as a numpy array. Requires `numpy`.

    Args:
        width: Image width of the bitmap in pixels. If omitted, it
            automatically fits to the bounding box of the contours.
        height: Image height of the bitmap in pixels. If omitted, it
            automatically fits to the bounding box of the contours.
        transform: An optional 6-tuple containing an affine transformation,
            or a ``Transform`` object from the ``fontTools.misc.transform``
            module. The bitmap size is not affected by this matrix.
        contain: If ``True``, the image size will be automatically expanded
            so that it fits to the bounding box of the paths.
        evenOdd: Pass ``True`` for even-odd fill instead of non-zero.

    Returns:
        A ``numpy.ndarray`` object with a shape of ``(height, width)``.
        Each element takes a value in the range of ``[0.0, 1.0]``.
    """
    import numpy as np
    raw, (w, h) = self.buffer(width=width, height=height, transform=transform,
                              contain=contain, evenOdd=evenOdd)
    # 8-bit coverage values normalised to [0.0, 1.0], row-major (h, w).
    return np.frombuffer(raw, 'B').reshape((h, w)) / 255.0
def show(self, width=None, height=None, transform=None, contain=False, evenOdd=False):
    """Plots the rendered contours with `pyplot`. Requires `numpy` and
    `matplotlib`.

    Args:
        width: Image width of the bitmap in pixels. If omitted, it
            automatically fits to the bounding box of the contours.
        height: Image height of the bitmap in pixels. If omitted, it
            automatically fits to the bounding box of the contours.
        transform: An optional 6-tuple containing an affine transformation,
            or a ``Transform`` object from the ``fontTools.misc.transform``
            module. The bitmap size is not affected by this matrix.
        contain: If ``True``, the image size will be automatically expanded
            so that it fits to the bounding box of the paths.
        evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
    """
    from matplotlib import pyplot as plt
    rendered = self.array(width=width, height=height, transform=transform,
                          contain=contain, evenOdd=evenOdd)
    # Inverted grayscale so ink is dark on a light background.
    plt.imshow(rendered, cmap='gray_r', vmin=0, vmax=1)
    plt.show()
def image(self, width=None, height=None, transform=None, contain=False, evenOdd=False):
"""Returns the rendered contours as a PIL image. Requires `Pillow`.
Can be used to display a glyph image in Jupyter Notebook.
Args:
width: Image width of the bitmap in pixels. If omitted, it
automatically fits to the bounding box of the contours.
height: Image height of the bitmap in pixels. If omitted, it
automatically fits to the bounding box of the contours.
transform: A optional 6-tuple containing an affine transformation,
or a ``Transform`` object | |
"""[summary]
Database models module
[description]
The dbmodels defines models for the databases for i.e. table User and AccessLog. Later, corresponding databases will be created from these models in the init module.
"""
from datetime import datetime
from app import db
from app import app
from flask_restful import request
from flask import jsonify, abort, Response
import json
from werkzeug.security import generate_password_hash, check_password_hash
import os
from sqlalchemy import exc
import sqlite3
from random import *
import string
import logging
import csv
from string import Template
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import io
from datetime import datetime
from datetime import timedelta
import re # regular expression
# Constants
PASSWD_LEN = 256 # bits -- size of the stored password hash column
PASSWD_MIN_LEN = 8 # characters
PASSWD_MAX_LEN = 16 # characters
MAX_FAILS = 5
LOCKED = False
UNLOCKED = True
LOCK_DURATION = 6 # seconds
USER_ROLE = 0
ADMIN_ROLE = 2
USER_ROLE_LIST = ['user','admin']
# http codes
# Success
HTTP_CODE_OK = 200
# HTTP_CODE_CREATED = 201
# Clients's errors
HTTP_CODE_BAD_REQUEST = 400
HTTP_CODE_UNAUTHORIZED = 401
#HTTP_CODE_NOT_FOUND = 404
HTTP_CODE_LOCKED = 423
# Server error
HTTP_CODE_SERVER_ERR = 500
# Regular expression to ensure username should contain A-Z, a-z, 0-9, -, _, .
REG_EXP_USER_NAME = "^[a-zA-Z0-9_.-]+$" # A-Z, a-z, 0-9, -, _, .
REG_EXP_PASSWD = "^[a-zA-Z0-9]+$" # A-Z, a-z, 0-9
# "^(?=.*[A-Za-z])(?=.*\d)[A-Za-z\d]{3,}$" # Minimum 3 characters, at least one letter and one number
# "^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[$@$!%*?&])[A-Za-z\d$@$!%*?&]{8,}" # Minimum eight characters, at least one uppercase letter, one lowercase letter, one number and one special character
FROM_ADDRESS = app.config['MAIL_USERNAME']
# FIX: this line previously contained a broken "<PASSWORD>" placeholder;
# the mail password must be read from the Flask app config like the other settings.
MAIL_PWD = app.config['MAIL_PASSWORD']
SEND_MAIL_RESET_PWD = app.config['MAIL_SEND_RESET_PWD'] # Send mail whenever password is reset
# Load the message resource file (Code -> Message) into a lookup dict.
# Use a context manager so the file handle is closed instead of leaked.
msg_dict = {}
with open('resource.csv', 'r') as _resource_fh:
    reader = csv.DictReader(_resource_fh)
    for row in reader:
        msg_dict[row['Code']] = row['Message']
class User(db.Model):
    """SQLAlchemy model for the User table.

    Columns:
        id (Integer): primary key, auto-incremented by default.
        username (String(64)): unique, indexed login name.
        email (String(120)): indexed, optional; not unique because several
            users may share an empty email.
        password_hash (String(PASSWD_LEN)): salted hash of the password.
        role (Integer): one of USER_ROLE / ADMIN_ROLE; defaults to USER_ROLE.
        accesslog: one-to-many relationship to AccessLog rows for this user.
    """
    # Field: id. It is auto-increment by default
    id = db.Column(db.Integer, primary_key=True)
    # Field: username (unique login name)
    username = db.Column(db.String(64), index=True, unique=True)
    # Field: email
    email = db.Column(db.String(120), index=True) # Email is temporarily optional, it should not be unique because multiple users may all have empty emails. If it is required later, set 'unique=True'
    # Field: password_hash (never the plaintext password)
    password_hash = db.Column(db.String(PASSWD_LEN))
    # Field: user role
    role = db.Column(db.Integer, default = USER_ROLE)
    # define relationship with database model accesslog
    accesslog = db.relationship('AccessLog', backref='author', lazy='dynamic')
    def __repr__(self):
        return '<User {}>'.format(self.username)
    def __init__(self, username, password, email='', role=USER_ROLE): # change TO_ADDR to ''
        """Initialize a user row.

        Arguments:
            username: login name.
            password: ALREADY-HASHED password -- callers (see add_user) pass
                hash_passwd(...) here; it is stored directly in password_hash.
            email: optional email address (defaults to empty string).
            role: USER_ROLE or ADMIN_ROLE (defaults to USER_ROLE).
        """
        self.username = username
        self.password_hash = password
        self.email = email
        self.role = role
class AccessLog(db.Model):
    """SQLAlchemy model for the AccessLog table (login lockout bookkeeping).

    Columns:
        id (Integer): primary key.
        user_id (Integer): foreign key to User.id.
        lock_status (Boolean): UNLOCKED (True) or LOCKED (False); defaults
            to UNLOCKED.
        lock_start_time (DateTime): when the current lock began.
        no_fails (Integer): number of consecutive failed login attempts.
    """
    # Field: id
    id = db.Column(db.Integer, primary_key=True)
    # Field: user_id
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    # Field: start_time
    #start_time = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    # Field: lock_status
    lock_status = db.Column(db.Boolean, default = UNLOCKED)
    # Field: lock_start_time
    lock_start_time = db.Column(db.DateTime)
    # Field: no_fails as number of fails
    no_fails = db.Column(db.Integer, default = 0)
    def __repr__(self):
        # FIX: the previous implementation formatted self.body, but this model
        # has no 'body' attribute, so repr() raised AttributeError.
        return '<AccessLog user_id={}>'.format(self.user_id)
    def __init__(self, uid):
        """Create a log row for the given user with one recorded failure.

        Arguments:
            uid: id of the user this log row belongs to.
        """
        self.user_id = uid
        self.no_fails = 1
def hash_passwd(passwd):
    """Return a salted hash of *passwd*.

    Arguments:
        passwd: plaintext password to hash.

    Returns:
        The hash string produced by werkzeug's generate_password_hash
        (default method 'pbkdf2:sha256', default salt_length 8).
    """
    return generate_password_hash(passwd)
def generate_passwd():
    """Generate a random password.

    Draws characters from ASCII letters and digits; the length is picked
    uniformly between PASSWD_MIN_LEN and PASSWD_MAX_LEN (inclusive).

    Returns:
        The generated password string.
    """
    alphabet = string.ascii_letters + string.digits # + string.punctuation
    length = randint(PASSWD_MIN_LEN, PASSWD_MAX_LEN)
    return "".join(choice(alphabet) for _ in range(length))
def add_user(uname, passwd, email='', role=USER_ROLE):
    """Insert a new user row into the User table.

    Hashes the plaintext password before storing it. On any failure the
    session is rolled back and the exception is re-raised so the calling
    API handler (create_user_api) can translate it into an HTTP response.

    Arguments:
        uname: user name (assumed already validated by the caller).
        passwd: plaintext password; hashed here via hash_passwd().
        email: optional email address.
        role: USER_ROLE or ADMIN_ROLE.

    Raises:
        exc.IntegrityError: the username already exists (unique constraint).
        exc.SQLAlchemyError: any other database error.
        Exception: anything else; logged before re-raising.
    """
    try:
        # create new user; store the hash, never the plaintext password
        passwd_hash = hash_passwd(passwd)
        new_user = User(uname, passwd_hash, email, role)
        # add new user to database
        db.session.add(new_user)
        db.session.commit()
    # Catch the exception. NOTE: IntegrityError must be caught before
    # SQLAlchemyError since it is a subclass of it.
    except exc.IntegrityError as e: # existed user
        db.session.rollback()
        raise
    except exc.SQLAlchemyError as e:
        # Roll back any change if something goes wrong
        db.session.rollback()
        raise # Raise error again so that it will be caught in create_user_api()'''
    except Exception as e:
        # Roll back any change if something goes wrong
        db.session.rollback()
        app.logger.error(e)
        raise
    finally:
        # Close the db connection
        db.session.close()
def _json_response(data, status):
    """Serialize *data* to JSON and wrap it in a Response with *status*."""
    return Response(json.dumps(data), status=status, mimetype='application/json')

def create_user_api():
    """API handler: create a new user.

    Reads ``username``, ``password`` (optional; randomized when absent) and
    ``email`` (optional) from the request values. Validates username and
    password against REG_EXP_USER_NAME / REG_EXP_PASSWD, then delegates the
    insert to add_user().

    Returns:
        A JSON Response with keys 'code', 'user message' and
        'developer message' (the latter reveals an auto-generated password).
    """
    # Use request.values so parameters may come from either args or form.
    uname = request.values.get("username")
    passwd = request.values.get("password")
    email = request.values.get("email")
    if uname is None or uname == '':  # verify parameters
        return _json_response({
            'code' : HTTP_CODE_BAD_REQUEST,
            'user message' : msg_dict['lack_of_input'],
            'developer message' : msg_dict['lack_of_input']
        }, HTTP_CODE_BAD_REQUEST)
    if re.match(REG_EXP_USER_NAME, uname) is None:
        # Username may only contain A-Z, a-z, 0-9, -, _, .
        return _json_response({
            'code' : HTTP_CODE_BAD_REQUEST,
            'user message' : msg_dict['wrong_user_name_format'],
            'developer message' : msg_dict['wrong_user_name_format']
        }, HTTP_CODE_BAD_REQUEST)
    if email is None:
        email = ''
    msg_passwd = msg_dict['pwd_setby_user'] # Password is set by inputted value from the user
    if passwd is None:
        # No password supplied: generate one and report it to the caller.
        passwd = generate_passwd()
        msg_passwd = msg_dict['pwd_generated'] + passwd # Password is auto-generated. Its value is:
    if re.match(REG_EXP_PASSWD, passwd) is None:  # password must be alphanumeric
        return _json_response({
            'code' : HTTP_CODE_BAD_REQUEST,
            'user message' : msg_dict['wrong_password_rule'],
            'developer message' : msg_dict['wrong_password_rule']
        }, HTTP_CODE_BAD_REQUEST)
    # create a user
    app.logger.info(msg_dict['add_user_progress']) # Trying to add a user to database
    try:
        add_user(uname, passwd, email)
        return _json_response({
            'code' : HTTP_CODE_OK,
            'user message' : msg_dict['add_user_success'],
            'developer message' : msg_passwd
        }, HTTP_CODE_OK)
    except exc.IntegrityError as e: # existed user
        return _json_response({
            'code' : HTTP_CODE_BAD_REQUEST,
            'user message' : msg_dict['add_existed_user'],
            'developer message' : msg_dict['add_existed_user']
        }, HTTP_CODE_BAD_REQUEST)
    except exc.SQLAlchemyError as e:
        app.logger.error(e)
        abort(HTTP_CODE_SERVER_ERR, msg_dict['sqlalchemy_error']) # SQLAlchemyError
    except Exception as e:
        app.logger.error(e)
        abort(HTTP_CODE_BAD_REQUEST, msg_dict['error_undefined'])
def reset_passwd_api():
    """API handler: reset a user's password to a freshly generated one.

    Intended to be called by an administrator only. Reads ``username`` from
    the request, stores a new password hash, optionally emails the new
    password to the user (when SEND_MAIL_RESET_PWD is enabled), and returns
    the new plaintext password in the JSON response.

    Returns:
        A JSON Response with 'code', 'developer message' and, on success,
        'new password'.
    """
    uname = request.values.get("username")
    if uname is None or uname == '':  # Verify parameters
        data = {
            'code' : HTTP_CODE_BAD_REQUEST,
            'user message' : msg_dict['lack_of_input'],
            'result' : msg_dict['lack_of_input'] # Lack of user name or password
        }
        js = json.dumps(data)
        resp = Response(js, status=HTTP_CODE_BAD_REQUEST, mimetype='application/json')
        return resp
    app.logger.info(msg_dict['reset_pwd_progress']) # Reset password of user
    try:
        user = db.session.query(User.email).filter_by(username=uname).first()
        if user is None:
            data = {
                'code' : HTTP_CODE_BAD_REQUEST,
                'developer message' : msg_dict['uname_notexist'], # User name does not exist
            }
            js = json.dumps(data)
            resp = Response(js, status=HTTP_CODE_BAD_REQUEST, mimetype='application/json')
        else:
            passwd = generate_passwd()
            # Use the module's own hashing helper for consistency with add_user().
            passwd_hash = hash_passwd(passwd)
            db.session.query(User).filter_by(username=uname).update({User.password_hash: passwd_hash})
            db.session.commit()
            data = {
                'code' : HTTP_CODE_OK,
                'developer message' : msg_dict['reset_pwd_success'], # Reset password successfully
                # FIX: this field previously contained a broken "<PASSWORD>"
                # placeholder; return the newly generated plaintext password.
                'new password' : passwd
            }
            js = json.dumps(data)
            resp = Response(js, status=HTTP_CODE_OK, mimetype='application/json')
            if SEND_MAIL_RESET_PWD == True:
                to_addr = user[0]
                # Mail delivery is best-effort: a failure must not undo the reset.
                try:
                    if to_addr != '':  # send email only if an address exists
                        send_reset_pwd_mail(uname, passwd, FROM_ADDRESS, to_addr, 'template/reset_pwd_mail.html')
                except Exception as e:
                    app.logger.error(e)
        return resp
    except exc.SQLAlchemyError as e:
        db.session.rollback()
        app.logger.error(e)
        abort(HTTP_CODE_SERVER_ERR, msg_dict['sqlalchemy_error'])
    except Exception as e:
        db.session.rollback()
        app.logger.error(e)
        abort(HTTP_CODE_SERVER_ERR, msg_dict['error_undefined'])
    finally:
        db.session.close()
# API to verify a user
def verify_user_api():
"""[summary]
This function is for verifying a user.
[description]
The function retrieves the user name and password from the request, then check if they exists in the database or not.
Returns:
[type: json] -- [description: code, message for the user, authentication result]
"""
try:
uname = request.values.get("username")
passwd = request.values.get("password")
if(uname==None or passwd==None or uname=='' or passwd==''): # Verify parameters
data = {
'code' : HTTP_CODE_BAD_REQUEST,
'user message' : msg_dict['lack_of_input'],
'result' : msg_dict['lack_of_input'] # Lack of user name or password
}
js = json.dumps(data)
resp = Response(js, status=HTTP_CODE_BAD_REQUEST, mimetype='application/json')
return resp
# Check password
uid_pwd = db.session.query(User.id, User.password_hash, User.email, User.role).filter_by(username=uname).first()
# If there does not exist the inputted user name 'uname'
if(uid_pwd == None):
data = {
'code' : HTTP_CODE_UNAUTHORIZED,
'user message' : msg_dict['uname_pwd_wrong'],
'result' : msg_dict['uname_notexist'] # User name does not exist
}
else: # If the user exists
# Check lock status
uid = uid_pwd[0]
latest_log = db.session.query(AccessLog.lock_status, AccessLog.no_fails, AccessLog.lock_start_time).filter_by(user_id=uid).first()
if(latest_log == None): # if there's no log
lock_status = 'Not locked'
number_fails = 0
else:
if (latest_log[0] == UNLOCKED): # check lock status
lock_status = 'Not locked'
else:
lock_status = 'Locked'
number_fails = latest_log[1]
# If user is locked, check if time | |
# <filename>auto/src/filter.py<gh_stars>10-100  (scraper metadata, commented out: not valid Python)
#!/usr/bin/env python
import rospy
import roslib
import tf
import sys
import math
import numpy as np
import struct
import copy
from visualization_msgs.msg import Marker
from custom_msgs.msg import WorldObject
from custom_msgs.msg import ObjectList
from rtabmap_ros.msg import MapGraph
from filtered_instances import FilteredInstances
# Param namespace: hardcoded here in order to use a single param file, otherwise would need 2 yaml param files.
param_ns = "/object_positioner"
# Classes param [in]
classes_param = param_ns + "/classes"
# TOPICS [in]
object_list_topic_raw = '/objects_raw/list'
graph_list = '/rtabmap/mapGraph'
# TOPICS [out]
objects_topic_filtered = '/objects_filtered'
# Perform graph node update (subscribe to the SLAM graph and re-anchor filters)
doGraphUpdate = False
# Debug printing switches
printCovariances = False
printPositions = False
printMeanError = True
# FILTER tuning passed to every FilteredInstances tracker
process_cov = 0.3
meas_cov = 5
# minimum observations before an instance is published (see main loop)
min_obs = 6.0
# Per-class FilteredInstances trackers; populated in main().
doors = None
benches = None
trashes = None
fires = None
waters = None
# Association threshold: per-class radius used to match a new detection to an
# existing instance (presumably in meters, map frame -- TODO confirm).
door_radius = 3
bench_radius = 4
trash_radius = 2.5
fire_radius = 2.3
water_radius = 2.3
# ground truths
# DCC dataset
door_gt = [(-3.668, 4.993), (-2.594, 4.192), (-0.181, -5.362), (-1.544, -3.578), (1.686, -3.738), (0.711, -5.175), (-0.579, -2.481), (11.019, 9.995), (11.988, 11.240), (17.875, 20.793), (17.020, 19.669), (24.580, 29.927), (22.345, 26.843), (21.484, 25.707), (32.509, 37.543), (31.188, 39.274), (30.447, 39.114), (19.669, 40.091) , (20.922, 39.260)]
bench_gt = [(13.155, 15.224)]
fire_gt = [(-12.526, 10.987), (-13.137, 10.125), (1.587, 0.750), (5.818, -2.313), (9.383, 9.687), (19.569, 23.267), (27.030, 34.989), (28.657, 31.067), (18.904, 40.735), (-1.096, 3.207)]
water_gt = [(3.291, -4.051),(3.283, -3.955),(32.363, 34.662),(31.863, 34.990)]
trash_gt = [(-0.965, -4.654), (15.345, 18.127), (29.407, 37.500)]
'''
# 7th floor dataset
door_gt = [(-2.366, 5.426), (-0.315, 5.599), (-2.514, 9.147), (-2.595, 10.372), (-0.543, 9.169), (-0.517, 10.415) , (-2.667, 14.286), (-2.761, 15.512) , (-0.733, 14.223), (-0.718, 15.558), (-6.053, 27.606), (-5.518, 28.116), (-2.637, -25.815), (-2.707, -24.231), (-3.355, 27.537), (-0.822, -15.9), (-0.931, -14.628), (1.095, -14.492) , (1.199, -15.694), (0.798, -9.588), (0.901, -10.769) , (-1.125, -10.927), (-1.171, -9.644) , (0.638, -5.836), (-1.41, -5.938), (-1.521, -4.765), (0.579, -4.601), (-2.914, 18.011), (-0.813, 18.077) ]
bench_gt = []
fire_gt = [(-0.906, 17.208), (-1.803, 22.333), (-1.385, -5.353), (-3.248, -20.58), (2.117, -20.233) ]
water_gt = [(-2.387, 25.07), (-3.059, 25.191), (2.762, -22.522), (0.364, -23.018), (1.117, -22.965) ]
trash_gt = [(-0.983, 20.499), (-5.628, 25.476) , (-1.979, -18.521), (-0.533, -20.988) ]
'''
# Debug
markers_topic = '/markers'
def object_list_callback(object_list):
    """Bucket raw detections by class and feed each tracker its measurements.

    Detections with an unrecognized objClass are silently ignored; every
    tracker always receives a list (possibly empty).
    """
    buckets = {'door': [], 'bench': [], 'trash': [], 'fire': [], 'water': []}
    for obj in object_list.objects:
        measurements = buckets.get(obj.objClass)
        if measurements is not None:
            measurements.append((obj.x, obj.y, obj.angle))
    doors.addMeasurementList(buckets['door'])
    benches.addMeasurementList(buckets['bench'])
    trashes.addMeasurementList(buckets['trash'])
    fires.addMeasurementList(buckets['fire'])
    waters.addMeasurementList(buckets['water'])
def graph_list_callback(graph_list):
    """Propagate the updated SLAM graph poses to every tracker."""
    global doors, benches, trashes, fires, waters
    for tracker in (doors, benches, trashes, fires, waters):
        tracker.updateGraphList(graph_list.poses, graph_list.posesId)
def getTextMarker(label, x, y, height, namespace, id, frame, size, R, G, B, lifeTime):
    """Build a TEXT_VIEW_FACING RViz marker showing *label* at (x, y, height)."""
    m = Marker()
    # Header: target frame and current time
    m.header.frame_id = frame
    m.header.stamp = rospy.Time.now()
    # Identity within RViz: labels get their own "<namespace>_label" namespace
    m.ns = namespace + "_label"
    m.id = id
    m.type = Marker.TEXT_VIEW_FACING
    m.action = Marker.ADD
    m.text = label
    # For text markers only scale.z (glyph height) is meaningful
    m.scale.z = size
    # Placement
    m.pose.position.x = x
    m.pose.position.y = y
    m.pose.position.z = height
    # Fully opaque color
    m.color.r = R
    m.color.g = G
    m.color.b = B
    m.color.a = 1.0
    m.lifetime = rospy.Duration(lifeTime)
    return m
def getMarker(x, y, z, angle, namespace, id, frame, size_x=0.4, size_y=0.4, size_z=0.4, R=1.0, G=0.0, B=0.0, lifeTime=5.0):
    """Build a CUBE RViz marker at (x, y, z), yawed by *angle* around the z-axis."""
    m = Marker()
    # Header: target frame and current time
    m.header.frame_id = frame
    m.header.stamp = rospy.Time.now()
    # Identity within RViz
    m.ns = namespace
    m.id = id
    m.type = Marker.CUBE
    m.action = Marker.ADD
    # Box dimensions
    m.scale.x = size_x
    m.scale.y = size_y
    m.scale.z = size_z
    # Pose: position plus yaw-only orientation
    m.pose.position.x = x
    m.pose.position.y = y
    m.pose.position.z = z
    qx, qy, qz, qw = tf.transformations.quaternion_from_euler(0, 0, angle)
    m.pose.orientation.x = qx
    m.pose.orientation.y = qy
    m.pose.orientation.z = qz
    m.pose.orientation.w = qw
    # Fully opaque color
    m.color.r = R
    m.color.g = G
    m.color.b = B
    m.color.a = 1.0
    m.lifetime = rospy.Duration(lifeTime)
    return m
def getMarkerArrow(x, y, angle, namespace, id, frame, size=0.4, R=1.0, G=0.0, B=0.0, lifeTime=5.0):
    """Build an ARROW RViz marker at (x, y, 0.6) pointing along yaw *angle*."""
    m = Marker()
    # Header: target frame and current time
    m.header.frame_id = frame
    m.header.stamp = rospy.Time.now()
    # Identity within RViz
    m.ns = namespace
    m.id = id
    m.type = Marker.ARROW
    m.action = Marker.ADD
    # Pose: fixed display height, yaw-only orientation
    m.pose.position.x = x
    m.pose.position.y = y
    m.pose.position.z = 0.6
    qx, qy, qz, qw = tf.transformations.quaternion_from_euler(0, 0, angle)
    m.pose.orientation.x = qx
    m.pose.orientation.y = qy
    m.pose.orientation.z = qz
    m.pose.orientation.w = qw
    # Arrow length along x; thin shaft/head
    m.scale.x = size
    m.scale.y = 0.05
    m.scale.z = 0.05
    # Fully opaque color
    m.color.r = R
    m.color.g = G
    m.color.b = B
    m.color.a = 1.0
    m.lifetime = rospy.Duration(lifeTime)
    return m
def main(args):
global object_list_topic_raw, object_list_topic_raw, process_cov, meas_cov, doGraphUpdate
global doors, benches, trashes, fires, waters
global door_radius, bench_radius, trash_radius, fire_radius, water_radius
global door_gt, bench_gt, trash_gt, fire_gt, water_gt
global min_obs
# Initialize node
rospy.init_node('object_marker', anonymous=True)
rate = rospy.Rate(5)
rospy.Subscriber(object_list_topic_raw, ObjectList, object_list_callback)
if doGraphUpdate:
rospy.Subscriber(graph_list, MapGraph, graph_list_callback)
obj_pub = rospy.Publisher(objects_topic_filtered, WorldObject, queue_size=10)
marker_pub = rospy.Publisher(markers_topic, Marker, queue_size=10)
# Object instance lists
doors = FilteredInstances('door', door_radius, process_cov, meas_cov, min_obs, door_gt)
benches = FilteredInstances('bench', bench_radius, process_cov, meas_cov, min_obs, bench_gt)
trashes = FilteredInstances('trash bin', trash_radius, process_cov, meas_cov, min_obs, trash_gt)
fires = FilteredInstances('fire extinguisher', fire_radius, process_cov, meas_cov, min_obs, fire_gt)
waters = FilteredInstances('water fountain', water_radius, process_cov, meas_cov, min_obs, water_gt)
while not rospy.is_shutdown():
life_time = 0
# Publish doors
for i in range(len(doors.instances)):
pred = doors.predictions[i]
obj_filtered = WorldObject()
obj_filtered.objClass = 'door'
obj_filtered.x = pred[0]
obj_filtered.y = pred[1]
obj_filtered.angle = doors.angles[i]
obj_filtered.prob = float(i)
if doors.observations[i] > min_obs:
obj_pub.publish(obj_filtered)
class_name = 'door'
height = 0.9
frame = 'map'
size_x = 0.05
size_y = 0.6
size_z = 1.8
R = 0.0
G = 1.0
B = 0.0
# Publish marker
marker = getMarker(obj_filtered.x, obj_filtered.y, height, obj_filtered.angle, class_name, i, frame, size_x, size_y, size_z, R, G, B, life_time)
marker_pub.publish(marker)
text = getTextMarker(class_name, obj_filtered.x, obj_filtered.y, height+size_z/2.0 + 0.5, class_name, i, frame, 0.3, R, G, B, life_time)
marker_pub.publish(text)
# Publish benches
for i in range(len(benches.instances)):
pred = benches.predictions[i]
obj_filtered = WorldObject()
obj_filtered.objClass = 'bench'
obj_filtered.x = pred[0]
obj_filtered.y = pred[1]
obj_filtered.angle = benches.angles[i]
obj_filtered.prob = float(i)
if benches.observations[i] > min_obs:
obj_pub.publish(obj_filtered)
class_name = 'bench'
height = 0.4
frame = 'map'
size_x = 0.4
size_y = 0.4
size_z = 0.5
R = 0.0
G = 0.1
B = 0.8
# Publish marker
marker = getMarker(obj_filtered.x, obj_filtered.y, height, obj_filtered.angle, class_name, i, frame, size_x, size_y, size_z, R, G, B, life_time)
marker_pub.publish(marker)
text = getTextMarker(class_name, obj_filtered.x, obj_filtered.y, height+size_z/2.0 + 0.5, class_name, i, frame, 0.3, R, G, B, life_time)
marker_pub.publish(text)
# Publish trashes
for i in range(len(trashes.instances)):
pred = trashes.predictions[i]
obj_filtered = WorldObject()
obj_filtered.objClass = 'trash'
obj_filtered.x = pred[0]
obj_filtered.y = pred[1]
obj_filtered.angle = trashes.angles[i]
obj_filtered.prob = float(i)
if trashes.observations[i] > min_obs:
obj_pub.publish(obj_filtered)
class_name = 'trash'
height = 0.21
frame = 'map'
size_x = 0.25
size_y = 0.25
size_z = 0.4
R = 0.8
G = 0.7
B = 0.1
# Publish marker
marker = getMarker(obj_filtered.x, obj_filtered.y, height, obj_filtered.angle, class_name, i, frame, size_x, size_y, size_z, R, G, B, life_time)
marker_pub.publish(marker)
text = getTextMarker('trash bin', obj_filtered.x, obj_filtered.y, height+size_z/2.0 + 0.5, class_name, i, frame, 0.3, R, G, B, life_time)
marker_pub.publish(text)
# Publish fires
for i in range(len(fires.instances)):
pred = fires.predictions[i]
obj_filtered = WorldObject()
obj_filtered.objClass = 'fire'
obj_filtered.x = pred[0]
obj_filtered.y = pred[1]
obj_filtered.angle = fires.angles[i]
obj_filtered.prob = float(i)
if fires.observations[i] > min_obs:
obj_pub.publish(obj_filtered)
class_name = 'fire'
height = 1.4
frame = 'map'
size_x = 0.3
size_y = 0.3
size_z = 0.4
R = 1.0
G = 0.1
B = 0.1
# Publish marker
marker = getMarker(obj_filtered.x, obj_filtered.y, height, obj_filtered.angle, class_name, i, frame, size_x, size_y, size_z, R, G, B, life_time)
marker_pub.publish(marker)
text = getTextMarker('fire extinguisher', obj_filtered.x, obj_filtered.y, height+size_z/2.0 + 0.5, class_name, i, frame, 0.3, R, G, B, life_time)
marker_pub.publish(text)
# Publish waters
for i in range(len(waters.instances)):
pred = waters.predictions[i]
obj_filtered = WorldObject()
obj_filtered.objClass = 'water'
obj_filtered.x = pred[0]
obj_filtered.y = pred[1]
obj_filtered.angle = waters.angles[i]
obj_filtered.prob = float(i)
if waters.observations[i] > min_obs:
obj_pub.publish(obj_filtered)
class_name = 'water'
height = 1.0
frame = 'map'
size_x = 0.3
size_y = 0.3
size_z = 0.5
R = 0.4
G = 0.8
B = 1.0
# Publish marker
marker = getMarker(obj_filtered.x, obj_filtered.y, height, obj_filtered.angle, class_name, i, frame, size_x, size_y, size_z, R, G, B, life_time)
marker_pub.publish(marker)
text = getTextMarker('water fountain', obj_filtered.x, obj_filtered.y, height+size_z/2.0 + 0.5, class_name, i, frame, 0.3, R, G, B, life_time)
| |
# <gh_stars>0  (scraper metadata, commented out: not valid Python)
import discord
from discord.ext import commands, tasks
import aiosqlite
import random
import time
from datetime import datetime
from cogs.mute import Mute
from utils.ids import TGChannelIDs, AdminVars
import utils.check
class Warn(commands.Cog):
"""
Contains our custom warning system.
"""
def __init__(self, bot):
    # Keep a reference to the bot and start the warnloop background task
    # (the tasks.loop itself is defined elsewhere in this cog).
    self.bot = bot
    self.warnloop.start()
def cog_unload(self):
    # Stop the background task when the cog is unloaded.
    self.warnloop.cancel()
async def add_warn(self, author: discord.Member, member: discord.Member, reason):
    """Persist a new warning and mirror it into the infraction-logs channel."""
    # Random 6-digit identifier; collisions are considered unlikely at this scale.
    warn_id = random.randint(100000, 999999)
    warndate = time.strftime("%A, %B %d %Y @ %H:%M:%S %p")
    row = {
        "user_id": member.id,
        "warn_id": warn_id,
        "mod_id": author.id,
        "reason": reason,
        "timestamp": warndate,
    }
    async with aiosqlite.connect("./db/database.db") as db:
        await db.execute(
            """INSERT INTO warnings VALUES (:user_id, :warn_id, :mod_id, :reason, :timestamp)""",
            row,
        )
        await db.commit()
    # Announce the warning in the infraction log channel.
    embed = discord.Embed(title="⚠️New Warning⚠️", color=discord.Color.dark_red())
    embed.add_field(name="Warned User", value=member.mention, inline=True)
    embed.add_field(name="Moderator", value=author.mention, inline=True)
    embed.add_field(name="Reason", value=reason, inline=True)
    embed.add_field(name="ID", value=warn_id, inline=True)
    embed.timestamp = discord.utils.utcnow()
    channel = self.bot.get_channel(TGChannelIDs.INFRACTION_LOGS)
    await channel.send(embed=embed)
async def check_warn_count(
    self, guild: discord.Guild, channel: discord.TextChannel, member: discord.Member
):
    """Escalate based on the member's total warning count.

    Thresholds (checked highest first):
        >6 warnings: ban, >4: kick, >2: indefinite mute.
    The member is DM'd before the ban/kick is executed, because a DM can no
    longer be delivered once they have left the guild; DM failures are only
    logged, never fatal.
    """
    async with aiosqlite.connect("./db/database.db") as db:
        user_warnings = await db.execute_fetchall(
            """SELECT * FROM warnings WHERE user_id = :user_id""",
            {"user_id": member.id},
        )
    warns = len(user_warnings)
    if warns > 6:
        # DM first: after the ban the member can no longer be messaged.
        try:
            await member.send(
                f"You have been automatically banned from the {guild.name} Server for reaching warning #***{warns}***.\n"
                f"Please contact {AdminVars.GROUNDS_KEEPER} for an appeal.\n{AdminVars.BAN_RECORDS}"
            )
        except discord.HTTPException as exc:
            logger = self.bot.get_logger("bot.warn")
            logger.warning(
                f"Tried to message automatic ban reason to {str(member)}, but it failed: {exc}"
            )
        await channel.send(
            f"{member.mention} has reached warning #{warns}. They have been automatically banned."
        )
        await member.ban(reason=f"Automatic ban for reaching {warns} warnings")
    elif warns > 4:
        # DM first: after the kick the member can no longer be messaged.
        try:
            await member.send(
                f"You have been automatically kicked from the {guild.name} Server for reaching warning #***{warns}***.\n"
                f"If you would like to discuss your punishment, please contact {AdminVars.GROUNDS_GENERALS}."
            )
        except discord.HTTPException as exc:
            logger = self.bot.get_logger("bot.warn")
            logger.warning(
                f"Tried to message automatic kick reason to {str(member)}, but it failed: {exc}"
            )
        await channel.send(
            f"{member.mention} has reached warning #{warns}. They have been automatically kicked."
        )
        await member.kick(reason=f"Automatic kick for reaching {warns} warnings")
    elif warns > 2:
        # Mute keeps the member in the guild, so the DM can follow the action.
        await Mute.add_mute(self, member)
        await channel.send(
            f"{member.mention} has reached warning #{warns}. They have been automatically muted."
        )
        try:
            await member.send(
                f"You have been automatically muted in the {guild.name} Server for reaching warning #***{warns}***.\n"
                f"If you would like to discuss your punishment, please contact {AdminVars.GROUNDS_GENERALS}."
            )
        except discord.HTTPException as exc:
            logger = self.bot.get_logger("bot.warn")
            logger.warning(
                f"Tried to message automatic mute reason to {str(member)}, but it failed: {exc}"
            )
@commands.command()
@utils.check.is_moderator()
async def warn(self, ctx, member: discord.Member, *, reason):
    """Warns a user: stores the warning, DMs them, and runs escalation checks."""
    # Bots cannot accumulate warnings.
    if member.bot:
        await ctx.send("You can't warn bots, silly.")
        return
    await self.add_warn(ctx.author, member, reason)
    # Best-effort DM; closed DMs must not abort the command.
    dm_text = (
        f"You have been warned in the {ctx.guild.name} Server for the following reason: \n"
        f"```{reason}```\n"
        f"If you would like to discuss your punishment, please contact {AdminVars.GROUNDS_GENERALS}."
    )
    try:
        await member.send(dm_text)
    except discord.HTTPException as exc:
        self.bot.get_logger("bot.warn").warning(
            f"Tried to message warn reason to {str(member)}, but it failed: {exc}"
        )
    await ctx.send(f"{member.mention} has been warned!")
    # Escalate (mute/kick/ban) if a threshold was crossed.
    await self.check_warn_count(ctx.guild, ctx.channel, member)
@commands.command(aliases=["warnings", "infractions"])
async def warns(self, ctx, member: discord.Member = None):
    """Reports the warning count of a member, or of the caller if omitted."""
    target = ctx.author if member is None else member
    async with aiosqlite.connect("./db/database.db") as db:
        rows = await db.execute_fetchall(
            """SELECT * FROM warnings WHERE user_id = :user_id""",
            {"user_id": target.id},
        )
    count = len(rows)
    if count == 0:
        await ctx.send(f"{target.mention} doesn't have any warnings (yet).")
    else:
        await ctx.send(f"{target.mention} has {count} warning(s).")
@commands.command()
@utils.check.is_moderator()
async def clearwarns(self, ctx, member: discord.Member):
    """Wipes every stored warning for the given member from the database."""
    async with aiosqlite.connect("./db/database.db") as db:
        # Delete all rows for this user in one statement.
        await db.execute(
            """DELETE FROM warnings WHERE user_id = :user_id""",
            {"user_id": member.id},
        )
        await db.commit()
    await ctx.send(f"Cleared all warnings for {member.mention}.")
@commands.command()
@utils.check.is_moderator()
async def warndetails(self, ctx, member: discord.Member):
    """Lists every warning of a member as one embed per warning."""
    async with aiosqlite.connect("./db/database.db") as db:
        user_warnings = await db.execute_fetchall(
            """SELECT * FROM warnings WHERE user_id = :user_id""",
            {"user_id": member.id},
        )
    if not user_warnings:
        await ctx.send(f"{member.mention} doesn't have any active warnings (yet).")
        return
    embed_list = []
    # Rows are (user_id, warn_id, mod_id, reason, timestamp); user_id unused here.
    for i, (_, warn_id, mod_id, reason, timestamp) in enumerate(user_warnings, start=1):
        given_at = datetime.strptime(timestamp, "%A, %B %d %Y @ %H:%M:%S %p")
        embed = discord.Embed(title=f"Warning #{i}", colour=discord.Colour.red())
        embed.add_field(name="Moderator: ", value=f"<@{mod_id}>")
        embed.add_field(name="Reason: ", value=f"{reason}")
        embed.add_field(name="ID:", value=f"{warn_id}")
        embed.add_field(
            name="Warning given out at:",
            value=discord.utils.format_dt(given_at, style="F"),
        )
        embed_list.append(embed)
    # Discord caps a message at 10 embeds; bans land at 7 warnings,
    # but guard against the overflow case anyway.
    try:
        await ctx.send(
            f"Active warnings for {member.mention}: {len(user_warnings)}",
            embeds=embed_list,
        )
    except discord.HTTPException:
        await ctx.send(
            f"Active warnings for {member.mention}: {len(user_warnings)}\nCannot list warnings for this user!"
        )
@commands.command()
@utils.check.is_moderator()
async def deletewarn(self, ctx, member: discord.Member, warn_id):
"""
Deletes a specific warning of a user, by the randomly generated warning ID.
Use warndetails to see these warning IDs.
"""
async with aiosqlite.connect("./db/database.db") as db:
warning = await db.execute_fetchall(
"""SELECT * FROM warnings WHERE user_id = :user_id AND warn_id = :warn_id""",
{"user_id": member.id, "warn_id": warn_id},
)
if len(warning) == 0:
await ctx.send(
f"I couldnt find a warning with the ID {warn_id} for {member.mention}."
)
return
await db.execute(
"""DELETE FROM warnings WHERE user_id = :user_id AND warn_id = :warn_id""",
{"user_id": member.id, "warn_id": warn_id},
)
await db.commit()
await ctx.send(f"Deleted warning {warn_id} for {member.mention}")
# basic error handling for the above
@warn.error
async def warn_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("You need to specify a member and a reason!")
elif isinstance(error, commands.MemberNotFound):
await ctx.send("You need to mention a member!")
elif isinstance(error, commands.MissingPermissions):
await ctx.send("Nice try, but you don't have the permissions to do that!")
else:
raise error
@warns.error
async def warns_error(self, ctx, error):
if isinstance(error, commands.MemberNotFound):
await ctx.send("You need to mention a member, or just leave it blank.")
else:
raise error
@clearwarns.error
async def clearwarns_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("You need to mention a member!")
elif isinstance(error, commands.MissingPermissions):
await ctx.send("Nice try, but you don't have the permissions to do that!")
else:
raise error
@deletewarn.error
async def deletewarn_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("You need to mention a member and specify a warn_id.")
elif isinstance(error, commands.MemberNotFound):
await ctx.send("You need to mention a valid member.")
elif isinstance(error, commands.MissingPermissions):
await ctx.send("Nice try, but you don't have the permissions to do that!")
else:
raise error
@warndetails.error
async def warndetails_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("You need to mention a valid member.")
elif isinstance(error, commands.MemberNotFound):
await ctx.send("You need to mention a valid member.")
elif isinstance(error, commands.MissingPermissions):
await ctx.send("Nice try, but you don't have the permissions to do that!")
else:
raise error
    @tasks.loop(hours=24)
    async def warnloop(self):
        """
        This here checks if a warning is older than 30 days and has expired,
        if that is the case, deletes the expired warnings.

        Runs once every 24 hours via the discord.ext.tasks loop.
        """
        logger = self.bot.get_logger("bot.warn")
        async with aiosqlite.connect("./db/database.db") as db:
            every_warning = await db.execute_fetchall("""SELECT * FROM warnings""")
            # we check for every warning if it is older than 30 days
            for warning in every_warning:
                # the underscores are mod_id and reason
                (user_id, warn_id, _, _, timestamp) = warning
                # NOTE(review): the stored format combines %H (24-hour) with
                # %p (AM/PM); strptime accepts this but %p has no effect
                # alongside %H - confirm it matches how timestamps are written.
                # Also assumes timestamps were stored as naive UTC, since they
                # are compared against datetime.utcnow().
                timediff = datetime.utcnow() - datetime.strptime(
                    timestamp, "%A, %B %d %Y @ %H:%M:%S %p"
                )
                if timediff.days > 29:
                    # user id and warn id should be enough to identify each warning (hopefully)
                    await db.execute(
                        """DELETE FROM warnings WHERE user_id = :user_id AND warn_id = :warn_id""",
                        {"user_id": user_id, "warn_id": warn_id},
                    )
                    logger.info(
                        f"Deleted Warning #{warn_id} for user {user_id} after 30 days."
                    )
            # a single commit after the loop persists all deletions at once
            await db.commit()
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The following is an example of a test suite.
A test suite contains one or more test cases that are recognized by pytest,
functions whose name begins with test_.
"""
from time import sleep
from re import search
# SZN string describing the topology shared by every test case in this suite;
# it is parsed by the topology fixture. The content below is runtime data.
TOPOLOGY = """
# This string is known as a SZN string, it describes the topology that is to be
# used by every test case in this suite.
# Lines that begin with a # sign (as this one) are comments and are ignored by
# the SZN parser.
#
# This is a diagram of the topology, it consists of two hosts that are
# connected to a switch:
#
# +-------+ +-------+
# | | +--------+ | |
# | hs1 <-----> ops1 <-----> hs2 |
# | | +--------+ | |
# +-------+ +-------+
#
# Every element in the topology is known as a node. The node type defines its
# behavior and capabilities.
#
# The available node types depend on the platform engine that is running the
# topology, for example, the topology_docker platform engine supports "host"
# and "openswitch" types among others.
#
# Please consult the documentation of the platform engine for available node
# types.
#
# Nodes are defined like this:
#
# [attribute_0=value_0, attribute_1=value_1, ...] node_identifier
#
# At least, the attribute list should contain the "type" attribute. The
# attribute types are dependent on the platform engine too, please consult its
# documentation for available attributes.
#
# Here an openswitch node is defined:
#
[type=openswitch name="OpenSwitch 1"] ops1
#
# And now, the two hosts are defined too:
#
[type=host name="Host 1"] hs1
[type=host name="Host 2"] hs2
#
# Nodes are connected together using links, which are defined like this:
#
# node_0_identifier:port_label -- node_1_identifier:port_label
#
# Please be advised that the value that exists in "port_label" is not
# necesarily the exact port identifier that the node will be using when
# the test case is executed.
#
# Is the responsibility of the platform engine to decide which exact port will
# be used to create the link. Because of this, the value of "port_label" may be
# any string.
#
hs1:port1 -- ops1:port6
ops1:port3 -- hs2:port2
"""
# Here is a test case. This particular one receives 2 pytest fixtures:
# 1. topology
# 2. step
# A pytest fixture is a function that defines what is to be executed at the
# beginning of a test case (and possibly also at the end of the test case too).
#
# The topology fixture is provided by the Topology Modular Framework and it
# takes care of creating and destroying the topology, so the test engineer does
# not have to worry about adding code that does this in the test case.
#
# This fixture has a scope that affects the whole suite, so the topology is
# created at the beginning of the first test case in the test suite and
# destroyed at the end of the last test case of the suite.
#
# The test cases in this repo are executed by pytest in a random order, so keep
# that in mind when writing your test cases. Do not assume that the state of a
# device at the end of a test case will be the same at the beginning of
# another. Keep your test cases atomic by putting together in the same test
# case all the code that is to be executed in a certain order.
# The step fixture provides a function that allows the test engineer to inject
# comments in the test execution logs. The execution logs can be found in
# .tox/py34/tmp/tests.xml.
#
# The step-provided function is useful to introduce parts of the test case that
# have a logical meaning for the engineer, for example, to show that the
# following lines configure the nodes, it could be used like this:
#
# ...
# step('Configuring the switch for VLAN usage.')
# ...
def test_vlan(topology, step):
"""
The documentation for the test case goes here. The following lines are an
example:
Test that a VLAN configuration is functional with an OpenSwitch switch.
Build a topology of one switch and two hosts and connect the hosts to the
switch. Setup a VLAN for the ports connected to the hosts and ping from
host 1 to host 2. Check that the ping and its reply were sent and received
correctly.
"""
# The topology fixture allows the engineer to create Python objects that
# represent the topology devices. Use its get method with a string with the
# node identifier as the parameter:
ops1 = topology.get('ops1')
hs1 = topology.get('hs1')
hs2 = topology.get('hs2')
# Every node object has an attribute named ports, a dictionary whose keys
# are the port labels defined in the test case, its values are the actual
# port labels defined by the platform engine for that specific node.
p3 = ops1.ports['port3']
p6 = ops1.ports['port6']
# Here, step is being used to mark a logical section of the test case,
# adding configuration for switch interfaces.
step('Configuring switch interfaces.')
# To execute a command in the node, you can use this syntax:
#
# node_identifier('command', shell='name_of_the_shell')
#
# Shells are Python objects that encapsulate a way of communicating with a
# node. Each node has a default shell and at least one shell. The default
# shell will be used if the "shell" argument is not specified in the call.
# For example, the following commands are equivalent:
#
# node_identifier('command', shell='name_of_the_default_shell')
# node_identifier('command')
#
# Shells are defined for every node type, please consult their
# documentation for available shells.
# Even when any command can be sent to a node using the previous syntax,
# libraries are the preferred way of doing so because they offer several
# advantages, for example:
# 1. Automating command-related tasks (as switching contexts, for example).
# 2. Handling command output.
# 3. Checking for command correct execution.
# A library is a set of functions that call commands of a certain command
# family. Since each library is tailored for a single command family,
# each one has its own characteristics and architecture.
# Here the library for OpenSwitch vtysh is being used. vtysh has contexts,
# commands are supposed to return output and commands that only return
# some output if something wrong or unexpected happens when they are
# called. The vtysh library handles all these 3 situations automatically so
# they don't have to be handled manually in the test case.
    # In this library, contexts are handled by using with, like in this line:
with ops1.libs.vtysh.ConfigInterface('port3') as ctx:
# This will take care of sending the necessary commands to enter the
# relevant context to configure the 'port3' interface. This library
# also takes care of using the actual port that matches the port3
# label. Once that line is executed, the ctx object is created, an
# instance of the ConfigInterface class. This class has methods that
# match the methods available in the corresponding vtysh context, two
# of them are these ones:
ctx.no_routing()
ctx.no_shutdown()
# ctx.no_routing sends the vtysh no routing command and asserts that
# output was not received, because this command is not supposed to
# send output if everything works fine. ctx.no_shutdown is analogous.
# Once the last indentation level is exited, the library takes care of
# sending the vtysh end command to return to the vtysh root context. In
# this way, the entering and exiting of contexts is handled automatically.
# Libraries just "wrap" commands, so the past lines are | |
if fonts field only has
unique "full_name" values.
"""
fonts = {}
for f in family_metadata.fonts:
fonts[f.full_name] = f
if len(set(fonts.keys())) != len(family_metadata.fonts):
yield FAIL,\
Message("duplicated",
'Found duplicated "full_name" values'
' in METADATA.pb fonts field.')
else:
yield PASS, ('METADATA.pb "fonts" field only has'
' unique "full_name" values.')
@check(
    id = 'com.google.fonts/check/metadata/unique_weight_style_pairs',
    conditions = ['family_metadata'],
    proposal = 'legacy:check/084'
)
def com_google_fonts_check_metadata_unique_weight_style_pairs(family_metadata):
    """METADATA.pb: check if fonts field
    only contains unique style:weight pairs.
    """
    # Collect every style:weight combination; a real set replaces the
    # previous dict-used-as-a-set plus a redundant set(dict.keys()) call.
    pairs = {f"{f.style}:{f.weight}" for f in family_metadata.fonts}
    if len(pairs) != len(family_metadata.fonts):
        yield FAIL,\
              Message("duplicated",
                      "Found duplicated style:weight pair"
                      " in METADATA.pb fonts field.")
    else:
        yield PASS, ("METADATA.pb \"fonts\" field only has"
                     " unique style:weight pairs.")
@check(
    id = 'com.google.fonts/check/metadata/license',
    conditions = ['family_metadata'],
    proposal = 'legacy:check/085'
)
def com_google_fonts_check_metadata_license(family_metadata):
    """METADATA.pb license is "APACHE2", "UFL" or "OFL"?"""
    # The only license identifiers Google Fonts accepts in METADATA.pb.
    expected_licenses = ["APACHE2", "OFL", "UFL"]
    declared = family_metadata.license
    if declared not in expected_licenses:
        yield FAIL,\
              Message("bad-license",
                      f'METADATA.pb license field ("{declared}")'
                      f' must be one of the following: {expected_licenses}')
    else:
        yield PASS, (f'Font license is declared in METADATA.pb'
                     f' as "{declared}"')
@check(
    id = 'com.google.fonts/check/metadata/menu_and_latin',
    conditions = ['family_metadata'],
    proposal = ['legacy:check/086',
                'https://github.com/googlefonts/fontbakery/issues/912#issuecomment-237935444']
)
def com_google_fonts_check_metadata_menu_and_latin(family_metadata):
    """METADATA.pb should contain at least "menu" and "latin" subsets."""
    # Determine which of the two mandatory subsets are absent.
    declared = list(family_metadata.subsets)
    missing = [s for s in ["menu", "latin"] if s not in declared]
    if not missing:
        yield PASS, 'METADATA.pb contains "menu" and "latin" subsets.'
    else:
        missing = "both" if len(missing) == 2 else f'"{missing[0]}"'
        yield FAIL,\
              Message("missing",
                      f'Subsets "menu" and "latin" are mandatory,'
                      f' but METADATA.pb is missing {missing}.')
@check(
    id = 'com.google.fonts/check/metadata/subsets_order',
    conditions = ['family_metadata'],
    proposal = 'legacy:check/087'
)
def com_google_fonts_check_metadata_subsets_order(family_metadata):
    """METADATA.pb subsets should be alphabetically ordered."""
    # sorted() already returns a list; the previous list(sorted(...)) wrapper
    # was redundant.
    expected = sorted(family_metadata.subsets)
    if list(family_metadata.subsets) != expected:
        yield FAIL,\
              Message("not-sorted",
                      ("METADATA.pb subsets are not sorted "
                       "in alphabetical order: Got ['{}']"
                       " and expected ['{}']"
                       "").format("', '".join(family_metadata.subsets),
                                  "', '".join(expected)))
    else:
        yield PASS, "METADATA.pb subsets are sorted in alphabetical order."
@check(
    id = 'com.google.fonts/check/metadata/includes_production_subsets',
    conditions = ['family_metadata',
                  'production_metadata',
                  'listed_on_gfonts_api'],
    rationale = """
        Check METADATA.pb file includes the same subsets as the family in production.
    """,
    proposal = 'https://github.com/googlefonts/fontbakery/issues/2989'
)
def com_google_fonts_check_metadata_includes_production_subsets(family_metadata,
                                                                production_metadata):
    """Check METADATA.pb includes production subsets."""
    # Index the production family list by name to find this family's entry.
    by_name = {entry['family']: entry for entry in production_metadata["familyMetadataList"]}
    prod_subsets = set(by_name[family_metadata.name]["subsets"])
    local_subsets = set(family_metadata.subsets)
    # Anything served in production but absent locally is a failure.
    missing_subsets = prod_subsets - local_subsets
    if missing_subsets:
        yield FAIL,\
              Message("missing-subsets",
                      f"The following subsets are missing [{', '.join(sorted(missing_subsets))}]")
    else:
        yield PASS, "No missing subsets"
@check(
    id = 'com.google.fonts/check/metadata/copyright',
    conditions = ['family_metadata'],
    proposal = 'legacy:check/088'
)
def com_google_fonts_check_metadata_copyright(family_metadata):
    """METADATA.pb: Copyright notice is the same in all fonts?"""
    # Renamed the accumulator from "copyright" to avoid shadowing the
    # builtin of the same name.
    seen_copyright = None
    inconsistent = False
    for f in family_metadata.fonts:
        # Compare each font's notice against the previously seen one.
        if seen_copyright and f.copyright != seen_copyright:
            inconsistent = True
        seen_copyright = f.copyright
    if inconsistent:
        yield FAIL,\
              Message("inconsistency",
                      "METADATA.pb: Copyright field value"
                      " is inconsistent across family")
    else:
        yield PASS, "Copyright is consistent across family"
@check(
    id = 'com.google.fonts/check/metadata/familyname',
    conditions = ['family_metadata'],
    proposal = 'legacy:check/089'
)
def com_google_fonts_check_metadata_familyname(family_metadata):
    """Check that METADATA.pb family values are all the same."""
    # Walk the fonts, comparing each name against the previously seen one.
    previous_name = ""
    mismatch_found = False
    for font in family_metadata.fonts:
        if previous_name and font.name != previous_name:
            mismatch_found = True
        previous_name = font.name
    if mismatch_found:
        yield FAIL,\
              Message("inconsistency",
                      'METADATA.pb: Family name is not the same'
                      ' in all metadata "fonts" items.')
    else:
        yield PASS, ('METADATA.pb: Family name is the same'
                     ' in all metadata "fonts" items.')
@check(
    id = 'com.google.fonts/check/metadata/has_regular',
    conditions = ['family_metadata'],
    proposal = 'legacy:check/090'
)
def com_google_fonts_check_metadata_has_regular(family_metadata):
    """METADATA.pb: According to Google Fonts standards,
    families should have a Regular style.
    """
    from .googlefonts_conditions import has_regular_style
    # The actual style/weight inspection lives in the shared condition helper.
    if not has_regular_style(family_metadata):
        yield FAIL,\
              Message("lacks-regular",
                      "This family lacks a Regular"
                      " (style: normal and weight: 400)"
                      " as required by Google Fonts standards."
                      " If family consists of a single-weight non-Regular style only,"
                      " consider the Google Fonts specs for this case:"
                      " https://github.com/googlefonts/gf-docs/tree/main/Spec#single-weight-families")
    else:
        yield PASS, "Family has a Regular style."
@check(
    id = 'com.google.fonts/check/metadata/regular_is_400',
    conditions = ['family_metadata',
                  'has_regular_style'],
    proposal = 'legacy:check/091'
)
def com_google_fonts_check_metadata_regular_is_400(family_metadata):
    """METADATA.pb: Regular should be 400."""
    # Collect every "Regular" entry whose declared weight is not 400.
    badfonts = [f"{f.filename} (weight: {f.weight})"
                for f in family_metadata.fonts
                if f.full_name.endswith("Regular") and f.weight != 400]
    if badfonts:
        yield FAIL,\
              Message("not-400",
                      f'METADATA.pb: Regular font weight must be 400.'
                      f' Please fix these: {", ".join(badfonts)}')
    else:
        yield PASS, "Regular has weight = 400."
@check(
    id = 'com.google.fonts/check/metadata/nameid/family_name',
    conditions = ['font_metadata'],
    proposal = 'legacy:check/092'
)
def com_google_fonts_check_metadata_nameid_family_name(ttFont, font_metadata):
    """Checks METADATA.pb font.name field matches
    family name declared on the name table.
    """
    from fontbakery.utils import get_name_entry_strings
    # Prefer the typographic family name; fall back to the legacy family name.
    familynames = (get_name_entry_strings(ttFont, NameID.TYPOGRAPHIC_FAMILY_NAME)
                   or get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME))
    if not familynames:
        yield FAIL,\
              Message("missing",
                      (f"This font lacks a FONT_FAMILY_NAME entry"
                       f" (nameID = {NameID.FONT_FAMILY_NAME})"
                       f" in the name table."))
    elif font_metadata.name not in familynames:
        yield FAIL,\
              Message("mismatch",
                      (f'Unmatched family name in font:'
                       f' TTF has "{familynames[0]}" while METADATA.pb'
                       f' has "{font_metadata.name}"'))
    else:
        yield PASS, (f'Family name "{font_metadata.name}" is identical'
                     f' in METADATA.pb and on the TTF file.')
@check(
    id = 'com.google.fonts/check/metadata/nameid/post_script_name',
    conditions = ['font_metadata'],
    proposal = 'legacy:093'
)
def com_google_fonts_check_metadata_nameid_post_script_name(ttFont, font_metadata):
    """Checks METADATA.pb font.post_script_name matches
    postscript name declared on the name table.
    """
    from fontbakery.utils import get_name_entry_strings
    postscript_names = get_name_entry_strings(ttFont, NameID.POSTSCRIPT_NAME)
    passed = True
    if not postscript_names:
        passed = False
        yield FAIL,\
              Message("missing",
                      (f"This font lacks a POSTSCRIPT_NAME entry"
                       f" (nameID = {NameID.POSTSCRIPT_NAME})"
                       f" in the name table."))
    else:
        # Every POSTSCRIPT_NAME entry must agree with METADATA.pb.
        for psname in postscript_names:
            if psname != font_metadata.post_script_name:
                passed = False
                yield FAIL,\
                      Message("mismatch",
                              (f'Unmatched postscript name in font:'
                               f' TTF has "{psname}" while METADATA.pb has'
                               f' "{font_metadata.post_script_name}".'))
    if passed:
        yield PASS, (f'Postscript name "{font_metadata.post_script_name}"'
                     f' is identical in METADATA.pb and on the TTF file.')
@check(
    id = 'com.google.fonts/check/metadata/nameid/full_name',
    conditions = ['font_metadata'],
    proposal = 'legacy:check/094'
)
def com_google_fonts_check_metadata_nameid_full_name(ttFont, font_metadata):
    """METADATA.pb font.full_name value matches
    fullname declared on the name table?
    """
    from fontbakery.utils import get_name_entry_strings
    full_fontnames = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME)
    if not full_fontnames:
        yield FAIL,\
              Message("lacks-entry",
                      (f"This font lacks a FULL_FONT_NAME entry"
                       f" (nameID = {NameID.FULL_FONT_NAME})"
                       f" in the name table."))
    # One PASS/FAIL is emitted per FULL_FONT_NAME entry found.
    for full_fontname in full_fontnames:
        if full_fontname == font_metadata.full_name:
            yield PASS, (f'Font fullname "{full_fontname}" is identical'
                         f' in METADATA.pb and on the TTF file.')
        else:
            yield FAIL,\
                  Message("mismatch",
                          (f'Unmatched fullname in font:'
                           f' TTF has "{full_fontname}" while METADATA.pb'
                           f' has "{font_metadata.full_name}".'))
@check(
    id = 'com.google.fonts/check/metadata/nameid/font_name',
    conditions = ['font_metadata',
                  'style'],
    proposal = 'legacy:check/095'
)
def com_google_fonts_check_metadata_nameid_font_name(ttFont, style, font_metadata):
    """METADATA.pb font.name value should be same as
    the family name declared on the name table.
    """
    from fontbakery.utils import get_name_entry_strings
    from fontbakery.constants import RIBBI_STYLE_NAMES
    # RIBBI styles match against the plain family name; everything else
    # matches against the typographic family name.
    if style in RIBBI_STYLE_NAMES:
        nameid = NameID.FONT_FAMILY_NAME
    else:
        nameid = NameID.TYPOGRAPHIC_FAMILY_NAME
    font_familynames = get_name_entry_strings(ttFont, nameid)
    if not font_familynames:
        yield FAIL,\
              Message("lacks-entry",
                      f"This font lacks a {NameID(nameid).name} entry"
                      f" (nameID = {nameid}) in the name table.")
    else:
        # One PASS/FAIL is emitted per name-table entry found.
        for font_familyname in font_familynames:
            if font_familyname != font_metadata.name:
                yield FAIL,\
                      Message("mismatch",
                              f'Unmatched familyname in font:'
                              f' TTF has familyname = "{font_familyname}" while'
                              f' METADATA.pb has font.name = "{font_metadata.name}".')
            else:
                yield PASS, (f'OK: Family name "{font_metadata.name}" is identical'
                             f' in METADATA.pb and on the TTF file.')
@check(
    id = 'com.google.fonts/check/metadata/match_fullname_postscript',
    conditions = ['font_metadata'],
    proposal = 'legacy:check/096'
)
def com_google_fonts_check_metadata_match_fullname_postscript(font_metadata):
    """METADATA.pb font.full_name and font.post_script_name
    fields have equivalent values ?
    """
    import re
    # Compare the two fields with every non-word character stripped out.
    def normalize(value):
        return re.sub(r"\W", "", value)
    if normalize(font_metadata.full_name) != normalize(font_metadata.post_script_name):
        yield FAIL,\
              Message("mismatch",
                      f'METADATA.pb font full_name = "{font_metadata.full_name}"'
                      f' does not match'
                      f' post_script_name = "{font_metadata.post_script_name}"')
    else:
        yield PASS, ('METADATA.pb font fields "full_name" and'
                     ' "post_script_name" have equivalent values.')
@check(
    id = 'com.google.fonts/check/metadata/match_filename_postscript',
    conditions = ['font_metadata',
                  'not is_variable_font'],
    # FIXME: We'll want to review this once
    #        naming rules for varfonts are settled.
    proposal = 'legacy:check/097'
)
def com_google_fonts_check_metadata_match_filename_postscript(font_metadata):
    """METADATA.pb font.filename and font.post_script_name
    fields have equivalent values?
    """
    # The filename minus its extension must equal the PostScript name.
    stem = os.path.splitext(font_metadata.filename)[0]
    if stem == font_metadata.post_script_name:
        yield PASS, ('METADATA.pb font fields "filename" and'
                     ' "post_script_name" have equivalent values.')
    else:
        yield FAIL,\
              Message("mismatch",
                      f'METADATA.pb font filename = "{font_metadata.filename}"'
                      f' does not match'
                      f' post_script_name="{font_metadata.post_script_name}".')
@check(
id = 'com.google.fonts/check/metadata/valid_name_values',
conditions = ['style',
'font_metadata'],
proposal = 'legacy:check/098'
)
def com_google_fonts_check_metadata_valid_name_values(style,
font_metadata,
font_familynames,
typographic_familynames):
"""METADATA.pb font.name field contains font name in right format?"""
from fontbakery.constants import RIBBI_STYLE_NAMES
if style in RIBBI_STYLE_NAMES:
familynames = font_familynames
else:
familynames = typographic_familynames
failed = False
for font_familyname in familynames:
if font_familyname not in font_metadata.name:
failed = True
yield FAIL,\
Message("mismatch",
f'METADATA.pb font.name field ("{font_metadata.name}")'
f' does not match'
f' correct font name format ("{font_familyname}").')
if not failed:
yield PASS, ("METADATA.pb font.name | |
<filename>k1lib/selector.py
# AUTOGENERATED FILE! PLEASE DON'T EDIT
"""
This module is for selecting a subnetwork using CSS so that you can do special
things to them. Checkout the tutorial section for a walkthrough. This is exposed
automatically with::
from k1lib.imports import *
selector.select # exposed
"""
from torch import nn; import k1lib, re, torch
from typing import List, Tuple, Dict, Union, Any, Iterator, Callable
from contextlib import contextmanager; from functools import partial
__all__ = ["ModuleSelector", "preprocess", "select"]
def preprocess(selectors:str, defaultProp="*") -> List[str]:
    r"""Removes all quirky features allowed by the css
    language, and outputs nice lines. Example::

        # returns ["a:f", "a:g,h", "b:g,h", "t:*"]
        selector.preprocess("a:f; a, b: g,h; t")

    :param selectors: single css selector string. Statements separated
        by "\\n" or ";"
    :param defaultProp: default property, if statement doesn't have one"""
    # split statements on both newlines and semicolons
    lines = [e for l in selectors.split("\n") for e in l.split(";")]
    # filtering unwanted characters and quirky spaces; the regexes now use raw
    # strings so "\s" is a regex class, not a (deprecated) string escape
    selectors = [re.sub(r"(^\s+)|(\s+$)", "", re.sub(r"\s\s+", " ", line)).replace(" >", ">").replace("> ", ">").replace(" :", ":").replace(": ", ":").replace(" ,", ",").replace(", ", ",").replace(";", "\n").replace(" \n", "\n").replace("\n ", "\n") for line in lines if line != ""]
    # adding "*" to all selectors with no props specified
    selectors = [selector if ":" in selector else f"{selector}:{defaultProp}" for selector in selectors]
    # expanding comma-delimited selectors
    return [f"{segment}:{selector.split(':')[1]}" for selector in selectors for segment in selector.split(":")[0].split(",")]
def _getParts(s:str): return [a for elem in s.split(":")[0].split(">") if elem for a in elem.split(" ") if a]
def _getProps(s:str): return [elem for elem in s.split(":")[1].split(",") if elem]
# Global counter handing every ModuleSelector a unique, increasing id.
_idxAuto = k1lib.AutoIncrement()
class ModuleSelector: # empty methods so that Sphinx generates the docs in order
    """A node in a selection tree mirroring a :class:`torch.nn.Module`
    hierarchy. Many methods below are empty stubs; their implementations are
    patched onto the class further down in this file via ``@k1lib.patch``
    (the stubs exist so Sphinx documents them in this order)."""
    props:List[str]
    """Properties of this :class:`ModuleSelector`"""
    idx:int
    """Unique id of this :class:`ModuleSelector` in the entire script. May be useful
    for module recognition"""
    nn:"torch.nn.Module"
    """The associated :class:`torch.nn.Module` of this :class:`ModuleSelector`"""
    def __init__(self, parent:"ModuleSelector", name:str, nn:"torch.nn.Module"):
        # parent is None for the root; name is this module's attribute name
        # inside its parent module
        self.parent = parent; self.name = name; self.nn = nn
        self._children:Dict["ModuleSelector"] = {}
        # depth is distance from the root (0 here; updated by parse())
        self.props:List[str] = []; self.depth:int = 0
        self.directSelectors:List[str] = []
        self.indirectSelectors:List[str] = []
        self.displayF:Callable[["ModuleSelector"], str] = lambda mS: ', '.join(mS.props)
        self.idx = _idxAuto()
    # stub; implementation presumably patched in elsewhere (not visible here)
    def deepestDepth(self): pass
    def highlight(self, prop:str):
        """Highlights the specified prop when displaying the object."""
        # red if this node carries the prop, plain otherwise
        self.displayF = lambda self: (k1lib.fmt.txt.red if prop in self else k1lib.fmt.txt.identity)(', '.join(self.props))
        return self
    def __call__(self, *args, **kwargs):
        """Calls the internal :class:`torch.nn.Module`"""
        return self.nn(*args, **kwargs)
    # the following stubs are patched in later in this file:
    def __contains__(self): pass
    def named_children(self): pass
    def children(self): pass
    def named_modules(self): pass
    def modules(self): pass
    def directParams(self): pass
    def parse(self): pass
    def apply(self): pass
    # stub; implementation presumably patched in elsewhere (not visible here)
    def clearProps(self): pass
    @property
    def displayF(self):
        """Function to display each ModuleSelector's lines.
        Default is just::

            lambda mS: ", ".join(mS.props) """
        return self._displayF
    @displayF.setter
    def displayF(self, f):
        # setting displayF propagates the new function to the whole subtree
        def applyF(self): self._displayF = f
        self.apply(applyF)
    def __getattr__(self, attr):
        # only called for missing attributes: resolve child modules by name
        # first, then fall back to direct parameters
        if attr.startswith("_"): raise AttributeError(attr)
        if attr in self._children: return self._children[attr]
        return self.directParams[attr]
    def __getitem__(self, idx): return getattr(self, str(idx))
    @staticmethod
    def sample() -> "ModuleSelector":
        """Create a new example :class:`ModuleSelector` that has a bit of
        hierarchy to them, with no css."""
        return nn.Sequential(nn.Linear(3, 4), nn.Sequential(nn.Conv2d(3, 8, 3, 2), nn.ReLU(), nn.Linear(5, 6)), nn.Linear(7, 8)).select("")
    # hook/freeze stubs; implementations presumably patched in elsewhere
    def hookF(self): pass
    def hookFp(self): pass
    def hookB(self): pass
    def freeze(self): pass
    def unfreeze(self): pass
@k1lib.patch(nn.Module)
def select(model:"torch.nn.Module", css:str="*") -> "k1lib.selector.ModuleSelector":
    """Creates a new ModuleSelector, in sync with a model.
    Example::

        mS = selector.select(nn.Linear(3, 4), "#root:propA")

    Or, you can do it the more direct way::

        mS = nn.Linear(3, 4).select("#root:propA")

    :param model: the :class:`torch.nn.Module` object to select from
    :param css: the css selectors"""
    # build the root node, then let parse() walk the module tree recursively
    rootSelector = ModuleSelector(None, "root", model)
    rootSelector.parse(preprocess(css))
    return rootSelector
@k1lib.patch(ModuleSelector, name="apply")
def _apply(self, f:Callable[[ModuleSelector], None]):
    """Applies a function to self and all child :class:`ModuleSelector`"""
    # depth-first: visit this node first, then recurse into every child
    f(self)
    for childSelector in self._children.values():
        childSelector.apply(f)
@k1lib.patch(ModuleSelector, name="parse")
def _parse(self, selectors:Union[List[str], str]) -> ModuleSelector:
    """Parses extra selectors. Clears all old selectors, but retain
    the props. Returns self. Example::

        mS = selector.ModuleSelector.sample().parse("Conv2d:propA")
        # returns True
        "propA" in mS[1][0]

    :param selectors: can be the preprocessed list, or the unprocessed css string"""
    if isinstance(selectors, str): selectors = preprocess(selectors)
    # reset selector state from any previous parse (props are kept on purpose)
    self.directSelectors = []; self.indirectSelectors = []
    # keep the untouched list: children are parsed with it, while this node
    # additionally considers selectors inherited from its parent
    ogSelectors = selectors
    if self.parent != None:
        selectors = [] + selectors + self.parent.indirectSelectors + self.parent.directSelectors
        # indirect (descendant) selectors keep propagating down the subtree
        self.indirectSelectors += self.parent.indirectSelectors
        self.depth = self.parent.depth + 1
    for selector in selectors:
        parts = _getParts(selector)
        # the selector head matches on class name, "#<name>", or "*" wildcard
        matches = parts[0] == self.nn.__class__.__name__ or parts[0] == "#" + self.name or parts[0] == "*"
        if len(parts) == 1:
            # terminal selector: attach its props to this node on a match
            if matches: self.props += _getProps(selector)
        else:
            # decide whether the first combinator is ">" (direct child) or
            # " " (descendant); find() results <= 0 are mapped to "absent"
            a = selector.find(">"); a = a if a > 0 else float("inf")
            b = selector.find(" "); b = b if b > 0 else float("inf")
            direct = a < b
            if matches:
                # strip the matched head; the remainder applies to children
                if direct: self.directSelectors.append(selector[a+1:])
                else: self.indirectSelectors.append(selector[b+1:])
    for name, mod in self.nn.named_children():
        if name not in self._children:
            self._children[name] = ModuleSelector(self, name, mod)
        self._children[name].parse(ogSelectors)
    # dedupe props accumulated from multiple matching selectors
    self.props = list(set(self.props)); return self
@k1lib.patch(ModuleSelector)
def __contains__(self, prop:str=None) -> bool:
    """Whether this :class:`ModuleSelector` has a specific prop.
Example::

    # returns True
    "b" in nn.Linear(3, 4).select("*:b")
    # returns False
    "h" in nn.Linear(3, 4).select("*:b")
    # returns True, "*" here means the ModuleSelector has any properties at all
    "*" in nn.Linear(3, 4).select("*:b")"""
    props = self.props
    # A stored "*" prop matches everything; an exact prop match also succeeds.
    if "*" in props or prop in props: return True
    # Querying with "*" asks whether any props exist at all.
    return prop == "*" and len(props) > 0
@k1lib.patch(ModuleSelector)
def named_children(self, prop:str=None) -> Iterator[Tuple[str, ModuleSelector]]:
    """Get all named direct children.

:param prop: Filter property. See also: :meth:`__contains__`"""
    pairs = self._children.items()
    if prop is None: return pairs
    return ((name, sel) for name, sel in pairs if prop in sel)
@k1lib.patch(ModuleSelector)
def children(self, prop:str=None) -> Iterator[ModuleSelector]:
    """Get all direct children.

:param prop: Filter property. See also: :meth:`__contains__`"""
    return (selector for _, selector in self.named_children(prop))
@k1lib.patch(ModuleSelector, "directParams")
@property
def directParams(self) -> Dict[str, nn.Parameter]:
    """Dict of parameters attached directly to this module (names without dots)."""
    params = {}
    for paramName, param in self.nn.named_parameters():
        if "." not in paramName:
            params[paramName] = param
    return params
@k1lib.patch(ModuleSelector)
def named_modules(self, prop:str=None) -> Iterator[Tuple[str, ModuleSelector]]:
    """Get all named children recursively, self included.
Example::

    modules = list(nn.Sequential(nn.Linear(3, 4), nn.ReLU()).select().named_modules())
    # return 3
    len(modules)
    # return tuple ('0', <ModuleSelector of Linear>)
    modules[1]

:param prop: Filter property. See also: :meth:`__contains__`"""
    if prop is None:
        yield self.name, self
        for child in self._children.values():
            yield from child.named_modules()
    else:
        # Filtering pass: enumerate the whole tree, keep only nodes carrying `prop`.
        for name, mS in self.named_modules():
            if prop in mS: yield name, mS
@k1lib.patch(ModuleSelector)
def modules(self, prop:str=None) -> Iterator[ModuleSelector]:
    """Get all children recursively.

:param prop: Filter property. See also: :meth:`__contains__`"""
    yield from (mS for _, mS in self.named_modules(prop))
@k1lib.patch(ModuleSelector)
def clearProps(self) -> "ModuleSelector":
    """Clears all existing props of this and all descendant
:class:`ModuleSelector` objects. Example::

    # returns False
    "b" in nn.Linear(3, 4).select("*:b").clearProps()"""
    def wipe(mS): mS.props = []
    self.apply(wipe)
    return self
@k1lib.patch(ModuleSelector, name="deepestDepth")
@property
def deepestDepth(self):
    """Deepest depth of the tree. A selector without
any children has depth 0."""
    childDepths = [child.deepestDepth for child in self._children.values()]
    return 1 + max(childDepths) if childDepths else 0
@k1lib.patch(ModuleSelector)
def __repr__(self, intro:bool=True, header:Union[str, Tuple[str]]="", footer="", tabs:int=None):
    """
    :param intro: whether to include a nice header and footer info
    :param header:
        str: include a header that starts where `displayF` will start

        Tuple[str, str]: first one in tree, second one in displayF section
    :param footer: same thing with header, but at the end
    :param tabs: number of tabs at the beginning. Best to leave this empty
    """
    # Default left-column width: room for the deepest nesting plus fixed padding.
    if tabs == None: tabs = 5 + self.deepestDepth
    answer = "ModuleSelector:\n" if intro else ""
    if header:
        # A plain string header goes in the displayF column; a tuple fills both columns.
        h1, h2 = ("", header) if isinstance(header, str) else header
        answer += h1.ljust(tabs*4, " ") + h2 + "\n"
    answer += f"{self.name}: {self.nn.__class__.__name__}".ljust(tabs*4, " ")
    answer += self.displayF(self) + ("\n" if len(self._children) > 0 else "")
    # Children render with one tab less so their displayF columns still align.
    answer += k1lib.tab("\n".join([child.__repr__(tabs=tabs-1, intro=False) for name, child in self._children.items()]))
    if footer:
        f1, f2 = ("", footer) if isinstance(footer, str) else footer
        answer += "\n" + f1.ljust(tabs*4, " ") + f2
    if intro: answer += f"""\n\nCan...
- mS.deepestDepth: get deepest depth possible
- mS.nn: get the underlying nn.Module object
- mS.apply(f): apply to self and all descendants
- "HookModule" in mS: whether this module has a specified prop
- mS.highlight(prop): highlights all modules with specified prop
- mS.parse([..., ...]): parses extra css
- mS.directParams: get Dict[str, nn.Parameter] that are directly under this module"""
    return answer
def _strTensor(t): return "None" if t is None else f"{t.shape}"
def strTensorTuple(ts):
if len(ts) > 1:
shapes = "\n".join(f"- {_strTensor(t)}" for t in ts)
return f"tensors ({len(ts)} total) shapes:\n{shapes}"
else:
return f"tensor shape: {_strTensor(ts[0])}"
@k1lib.patch(ModuleSelector)
@contextmanager
def hookF(self, f:Callable[[ModuleSelector, "torch.nn.Module", Tuple[torch.Tensor], torch.Tensor], None]=None, prop:str="*"):
"""Context manager for applying forward hooks.
Example::
def f(mS, m, i, o):
print(i, o)
m = nn.Linear(3, 4)
with m.select().hookF(f):
m(torch.randn(2, 3))
:param f: hook callback, should accept :class:`ModuleSelector`, inputs and output
:param prop: filter property of module to hook | |
B : B's namespace=##local, foo,
bar, ##targetNamespace, R's namespace=##targetNamespace, ##local
"""
assert_bindings(
schema="msData/particles/particlesOb056.xsd",
instance="msData/particles/particlesOb056.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_particles_ob055_particles_ob055_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's and R's namespace are both
    ##local, foo, bar, ##targetNamespace."""
    source = "msData/particles/particlesOb055"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_ob054_particles_ob054_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's namespace=##local, foo, bar,
    ##targetNamespace, R's namespace=foo bar."""
    source = "msData/particles/particlesOb054"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_ob053_particles_ob053_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's namespace=##local, foo, bar,
    ##targetNamespace, R's namespace=##targetNamespace."""
    source = "msData/particles/particlesOb053"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_ob052_particles_ob052_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's namespace=##local, foo, bar,
    ##targetNamespace, R's namespace=##local."""
    source = "msData/particles/particlesOb052"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_ob048_particles_ob048_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's namespace=foo bar, R's namespace=bar."""
    source = "msData/particles/particlesOb048"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_ob047_particles_ob047_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's namespace=foo bar, R's namespace=foo."""
    source = "msData/particles/particlesOb047"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_ob042_particles_ob042_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's and R's namespace are both foo bar."""
    source = "msData/particles/particlesOb042"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_ob032_particles_ob032_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's and R's namespace are both ##targetNamespace."""
    source = "msData/particles/particlesOb032"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_ob022_particles_ob022_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's and R's namespace are both ##local."""
    source = "msData/particles/particlesOb022"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_ob015_particles_ob015_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's namespace=##other, R's namespace=foo bar."""
    source = "msData/particles/particlesOb015"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_ob012_particles_ob012_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's and R's namespace are both ##other."""
    source = "msData/particles/particlesOb012"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_ob007_particles_ob007_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's namespace=##any, R's namespace=##local,
    foo, bar, ##targetNamespace."""
    source = "msData/particles/particlesOb007"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_ob006_particles_ob006_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's namespace=##any, R's namespace=foo bar."""
    source = "msData/particles/particlesOb006"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_ob005_particles_ob005_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's namespace=##any, R's namespace=##targetNamespace."""
    source = "msData/particles/particlesOb005"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_ob003_particles_ob003_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's namespace=##any, R's namespace=##other."""
    source = "msData/particles/particlesOb003"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_oa014_particles_oa014_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's minOccurs=2, maxOccurs=4;
    R's minOccurs=3, maxOccurs=3."""
    source = "msData/particles/particlesOa014"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_oa013_particles_oa013_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's minOccurs=2, maxOccurs=4;
    R's minOccurs=2, maxOccurs=3."""
    source = "msData/particles/particlesOa013"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_oa012_particles_oa012_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's minOccurs=2, maxOccurs=4;
    R's minOccurs=3, maxOccurs=4."""
    source = "msData/particles/particlesOa012"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_oa011_particles_oa011_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's minOccurs=2, maxOccurs=4;
    R's minOccurs=2, maxOccurs=4."""
    source = "msData/particles/particlesOa011"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_oa006_particles_oa006_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's minOccurs/maxOccurs absent;
    R's minOccurs=absent, maxOccurs=1."""
    source = "msData/particles/particlesOa006"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_oa003_particles_oa003_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; B's minOccurs/maxOccurs absent;
    R's minOccurs=1, maxOccurs=absent."""
    source = "msData/particles/particlesOa003"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_oa001_particles_oa001_v(mode, save_output, output_format):
    """3.9.1 Particle Derivation OK (Any:Any - NSSubset): (any) R derived by
    restriction from (any) B; minOccurs and maxOccurs absent on both B and R."""
    source = "msData/particles/particlesOa001"
    assert_bindings(
        schema=source + ".xsd",
        instance=source + ".xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_particles_m035_particles_m035_v(mode, save_output, output_format):
"""
TEST :3.9.1 The Particle Schema Component [ check length of element
information items ] : Particle Derivation OK (Elt:Sequence -
RecurseAsIfGroup) element R drived by restriction from (Sequence) B :
B's parent is choice, B=(a*, b+), R=(b+)
"""
assert_bindings(
schema="msData/particles/particlesM035.xsd",
instance="msData/particles/particlesM035.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
| |
path. For example, domainName queries for Dossier will use a path <hostname>/host.
While ipv4 queries for Dossier will use a path of <hostname>/ip.
:param stix_object: STIX object
:type stix_object: str
:param stix_field: STIX field
:type stix_field: str
:param final_expression: native query expression
:type final_expression: str
:param value: value (eg 127.0.0.1)
:type value: int/str
:return: threat type (host, url, email, ip)
:rtype: str
"""
# NOTE: for the Dossier and TIDE apis, threat_type must be provided. Using the provided query, determine the appropriate type.
stix_map = {
'dossierData': [
{
'stix_object': ['domain-name', 'x-infoblox-dossier-event-result-pdns'],
'stix_field': ['value', 'hostname_ref.value'],
'threat_type': 'host'
},
{
'stix_object': ['ipv4-addr', 'ipv6-addr', 'x-infoblox-dossier-event-result-pdns'],
'stix_field': ['value', 'ip_ref.value'],
'threat_type': 'ip'
}
],
'tideDbData': [
{
'stix_object': ['domain-name', 'x-infoblox-threat'],
'stix_field': ['value', 'host_name', 'domain_ref.value'],
'threat_type': 'host'
},
{
'stix_object': ['ipv4-addr', 'ipv6-addr', 'x-infoblox-threat'],
'stix_field': ['value', 'ip_ref.value'],
'threat_type': 'ip'
},
{
'stix_object': ['x-infoblox-threat'],
'stix_field': ['url'],
'threat_type': 'url'
},
{
'stix_object': ['email-addr', 'x-infoblox-threat'],
'stix_field': ['value', 'email_ref.value'],
'threat_type': 'email'
}
]
}
if self.dialect not in stix_map:
return
for mapping in stix_map[self.dialect]:
threat_type = None
if stix_object in mapping['stix_object'] and stix_field in mapping['stix_field']:
threat_type = mapping['threat_type']
if stix_object == 'x-infoblox-threat' and stix_field == 'threat_type':
threat_type = value.lower()
if threat_type:
return threat_type
return
def _merge_queries_in_expression(self, expression_01, expression_02, operator):
"""
Merge two query expressions into a single expression.
NOTE: For complex queries, this parser class recursively traverses the ANTLR expressions until it reaches a simple comparison
operations. After translating the expression, a list of queries is generated. This method helps stitch the potentially multiple
lists of queries into a single list of native queries to transmit. During the merge operation, the threat type of the individual
components are compared. This module currently does not support queries that translate to multiple different threat types (eg ip and host).
:param expression_01: LH query expression
:type expression_01: list
:param expression_02: RH query expression
:type expression_02: list
:param operator: comparison operator for the two expressions, determines how they should be joined (AND or OR)
:type operator: str
:return: query expression
:rtype: list
:throw: RuntimeError if multiple different threat type queries provided
:throw: assertError if both expressions have more than one query, in that situation the queries are too large.
"""
assert not (len(expression_01) > 1 and len(expression_02) > 1), "Failed to merge queries, expressions too complex"
expression_small = expression_01 if len(expression_01) == 1 else expression_02
expression_large = expression_02 if expression_small == expression_01 else expression_01
# determine threat_type from individual queries
threat_type_array = [i['threatType'] for i in (expression_01 + expression_02) if i['threatType']]
threat_type_set = set(threat_type_array)
if len(threat_type_set) > 1:
raise RuntimeError("Conflicting threat_type found, {}".format(sorted(threat_type_set)))
for query in expression_large:
merging_expression = expression_small[0]
query['query'] = operator.join([merging_expression['query'], query['query']])
query['threatType'] = merging_expression['threatType'] if merging_expression['threatType'] else query['threatType']
return expression_large
def _parse_expression(self, expression, qualifier=None, intersection_fields=None) -> str:
    """
    Recursively called method for parsing an ANTLR expression. For complex expressions, like ObservationExpression, LH & RH expressions
    are separated and processed independently before being merged back together.

    :param expression: STIX expression to parse
    :type expression: stix_shifter_utils.stix_translation.src.patterns.pattern_object.*
    :param qualifier: STIX qualifier, propagates START/STOP qualifier (on complex expressions) to the necessary basic comparison expression
    :type qualifier: stix_shifter_utils.stix_translation.src.patterns.pattern_objects import StartStopQualifier
    :param intersection_fields: map maintaining already processed fields, used by _calculate_intersection to detect multiple criteria
        on a single field.
    :type intersection_fields: map
    :return: native query expression
    :rtype: str
    :throw: RuntimeError if unknown expression type provided
    """
    if isinstance(expression, ComparisonExpression):
        # Resolve STIX Object Path to a field in the target Data Model
        stix_object, stix_field = expression.object_path.split(':')
        # Multiple data source fields may map to the same STIX Object
        mapped_fields_array = self.dmm.map_field(stix_object, stix_field)
        if intersection_fields is not None:
            self._calculate_intersection(mapped_fields_array, stix_field, intersection_fields)
        else:
            # No shared field map supplied by the caller: validate this single
            # comparison against a throwaway local map instead.
            assigned_fields = dict()
            self._calculate_intersection(mapped_fields_array, stix_field, assigned_fields)
        # Resolve the comparison symbol to use in the query string (usually just ':')
        comparator = self._lookup_comparison_operator(expression.comparator)
        # Some values are formatted differently based on how they're being compared
        if expression.comparator == ComparisonComparators.Like:
            value = self._format_like(expression.value)
        else:
            value = self._format_equality(expression.value)
        final_expression = self._parse_mapped_fields(expression, value, comparator, stix_field, mapped_fields_array)
        threatType = self._set_threat_type(stix_object, stix_field, final_expression, value)
        return [{'query': final_expression, 'threatType': threatType, 'startStopTime': qualifier}]
    elif isinstance(expression, CombinedComparisonExpression):
        operator = self._lookup_comparison_operator(expression.operator)
        # NOTE: APIs do not support duplicate criteria (example domain-name=d1.com AND domain-name=d2.com). As a workaround, the expression
        # will be split into multiple independent queries.
        exp1_fields = dict()
        use_two_queries = True
        try:
            # Process LHS of expression, intersections here is an invalid query, stop processing.
            expression_01 = self._parse_expression(expression.expr1, qualifier, exp1_fields)
        except DuplicateFieldException as error:
            logger.error("%s", error)
            raise NotImplementedError("{}".format(error))
        try:
            # Process RHS of expression, if intersections are found re-attempt parsing but as two separate queries.
            expression_02 = self._parse_expression(expression.expr2, qualifier, exp1_fields)
        except DuplicateFieldException as error:
            try:
                # Retry the RHS against a fresh field map so it becomes an independent query.
                exp2_fields = dict()
                expression_02 = self._parse_expression(expression.expr2, qualifier, exp2_fields)
                use_two_queries = False
            except DuplicateFieldException as error:
                logger.error("%s", error)
                raise NotImplementedError("{}".format(error))
        assert expression_01 and expression_02, "Failed to parse one side of the expression"
        # NOTE: Merging the two list of queries this would be for expressions with `OR` or `AND` (with duplicate criteria). For
        # expressions with `AND` (but with different criteria), then the list of queries on one side of the expression will be concatenated together.
        result = expression_01 + expression_02
        if expression.operator == ComparisonExpressionOperators.And and use_two_queries:
            result = self._merge_queries_in_expression(expression_01, expression_02, operator)
        return result
    elif isinstance(expression, ObservationExpression):
        result = self._parse_expression(expression.comparison_expression, qualifier, intersection_fields)
        return result
    elif isinstance(expression, StartStopQualifier) and hasattr(expression, 'observation_expression'):
        # Unwrap the qualifier and push its START/STOP window down to the comparisons.
        return self._parse_expression(getattr(expression, 'observation_expression'), expression.qualifier, intersection_fields)
    elif isinstance(expression, CombinedObservationExpression):
        # Observations are independent: parse each side with its own field map and concatenate.
        exp1_fields = dict()
        exp2_fields = dict()
        expression_01 = self._parse_expression(expression.expr1, qualifier, exp1_fields)
        expression_02 = self._parse_expression(expression.expr2, qualifier, exp2_fields)
        result = expression_01 + expression_02
        return result
    elif isinstance(expression, Pattern):
        result = self._parse_expression(expression.expression)
        return result
    else:
        raise RuntimeError("Unknown Recursion Case for expression={}, type(expression)={}".format(
            expression, type(expression)))
def parse_expression(self, pattern: Pattern):
    """
    Public entry point for parsing an ANTLR pattern; delegates to the
    recursive :meth:`_parse_expression`.

    :param pattern: STIX expression to parse
    :type pattern: Pattern
    :return: native query expression
    :rtype: str
    """
    return self._parse_expression(pattern)
def _test_or_add_milliseconds(timestamp) -> str:
    """
    Validates and reformats (if necessary) a timestamp from the START/STOP qualifier.

    :param timestamp: timestamp string (eg 1234-56-78T00:00:00.123Z)
    :type timestamp: str
    :return: sanitized timestamp string
    :rtype: str
    """
    # Drop the single quotes wrapping the qualifier's timestamp literal.
    timestamp = timestamp.replace("'", "")
    # Append .000 milliseconds when the timestamp lacks a 3-decimal fraction.
    if not re.search(TIMESTAMP_MILLISECONDS, timestamp):
        timestamp = re.sub('Z$', '.000Z', timestamp)
    return timestamp
def _test_start_stop_format(query_string) -> bool:
    """
    Checks if query_string contains a START/STOP qualifier.

    :param query_string: query string
    :type query_string: str
    :return: True if provided query_string contains START/STOP qualifier.
    :rtype: bool
    """
    # Matches STARTt'1234-56-78T00:00:00.123Z'STOPt'1234-56-78T00:00:00.123Z'
    # or START 1234567890123 STOP 1234567890123
    return re.search(START_STOP_STIX_QUALIFIER, query_string) is not None
def _get_parts_start_stop(query):
"""
Checks if query_string contains START/STOP qualifier.
:param query_string: query string
:type query_string: str
:return: True if provided query_string contains START/STOP qualifier.
:rtype: bool
"""
# Remove leading 't' before timestamps
query = re.sub("(?<=START)t|(?<=STOP)t", "", query)
# Split individual query to isolate timestamps
query_parts = re.split("(START)|(STOP)", query)
# Remove None array entries
query_parts = list(map(lambda x: x.strip(), list(filter(None, query_parts))))
return query_parts
def _format_query_with_timestamp(dialect:str, query: str, time_range, start_stop_time) -> str:
"""
Based on dialect, format time range for the query.
NOTE:
- If START/STOP qualifier not provided, default configuration `time_range` is used.
- If START/STOP qualifier is provided, the timestamps are parsed out and formatted.
- Each dialect has a different way to represent a time range in the native query (DnsEvent uses t0/t1 while TideDB uses from_date/to_date)
:param dialect: dialect for the query (eg dnsEventData, tideDbData, etc)
:type dialect: str
:param query: original query string
:type query: str
:param time_range: default time range to query over (in minutes).
:type time_range: int
:param start_stop_time: qualifier start/stop time (eg | |
sol, order=2, solve_for_func=False) == (True, 0)
assert dsolve(eqn, f(x)) in (sol, sols)
assert dsolve(eqn, f(x), hint="nth_order_reducible") in (sol, sols)
eqn = f(x).diff(x, 4) - 2 * f(x).diff(x, 2)
sol = Eq(f(x), C1 + C2 * x + C3 * exp(x * sqrt(2)) + C4 * exp(-x * sqrt(2)))
sols = constant_renumber(sol)
assert checkodesol(eqn, sol, order=2, solve_for_func=False) == (True, 0)
assert dsolve(eqn, f(x)) in (sol, sols)
assert dsolve(eqn, f(x), hint="nth_order_reducible") in (sol, sols)
eqn = f(x).diff(x, 4) + 4 * f(x).diff(x, 2)
sol = Eq(f(x), C1 + C2 * sin(2 * x) + C3 * cos(2 * x) + C4 * x)
sols = constant_renumber(sol)
assert checkodesol(eqn, sol, order=2, solve_for_func=False) == (True, 0)
assert dsolve(eqn, f(x)) in (sol, sols)
assert dsolve(eqn, f(x), hint="nth_order_reducible") in (sol, sols)
eqn = f(x).diff(x, 5) + 2 * f(x).diff(x, 3) + f(x).diff(x)
# These are equivalent:
sol1 = Eq(f(x), C1 + (C2 + C3 * x) * sin(x) + (C4 + C5 * x) * cos(x))
sol2 = Eq(
f(x),
C1
+ C2 * (x * sin(x) + cos(x))
+ C3 * (-x * cos(x) + sin(x))
+ C4 * sin(x)
+ C5 * cos(x),
)
sol1s = constant_renumber(sol1)
sol2s = constant_renumber(sol2)
assert checkodesol(eqn, sol1, order=2, solve_for_func=False) == (True, 0)
assert checkodesol(eqn, sol2, order=2, solve_for_func=False) == (True, 0)
assert dsolve(eqn, f(x)) in (sol1, sol1s)
assert dsolve(eqn, f(x), hint="nth_order_reducible") in (sol2, sol2s)
# In this case the reduced ODE has two distinct solutions
eqn = f(x).diff(x, 2) - f(x).diff(x) ** 3
sol = [
Eq(f(x), C2 - sqrt(2) * I * (C1 + x) * sqrt(1 / (C1 + x))),
Eq(f(x), C2 + sqrt(2) * I * (C1 + x) * sqrt(1 / (C1 + x))),
]
sols = constant_renumber(sol)
assert checkodesol(eqn, sol, order=2, solve_for_func=False) == [
(True, 0),
(True, 0),
]
assert dsolve(eqn, f(x)) in (sol, sols)
assert dsolve(eqn, f(x), hint="nth_order_reducible") in (sol, sols)
def test_nth_algebraic():
    """ODEs solvable by the ``nth_algebraic`` hint (algebraic manipulation)."""
    # f'(x) = g'(x) integrates directly.
    ode = Eq(Derivative(f(x), x), Derivative(g(x), x))
    expected = Eq(f(x), C1 + g(x))
    assert checkodesol(ode, expected, order=1, solve_for_func=False)[0]
    assert expected == dsolve(ode, f(x), hint="nth_algebraic"), dsolve(
        ode, f(x), hint="nth_algebraic"
    )
    assert expected == dsolve(ode, f(x))

    # Factored first-order equation with two branches.
    ode = (diff(f(x)) - x) * (diff(f(x)) + x)
    expected_list = [Eq(f(x), C1 - x ** 2 / 2), Eq(f(x), C1 + x ** 2 / 2)]
    assert checkodesol(ode, expected_list, order=1, solve_for_func=False)[0]
    assert set(expected_list) == set(dsolve(ode, f(x), hint="nth_algebraic"))
    assert set(expected_list) == set(dsolve(ode, f(x)))

    # Constant solution of (1 - sin(f)) * f' = 0.
    ode = (1 - sin(f(x))) * f(x).diff(x)
    expected = Eq(f(x), C1)
    assert checkodesol(ode, expected, order=1, solve_for_func=False)[0]
    assert expected == dsolve(ode, f(x), hint="nth_algebraic")
    assert expected == dsolve(ode, f(x))

    # A physical example with additional free symbols.
    M, m, r, t = symbols("M m r t")
    phi = Function("phi")
    ode = Eq(
        -M * phi(t).diff(t),
        Rational(3, 2) * m * r ** 2 * phi(t).diff(t) * phi(t).diff(t, t),
    )
    expected_list = [
        Eq(phi(t), C1),
        Eq(phi(t), C1 + C2 * t - M * t ** 2 / (3 * m * r ** 2)),
    ]
    assert checkodesol(ode, expected_list[0], order=2, solve_for_func=False)[0]
    assert checkodesol(ode, expected_list[1], order=2, solve_for_func=False)[0]
    assert set(expected_list) == set(dsolve(ode, phi(t), hint="nth_algebraic"))
    assert set(expected_list) == set(dsolve(ode, phi(t)))

    # Products of factors all sharing the linear solution C1 + C2*x.
    ode = f(x) * f(x).diff(x) * f(x).diff(x, x)
    expected = Eq(f(x), C1 + C2 * x)
    assert checkodesol(ode, expected, order=1, solve_for_func=False)[0]
    assert expected == dsolve(ode, f(x), hint="nth_algebraic")
    assert expected == dsolve(ode, f(x))

    ode = f(x) * f(x).diff(x) * f(x).diff(x, x) * (f(x) - 1)
    expected = Eq(f(x), C1 + C2 * x)
    assert checkodesol(ode, expected, order=1, solve_for_func=False)[0]
    assert expected == dsolve(ode, f(x), hint="nth_algebraic")
    assert expected == dsolve(ode, f(x))

    ode = f(x) * f(x).diff(x) * f(x).diff(x, x) * (f(x) - 1) * (f(x).diff(x) - x)
    expected_list = [Eq(f(x), C1 + x ** 2 / 2), Eq(f(x), C1 + C2 * x)]
    assert checkodesol(ode, expected_list[0], order=2, solve_for_func=False)[0]
    assert checkodesol(ode, expected_list[1], order=2, solve_for_func=False)[0]
    assert set(expected_list) == set(dsolve(ode, f(x), hint="nth_algebraic"))
    assert set(expected_list) == set(dsolve(ode, f(x)))
def test_nth_algebraic_issue15999():
    """A C1 already present in the ODE must not be reused as a new constant."""
    ode = f(x).diff(x) - C1
    expected = Eq(f(x), C1 * x + C2)  # Correct solution
    assert checkodesol(ode, expected, order=1, solve_for_func=False) == (True, 0)
    assert dsolve(ode, f(x), hint="nth_algebraic") == expected
    assert dsolve(ode, f(x)) == expected
def test_nth_algebraic_redundant_solutions():
    """Redundant solution branches must be removed from dsolve output."""
    # f * f' = 0: only the constant solution should survive.
    ode = f(x) * f(x).diff(x)
    expected = Eq(f(x), C1)
    assert checkodesol(ode, expected, order=1, solve_for_func=False)[0]
    assert expected == dsolve(ode, f(x), hint="nth_algebraic")
    assert expected == dsolve(ode, f(x))

    # Two integral solutions and no algebraic solutions.
    ode = (diff(f(x)) - x) * (diff(f(x)) + x)
    expected_list = [Eq(f(x), C1 - x ** 2 / 2), Eq(f(x), C1 + x ** 2 / 2)]
    assert all(result[0] for result in checkodesol(ode, expected_list, order=1, solve_for_func=False))
    assert set(expected_list) == set(dsolve(ode, f(x), hint="nth_algebraic"))
    assert set(expected_list) == set(dsolve(ode, f(x)))

    ode = f(x) + f(x) * f(x).diff(x)
    expected_list = [Eq(f(x), 0), Eq(f(x), C1 - x)]
    assert all(result[0] for result in checkodesol(ode, expected_list, order=1, solve_for_func=False))
    assert set(expected_list) == set(dsolve(ode, f(x)))

    # _remove_redundant_solutions keeps only the more general candidate.
    candidates = [Eq(f(x), exp(x)), Eq(f(x), C1 * exp(C2 * x))]
    candidates_final = _remove_redundant_solutions(ode, candidates, 2, x)
    assert candidates_final == [Eq(f(x), C1 * exp(C2 * x))]

    # This one needs the substitution f' = g.
    ode = -exp(x) + (x * Derivative(f(x), (x, 2)) + Derivative(f(x), x)) / x
    expected = Eq(f(x), C1 + C2 * log(x) + exp(x) - Ei(x))
    assert checkodesol(ode, expected, order=2, solve_for_func=False)[0]
    assert expected == dsolve(ode, f(x))
#
# These tests can be combined with the above test if they get fixed
# so that dsolve actually works in all these cases.
#
# prep = True breaks this
def test_nth_algebraic_noprep1():
    """Solvable without preprocessing (prep=True currently breaks this ODE)."""
    ode = Derivative(x * f(x), x, x, x)
    expected = Eq(f(x), (C1 + C2 * x + C3 * x ** 2) / x)
    assert checkodesol(ode, expected, order=3, solve_for_func=False)[0]
    assert expected == dsolve(ode, f(x), prep=False, hint="nth_algebraic")
@XFAIL
def test_nth_algebraic_prep1():
    """Same ODE as test_nth_algebraic_noprep1; expected to pass once prep=True works."""
    ode = Derivative(x * f(x), x, x, x)
    expected = Eq(f(x), (C1 + C2 * x + C3 * x ** 2) / x)
    assert checkodesol(ode, expected, order=3, solve_for_func=False)[0]
    assert expected == dsolve(ode, f(x), prep=True, hint="nth_algebraic")
    assert expected == dsolve(ode, f(x))
# prep = True breaks this
def test_nth_algebraic_noprep2():
    """Solvable without preprocessing (prep=True currently breaks this ODE)."""
    ode = Eq(Derivative(x * Derivative(f(x), x), x) / x, exp(x))
    expected = Eq(f(x), C1 + C2 * log(x) + exp(x) - Ei(x))
    assert checkodesol(ode, expected, order=2, solve_for_func=False)[0]
    assert expected == dsolve(ode, f(x), prep=False, hint="nth_algebraic")
@XFAIL
def test_nth_algebraic_prep2():
    """Same ODE as test_nth_algebraic_noprep2; expected to pass once prep=True works."""
    ode = Eq(Derivative(x * Derivative(f(x), x), x) / x, exp(x))
    expected = Eq(f(x), C1 + C2 * log(x) + exp(x) - Ei(x))
    assert checkodesol(ode, expected, order=2, solve_for_func=False)[0]
    assert expected == dsolve(ode, f(x), prep=True, hint="nth_algebraic")
    assert expected == dsolve(ode, f(x))
# There needs to be a way to know how to combine derivatives in the expression
def test_factoring_ode():
    """An ODE whose factors must be combined before solving."""
    from sympy import Mul

    ode = Derivative(x * f(x), x, x, x) + Derivative(f(x), x, x, x)
    # The expected solution deliberately contains an unevaluated 2-arg Mul.
    expected = Eq(f(x), C1 + C2 * x + C3 / Mul(2, (x + 1), evaluate=False))
    assert checkodesol(ode, expected, order=2, solve_for_func=False)[0]
    assert expected == dsolve(ode, f(x))
def test_issue_11542():
    """Numeric drag ODE: dsolve must produce the expected float-valued result."""
    mass = 96
    gravity = 9.8
    drag = 0.2
    force = gravity * mass
    t = Symbol("t")
    v = Function("v")
    v_equation = dsolve(force - drag * (v(t) ** 2) - mass * Derivative(v(t)), 0)
    assert (
        str(v_equation) == "Eq(v(t), -68.585712797929/tanh(C1 - 0.142886901662352*t))"
    )
def test_issue_15913():
    """checkodesol must verify these solutions containing arbitrary constants."""
    ode = -C1 / x - 2 * x * f(x) - f(x) + Derivative(f(x), x)
    candidate = C2 * exp(x ** 2 + x) + exp(x ** 2 + x) * Integral(
        C1 * exp(-(x ** 2) - x) / x, x
    )
    assert checkodesol(ode, candidate) == (True, 0)

    candidate = C1 + C2 * exp(-x * y)
    ode = Derivative(y * f(x), x) + f(x).diff(x, 2)
    assert checkodesol(ode, candidate, f(x)) == (True, 0)
def test_issue_16146():
    """dsolve rejects a function list whose length differs from the system's."""
    system = [f(x).diff(x), g(x).diff(x)]
    raises(ValueError, lambda: dsolve(system, [f(x), g(x), h(x)]))
    raises(ValueError, lambda: dsolve(system, [f(x)]))
def test_dsolve_remove_redundant_solutions():
eq = (f(x) - 2) * f(x).diff(x)
sol = Eq(f(x), C1)
assert dsolve(eq) == sol
eq = (f(x) - sin(x)) * (f(x).diff(x, 2))
sol = {Eq(f(x), C1 + C2 * x), Eq(f(x), sin(x))}
assert set(dsolve(eq)) == sol
eq = (f(x) ** 2 - 2 * f(x) + 1) * f(x).diff(x, 3)
sol = Eq(f(x), C1 + C2 * x + | |
groups contained inside other groups to the larger group
passed_groups = []
for idx, group in enumerate(search_groups):
try:
other = next(x for x in reversed(search_groups[:idx])
if x[0].start <= group[0].start
and x[-1].end >= group[-1].end)
for event in group:
event.link_event(other[0])
except StopIteration:
passed_groups.append(group)
return passed_groups
def calculate_shifts(src_stream, dst_stream, groups_list, normal_window, max_window, rewind_thresh):
    """Compute an audio-based shift for every search group in *groups_list*.

    Walks the groups in order and tries to locate each group's source audio in
    the destination stream: first within a small window around the last
    committed shift, then within the full *window*.  A match is trusted only
    when the left half, the right half and the whole group agree on the found
    position; until then groups are parked in an "uncommitted" list.  When
    *rewind_thresh* consecutive groups stay uncommitted, the window widens to
    *max_window* and the scan rewinds to the last committed group.  Finally,
    shifts are applied to events via ``set_shift()``; groups that never matched
    are linked to the closest preceding matched event.

    :param src_stream: source audio stream (provides ``get_substream``)
    :param dst_stream: destination audio stream (provides ``find_substream``)
    :param groups_list: list of event groups to align
    :param normal_window: default search window, in seconds
    :param max_window: widened window used when recovering from errors
    :param rewind_thresh: consecutive failures that trigger a rewind
    """
    def log_shift(state):
        logging.info('{0}-{1}: shift: {2:0.10f}, diff: {3:0.10f}'
                     .format(format_time(state["start_time"]), format_time(state["end_time"]), state["shift"], state["diff"]))

    def log_uncommitted(state, shift, left_side_shift, right_side_shift, search_offset):
        logging.debug('{0}-{1}: shift: {2:0.5f} [{3:0.5f}, {4:0.5f}], search offset: {5:0.6f}'
                      .format(format_time(state["start_time"]), format_time(state["end_time"]),
                              shift, left_side_shift, right_side_shift, search_offset))

    small_window = 1.5
    idx = 0
    committed_states = []
    uncommitted_states = []
    window = normal_window
    while idx < len(groups_list):
        search_group = groups_list[idx]
        tv_audio = src_stream.get_substream(search_group[0].start, search_group[-1].end)

        original_time = search_group[0].start
        group_state = {"start_time": search_group[0].start, "end_time": search_group[-1].end, "shift": None, "diff": None}
        last_committed_shift = committed_states[-1]["shift"] if committed_states else 0
        diff = new_time = None

        if not uncommitted_states:
            if original_time + last_committed_shift > dst_stream.duration_seconds:
                # event outside of audio range, all events past it are also guaranteed to fail
                for g in groups_list[idx:]:
                    committed_states.append({"start_time": g[0].start, "end_time": g[-1].end, "shift": None, "diff": None})
                    logging.info("{0}-{1}: outside of audio range".format(format_time(g[0].start), format_time(g[-1].end)))
                break

            if small_window < window:
                diff, new_time = dst_stream.find_substream(tv_audio, original_time + last_committed_shift, small_window)

        if new_time is not None and abs_diff(new_time - original_time, last_committed_shift) <= ALLOWED_ERROR:
            # fastest case - small window worked, commit the group immediately
            group_state.update({"shift": new_time - original_time, "diff": diff})
            committed_states.append(group_state)
            log_shift(group_state)
            if window != normal_window:
                logging.info("Going back to window {0} from {1}".format(normal_window, window))
                window = normal_window
            idx += 1
            continue

        # Cross-check a candidate match by also searching each half of the
        # group's audio.  '//' keeps the original Python 2 'int / int'
        # semantics and is also a valid integer index on Python 3.
        left_audio_half, right_audio_half = np.split(tv_audio, [len(tv_audio[0]) // 2], axis=1)
        right_half_offset = len(left_audio_half[0]) / float(src_stream.sample_rate)

        terminate = False
        # searching from last committed shift
        if original_time + last_committed_shift < dst_stream.duration_seconds:
            diff, new_time = dst_stream.find_substream(tv_audio, original_time + last_committed_shift, window)
            left_side_time = dst_stream.find_substream(left_audio_half, original_time + last_committed_shift, window)[1]
            right_side_time = dst_stream.find_substream(right_audio_half, original_time + last_committed_shift + right_half_offset, window)[1] - right_half_offset
            terminate = abs_diff(left_side_time, right_side_time) <= ALLOWED_ERROR and abs_diff(new_time, left_side_time) <= ALLOWED_ERROR
            log_uncommitted(group_state, new_time - original_time, left_side_time - original_time,
                            right_side_time - original_time, last_committed_shift)

        # retry from the last uncommitted shift when the committed one didn't agree
        if not terminate and uncommitted_states and uncommitted_states[-1]["shift"] is not None \
                and original_time + uncommitted_states[-1]["shift"] < dst_stream.duration_seconds:
            start_offset = uncommitted_states[-1]["shift"]
            diff, new_time = dst_stream.find_substream(tv_audio, original_time + start_offset, window)
            left_side_time = dst_stream.find_substream(left_audio_half, original_time + start_offset, window)[1]
            right_side_time = dst_stream.find_substream(right_audio_half, original_time + start_offset + right_half_offset, window)[1] - right_half_offset
            terminate = abs_diff(left_side_time, right_side_time) <= ALLOWED_ERROR and abs_diff(new_time, left_side_time) <= ALLOWED_ERROR
            log_uncommitted(group_state, new_time - original_time, left_side_time - original_time,
                            right_side_time - original_time, start_offset)

        shift = new_time - original_time

        if not terminate:
            # we aren't back on track yet - add this group to uncommitted
            group_state.update({"shift": shift, "diff": diff})
            uncommitted_states.append(group_state)
            idx += 1
            if rewind_thresh == len(uncommitted_states) and window < max_window:
                # logging.warn is a deprecated alias - use logging.warning (as below)
                logging.warning("Detected possibly broken segment starting at {0}, increasing the window from {1} to {2}"
                                .format(format_time(uncommitted_states[0]["start_time"]), window, max_window))
                window = max_window
                idx = len(committed_states)
                del uncommitted_states[:]
            continue

        # we're back on track - apply current shift to all broken events
        if uncommitted_states:
            logging.warning("Events from {0} to {1} will most likely be broken!".format(
                format_time(uncommitted_states[0]["start_time"]),
                format_time(uncommitted_states[-1]["end_time"])))

        uncommitted_states.append(group_state)
        for state in uncommitted_states:
            state.update({"shift": shift, "diff": diff})
            log_shift(state)
        committed_states.extend(uncommitted_states)
        del uncommitted_states[:]
        idx += 1

    for state in uncommitted_states:
        log_shift(state)

    # Apply results: unmatched groups get linked to the closest preceding
    # matched event; everything else receives its computed shift.
    for idx, (search_group, group_state) in enumerate(izip(groups_list, chain(committed_states, uncommitted_states))):
        if group_state["shift"] is None:
            for group in reversed(groups_list[:idx]):
                link_to = next((x for x in reversed(group) if not x.linked), None)
                if link_to:
                    for e in search_group:
                        e.link_event(link_to)
                    break
        else:
            for e in search_group:
                e.set_shift(group_state["shift"], group_state["diff"])
def check_file_exists(path, file_title):
    """Raise SushiError when *path* is provided but nothing exists at it.

    A falsy *path* (None/empty) is silently accepted - the file is optional.
    """
    if not path:
        return
    if not os.path.exists(path):
        raise SushiError("{0} file doesn't exist".format(file_title))
def format_full_path(temp_dir, base_path, postfix):
    """Build a derived file path by appending *postfix* to *base_path*.

    When *temp_dir* is set, the file is placed inside it (using only the base
    name of *base_path*); otherwise the postfix is appended in place.
    """
    if not temp_dir:
        return base_path + postfix
    return os.path.join(temp_dir, os.path.basename(base_path) + postfix)
def create_directory_if_not_exists(path):
    """Create *path* (including parents) when it is set and missing.

    A falsy *path* or an already-existing directory is a no-op.
    """
    if not path:
        return
    if not os.path.exists(path):
        os.makedirs(path)
def run(args):
    """Execute the full Sushi pipeline for one source/destination pair.

    Validates the CLI arguments, demuxes audio/subtitles/chapters as required,
    computes per-event shifts from the two audio streams and writes the
    shifted script to the destination path.  When ``args.cleanup`` is set,
    demuxed temporary files are removed in the ``finally`` block.

    :param args: parsed command-line namespace (see ``create_arg_parser``)
    :raises SushiError: on any validation failure
    """
    ignore_chapters = args.chapters_file is not None and args.chapters_file.lower() == 'none'
    write_plot = plot_enabled and args.plot_path
    if write_plot:
        plt.clf()
        plt.ylabel('Shift, seconds')
        plt.xlabel('Event index')

    # first part should do all possible validation and should NOT take significant amount of time
    check_file_exists(args.source, 'Source')
    check_file_exists(args.destination, 'Destination')
    check_file_exists(args.src_timecodes, 'Source timecodes')
    # bugfix: previously mislabeled as 'Source timecodes' in the error message
    check_file_exists(args.dst_timecodes, 'Destination timecodes')
    check_file_exists(args.script_file, 'Script')

    if not ignore_chapters:
        check_file_exists(args.chapters_file, 'Chapters')

    if args.src_keyframes not in ('auto', 'make'):
        check_file_exists(args.src_keyframes, 'Source keyframes')
    if args.dst_keyframes not in ('auto', 'make'):
        check_file_exists(args.dst_keyframes, 'Destination keyframes')

    if (args.src_timecodes and args.src_fps) or (args.dst_timecodes and args.dst_fps):
        raise SushiError('Both fps and timecodes file cannot be specified at the same time')

    src_demuxer = Demuxer(args.source)
    dst_demuxer = Demuxer(args.destination)

    if src_demuxer.is_wav and not args.script_file:
        raise SushiError("Script file isn't specified")

    if (args.src_keyframes and not args.dst_keyframes) or (args.dst_keyframes and not args.src_keyframes):
        raise SushiError('Either none or both of src and dst keyframes should be provided')

    create_directory_if_not_exists(args.temp_dir)

    # selecting source audio
    if src_demuxer.is_wav:
        src_audio_path = args.source
    else:
        src_audio_path = format_full_path(args.temp_dir, args.source, '.sushi.wav')
        src_demuxer.set_audio(stream_idx=args.src_audio_idx, output_path=src_audio_path, sample_rate=args.sample_rate)

    # selecting destination audio
    if dst_demuxer.is_wav:
        dst_audio_path = args.destination
    else:
        dst_audio_path = format_full_path(args.temp_dir, args.destination, '.sushi.wav')
        dst_demuxer.set_audio(stream_idx=args.dst_audio_idx, output_path=dst_audio_path, sample_rate=args.sample_rate)

    # selecting source subtitles
    if args.script_file:
        src_script_path = args.script_file
    else:
        stype = src_demuxer.get_subs_type(args.src_script_idx)
        src_script_path = format_full_path(args.temp_dir, args.source, '.sushi' + stype)
        src_demuxer.set_script(stream_idx=args.src_script_idx, output_path=src_script_path)

    script_extension = get_extension(src_script_path)
    if script_extension not in ('.ass', '.srt'):
        raise SushiError('Unknown script type')

    # selecting destination subtitles
    if args.output_script:
        dst_script_path = args.output_script
        dst_script_extension = get_extension(args.output_script)
        if dst_script_extension != script_extension:
            raise SushiError("Source and destination script file types don't match ({0} vs {1})"
                             .format(script_extension, dst_script_extension))
    else:
        dst_script_path = format_full_path(args.temp_dir, args.destination, '.sushi' + script_extension)

    # selecting chapters
    if args.grouping and not ignore_chapters:
        if args.chapters_file:
            if get_extension(args.chapters_file) == '.xml':
                chapter_times = chapters.get_xml_start_times(args.chapters_file)
            else:
                chapter_times = chapters.get_ogm_start_times(args.chapters_file)
        elif not src_demuxer.is_wav:
            chapter_times = src_demuxer.chapters
            output_path = format_full_path(args.temp_dir, src_demuxer.path, ".sushi.chapters.txt")
            src_demuxer.set_chapters(output_path)
        else:
            chapter_times = []
    else:
        chapter_times = []

    # selecting keyframes and timecodes
    if args.src_keyframes:
        def select_keyframes(file_arg, demuxer):
            # 'auto'/'make' ask the demuxer to generate a keyframes file;
            # any other value is treated as an existing file path.
            auto_file = format_full_path(args.temp_dir, demuxer.path, '.sushi.keyframes.txt')
            if file_arg in ('auto', 'make'):
                if file_arg == 'make' or not os.path.exists(auto_file):
                    if not demuxer.has_video:
                        raise SushiError("Cannot make keyframes for {0} because it doesn't have any video!"
                                         .format(demuxer.path))
                    demuxer.set_keyframes(output_path=auto_file)
                return auto_file
            else:
                return file_arg

        def select_timecodes(external_file, fps_arg, demuxer):
            # Priority: explicit timecodes file > fixed fps > extract from video.
            if external_file:
                return external_file
            elif fps_arg:
                return None
            elif demuxer.has_video:
                path = format_full_path(args.temp_dir, demuxer.path, '.sushi.timecodes.txt')
                demuxer.set_timecodes(output_path=path)
                return path
            else:
                raise SushiError('Fps, timecodes or video files must be provided if keyframes are used')

        src_keyframes_file = select_keyframes(args.src_keyframes, src_demuxer)
        dst_keyframes_file = select_keyframes(args.dst_keyframes, dst_demuxer)
        src_timecodes_file = select_timecodes(args.src_timecodes, args.src_fps, src_demuxer)
        dst_timecodes_file = select_timecodes(args.dst_timecodes, args.dst_fps, dst_demuxer)

    # after this point nothing should fail so it's safe to start slow operations
    # like running the actual demuxing
    src_demuxer.demux()
    dst_demuxer.demux()

    try:
        if args.src_keyframes:
            src_timecodes = Timecodes.cfr(args.src_fps) if args.src_fps else Timecodes.from_file(src_timecodes_file)
            src_keytimes = [src_timecodes.get_frame_time(f) for f in keyframes.parse_keyframes(src_keyframes_file)]

            dst_timecodes = Timecodes.cfr(args.dst_fps) if args.dst_fps else Timecodes.from_file(dst_timecodes_file)
            dst_keytimes = [dst_timecodes.get_frame_time(f) for f in keyframes.parse_keyframes(dst_keyframes_file)]

        script = AssScript.from_file(src_script_path) if script_extension == '.ass' else SrtScript.from_file(src_script_path)
        script.sort_by_time()

        src_stream = WavStream(src_audio_path, sample_rate=args.sample_rate, sample_type=args.sample_type)
        dst_stream = WavStream(dst_audio_path, sample_rate=args.sample_rate, sample_type=args.sample_type)

        search_groups = prepare_search_groups(script.events,
                                              source_duration=src_stream.duration_seconds,
                                              chapter_times=chapter_times,
                                              max_ts_duration=args.max_ts_duration,
                                              max_ts_distance=args.max_ts_distance)

        calculate_shifts(src_stream, dst_stream, search_groups,
                         normal_window=args.window,
                         max_window=args.max_window,
                         rewind_thresh=args.rewind_thresh if args.grouping else 0)

        events = script.events

        if write_plot:
            plt.plot([x.shift for x in events], label='From audio')

        if args.grouping:
            if not ignore_chapters and chapter_times:
                groups = groups_from_chapters(events, chapter_times)
                for g in groups:
                    fix_near_borders(g)
                    smooth_events([x for x in g if not x.linked], args.smooth_radius)
                groups = split_broken_groups(groups)
            else:
                fix_near_borders(events)
                smooth_events([x for x in events if not x.linked], args.smooth_radius)
                groups = detect_groups(events)

            if write_plot:
                plt.plot([x.shift for x in events], label='Borders fixed')

            for g in groups:
                start_shift = g[0].shift
                end_shift = g[-1].shift
                avg_shift = average_shifts(g)
                logging.info(u'Group (start: {0}, end: {1}, lines: {2}), '
                             u'shifts (start: {3}, end: {4}, average: {5})'
                             .format(format_time(g[0].start), format_time(g[-1].end), len(g), start_shift, end_shift,
                                     avg_shift))

            if args.src_keyframes:
                for e in (x for x in events if x.linked):
                    e.resolve_link()
                for g in groups:
                    snap_groups_to_keyframes(g, chapter_times, args.max_ts_duration, args.max_ts_distance, src_keytimes,
                                             dst_keytimes, src_timecodes, dst_timecodes, args.max_kf_distance, args.kf_mode)
        else:
            fix_near_borders(events)
            if write_plot:
                plt.plot([x.shift for x in events], label='Borders fixed')

            if args.src_keyframes:
                for e in (x for x in events if x.linked):
                    e.resolve_link()
                snap_groups_to_keyframes(events, chapter_times, args.max_ts_duration, args.max_ts_distance, src_keytimes,
                                         dst_keytimes, src_timecodes, dst_timecodes, args.max_kf_distance, args.kf_mode)

        for event in events:
            event.apply_shift()

        script.save_to_file(dst_script_path)

        if write_plot:
            plt.plot([x.shift + (x._start_shift + x._end_shift) / 2.0 for x in events], label='After correction')
            plt.legend(fontsize=5, frameon=False, fancybox=False)
            plt.savefig(args.plot_path, dpi=300)
    finally:
        if args.cleanup:
            src_demuxer.cleanup()
            dst_demuxer.cleanup()
def create_arg_parser():
parser = argparse.ArgumentParser(description='Sushi - Automatic Subtitle Shifter')
parser.add_argument('--window', default=10, type=int, metavar='<size>', dest='window',
help='Search window size. [%(default)s]')
parser.add_argument('--max-window', default=30, type=int, metavar='<size>', dest='max_window',
help="Maximum search size Sushi is allowed to use when trying to recover from errors. [%(default)s]")
parser.add_argument('--rewind-thresh', default=5, type=int, metavar='<events>', dest='rewind_thresh',
help="Number of consecutive errors Sushi has to encounter to | |
[('only', [[Commodity]])]
self.Makes_Measurement._rules = [('only', [[Measurement]])]
self.Measures_Property._rules = [('only', [[Property]])]
self.Offers._rules = [('only', [[Service]])]
self.Accomplishes._instance_identifier = self.get_identifier()
self.Consists_Of._instance_identifier = self.get_identifier()
self.Controls_Property._instance_identifier = self.get_identifier()
self.Has_Function._instance_identifier = self.get_identifier()
self.Has_Profile._instance_identifier = self.get_identifier()
self.Has_State._instance_identifier = self.get_identifier()
self.Has_Typical_Consumption._instance_identifier = self.get_identifier()
self.Is_Used_For._instance_identifier = self.get_identifier()
self.Makes_Measurement._instance_identifier = self.get_identifier()
self.Measures_Property._instance_identifier = self.get_identifier()
self.Offers._instance_identifier = self.get_identifier()
self.Has_Description._instance_identifier = self.get_identifier()
self.Has_Manufacturer._instance_identifier = self.get_identifier()
self.Has_Model._instance_identifier = self.get_identifier()
# Data fields
Has_Description: DataField = DataField(
name='Has_Description',
rule='max 1 string',
semantic_manager=semantic_manager)
"""
A Relationship Providing A Description Of An Entity (E.G., Device)
"""
Has_Manufacturer: DataField = DataField(
name='Has_Manufacturer',
rule='max 1 string',
semantic_manager=semantic_manager)
"""
A Relationship Identifying The Manufacturer Of An Entity (E.G., Device)
"""
Has_Model: DataField = DataField(
name='Has_Model',
rule='max 1 string',
semantic_manager=semantic_manager)
"""
A Relationship Identifying The Model Of An Entity (E.G., Device)
"""
# Relation fields
Accomplishes: RelationField = RelationField(
name='Accomplishes',
rule='min 1 Task',
inverse_of=['Is_Accomplished_By'],
semantic_manager=semantic_manager)
"""
A Relationship Between A Certain Entity (E.G., A Device) And The Task It
Accomplishes
"""
Consists_Of: RelationField = RelationField(
name='Consists_Of',
rule='only Device',
semantic_manager=semantic_manager)
"""
A Relationship Indicating A Composite Entity That Consists Of Other Entities
(E.G., A Temperature/Humidity Sensor That Consists Of A Temperature Sensor
And A Humidity Sensor)
"""
Controls_Property: RelationField = RelationField(
name='Controls_Property',
rule='only Property',
semantic_manager=semantic_manager)
"""
A Relationship Specifying The Property That Can Be Controlled By A Certain
Device
"""
Has_Function: RelationField = RelationField(
name='Has_Function',
rule='min 1 Function',
semantic_manager=semantic_manager)
"""
A Relationship Identifying The Type Of Function Of A Device
"""
Has_Profile: RelationField = RelationField(
name='Has_Profile',
rule='only Profile',
semantic_manager=semantic_manager)
"""
A Relationship Associating A Profile To A Certain Entity (E.G., A Device)
"""
Has_State: RelationField = RelationField(
name='Has_State',
rule='only State',
semantic_manager=semantic_manager)
"""
A Relationship Identifying The Type Of State Of A Device
"""
Has_Typical_Consumption: RelationField = RelationField(
name='Has_Typical_Consumption',
rule='only (Energy or Power)',
semantic_manager=semantic_manager)
"""
A Relationship Identifying The Typical (Energy Or Power) Consumption Of A
Device
"""
Is_Used_For: RelationField = RelationField(
name='Is_Used_For',
rule='only Commodity',
semantic_manager=semantic_manager)
"""
A Relationship Specifying The Purpose For Which A Device Is Used For (E.G.,
Controlling A Commodity)
"""
Makes_Measurement: RelationField = RelationField(
name='Makes_Measurement',
rule='only Measurement',
semantic_manager=semantic_manager)
"""
A Relation Between A Device And The Measurements It Makes. Such Measurement
Will Link Together The Value Of The Measurement, Its Unit Of Measure And The
Property To Which It Relates.
"""
Measures_Property: RelationField = RelationField(
name='Measures_Property',
rule='only Property',
semantic_manager=semantic_manager)
"""
A Relationship Specifying The Property That Can Be Measured By A Certain
Device
"""
Offers: RelationField = RelationField(
name='Offers',
rule='only Service',
inverse_of=['Is_Offered_By'],
semantic_manager=semantic_manager)
"""
A Relationship Between A Device And A Service
"""
class Gertrude(Class1, Class2):
    """
    Generated SemanticClass without description

    Source:
    http://www.semanticweb.org/redin/ontologies/2020/11/untitled-ontology-25 (ParsingTesterOntology)
    """

    def __init__(self, *args, **kwargs):
        # Skip re-installing rules when the instance already carries an 'id'
        # (i.e. it was initialised before) - same guard as every generated class.
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            self.attributeProp._rules = [('some', [['customDataType1']])]
            # NOTE(review): rule string below says 'value 2' but the installed
            # rule carries an empty value list - confirm against generator output.
            self.dataProp2._rules = [('value', [[]])]

            self.oProp1._rules = [('min|1', [[Class1]]), ('some', [[Class2], [Class4]])]
            self.objProp2._rules = [('only', [[Thing]]), ('some', [[Class1, Class2]])]
            self.objProp3._rules = [('some', [[Class3]])]
            self.objProp4._rules = [('some', [[Class1, Class2, Class3]])]
            self.objProp5._rules = [('some', [[Class1, Class2], [Class1, Class3]]), ('value', [[Individual1]])]

            # Bind every field to this instance's identifier.
            self.oProp1._instance_identifier = self.get_identifier()
            self.objProp2._instance_identifier = self.get_identifier()
            self.objProp3._instance_identifier = self.get_identifier()
            self.objProp4._instance_identifier = self.get_identifier()
            self.objProp5._instance_identifier = self.get_identifier()
            self.attributeProp._instance_identifier = self.get_identifier()
            self.dataProp2._instance_identifier = self.get_identifier()

    # Data fields
    attributeProp: DataField = DataField(
        name='attributeProp',
        rule='some customDataType1',
        semantic_manager=semantic_manager)
    dataProp2: DataField = DataField(
        name='dataProp2',
        rule='value 2',
        semantic_manager=semantic_manager)

    # Relation fields
    oProp1: RelationField = RelationField(
        name='oProp1',
        rule='min 1 Class1, some (Class2 or Class4)',
        inverse_of=['objProp3'],
        semantic_manager=semantic_manager)
    objProp2: RelationField = RelationField(
        name='objProp2',
        rule='only Thing, some (Class1 and Class2)',
        semantic_manager=semantic_manager)
    objProp3: RelationField = RelationField(
        name='objProp3',
        rule='some Class3',
        inverse_of=['oProp1'],
        semantic_manager=semantic_manager)
    objProp4: RelationField = RelationField(
        name='objProp4',
        # NOTE(review): rule text has unbalanced parentheses; the _rules entry in
        # __init__ corresponds to 'some ((Class1 and Class2) and Class3)' -
        # confirm against the generator output before relying on this string.
        rule='some (Class1 and Class2) and Class3)',
        semantic_manager=semantic_manager)
    objProp5: RelationField = RelationField(
        name='objProp5',
        rule='some (Class1 and (Class2 or Class3)), value Individual1',
        semantic_manager=semantic_manager)
class Get_Command(Command):
    """
    A Type Of Command

    Source:
    https://w3id.org/saref (saref.ttl)
    """

    def __init__(self, *args, **kwargs):
        # Skip re-installing rules when the instance already carries an 'id'
        # (i.e. it was initialised before) - same guard as every generated class.
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            # Cardinality/type rules mirroring the ontology restrictions.
            self.Has_Description._rules = [('max|1', [['string']])]
            self.Acts_Upon._rules = [('only', [[State]])]
            self.Is_Command_Of._rules = [('min|1', [[Function]])]

            # Bind every field to this instance's identifier.
            self.Acts_Upon._instance_identifier = self.get_identifier()
            self.Is_Command_Of._instance_identifier = self.get_identifier()
            self.Has_Description._instance_identifier = self.get_identifier()

    # Data fields
    Has_Description: DataField = DataField(
        name='Has_Description',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Providing A Description Of An Entity (E.G., Device)
    """

    # Relation fields
    Acts_Upon: RelationField = RelationField(
        name='Acts_Upon',
        rule='only State',
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Command And A State
    """

    Is_Command_Of: RelationField = RelationField(
        name='Is_Command_Of',
        rule='min 1 Function',
        inverse_of=['Has_Command'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Command And A Function.
    """
class Get_Current_Meter_Value_Command(Get_Command):
    """
    A Type Of Get Command

    Source:
    https://w3id.org/saref (saref.ttl)
    """

    def __init__(self, *args, **kwargs):
        # Same initialisation guard as every generated class.
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            # NOTE(review): rules and fields duplicate Get_Command's - this
            # re-declaration appears to be the generator's pattern.
            self.Has_Description._rules = [('max|1', [['string']])]
            self.Acts_Upon._rules = [('only', [[State]])]
            self.Is_Command_Of._rules = [('min|1', [[Function]])]

            self.Acts_Upon._instance_identifier = self.get_identifier()
            self.Is_Command_Of._instance_identifier = self.get_identifier()
            self.Has_Description._instance_identifier = self.get_identifier()

    # Data fields
    Has_Description: DataField = DataField(
        name='Has_Description',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Providing A Description Of An Entity (E.G., Device)
    """

    # Relation fields
    Acts_Upon: RelationField = RelationField(
        name='Acts_Upon',
        rule='only State',
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Command And A State
    """

    Is_Command_Of: RelationField = RelationField(
        name='Is_Command_Of',
        rule='min 1 Function',
        inverse_of=['Has_Command'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Command And A Function.
    """
class Get_Meter_Data_Command(Get_Command):
    """
    A Type Of Get Command

    Source:
    https://w3id.org/saref (saref.ttl)
    """

    def __init__(self, *args, **kwargs):
        # Same initialisation guard as every generated class.
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            # NOTE(review): rules and fields duplicate Get_Command's - this
            # re-declaration appears to be the generator's pattern.
            self.Has_Description._rules = [('max|1', [['string']])]
            self.Acts_Upon._rules = [('only', [[State]])]
            self.Is_Command_Of._rules = [('min|1', [[Function]])]

            self.Acts_Upon._instance_identifier = self.get_identifier()
            self.Is_Command_Of._instance_identifier = self.get_identifier()
            self.Has_Description._instance_identifier = self.get_identifier()

    # Data fields
    Has_Description: DataField = DataField(
        name='Has_Description',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Providing A Description Of An Entity (E.G., Device)
    """

    # Relation fields
    Acts_Upon: RelationField = RelationField(
        name='Acts_Upon',
        rule='only State',
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Command And A State
    """

    Is_Command_Of: RelationField = RelationField(
        name='Is_Command_Of',
        rule='min 1 Function',
        inverse_of=['Has_Command'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Command And A Function.
    """
class Get_Meter_History_Command(Get_Command):
    """
    A Type Of Get Command

    Source:
    https://w3id.org/saref (saref.ttl)
    """

    def __init__(self, *args, **kwargs):
        # Same initialisation guard as every generated class.
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            # NOTE(review): rules and fields duplicate Get_Command's - this
            # re-declaration appears to be the generator's pattern.
            self.Has_Description._rules = [('max|1', [['string']])]
            self.Acts_Upon._rules = [('only', [[State]])]
            self.Is_Command_Of._rules = [('min|1', [[Function]])]

            self.Acts_Upon._instance_identifier = self.get_identifier()
            self.Is_Command_Of._instance_identifier = self.get_identifier()
            self.Has_Description._instance_identifier = self.get_identifier()

    # Data fields
    Has_Description: DataField = DataField(
        name='Has_Description',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Providing A Description Of An Entity (E.G., Device)
    """

    # Relation fields
    Acts_Upon: RelationField = RelationField(
        name='Acts_Upon',
        rule='only State',
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Command And A State
    """

    Is_Command_Of: RelationField = RelationField(
        name='Is_Command_Of',
        rule='min 1 Function',
        inverse_of=['Has_Command'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Command And A Function.
    """
class Get_Sensing_Data_Command(Get_Command):
    """
    A Type Of Get Command

    Source:
        https://w3id.org/saref (saref.ttl)
    """

    def __init__(self, *args, **kwargs):
        # 'id' only exists on instances that were already initialised; skip
        # re-binding rules/identifiers when such an instance is re-loaded.
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            # Parsed cardinality/type rules for each field (these mirror the
            # human-readable 'rule' strings in the field declarations below).
            self.Has_Description._rules = [('max|1', [['string']])]
            self.Acts_Upon._rules = [('only', [[State]])]
            self.Is_Command_Of._rules = [('min|1', [[Function]])]
            # Tie each field back to this instance for validation/look-up.
            self.Acts_Upon._instance_identifier = self.get_identifier()
            self.Is_Command_Of._instance_identifier = self.get_identifier()
            self.Has_Description._instance_identifier = self.get_identifier()

    # Data fields
    Has_Description: DataField = DataField(
        name='Has_Description',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Providing A Description Of An Entity (E.G., Device)
    """

    # Relation fields
    Acts_Upon: RelationField = RelationField(
        name='Acts_Upon',
        rule='only State',
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Command And A State
    """

    Is_Command_Of: RelationField = RelationField(
        name='Is_Command_Of',
        rule='min 1 Function',
        inverse_of=['Has_Command'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Command And A Function.
    """
class Hvac(Function_Related):
    """
    Heating, Ventilation And Air Conditioning (Hvac) Device That Provides Indoor
    Environmental Comfort

    Source:
        https://w3id.org/saref (saref.ttl)
    """

    def __init__(self, *args, **kwargs):
        # 'id' only exists on instances that were already initialised; skip
        # re-binding rules/identifiers when such an instance is re-loaded.
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            # Parsed cardinality/type rules for each field (these mirror the
            # human-readable 'rule' strings in the field declarations below).
            self.Has_Description._rules = [('max|1', [['string']])]
            self.Has_Manufacturer._rules = [('max|1', [['string']])]
            self.Has_Model._rules = [('max|1', [['string']])]
            self.Accomplishes._rules = [('value', [[Comfort]]), ('min|1', [[Task]])]
            self.Consists_Of._rules = [('only', [[Device]])]
            self.Controls_Property._rules = [('only', [[Property]])]
            self.Has_Function._rules = [('min|1', [[Function]])]
            self.Has_Profile._rules = [('only', [[Profile]])]
            self.Has_State._rules = [('only', [[State]])]
            self.Has_Typical_Consumption._rules = [('only', [[Energy], [Power]])]
            self.Is_Used_For._rules = [('only', [[Commodity]])]
            self.Makes_Measurement._rules = [('only', [[Measurement]])]
            self.Measures_Property._rules = [('only', [[Property]])]
            self.Offers._rules = [('only', [[Service]])]
            # Tie every field back to this instance for validation/look-up.
            self.Accomplishes._instance_identifier = self.get_identifier()
            self.Consists_Of._instance_identifier = self.get_identifier()
            self.Controls_Property._instance_identifier = self.get_identifier()
            self.Has_Function._instance_identifier = self.get_identifier()
            self.Has_Profile._instance_identifier = self.get_identifier()
            self.Has_State._instance_identifier = self.get_identifier()
            self.Has_Typical_Consumption._instance_identifier = self.get_identifier()
            self.Is_Used_For._instance_identifier = self.get_identifier()
            self.Makes_Measurement._instance_identifier = self.get_identifier()
            self.Measures_Property._instance_identifier = self.get_identifier()
            self.Offers._instance_identifier = self.get_identifier()
            self.Has_Description._instance_identifier = self.get_identifier()
            self.Has_Manufacturer._instance_identifier = self.get_identifier()
            self.Has_Model._instance_identifier = self.get_identifier()
            # Per the 'value Comfort' rule above, an HVAC device always
            # accomplishes the Comfort task; pre-populate it here.
            self.Accomplishes.add(Comfort())

    # Data fields
    Has_Description: DataField = DataField(
        name='Has_Description',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Providing A Description Of An Entity (E.G., Device)
    """

    Has_Manufacturer: DataField = DataField(
        name='Has_Manufacturer',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Manufacturer Of An Entity (E.G., Device)
    """

    Has_Model: DataField = DataField(
        name='Has_Model',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Model Of An Entity (E.G., Device)
    """

    # Relation fields
    Accomplishes: RelationField = RelationField(
        name='Accomplishes',
        rule='value Comfort, min 1 Task',
        inverse_of=['Is_Accomplished_By'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Certain Entity (E.G., A Device) And The Task It
    Accomplishes
    """

    Consists_Of: RelationField = RelationField(
        name='Consists_Of',
        rule='only Device',
        semantic_manager=semantic_manager)
    """
    A Relationship Indicating A Composite Entity That Consists Of Other Entities
    (E.G., A Temperature/Humidity Sensor That Consists Of A Temperature Sensor
    And A Humidity Sensor)
    """

    Controls_Property: RelationField = RelationField(
        name='Controls_Property',
        rule='only Property',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Property That Can Be Controlled By A Certain
    Device
    """

    Has_Function: RelationField = RelationField(
        name='Has_Function',
        rule='min 1 Function',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Type Of Function Of A Device
    """

    Has_Profile: RelationField = RelationField(
        name='Has_Profile',
        rule='only Profile',
        semantic_manager=semantic_manager)
    """
    A Relationship Associating A Profile To A Certain Entity (E.G., A Device)
    """

    Has_State: RelationField = RelationField(
        name='Has_State',
        rule='only State',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Type Of State Of A Device
    """

    Has_Typical_Consumption: RelationField = RelationField(
        name='Has_Typical_Consumption',
        rule='only (Energy or Power)',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Typical (Energy Or Power) Consumption Of A
    Device
    """

    Is_Used_For: RelationField = RelationField(
        name='Is_Used_For',
        rule='only Commodity',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Purpose For Which A Device Is Used For (E.G.,
    Controlling A Commodity)
    """

    Makes_Measurement: RelationField = RelationField(
        name='Makes_Measurement',
        rule='only Measurement',
        semantic_manager=semantic_manager)
    """
    A Relation Between A Device And The Measurements It Makes. Such Measurement
    Will Link Together The Value Of The Measurement, Its Unit Of Measure And The
    Property To Which It Relates.
    """

    Measures_Property: RelationField = RelationField(
        name='Measures_Property',
        rule='only Property',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Property That Can Be Measured By A Certain
    Device
    """

    Offers: RelationField = RelationField(
        name='Offers',
        rule='only Service',
        inverse_of=['Is_Offered_By'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Device And A Service
    """
class Level_Control_Function(Actuating_Function):
    """
    An Actuating Function That Allows To Do Level Adjustments Of An Actuator In
    A Certain Range (E.G., 0%-100%), Such As Dimming A Light Or Set The Speed Of
    An Electric Motor.

    Source:
        https://w3id.org/saref (saref.ttl)
    """

    def __init__(self, *args, **kwargs):
        # 'id' only exists on instances that were already initialised; skip
        # re-binding rules/identifiers when such an instance is re-loaded.
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            # Parsed form of the 'rule' string declared on Has_Command below.
            self.Has_Command._rules = [('only', [[Set_Absolute_Level_Command], [Set_Relative_Level_Command], [Step_Down_Command], [Step_Up_Command]]), ('min|1', [[Command]])]
            self.Has_Command._instance_identifier = self.get_identifier()

    # Relation fields
    Has_Command: RelationField = RelationField(
        name='Has_Command',
        # Fixed: the rule string previously had unbalanced parentheses
        # ('only (A or B) or C) or D), ...'); it now matches the parsed
        # _rules bound in __init__ ("only" over the union of four commands).
        rule='only (Set_Absolute_Level_Command or Set_Relative_Level_Command or Step_Down_Command or Step_Up_Command), min 1 Command',
        inverse_of=['Is_Command_Of'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between An Entity (Such As A Function) And A Command
    """
class Lighting_Device(Function_Related):
"""
A Device Used For Illumination, Irradiation, Signaling, Or Projection
Source:
https://w3id.org/saref (saref.ttl)
"""
def __init__(self, *args, **kwargs):
is_initialised = 'id' in self.__dict__
super().__init__(*args, **kwargs)
if not is_initialised:
self.Has_Description._rules = [('max|1', [['string']])]
self.Has_Manufacturer._rules = [('max|1', [['string']])]
self.Has_Model._rules = [('max|1', [['string']])]
self.Accomplishes._rules = [('value', [[Comfort]]), ('min|1', [[Task]])]
self.Consists_Of._rules = [('only', [[Device]])]
self.Controls_Property._rules = [('only', [[Property]])]
self.Has_Function._rules = [('min|1', [[Function]])]
self.Has_Profile._rules = [('only', [[Profile]])]
self.Has_State._rules = [('only', [[State]])]
self.Has_Typical_Consumption._rules = [('only', [[Energy], [Power]])]
self.Is_Used_For._rules = [('only', [[Commodity]])]
self.Makes_Measurement._rules = [('only', [[Measurement]])]
self.Measures_Property._rules = [('only', [[Property]])]
self.Offers._rules = [('only', [[Service]])]
self.Accomplishes._instance_identifier = self.get_identifier()
self.Consists_Of._instance_identifier = self.get_identifier()
self.Controls_Property._instance_identifier = self.get_identifier()
self.Has_Function._instance_identifier = self.get_identifier()
self.Has_Profile._instance_identifier = self.get_identifier()
self.Has_State._instance_identifier = self.get_identifier()
self.Has_Typical_Consumption._instance_identifier = self.get_identifier()
self.Is_Used_For._instance_identifier = self.get_identifier()
self.Makes_Measurement._instance_identifier = self.get_identifier()
self.Measures_Property._instance_identifier = self.get_identifier()
self.Offers._instance_identifier = self.get_identifier()
self.Has_Description._instance_identifier = self.get_identifier()
self.Has_Manufacturer._instance_identifier = self.get_identifier()
self.Has_Model._instance_identifier = self.get_identifier()
self.Accomplishes.add(Comfort())
# Data fields
Has_Description: DataField = DataField(
name='Has_Description',
rule='max 1 string',
semantic_manager=semantic_manager)
"""
A Relationship Providing A Description Of An Entity (E.G., | |
# geospacelab/visualization/mpl/axis_ticks.py
# Licensed under the BSD 3-Clause License
# Copyright (C) 2021 GeospaceLab (geospacelab)
# Author: <NAME>, Space Physics and Astronomy, University of Oulu
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, GeospaceLab"
__license__ = "BSD-3-Clause License"
__email__ = "<EMAIL>"
__docformat__ = "reStructureText"
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import datetime as dt
from matplotlib.ticker import LogFormatterSciNotation
import geospacelab.toolbox.utilities.pydatetime as dttool
import os
# --- Time-unit conversion constants ------------------------------------------
# Matplotlib date-axis tick intervals are expressed in days; these constants
# convert them to/from more natural units in the locators/formatters below.
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.  # approximate: calendar months vary in length
DAYS_PER_YEAR = 365.0  # non-leap year
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
class DatetimeMajorLocator(mdates.AutoDateLocator):
    """AutoDateLocator preconfigured for major ticks (4-7 ticks by default)."""

    def __init__(self, tz=None, minticks=4, maxticks=7, interval_multiples=True):
        super().__init__(
            tz=tz,
            minticks=minticks,
            maxticks=maxticks,
            interval_multiples=interval_multiples,
        )
class DatetimeMinorLocator(mdates.AutoDateLocator):
    """AutoDateLocator preconfigured for minor ticks (12-50 ticks by default).

    When *majorlocator* is supplied, its timezone is reused so that the
    minor ticks agree with the major ones.  The *ax* argument is accepted
    for interface compatibility but is currently unused.
    """

    def __init__(self, ax=None, majorlocator=None, tz=None, minticks=12, maxticks=50, interval_multiples=True):
        if majorlocator is not None:
            tz = majorlocator.tz
        super().__init__(
            tz=tz,
            minticks=minticks,
            maxticks=maxticks,
            interval_multiples=interval_multiples,
        )
class DatetimeMajorFormatter(mdates.AutoDateFormatter):
    """AutoDateFormatter with custom per-scale formatters for time axes.

    Hour- and minute-scale ticks are labelled by :func:`formatter_hour_per_day`
    and :func:`formatter_minute_per_day`; additional ``{scale: formatter}``
    overrides can be supplied via *scaled*.

    :param locator: the major tick locator the formatter is paired with
    :param scaled: optional dict of extra ``scaled`` overrides (tick interval
        in days -> format string or callable)
    :param tz: timezone passed through to AutoDateFormatter
    :param defaultfmt: fallback date format string
    :param usetex: whether labels are rendered with TeX
    """

    def __init__(self, locator, scaled: dict or None = None, tz=None, defaultfmt='%Y-%m-%d', *, usetex=True):
        # Fixed: tz/defaultfmt/usetex were accepted but hard-coded values
        # were passed to the base class, silently ignoring caller arguments.
        super().__init__(locator, tz=tz, defaultfmt=defaultfmt, usetex=usetex)
        self.scaled[1 / HOURS_PER_DAY] = formatter_hour_per_day
        self.scaled[1 / MINUTES_PER_DAY] = formatter_minute_per_day
        if scaled is not None:
            self.scaled.update(**scaled)
def formatter_hour_per_day(x, pos):
    """Label hour-scale ticks: month/day at midnight, 'HH:MM' otherwise.

    Registered in DatetimeMajorFormatter.scaled for ~1-hour tick spacing.
    """
    tick = mdates.num2date(x).replace(tzinfo=None)
    at_midnight = tick == dt.datetime(tick.year, tick.month, tick.day)
    return tick.strftime("%b %d" if at_midnight else "%H:%M")
def formatter_minute_per_day(x, pos):
    """Label minute-scale ticks: month/day at midnight, 'HH:MM' otherwise.

    Registered in DatetimeMajorFormatter.scaled for ~1-minute tick spacing.
    """
    tick = mdates.num2date(x).replace(tzinfo=None)
    day_start = dt.datetime(tick.year, tick.month, tick.day)
    return tick.strftime("%b %d" if tick == day_start else "%H:%M")
def set_timeline(dt_start, dt_stop, **kwargs):
    """
    Choose major/minor tick locators and a major tick formatter for a time axis.

    The span ``dt_stop - dt_start`` is reduced to the most appropriate unit
    (second, minute, hour, day, month or year) and the major/minor tick
    spacings are looked up in per-unit default tables.

    :param dt_start: start of the time axis (datetime.datetime)
    :param dt_stop: stop of the time axis (datetime.datetime)
    :param kwargs: optional per-unit overrides 'my_second', 'my_minute',
        'my_hour', 'my_day', 'my_month', 'my_year'; each is a dict that may
        override any of the keys 'range', 'majorscale', 'minorscale', 'scale'.
    :return: tuple (majorlocator, minorlocator, majorformatter)
    """
    # Locator class per unit index (1=microsecond ... 7=year).  Direct class
    # references replace the previous string table + eval().
    locator_classes = {
        1: mdates.MicrosecondLocator,
        2: mdates.SecondLocator,
        3: mdates.MinuteLocator,
        4: mdates.HourLocator,
        5: mdates.DayLocator,
        6: mdates.MonthLocator,
        7: mdates.YearLocator,
    }
    # Per-unit tables: 'range' brackets the span expressed in that unit;
    # 'majorscale'/'minorscale' give the tick spacing for each bracket
    # (fractional values fall back to the next-finer unit via 'scale').
    default_second = {
        'range': [1, 2, 3, 5, 10, 15, 20, 30, 40, 60],
        'majorscale': [10/60, 15/60, 30/60, 1, 2, 3, 4, 5, 5, 10],
        'minorscale': [2/60, 2/60, 5/60, 10/60, 20/60, 30/60, 1, 1, 1, 2],
        'scale': 1000
    }
    default_minute = {
        'range': [1, 2, 3, 5, 8, 15, 20, 30, 40, 60],
        'majorscale': [15/60, 20/60, 30/60, 1, 2, 3, 4, 5, 10],
        'minorscale': [3/60, 5/60, 5/60, 10/60, 20/60, 30/60, 1, 1, 2],
        'scale': 60
    }
    default_hour = {
        'range': [1, 2, 3, 5, 8, 12, 18, 24],
        'majorscale': [15/60, 20/60, 30/60, 1, 2, 3, 4],
        'minorscale': [3/60, 5/60, 5/60, 10/60, 20/60, 30/60, 1],
        'scale': 60
    }
    default_day = {
        'range': [1, 2, 3, 5, 8, 14, 21, 30, 50, 80, 122],
        'majorscale': [6/24, 8/24, 12/24, 1, 2, 3, 5, 10, 15],
        'minorscale': [1/24, 2/24, 3/24, 4/24, 8/24, 12/24, 1, 2, 3],
        'scale': 24
    }
    default_month = {
        'range': [3, 4, 7, 12],
        'majorscale': [1/2, 1, 2],
        'minorscale': [1/6, 1/3, 1/2],
        'scale': 30
    }
    default_year = {
        'range': [1, 2, 3, 5, 10, 15, 20, 30, 50, 100, 200, 400, 800],
        'majorscale': [3/12, 4/12, 6/12, 1, 2, 3, 5, 5, 10, 20, 50, 100],
        'minorscale': [1/12, 1/12, 2/12, 3/12, 4/12, 6/12, 1, 1, 2, 5, 10, 20],
        'scale': 12
    }
    # Fixed: previously every override was applied to default_second; each
    # 'my_*' kwarg now updates its own unit table.
    default_second.update(kwargs.get('my_second', {}))
    default_minute.update(kwargs.get('my_minute', {}))
    default_hour.update(kwargs.get('my_hour', {}))
    default_day.update(kwargs.get('my_day', {}))
    default_month.update(kwargs.get('my_month', {}))
    default_year.update(kwargs.get('my_year', {}))

    default_settings = {
        1: {},
        2: default_second,
        3: default_minute,
        4: default_hour,
        5: default_day,
        6: default_month,
        7: default_year
    }

    # Reduce the span to the coarsest unit whose 'range' brackets it.
    tdelta = dt_stop - dt_start
    diff = tdelta.total_seconds()
    for ind in range(2, 8):
        range_ = default_settings[ind]['range']
        if range_[0] <= diff < range_[-1]:
            break
        # Convert 'diff' into the next coarser unit before re-testing.
        if ind in (2, 3):           # seconds -> minutes, minutes -> hours
            diff = diff / 60
        elif ind == 4:              # hours -> days
            diff = diff / 24
        elif ind == 5:              # days -> months
            diff = dttool.get_diff_months(dt_start, dt_stop)
        elif ind == 6:              # months -> years
            diff = dt_stop.year - dt_start.year

    setting = default_settings[ind]
    range_ = setting['range']
    # Pick the bracket the span falls into (index of first value above it).
    for range_ind, value in enumerate(range_):
        if diff < value:
            break
    range_ind = range_ind - 1
    majorscale = setting['majorscale'][range_ind]
    minorscale = setting['minorscale'][range_ind]
    # Fractional scales mean "use the next-finer unit's locator".
    if majorscale < range_[0]:
        majorlocatorclass = locator_classes[ind - 1]
        majorscale = majorscale * setting['scale']
    else:
        majorlocatorclass = locator_classes[ind]
    if minorscale < range_[0]:
        minorlocatorclass = locator_classes[ind - 1]
        minorscale = minorscale * setting['scale']
    else:
        minorlocatorclass = locator_classes[ind]
    majorscale = int(majorscale)
    minorscale = int(minorscale)

    def _boundary_formatter(truncate, boundary_fmt, default_fmt):
        """Build a FuncFormatter that uses *boundary_fmt* when the tick sits
        exactly on the boundary produced by *truncate*, else *default_fmt*."""
        def _format(x, pos):
            dtx = mdates.num2date(x)
            dtx = dtx.replace(tzinfo=None)
            fmt1 = boundary_fmt if dtx == truncate(dtx) else default_fmt
            return dtx.strftime(fmt1)
        return mpl.ticker.FuncFormatter(_format)

    # microseconds
    if majorlocatorclass is mdates.MicrosecondLocator:
        majorlocator = majorlocatorclass(interval=majorscale)
        # Fixed: the boundary previously used 'dt.month' (the datetime
        # module!) instead of the tick's own month, raising AttributeError.
        func_formatter = _boundary_formatter(
            lambda d: dt.datetime(d.year, d.month, d.day, d.hour, d.minute),
            "%M:%S.%f", "%S.%f")
    if minorlocatorclass is mdates.MicrosecondLocator:
        minorlocator = minorlocatorclass(interval=minorscale)

    # seconds
    if majorlocatorclass is mdates.SecondLocator:
        majorlocator = majorlocatorclass(bysecond=range(0, 60, majorscale), interval=1)
        func_formatter = _boundary_formatter(
            lambda d: dt.datetime(d.year, d.month, d.day, d.hour),
            "%H:%M:%S", "%M:%S")
    if minorlocatorclass is mdates.SecondLocator:
        minorlocator = minorlocatorclass(bysecond=range(0, 60, minorscale), interval=1)

    # minutes
    if majorlocatorclass is mdates.MinuteLocator:
        majorlocator = majorlocatorclass(byminute=range(0, 60, majorscale), interval=1)
        func_formatter = _boundary_formatter(
            lambda d: dt.datetime(d.year, d.month, d.day),
            "%b %d", "%H:%M")
    if minorlocatorclass is mdates.MinuteLocator:
        minorlocator = minorlocatorclass(byminute=range(0, 60, minorscale), interval=1)

    # hours
    if majorlocatorclass is mdates.HourLocator:
        majorlocator = majorlocatorclass(byhour=range(0, 24, majorscale), interval=1)
        func_formatter = _boundary_formatter(
            lambda d: dt.datetime(d.year, d.month, d.day),
            "%b %d", "%H:%M")
    if minorlocatorclass is mdates.HourLocator:
        minorlocator = minorlocatorclass(byhour=range(0, 24, minorscale), interval=1)

    # days
    if majorlocatorclass is mdates.DayLocator:
        # Stop the monthday sequence before it wraps past a month's end.
        n_days = int(np.floor(31.5 / majorscale)) * majorscale
        majorlocator = majorlocatorclass(bymonthday=range(1, n_days, majorscale), interval=1)
        func_formatter = _boundary_formatter(
            lambda d: dt.datetime(d.year, 1, 1),
            "%Y-%m-%d", "%m-%d")
    if minorlocatorclass is mdates.DayLocator:
        n_days = int(np.floor(31.5 / minorscale)) * minorscale
        minorlocator = minorlocatorclass(bymonthday=range(1, n_days, minorscale), interval=1)

    # months
    if majorlocatorclass is mdates.MonthLocator:
        majorlocator = majorlocatorclass(bymonth=range(1, 13, majorscale), interval=1)
        func_formatter = _boundary_formatter(
            lambda d: dt.datetime(d.year, 1, 1),
            "%Y-%m-%d", "%m-%d")
    if minorlocatorclass is mdates.MonthLocator:
        minorlocator = minorlocatorclass(bymonth=range(1, 13, minorscale), interval=1)

    # years
    if majorlocatorclass is mdates.YearLocator:
        majorlocator = majorlocatorclass(base=majorscale)
        func_formatter = _boundary_formatter(lambda d: d, "%Y", "%Y")
    if minorlocatorclass is mdates.YearLocator:
        minorlocator = minorlocatorclass(base=minorscale)

    # The FuncFormatter built above is what callers actually receive; the
    # previous mdates.DateFormatter(fmt) assignment was dead code.
    majorformatter = func_formatter
    return majorlocator, minorlocator, majorformatter
# def timeline_format_function(x, pos=None):
# x = mpl.dates.num2date(x)
# if pos == 0:
# # fmt = '%D %H:%M:%S.%f'
# fmt = '%H:%M'
# else:
# fmt = '%H:%M'
# label = x.strftime(fmt)
# #label = label.rstrip("0")
# #label = label.rstrip(".")
# return label
#
# def set_ticks_datetime(fig, ax, axis='x', locator=None, formatter=None, interval=None, visable='on'):
# xlim = getattr(ax, 'get_' + axis + 'lim')()
# dxlim = np.diff(xlim) * 86400.
# dsec = dxlim
# dmin = dsec / 60.
# dhour = dsec / 60.
# dday = dhour / 24.
# dmonth = dday / 30.
# dyear = dday / 365.
# if locator is None:
# pass
# locObj = getattr(mdates, locator)
# majorlocator = mdates.MinuteLocator(interval=1)
# formatter = mdates.AutoDateFormatter(dtlocator)
# formatter.scaled[1 / (24. * 60.)] = matplotlib.ticker.FuncFormatter(my_format_function)
# if iax < nax - 1:
# formatter = matplotlib.ticker.NullFormatter()
# ax.xaxis.set_major_locator(dtlocator)
# ax.xaxis.set_major_formatter(formatter)
# ax.xaxis.set_minor_locator(mdates.SecondLocator(interval=10))
# ax.xaxis.set_tick_params(labelsize='small')
# class CustomTicker(LogFormatterSciNotation):
# # https://stackoverflow.com/questions/43923966/logformatter-tickmarks-scientific-format-limits
# def __call__(self, x, pos=None):
# if x not in [0.1,1,10]:
# return LogFormatterSciNotation.__call__(self,x, pos=None)
# else:
# return "{x:g}".format(x=x)
# fig = | |
"""
Interface for handling the context of emulation: registers, memory, variables, actions, etc.
"""
from __future__ import annotations
from copy import deepcopy
import collections
import logging
from typing import List, Tuple, Optional, TYPE_CHECKING
import dragodis
from .instruction import Instruction
from .memory import Memory
from .registers import RegisterMap
from .variables import VariableMap
from .operands import Operand, OperandLite
from .functions import FunctionSignature, FunctionArgument
from .objects import File, RegKey, Service, ObjectMap
from .actions import ActionList
if TYPE_CHECKING:
from .emulator import Emulator
logger = logging.getLogger(__name__)
class JccContext:
    """
    Tracks state for a conditional-jump (Jcc) instruction encountered while tracing.

    Tracing blindly follows every branch so that all possible data is gathered.
    To support that, we record which target the emulated condition actually
    selected (``condition_target_ea``), where the tested value lives
    (``alt_branch_data_dst``), and our best guess of the value that location
    would need for the branch NOT taken (``alt_branch_data``).  ``flag_opnds``
    maps flag register names to the operands of the instruction that last set
    them.  When tracing the emulated branch the context is left untouched;
    when tracing the alternative branch it must be patched accordingly.
    """

    def __init__(self):
        self.condition_target_ea = None   # branch target chosen by the emulated condition
        self.alt_branch_data_dst = None   # location that was tested (usually operand 0)
        self.alt_branch_data = None       # value to place at alt_branch_data_dst
        self.flag_opnds = {}              # flag register name -> operands that last set it

    def __deepcopy__(self, memo):
        """Hand-rolled deepcopy; payloads are immutable or shallow lists."""
        dup = JccContext()
        dup.condition_target_ea = self.condition_target_ea
        dup.alt_branch_data_dst = self.alt_branch_data_dst
        dup.alt_branch_data = self.alt_branch_data
        dup.flag_opnds = {flag: list(opnds) for flag, opnds in self.flag_opnds.items()}
        return dup

    def update_flag_opnds(self, flags, opnds):
        """
        Record the operands that last modified the given flags.

        :param flags: flag names modified by the instruction
        :param opnds: operands (Operand instances) of that instruction
        """
        for flag in flags:
            # OperandLite instances deepcopy much faster than full Operands.
            self.flag_opnds[flag] = [
                OperandLite(opnd.ip, opnd.idx, opnd.text, opnd.value)
                for opnd in opnds
            ]

    def get_flag_opnds(self, flags):
        """
        Collect, in order and without duplicates, the operands recorded for *flags*.

        :param flags: flag names to look up
        :return: ordered list of unique operands
        """
        # Order must be preserved, so set operations cannot be used here;
        # in practice multiple flags usually yield duplicate lists.
        collected = []
        for flag in flags:
            for opnd in self.flag_opnds.get(flag, []):
                if opnd not in collected:
                    collected.append(opnd)
        return collected

    def is_alt_branch(self, ip):
        """
        Truthy when *ip* lies on the branch the emulated condition did NOT take.
        """
        taken_ea = self.condition_target_ea
        return taken_ea and taken_ea != ip
class ProcessorContext:
"""
Stores the context of the processor during execution.
:param emulator: Instance of Emulator to use during emulation.
:param registers: Instance of an initialized RegisterMap object used to store register values
for the given architecture.
:param instruction_pointer: Name of the register used to point to the current instruction
being currently executed or to-be executed.
:param stack_pointer: Name of the register used to hold the stack pointer.
:param return_register: Name of the register used to return results from a function.
"""
# Must be set by inherited classes.
OPCODES = {} # Map of opcode mnemonics to functions that emulate them.
# Class used to generate instructions.
_instruction_class = Instruction
# Cache for keeping track of instructions and their operand indexes.
_operand_indices = {}
def __init__(
        self,
        emulator: Emulator,
        registers: RegisterMap,
        instruction_pointer: str,
        stack_pointer: str,
        return_register: str
):
    """Initialise fresh emulation state bound to *emulator*."""
    self.emulator = emulator
    self.registers = registers
    self.jcccontext = JccContext()
    self.memory = Memory(self)
    self.func_calls = {}  # Keeps track of function calls.
    self.executed_instructions = []  # Keeps track of the instructions that have been executed.
    # TODO: Should memory_copies be stored in Memory object?
    self.memory_copies = collections.defaultdict(list)  # Keeps track of memory moves.
    self.bitness = emulator.disassembler.bit_size
    self.byteness = self.bitness // 8
    self.byteorder = "big" if emulator.disassembler.is_big_endian else "little"
    self.variables = VariableMap(self)
    self.objects = ObjectMap(self)
    self.actions = ActionList()
    # Function start address of a function we are currently hooking.
    self.hooking_call = None
    self._sp = stack_pointer
    self._ip = instruction_pointer
    self._ret = return_register
    # NOTE: must run after _sp and registers are assigned — reads the
    # 'sp' property to capture the stack pointer at function entry.
    self._sp_start = self.sp
def __deepcopy__(self, memo):
    """Custom deepcopy, noticeably faster than the default implementation."""
    # Build the instance without __init__() so Memory and JccContext are not
    # needlessly constructed just to be overwritten below.
    klass = self.__class__
    dup = klass.__new__(klass)
    memo[id(self)] = dup
    # The emulator is shared by reference — never copied.
    dup.emulator = self.emulator
    dup.hooking_call = self.hooking_call
    # Mutable emulation state gets a true deep copy.
    for attr in ("registers", "jcccontext", "memory", "variables", "objects", "actions"):
        setattr(dup, attr, deepcopy(getattr(self, attr), memo))
    # Shallow copies suffice for these bookkeeping containers.
    dup.func_calls = dict(self.func_calls)
    dup.executed_instructions = list(self.executed_instructions)
    dup.memory_copies = self.memory_copies.copy()
    # Plain immutable values are copied directly.
    for attr in ("bitness", "byteness", "byteorder", "_sp", "_ip", "_ret", "_sp_start"):
        setattr(dup, attr, getattr(self, attr))
    return dup
@property
def ip(self) -> int:
    """Alias for retrieving instruction pointer."""
    return self.registers[self._ip]

@ip.setter
def ip(self, value: int):
    """Alias for setting instruction pointer."""
    self.registers[self._ip] = value
@property
def sp(self) -> int:
    """Alias for retrieving stack pointer."""
    return self.registers[self._sp]

@sp.setter
def sp(self, value: int):
    """Alias for setting stack pointer."""
    self.registers[self._sp] = value
@property
def sp_diff(self) -> int:
    """
    The difference between the current stack pointer and the
    stack pointer at the beginning of the function.

    This helps with debugging since this number should match the number
    shown in the IDA disassembly.
    """
    # _sp_start is captured in __init__, so the diff is relative to entry.
    return self._sp_start - self.sp
# TODO: A subroutine in ARM can technically pass in larger values, in which
#   case the value spans multiple registers r0-r3
@property
def ret(self) -> int:
    """Alias for retrieving the return value."""
    return self.registers[self._ret]

@ret.setter
def ret(self, value: int):
    """Alias for setting return value."""
    # Logged because emulation hooks commonly override return values.
    logger.debug("Setting 0x%X into %s", value, self._ret)
    self.registers[self._ret] = value
@property
def prev_instruction(self):
    """The last executed instruction, or None if nothing has run yet."""
    executed = self.executed_instructions
    return executed[-1] if executed else None
def execute(self, start=None, end=None, max_instructions=10000):
    """
    "Execute" the instruction at IP and store results in the context.

    The instruction pointer register will be set to the value supplied in
    *start* so that it is correct during emulation.

    :param start: instruction address to start execution
        (defaults to the currently set ip; address 0 is accepted)
    :param end: instruction address to stop execution at, exclusive
        (defaults to executing only the instruction at *start*)
    :param max_instructions: Maximum number of instructions to execute before
        raising a RuntimeError (guards against infinite loops)

    :raises RuntimeError: If maximum number of instructions get hit.
    """
    # Fixed: 'if not start' also triggered for the valid address 0.
    if start is None:
        start = self.ip
    # Set instruction pointer to where we are currently executing.
    self.ip = start
    if end is None:
        # Single-step mode: execute only the instruction at start.
        self.instruction.execute()
        return
    # Run instruction-by-instruction until we reach end.
    count = max_instructions
    while self.ip != end:
        instruction = self.instruction
        if instruction.is_terminal:
            return  # TODO: Should we be executing the terminal instruction?
        instruction.execute()
        count -= 1
        # '<= 0' so a non-positive max_instructions cannot loop forever
        # (the old 'if not count' was skipped once count went negative).
        if count <= 0:
            raise RuntimeError('Hit maximum number of instructions.')
def get_call_history(self, func_name_or_ea) -> List[Tuple[int, List]]:
    """
    Returns the call history for a specific function.

    :param func_name_or_ea: function name, or an address used to resolve one
    :returns: List of tuples containing: (ea of call, list of function arguments)
    """
    if isinstance(func_name_or_ea, str):
        target_name = func_name_or_ea
    else:
        # Resolve the address to its function name via the disassembler.
        signature = self.emulator.disassembler.get_function_signature(func_name_or_ea)
        target_name = signature.name
    history = []
    for call_ea, (name, args) in self.func_calls.items():
        if name == target_name:
            history.append((call_ea, args))
    return history
def prep_for_branch(self, bb_start_ea):
"""
Modify this current context in preparation for a specific path.
"""
if self.jcccontext.is_alt_branch(bb_start_ea):
logger.debug("Modifying context for branch at 0x%08X", bb_start_ea)
# Set the destination operand relative to the | |
a "
f"Domme can decide whether you deserve to be disowned, how pathetic!!",
color=0xFF2030)
elif member_is == 2: # Domme self disowning
embed = discord.Embed(title='Nah',
description=f'{ctx.author.mention}, I am so confused WHAT ARE YOU DOING.',
color=0xF2A2C0)
elif member_is == 202: # Domme disowning Domme
embed = discord.Embed(title='Nah',
description=f"Are you out of your mind, {member.mention} is a domme, So you can\' disown her",
color=0xF2A2C0)
elif member_is == 201: # Domme disowning Free slave
embed = discord.Embed(title='Nah',
description=f"Don\'t worry, {ctx.author.mention}, you did not own {member.mention} in the first place",
color=0xF2A2C0)
elif member_is == 200: # Domme disowning Owned slave
await action.disown()
return
elif member_is > 300: # Domme disowning other dommes owned slave
embed = discord.Embed(title='Nah',
description=f"You can't disown {member.mention}, is "
f"owned by another Domme, <@{member_is}>. ||but you can block {member.mention} <:giggle:897777342791942225>||",
color=0xFF2030)
elif member_is == 101: # Slave disowning Slave
embed = discord.Embed(title='Pathetic...',
description=f"You dumbass slave. You think you can disown when you are a slave, {ctx.author.mention}!"
f"how Pathetic, Ahahahaha I need to tell this joke to Shaman, he will love it. he is also a pathetic bitch.",
color=0xF2A2C0)
elif member_is == 102: # slave disowning Domme
embed = discord.Embed(title='Nah',
description=f"You shall not try such thing!, {ctx.author.mention} you are a slave,"
f" you are not as powerful as a domme and you will never be! How could you even consider trying something so foolish!!"
f" {member.mention} I think someone needs to learn a lesson!!!, brainless slave",
color=0xFF2030)
elif member_is == 222 or member_is == 111: # when mentioned member does't have slave or domme role
embed = discord.Embed(
description=f"{member.mention} should have any of the following roles \n{self.list_roles(database.get_config('domme', member.guild.id))}\n{self.list_roles(database.get_config('slave', member.guild.id))}",
color=0xF2A2C0)
elif member_is == 0: # when the author doesn't have domme or slave role.
embed = discord.Embed(
description=f"{ctx.author.mention}, you should have any of the following roles \n{self.list_roles(database.get_config('domme', member.guild.id))}\n{self.list_roles(database.get_config('slave', member.guild.id))}",
color=0xF2A2C0)
elif member_is == -1: # when member is bot banned
embed = discord.Embed(title='Bot ban',
description=f"{member.mention} is banned from using {self.bot.user.mention}",
color=0xF2A2C0)
elif member_is < -1: # when author is bot banned
embed = discord.Embed(title='Bot ban',
description=f"{ctx.author.mention} you are banned from using {self.bot.user.mention} till <t:{member_is * -1}:F>",
color=0xF2A2C0)
await action.react('n')
await ctx.send(embed=embed)
    @commands.command()
    @commands.guild_only()
    async def gag(self, ctx, member: discord.Member):
        """Interactively gag or ungag `member` via a button prompt.

        Flow: validate config/bot targets, classify the author/member pair
        with `who_is`, then either show a refusal embed or (for a Domme
        acting on a slave) present gag-type buttons and apply the choice.
        Gagging a *free* slave costs the Domme a magic gem; an owned slave
        costs nothing.
        """
        if ctx.author.bot:  # when author is a bot.
            return
        if not database.is_config(ctx.guild.id):  # if bot is not configred in the server
            embed = discord.Embed(title='I am not ready yet.',
                                  description=f"Ask the Admins to run the command **`t.setup`** and try again",
                                  color=0xF2A2C0)
            await ctx.send(embed=embed)
            return
        if member.id == self.bot.user.id:  # temptress ID, trying to kitty gag temptress.
            embed = discord.Embed(description=f"Pff.. are you dumb like Shaman, Ahahaha!!",
                                  color=0xF2A2C0)
            await ctx.send(embed=embed)
        elif member.bot:  # when mentioning a random bot
            embed = discord.Embed(description=f"{member.mention} is a bot not your slut!",
                                  color=0xF2A2C0)
            await ctx.send(embed=embed)
        else:
            action = Action(self.bot, ctx, member)
            # who_is encodes the author/member role relationship as a code:
            # 1/2 self-targets, 1xx slave author, 2xx domme author, >300 the
            # owner's user id, 0/111/222 missing roles, negatives bot bans.
            member_is = who_is(ctx.author, member)
            if member_is == 1:  # slave self gag
                embed = discord.Embed(title='Pathetic...',
                                      description=f"This Pathetic slave is trying to gag himself! {ctx.author.mention} only a "
                                                  f"Domme can gag you.",
                                      color=0xF2A2C0)
            elif member_is == 2 or member_is == 202:  # Domme gag on self or Domme on Domme
                embed = discord.Embed(title='Nah',
                                      description=f"I am sorry {ctx.author.mention}, but I can't do such a thing. It's unbearable to see "
                                                  f"a Domme being gagged.",
                                      color=0xF2A2C0)
            elif member_is == 201:  # Domme gag in Free slave
                # Acting on a free slave requires a magic gem (index 3 of the
                # money tuple); the gem cost is deducted only on success.
                if database.get_money(ctx.author.id, ctx.guild.id)[3] <= 0:
                    embed = discord.Embed(title='Nah',
                                          description=f"{ctx.author.mention}, you don't have magic gem, you need magic gem <a:gems:920237002484494366> "
                                                      f"to gag/ungag because {member.mention} is a free slave!",
                                          color=0xF2A2C0)
                else:
                    embed = discord.Embed(title="What should I do?", color=0xF2A2C0)
                    # Current gag state disables the matching button.
                    gag = database.get_slave_from_DB(member.id, ctx.guild.id)[0][2]
                    m = await ctx.reply(embed=embed, components=[[Button(style=ButtonStyle.blue, label='Kitty Gag', emoji='🐱', disabled=(gag == 'kitty')),
                                                                  Button(style=ButtonStyle.blue, label='Puppy Gag', emoji='🐶', disabled=(gag == 'puppy')),
                                                                  Button(style=ButtonStyle.blue, label='Cow Gag', emoji='🐮', disabled=(gag == 'cow')),
                                                                  Button(style=ButtonStyle.blue, label='Pig Gag', emoji='🐷', disabled=(gag == 'pig')),
                                                                  Button(style=ButtonStyle.red, label='Ungag', disabled=(gag == 'off'))],])
                    try:
                        def check(res):
                            # Only the invoking Domme, in this channel, may click.
                            return ctx.author == res.user and res.channel == ctx.channel
                        response = await self.bot.wait_for('button_click', timeout=30, check=check)
                        await response.respond(type=6)
                        if response.component.label == 'Kitty Gag':
                            await action.gag('kitty', m, temp=True)
                        elif response.component.label == 'Puppy Gag':
                            await action.gag('puppy', m, temp=True)
                        elif response.component.label == 'Cow Gag':
                            await action.gag('cow', m, temp=True)
                        elif response.component.label == 'Pig Gag':
                            await action.gag('pig', m, temp=True)
                        else:
                            await action.ungag(m)
                        # Charge the magic gem after the action succeeds.
                        database.remove_money(ctx.author.id, ctx.guild.id, 0, 10)
                    except asyncio.TimeoutError:
                        embed = discord.Embed(description=f"{ctx.author.mention} you got only 30 secs to make a choice, I can't wait for long.", color=0xF2A2C0)
                        await m.edit(embed=embed, components=[])
                        return
                    # NOTE(review): on a successful click this falls through to
                    # the trailing ctx.send(embed=embed), re-sending the
                    # "What should I do?" prompt embed — confirm intended.
            elif member_is == 200:  # Domme kitty gag on Owned slave
                embed = discord.Embed(title="What should I do?", color=0xF2A2C0)
                gag = database.get_slave_from_DB(member.id, ctx.guild.id)[0][2]
                m = await ctx.reply(embed=embed, components=[[Button(style=ButtonStyle.blue, label='Kitty Gag', emoji='🐱', disabled=(gag == 'kitty')),
                                                              Button(style=ButtonStyle.blue, label='Puppy Gag', emoji='🐶', disabled=(gag == 'puppy')),
                                                              Button(style=ButtonStyle.blue, label='Cow Gag', emoji='🐮', disabled=(gag == 'cow')),
                                                              Button(style=ButtonStyle.blue, label='Pig Gag', emoji='🐷', disabled=(gag == 'pig')),
                                                              Button(style=ButtonStyle.red, label='Ungag', disabled=(gag == 'off'))],])
                try:
                    def check(res):
                        return ctx.author == res.user and res.channel == ctx.channel
                    response = await self.bot.wait_for('button_click', timeout=30, check=check)
                    await response.respond(type=6)
                    # Owned slave: no temp=True and no gem charge, unlike the
                    # free-slave branch above.
                    if response.component.label == 'Kitty Gag':
                        await action.gag('kitty', m)
                    elif response.component.label == 'Puppy Gag':
                        await action.gag('puppy', m)
                    elif response.component.label == 'Cow Gag':
                        await action.gag('cow', m)
                    elif response.component.label == 'Pig Gag':
                        await action.gag('pig', m)
                    else:
                        await action.ungag(m)
                except asyncio.TimeoutError:
                    embed = discord.Embed(description=f"{ctx.author.mention} you got only 30 secs to make a choice, I can't wait for long.", color=0xF2A2C0)
                    await m.edit(embed=embed, components=[])
                    return
            elif member_is > 300:  # Domme gag on other domme's owned slave
                # member_is carries the owner's user id in this case.
                embed = discord.Embed(title='Nah',
                                      description=f"I am sorry {member.mention} is owned by <@{member_is}>, it's her property.",
                                      color=0xFF2030)
            elif member_is == 101:  # Slave kitty gag on Slave
                embed = discord.Embed(title='Pathetic...',
                                      description=f"You foolish slave. You think you can gag when you are a slave, {ctx.author.mention}! "
                                                  f"how Pathetic!!!\n I need to tell this joke to Deity, she will love it.",
                                      color=0xF2A2C0)
            elif member_is == 102:  # slave kitty gag on Domme
                embed = discord.Embed(title=f'You shall not try such thing!',
                                      description=f'{ctx.author.mention}, you are a slave, you are not as powerful as a domme and you '
                                                  f'will never be! How could you even consider trying something s'
                                                  f'o foolish!! {member.mention} I think someone needs to learn a lesson!!!, brainless slave',
                                      color=0xFF2030)
            elif member_is == 222 or member_is == 111:  # when mentioned member doesn't have slave or domme role
                embed = discord.Embed(
                    description=f"{member.mention} should have any of the following roles \n{self.list_roles(database.get_config('domme', member.guild.id))}\n{self.list_roles(database.get_config('slave', member.guild.id))}",
                    color=0xF2A2C0)
            elif member_is == 0:  # when the author doesn't have domme or slave role.
                embed = discord.Embed(
                    description=f"{ctx.author.mention}, you should have any of the following roles \n{self.list_roles(database.get_config('domme', member.guild.id))}\n{self.list_roles(database.get_config('slave', member.guild.id))}",
                    color=0xF2A2C0)
            elif member_is == -1:  # when member is bot banned
                embed = discord.Embed(title='Bot ban',
                                      description=f"{member.mention} is banned from using {self.bot.user.mention}",
                                      color=0xF2A2C0)
            elif member_is < -1:  # when author is bot banned
                # Negative values below -1 encode the ban expiry timestamp.
                embed = discord.Embed(title='Bot ban',
                                      description=f"{ctx.author.mention} you are banned from using {self.bot.user.mention} till <t:{member_is * -1}:F>",
                                      color=0xF2A2C0)
            await action.react('n')
            await ctx.send(embed=embed)
@commands.command(aliases=['word', 'addbadword', 'words', 'badwords', 'addbadwords'])
@commands.guild_only()
async def badword(self, ctx, member: discord.Member, *, words):
if ctx.author.bot: # when author is a bot
return
if not database.is_config(ctx.guild.id): # if bot is not configred in the server
embed = discord.Embed(title='I am not ready yet.',
description=f"Ask the Admins to run the command **`t.setup`** and try again",
color=0xF2A2C0)
await ctx.send(embed=embed)
return
elif member.bot: # when mentioned user is a bot
embed = discord.Embed(description=f"{member.mention} is a bot not a Pathetic Slut!",
color=0xF2A2C0)
await ctx.send(embed=embed)
else:
action = Action(self.bot, ctx, member)
member_is = who_is(ctx.author, member)
if member_is == 1: # slave self adding badword
embed = discord.Embed(title='Pathetic...',
description=f"Pathetic!!, This simpleton slave is trying to be add badword himself! {ctx.author.mention} only a "
f"Domme can do it.",
color=0xF2A2C0)
elif member_is == 2 or member_is == 202: # Domme adding badword on self or Domme on Domme
embed = discord.Embed(title='Nah',
description=f"I am sorry {ctx.author.mention}, but I can't do such a thing because you are a beautiful domme.",
color=0xF2A2C0)
elif member_is == 201: # Domme addword on Free slave
gem = database.get_money(ctx.author.id, ctx.guild.id)[3]
if gem > 0:
words = words.split(',')
await action.add_badword(words)
database.remove_money(ctx.author.id, ctx.guild.id, 0, 10)
return
else:
embed = discord.Embed(title='No Gems',
description=f"{ctx.author.mention}, you don't have gems to add | |
# Repo: tenkeyaikoukaint/cggpyg — file: syslcdk.py
import pygame
class CGGPYG:
    def __init__(self):
        """Initialise pygame, load the glyph bitmaps and open the window.

        The display surface is 1024x160 px; drawing methods address it via
        a character grid, tracked loosely by curx/cury.
        """
        pygame.init()
        self.datainit()  # populate all chr*/chk* bitmap attributes
        self.cvs=pygame.display.set_mode((1024,160))
        self.myclock=pygame.time.Clock()
        self.curx=0
        self.cury=0
def putc(self,str,chrctx,cx,cy):
chr=self.chrname(str)
for i in range(0,chrctx[2]):
for j in range(0,chrctx[1]):
if chr[i*chrctx[1]+j]==1:
if chrctx[0]==0:
rect=((cx+j)*4,(cy+i)*4,4,4)
pygame.draw.rect(self.cvs,self.color,rect)
if chrctx[0]==1:
rect=((cx+j)*4,cy*8+i*6,3,5)
pygame.draw.rect(self.cvs,self.color,rect)
if chrctx[0]==2:
rect=((cx+j)*2,(cy+i)*4,2,4)
pygame.draw.rect(self.cvs,self.color,rect)
if chrctx[0]==3:
rect=((cx+j)*2,(cy*2+i)*2,2,2)
pygame.draw.rect(self.cvs,self.color,rect)
def putg(self,chr,chrctx,cx,cy):
for i in range(0,chrctx[2]):
for j in range(0,chrctx[1]):
rect=(cx+j,cy+i,2,2)
self.setcolor(chr[i*chrctx[1]+j])
pygame.draw.rect(self.cvs,self.color,rect)
    def line(self,x1,y1,x2,y2):
        # Draw a 1px line between two pixel coordinates in the current colour.
        pygame.draw.line(self.cvs,self.color,(x1,y1),(x2,y2))
def cls(self):
rect=(0,0,1280,480);
self.setcolor(0);
pygame.draw.rect(self.cvs,self.color,rect)
self.setcolor(1)
for i in range(0,4):
for j in range(0,33):
self.puth("back",j,i)
def printc(self,str,x,y):
for i in range(0,len(str)):
c=str[i]
self.puth(c,x+i,y)
def printk(self,str,x,y):
"""english:ea, ex, etc kana: xa, xi, ka, sa, etc"""
for i in range(0,len(str),2):
c=str[i:i+2]
if c[0]=="e":
self.puth(c[1],(x+i)/2,y)
else:
self.puth(c,(x+i)/2,y)
def putk(self,str,x,y):
if str=="a":
chr=self.chka
self.puth(chr,x,y)
def chrname(self,str):
if str=="a":
return self.chra
if str=="b":
return self.chrb
if str=="c":
return self.chrc
if str=="d":
return self.chrd
if str=="e":
return self.chre
if str=="f":
return self.chrf
if str=="g":
return self.chrg
if str=="h":
return self.chrh
if str=="i":
return self.chri
if str=="j":
return self.chrj
if str=="k":
return self.chrk
if str=="l":
return self.chrl
if str=="m":
return self.chrm
if str=="n":
return self.chrn
if str=="o":
return self.chro
if str=="p":
return self.chrp
if str=="q":
return self.chrq
if str=="a":
return self.chra
if str=="r":
return self.chrr
if str=="s":
return self.chrs
if str=="t":
return self.chrt
if str=="u":
return self.chru
if str=="v":
return self.chrv
if str=="w":
return self.chrw
if str=="x":
return self.chrx
if str=="y":
return self.chry
if str=="z":
return self.chrz
if str=="0":
return self.chrnum0
if str=="1":
return self.chrnum1
if str=="2":
return self.chrnum2
if str=="3":
return self.chrnum3
if str=="4":
return self.chrnum4
if str=="5":
return self.chrnum5
if str=="6":
return self.chrnum6
if str=="7":
return self.chrnum7
if str=="8":
return self.chrnum8
if str=="9":
return self.chrnum9
if str==" " or str==" ":
return self.chr0
if str=="circle":
return self.chr1
if str=="heart":
return self.chr2
if str=="fill":
return self.chr3
if str=="fillsw":
return self.chr4
if str=="fillse":
return self.chr5
if str=="wave":
return self.chr6
if str=="spade":
return self.chr7
if str=="sharp":
return self.chr8
if str=="star":
return self.chr9
if str=="flag":
return self.chr10
if str=="slash" or str=="/":
return self.chr11
if str=="backslash":
return self.chr12
if str=="fillnw":
return self.chr13
if str=="fillne":
return self.chr14
if str=="block":
return self.chr15
if str=="brick":
return self.chr16
if str=="equal":
return self.chr17
if str=="point":
return self.chr18
if str=="cornernw":
return self.chr19
if str=="cornerne":
return self.chr20
if str=="cornersw":
return self.chr21
if str=="cornerse":
return self.chr22
if str=="upbar":
return self.chr23
if str=="downbar" or str=="_":
return self.chr24
if str=="rightbar":
return self.chr25
if str=="leftbar":
return self.chr26
if str=="clover":
return self.chr27
if str=="larget":
return self.chr28
if str=="downarrow":
return self.chr29
if str==":":
return self.chr30
if str=="diamond":
return self.chr31
if str=="circlefill":
return self.chr32
if str=="-" or str=="hh":
return self.chr33
if str==".":
return self.chr34
if str=="leftbar2":
return self.chr35
if str=="ship":
return self.chr36
if str=="xa":
return self.chka
if str=="xi":
return self.chki
if str=="xu":
return self.chku
if str=="xe":
return self.chke
if str=="xo":
return self.chko
if str=="ka":
return self.chkka
if str=="ki":
return self.chkki
if str=="ku":
return self.chkku
if str=="ke":
return self.chkke
if str=="ko":
return self.chkko
if str=="sa":
return self.chksa
if str=="si":
return self.chksi
if str=="su":
return self.chksu
if str=="se":
return self.chkse
if str=="so":
return self.chkso
if str=="ta":
return self.chkta
if str=="ti":
return self.chkti
if str=="tu":
return self.chktu
if str=="te":
return self.chkte
if str=="to":
return self.chkto
if str=="na":
return self.chkna
if str=="ni":
return self.chkni
if str=="nu":
return self.chknu
if str=="ne":
return self.chkne
if str=="no":
return self.chkno
if str=="ha":
return self.chkha
if str=="hi":
return self.chkhi
if str=="hu":
return self.chkhu
if str=="he":
return self.chkhe
if str=="ho":
return self.chkho
if str=="ma":
return self.chkma
if str=="mi":
return self.chkmi
if str=="mu":
return self.chkmu
if str=="me":
return self.chkme
if str=="mo":
return self.chkmo
if str=="ya":
return self.chkya
if str=="yu":
return self.chkyu
if str=="yo":
return self.chkyo
if str=="ra":
return self.chkra
if str=="ri":
return self.chkri
if str=="ru":
return self.chkru
if str=="re":
return self.chkre
if str=="ro":
return self.chkro
if str=="wa":
return self.chkwa
if str=="wo":
return self.chkwo
if str=="nn":
return self.chknn
if str=="tt":
return self.chktt
if str=="dt":
return self.chkdt
if str=="pt":
return self.chkpt
if str=="chr1":
return self.grchr
if str=="back":
return self.lcdbk
return self.chr0
def setcolor(self,cc):
if cc==0:
self.color=(170,170,170)
if cc==1:
self.color=(140,140,140)
if cc==2:
self.color=(0,0,0)
if cc==3:
self.color=(0,0,0)
if cc==4:
self.color=(0,0,0)
if cc==5:
self.color=(0,0,0)
if cc==6:
self.color=(0,0,0)
if cc==7:
self.color=(0,0,0)
def grput(self,str,x,y):
ctx=[0,16,16]
self.putg(str,ctx,x,y)
def put(self,str,x,y):
ctx=[0,8,5]
self.putc(str,ctx,x*8,y*5)
def puth(self,str,x,y):
if str=="ne":
ctx=[3,8,10]
else:
ctx=[1,8,5]
self.putc(str,ctx,x*8,y*5)
def putl(self,str,x,y):
ctx=[2,8,5]
self.putc(str,ctx,x*8,y*5)
def datainit(self):
self.lcdbk=[0,1,1,1,1,1,1,0,
0,1,1,1,1,1,1,0,
0,1,1,1,1,1,1,0,
0,1,1,1,1,1,1,0,
0,1,1,1,1,1,1,0]
self.chra=[0,0,0,1,1,0,0,0,
0,0,1,0,0,1,0,0,
0,1,1,1,1,1,1,0,
0,1,0,0,0,0,1,0,
0,1,0,0,0,0,1,0]
self.chrb=[0,1,1,1,1,1,0,0,
0,1,0,0,0,0,1,0,
0,1,1,1,1,1,0,0,
0,1,0,0,0,0,1,0,
0,1,1,1,1,1,0,0]
self.chrc=[0,0,1,1,1,1,0,0,
0,1,0,0,0,0,1,0,
0,1,0,0,0,0,0,0,
0,1,0,0,0,0,1,0,
0,0,1,1,1,1,0,0]
self.chrd=[0,1,1,1,1,1,0,0,
0,1,0,0,0,0,1,0,
0,1,0,0,0,0,1,0,
0,1,0,0,0,0,1,0,
0,1,1,1,1,1,0,0]
self.chre=[0,0,1,1,1,1,0,0,
0,1,0,0,0,0,0,0,
0,1,1,1,1,0,0,0,
0,1,0,0,0,0,0,0,
0,0,1,1,1,1,0,0]
self.chrf=[0,1,1,1,1,1,0,0,
0,1,0,0,0,0,0,0,
0,1,1,1,1,0,0,0,
0,1,0,0,0,0,0,0,
0,1,0,0,0,0,0,0]
self.chrg=[0,0,1,1,1,1,1,0,
0,1,0,0,0,0,0,0,
0,1,0,0,1,1,0,0,
0,1,0,0,0,0,1,0,
0,0,1,1,1,1,0,0]
self.chrh=[0,1,0,0,0,0,1,0,
0,1,0,0,0,0,1,0,
0,1,1,1,1,1,1,0,
0,1,0,0,0,0,1,0,
0,1,0,0,0,0,1,0]
self.chri=[0,0,0,1,1,1,0,0,
0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,0,1,1,1,0,0]
self.chrj=[0,0,0,1,1,1,1,0,
0,0,0,0,0,1,0,0,
0,1,0,0,0,1,0,0,
0,1,0,0,0,1,0,0,
0,0,1,1,1,0,0,0]
self.chrk=[0,1,0,0,0,1,0,0,
0,1,0,0,1,0,0,0,
0,1,1,1,0,0,0,0,
0,1,0,0,1,0,0,0,
0,1,0,0,0,1,0,0]
self.chrl=[0,1,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,
0,1,1,1,1,1,1,0]
self.chrm=[0,1,0,0,0,0,0,1,
0,1,1,0,0,0,1,1,
0,1,0,1,0,1,0,1,
0,1,0,0,1,0,0,1,
0,1,0,0,0,0,0,1]
self.chrn=[0,1,1,0,0,0,0,1,
0,1,0,1,0,0,0,1,
0,1,0,0,1,0,0,1,
0,1,0,0,0,1,0,1,
0,1,0,0,0,0,1,1]
self.chro=[0,0,1,1,1,1,0,0,
0,1,0,0,0,0,1,0,
0,1,0,0,0,0,1,0,
0,1,0,0,0,0,1,0,
0,0,1,1,1,1,0,0]
self.chrp=[0,1,1,1,1,1,0,0,
0,1,0,0,0,0,1,0,
0,1,1,1,1,1,0,0,
0,1,0,0,0,0,0,0,
0,1,0,0,0,0,0,0]
self.chrq=[0,0,1,1,1,1,0,0,
0,1,0,0,0,0,1,0,
0,1,0,0,1,0,1,0,
0,1,0,0,0,1,0,0,
0,0,1,1,1,0,1,0]
self.chrr=[0,1,1,1,1,1,0,0,
0,1,0,0,0,0,1,0,
0,1,1,1,1,1,0,0,
0,1,0,0,0,1,0,0,
0,1,0,0,0,0,1,0]
self.chrs=[0,0,1,1,1,1,0,0,
0,1,0,0,0,0,0,0,
0,0,1,1,1,1,0,0,
0,0,0,0,0,0,1,0,
0,0,1,1,1,1,0,0]
self.chrt=[0,0,1,1,1,1,1,0,
0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0]
self.chru=[0,1,0,0,0,0,1,0,
0,1,0,0,0,0,1,0,
0,1,0,0,0,0,1,0,
0,1,0,0,0,0,1,0,
0,0,1,1,1,1,0,0]
self.chrv=[0,1,0,0,0,0,1,0,
0,1,0,0,0,0,1,0,
0,1,0,0,0,0,1,0,
0,0,1,0,0,1,0,0,
0,0,0,1,1,0,0,0]
self.chrw=[0,1,0,0,0,0,0,1,
0,1,0,0,0,0,0,1,
0,1,0,0,1,0,0,1,
0,1,0,1,0,1,0,1,
0,0,1,0,0,0,1,0]
self.chrx=[0,0,1,0,0,0,1,0,
0,0,0,1,0,1,0,0,
0,0,0,0,1,0,0,0,
0,0,0,1,0,1,0,0,
0,0,1,0,0,0,1,0]
self.chry=[0,0,1,0,0,0,1,0,
0,0,0,1,0,1,0,0,
0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0]
self.chrz=[0,1,1,1,1,1,1,0,
0,0,0,0,0,1,0,0,
0,0,0,1,1,0,0,0,
0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0]
self.chrex=[0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0]
self.chrnum0=[0,0,1,1,1,1,0,0,
0,1,0,0,0,1,1,0,
0,1,0,0,1,0,1,0,
0,1,0,1,0,0,1,0,
0,0,1,1,1,1,0,0]
self.chrnum1=[0,0,0,0,1,0,0,0,
0,0,0,1,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,0,1,1,1,0,0]
self.chrnum2=[0,0,0,1,1,1,0,0,
0,0,1,0,0,0,1,0,
0,0,0,0,1,1,0,0,
0,0,0,1,0,0,0,0,
0,0,1,1,1,1,1,0]
self.chrnum3=[0,0,1,1,1,1,0,0,
0,1,0,0,0,0,1,0,
0,0,0,1,1,1,0,0,
0,1,0,0,0,0,1,0,
0,0,1,1,1,1,0,0]
self.chrnum4=[0,0,0,0,1,0,0,0,
0,0,0,1,1,0,0,0,
0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,0,
0,0,0,0,1,0,0,0]
self.chrnum5=[0,1,1,1,1,1,0,0,
0,1,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,
0,0,0,0,0,0,1,0,
0,1,1,1,1,1,0,0]
self.chrnum6=[0,0,1,1,1,1,0,0,
0,1,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,
0,1,0,0,0,0,1,0,
0,0,1,1,1,1,0,0]
self.chrnum7=[0,0,1,1,1,1,1,0,
0,0,0,0,0,0,1,0,
0,0,0,0,0,1,0,0,
0,0,0,0,1,0,0,0,
0,0,0,1,0,0,0,0]
self.chrnum8=[0,0,1,1,1,1,0,0,
0,1,0,0,0,0,1,0,
0,0,1,1,1,1,0,0,
0,1,0,0,0,0,1,0,
0,0,1,1,1,1,0,0]
self.chrnum9=[0,0,1,1,1,1,0,0,
0,1,0,0,0,0,1,0,
0,0,1,1,1,1,1,0,
0,0,0,0,0,0,1,0,
0,0,1,1,1,1,0,0]
self.chr0=[0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0]
self.chr1=[0,0,1,1,1,1,0,0,
0,1,0,0,0,0,1,0,
0,1,0,0,0,0,1,0,
0,1,0,0,0,0,1,0,
0,0,1,1,1,1,0,0]
self.chr2=[0,0,1,0,0,1,0,0,
0,1,1,1,1,1,1,0,
0,1,1,1,1,1,1,0,
0,0,1,1,1,1,0,0,
0,0,0,1,1,0,0,0]
self.chr3=[1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1]
self.chr4=[1,0,0,0,0,0,0,0,
1,1,1,0,0,0,0,0,
1,1,1,1,1,0,0,0,
1,1,1,1,1,1,1,0,
1,1,1,1,1,1,1,1]
self.chr5=[0,0,0,0,0,0,0,1,
0,0,0,0,0,1,1,1,
0,0,0,1,1,1,1,1,
0,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1]
self.chr6=[0,0,0,0,0,0,0,0,
0,0,1,1,0,0,0,0,
1,1,0,0,1,1,0,0,
0,0,0,0,0,0,1,1,
0,0,0,0,0,0,0,0]
self.chr7=[0,0,0,1,1,0,0,0,
0,0,1,1,1,1,0,0,
0,1,1,1,1,1,1,0,
0,0,0,1,1,0,0,0,
0,0,1,1,1,1,0,0]
self.chr8=[0,0,1,0,0,1,0,0,
1,1,1,1,1,1,1,1,
0,0,1,0,0,1,0,0,
1,1,1,1,1,1,1,1,
0,0,1,0,0,1,0,0]
self.chr9=[0,1,0,0,0,0,1,0,
0,0,1,0,0,1,0,0,
0,1,1,1,1,1,1,0,
0,0,1,0,0,1,0,0,
0,1,0,0,0,0,1,0]
self.chr10=[0,0,0,1,1,0,0,0,
0,0,0,1,1,1,1,0,
0,0,0,1,1,0,0,0,
0,0,0,1,1,0,0,0,
0,0,1,1,1,1,0,0]
self.chr11=[0,0,0,0,0,0,0,1,
0,0,0,0,0,1,1,0,
0,0,0,1,1,0,0,0,
0,1,1,0,0,0,0,0,
1,0,0,0,0,0,0,0]
self.chr12=[1,0,0,0,0,0,0,0,
0,1,1,0,0,0,0,0,
0,0,0,1,1,0,0,0,
0,0,0,0,0,1,1,0,
0,0,0,0,0,0,0,1]
self.chr13=[1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,0,
1,1,1,1,1,0,0,0,
1,1,1,0,0,0,0,0,
1,0,0,0,0,0,0,0]
self.chr14=[1,1,1,1,1,1,1,1,
0,1,1,1,1,1,1,1,
0,0,0,1,1,1,1,1,
0,0,0,0,0,1,1,1,
0,0,0,0,0,0,0,1]
self.chr15=[0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,
0,1,1,1,1,1,1,0,
0,1,1,1,1,1,1,0,
0,0,0,0,0,0,0,0]
self.chr16=[1,1,0,1,1,1,1,1,
1,1,0,1,1,1,1,1,
0,0,0,0,0,0,0,0,
1,1,1,1,1,0,1,1,
1,1,1,1,1,0,1,1]
self.chr17=[0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,
0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,
0,0,0,0,0,0,0,0]
self.chr18=[0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,
0,0,0,1,1,0,0,0,
0,0,0,0,0,0,0,0]
self.chr19=[1,1,1,1,1,1,1,1,
1,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0]
self.chr20=[1,1,1,1,1,1,1,1,
0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,1]
self.chr21=[1,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1]
self.chr22=[0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,1,
1,1,1,1,1,1,1,1]
self.chr23=[1,1,1,1,1,1,1,1,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0]
self.chr24=[0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1]
self.chr25=[0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,1]
self.chr26=[1,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0]
self.chr27=[0,0,1,1,1,0,0,0,
1,0,0,1,0,0,1,0,
1,1,1,1,1,1,1,0,
0,0,0,1,0,0,0,0,
0,0,1,1,1,0,0,0]
self.chr28=[1,1,1,1,1,1,1,1,
0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0]
self.chr29=[0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,1,0,1,0,1,0,
0,0,0,1,1,1,0,0,
0,0,0,0,1,0,0,0]
self.chr30=[0,0,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,
0,0,0,0,0,0,0,0]
self.chr31=[0,0,0,0,1,0,0,0,
0,0,0,1,1,1,0,0,
0,0,1,1,1,1,1,0,
0,0,0,1,1,1,0,0,
0,0,0,0,1,0,0,0]
self.chr32=[0,0,1,1,1,1,0,0,
0,1,1,1,1,1,1,0,
0,1,1,1,1,1,1,0,
0,1,1,1,1,1,1,0,
0,0,1,1,1,1,0,0]
self.chr33=[0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,1,1,1,1,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0]
self.chr34=[0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,
0,0,0,1,1,0,0,0]
self.chr35=[0,1,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,
0,1,0,0,0,0,0,0]
self.chr36=[0,0,0,1,1,0,0,0,
0,0,1,0,0,1,0,0,
0,0,1,0,0,1,0,0,
0,1,1,1,1,1,1,0,
1,1,0,1,1,0,1,1]
self.chka=[0,0,1,1,1,1,1,1,
0,0,0,0,0,0,1,0,
0,0,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0]
self.chki=[0,0,0,0,0,1,0,0,
0,0,0,1,1,0,0,0,
0,0,1,0,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0]
self.chku=[0,0,0,1,0,0,0,0,
0,1,1,1,1,1,1,0,
0,1,0,0,0,0,1,0,
0,0,0,0,0,0,1,0,
0,0,0,0,1,1,0,0]
self.chke=[0,0,1,1,1,1,1,0,
0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,1,1,1,1,1,0]
self.chko=[0,0,0,0,1,0,0,0,
0,0,1,1,1,1,1,0,
0,0,0,1,1,0,0,0,
0,0,1,0,1,0,0,0,
0,1,0,0,1,0,0,0]
self.chkka=[0,0,0,1,0,0,0,0,
0,1,1,1,1,1,1,0,
0,0,0,1,0,0,1,0,
0,0,0,1,0,0,1,0,
0,0,1,0,0,1,0,0]
self.chkki=[0,0,0,0,1,0,0,0,
0,0,1,1,1,1,1,0,
0,0,0,0,1,0,0,0,
0,0,1,1,1,1,1,0,
0,0,0,0,1,0,0,0]
self.chkku=[0,0,1,1,1,1,0,0,
0,0,1,0,0,1,0,0,
0,1,0,0,0,1,0,0,
0,0,0,0,1,0,0,0,
0,0,0,1,0,0,0,0]
self.chkke=[0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,
1,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,0,1,0,0,0,0]
self.chkko=[0,0,1,1,1,1,0,0,
0,0,0,0,0,1,0,0,
0,0,0,0,0,1,0,0,
0,0,0,0,0,1,0,0,
0,0,1,1,1,1,0,0]
self.chksa=[0,0,1,0,0,1,0,0,
0,1,1,1,1,1,1,0,
0,0,1,0,0,1,0,0,
0,0,0,0,0,1,0,0,
0,0,0,1,1,0,0,0]
self.chksi=[0,1,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,1,1,0,0,0,1,0,
0,0,0,0,0,1,0,0,
0,1,1,1,1,0,0,0]
self.chksu=[0,0,1,1,1,1,1,0,
0,0,0,0,0,1,0,0,
0,0,0,0,1,0,0,0,
0,0,0,1,0,1,0,0,
0,0,1,0,0,0,1,0]
self.chkse=[0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,
0,0,1,0,0,1,0,0,
0,0,1,0,0,0,0,0,
0,0,0,1,1,0,0,0]
self.chkso=[0,0,1,0,0,0,1,0,
0,0,0,1,0,1,0,0,
0,0,0,0,0,1,0,0,
0,0,0,0,1,0,0,0,
0,0,0,1,0,0,0,0]
self.chkta=[0,0,0,1,1,1,1,0,
0,0,1,0,0,0,1,0,
0,1,0,1,1,1,0,0,
0,0,0,0,1,0,0,0,
0,0,0,1,0,0,0,0]
self.chkti=[0,0,0,0,0,0,1,0,
0,0,1,1,1,1,0,0,
0,0,0,0,1,0,0,0,
0,0,1,1,1,1,1,0,
0,0,0,0,1,0,0,0]
self.chktu=[0,0,1,0,1,0,1,0,
0,0,1,0,1,0,1,0,
0,0,0,0,0,0,1,0,
0,0,0,0,0,0,1,0,
0,0,1,1,1,1,0,0]
self.chkte=[0,0,1,1,1,1,0,0,
0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,
0,0,0,0,1,0,0,0,
0,0,0,1,0,0,0,0]
self.chkto=[0,0,0,1,0,0,0,0,
0,0,0,1,0,0,0,0,
0,0,0,1,1,0,0,0,
0,0,0,1,0,1,0,0,
0,0,0,1,0,0,0,0]
self.chkna=[0,0,0,0,1,0,0,0,
0,0,1,1,1,1,1,0,
0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,0,1,0,0,0,0]
self.chkni=[0,0,1,1,1,1,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0]
self.chknu=[0,0,1,1,1,1,1,0,
0,0,0,0,0,1,0,0,
0,0,1,1,1,0,0,0,
0,0,0,1,1,0,0,0,
0,0,1,0,0,1,1,0]
self.chkne=[0,0,0,0,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,1,1,1,1,1,0,
0,0,0,0,0,1,0,0,
0,0,0,0,1,0,0,0,
0,0,0,1,1,1,0,0,
0,0,1,0,1,0,1,0,
0,1,0,0,1,0,0,1,
0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0]
self.chkno=[0,0,0,0,0,1,0,0,
0,0,0,0,0,1,0,0,
0,0,0,0,0,1,0,0,
0,0,0,0,1,0,0,0,
0,0,0,1,0,0,0,0]
self.chkha=[0,0,0,0,0,0,0,0,
0,0,1,0,0,1,0,0,
0,1,0,0,0,0,1,0,
1,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0]
self.chkhi=[0,0,1,0,0,0,0,0,
0,0,1,1,1,1,0,0,
0,0,1,0,0,0,0,0,
0,0,1,0,0,0,0,0,
0,0,0,1,1,1,0,0]
self.chkhu=[0,0,1,1,1,1,1,0,
0,0,0,0,0,0,1,0,
0,0,0,0,0,0,1,0,
0,0,0,0,0,1,0,0,
0,0,0,0,1,0,0,0]
self.chkhe=[0,0,0,1,0,0,0,0,
0,0,1,0,1,0,0,0,
0,1,0,0,0,1,0,0,
0,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0]
self.chkho=[0,0,0,0,1,0,0,0,
0,0,1,1,1,1,1,0,
0,0,0,0,1,0,0,0,
0,0,1,0,1,0,1,0,
0,1,0,0,1,0,0,1]
self.chkma=[0,1,1,1,1,1,1,0,
0,0,0,0,0,1,0,0,
0,0,0,1,1,0,0,0,
0,0,0,0,1,0,0,0,
0,0,0,0,0,1,0,0]
self.chkmi=[0,0,1,1,1,1,0,0,
0,0,0,0,0,0,0,0,
0,0,1,1,1,1,0,0,
0,0,0,0,0,0,0,0,
0,0,1,1,1,1,0,0]
self.chkmu=[0,0,0,1,0,0,0,0,
0,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,
0,1,0,0,0,1,0,0,
0,1,1,1,1,1,1,0]
self.chkme=[0,0,0,0,0,1,0,0,
0,0,1,0,1,0,0,0,
0,0,0,1,0,0,0,0,
0,0,1,0,1,0,0,0,
0,1,0,0,0,0,0,0]
self.chkmo=[0,0,1,1,1,0,0,0,
0,0,0,1,0,0,0,0,
0,1,1,1,1,1,0,0,
0,0,0,1,0,0,0,0,
0,0,0,0,1,1,0,0]
self.chkya=[0,0,0,1,0,0,0,0,
0,1,1,1,1,1,1,0,
0,0,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,
0,0,0,1,0,0,0,0]
self.chkyu=[0,0,1,1,1,1,0,0,
0,0,0,0,0,1,0,0,
0,0,0,0,0,1,0,0,
0,0,0,0,0,1,0,0,
| |
"gqNzaWfEQJ7q2rOT8Sb/wB0F87ld+1zMprxVlYqbUbe+oz0WM63FctIi+K9eYFSqT"
"26XBZ4Rr3+VTJpBE+JLKs8nctl9hgijdHhuiKRhcmN2xCAJ+9J2LAj4bFrmv23Xp6"
"kB3mZ111Dgfoxcdphkfbbh/aNmZWXNCOiiZnbOAATsD6JnaMQgSGO1GKSzyE7IEPI"
"tTxCByw9x8FmnrCDexi9/cOUJOiKibHbOAATv96NzbmTEIAn70nYsCPhsWua/bden"
"qQHeZnXXUOB+jFx2mGR9tuH9pHR5cGWlYXhmZXKkeGFpZAE="
)
self.assertEqual(golden, encoding.msgpack_encode(signed_txn))
    def test_serialize_asset_revoke(self):
        """Golden-vector test: an asset clawback (revocation_target set)
        signed with a known mnemonic must msgpack-encode to the golden
        base64 string produced by the reference implementation."""
        mn = (
            "awful drop leaf tennis indoor begin mandate discover uncle seven "
            "only coil atom any hospital uncover make any climb actor armed me"
            "asure need above hundred"
        )
        sk = mnemonic.to_private_key(mn)
        pk = account.address_from_private_key(sk)
        fee = 10
        first_round = 322575
        last_round = 323575
        gh = "SGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiI="
        index = 1
        amount = 1
        # NOTE(review): this address literal appears scrubbed ("<KEY>");
        # the golden comparison cannot pass until the real address is
        # restored — confirm against the upstream test suite.
        to = "BH<KEY>"
        sp = transaction.SuggestedParams(fee, first_round, last_round, gh)
        # revocation_target=to makes this a clawback of `to`'s holdings.
        txn = transaction.AssetTransferTxn(
            pk, sp, to, amount, index, revocation_target=to
        )
        signed_txn = txn.sign(sk)
        golden = (
            "gqNzaWfEQHsgfEAmEHUxLLLR9s+Y/yq5WeoGo/jAArCbany+7ZYwExMySzAhmV7M7"
            "S8+LBtJalB4EhzEUMKmt3kNKk6+vAWjdHhuiqRhYW10AaRhcmN2xCAJ+9J2LAj4bF"
            "rmv23Xp6kB3mZ111Dgfoxcdphkfbbh/aRhc25kxCAJ+9J2LAj4bFrmv23Xp6kB3mZ"
            "111Dgfoxcdphkfbbh/aNmZWXNCqqiZnbOAATsD6JnaMQgSGO1GKSzyE7IEPItTxCB"
            "yw9x8FmnrCDexi9/cOUJOiKibHbOAATv96NzbmTEIAn70nYsCPhsWua/bdenqQHeZ"
            "nXXUOB+jFx2mGR9tuH9pHR5cGWlYXhmZXKkeGFpZAE="
        )
        self.assertEqual(golden, encoding.msgpack_encode(signed_txn))
def test_pay_float_amt(self):
address = "7ZUECA7HFLZTXENRV24SHLU4AVPUTMTTDUFUBNBD64C73F3UHRTHAIOF6Q"
gh = "<KEY>
sp = transaction.SuggestedParams(3, 1, 100, gh)
f = lambda: transaction.PaymentTxn(
address, sp, address, 10.0, note=bytes([1, 32, 200])
)
self.assertRaises(error.WrongAmountType, f)
def test_pay_negative_amt(self):
address = "7ZUECA7HFLZTXENRV24SHLU4AVPUTMTTDUFUBNBD64C73F3UHRTHAIOF6Q"
gh = "<KEY>
sp = transaction.SuggestedParams(3, 1, 100, gh)
f = lambda: transaction.PaymentTxn(
address, sp, address, -5, note=bytes([1, 32, 200])
)
self.assertRaises(error.WrongAmountType, f)
def test_asset_transfer_float_amt(self):
address = "7ZUECA7HFLZTXENRV24SHLU4AVPUTMTTDUFUBNBD64C73F3UHRTHAIOF6Q"
fee = 10
first_round = 322575
last_round = 323576
gh = "SGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiI="
index = 1
amount = 1.0
to = "<KEY>"
close = "BH55E5RMBD4GYWXGX5W5PJ5JAHPGM5OXKDQH5DC4O2MGI7NW4H6VOE4CP4"
sp = transaction.SuggestedParams(fee, first_round, last_round, gh)
f = lambda: transaction.AssetTransferTxn(
address, sp, to, amount, index, close
)
self.assertRaises(error.WrongAmountType, f)
def test_asset_transfer_negative_amt(self):
address = "7ZUECA7HFLZTXENRV24SHLU4AVPUTMTTDUFUBNBD64C73F3UHRTHAIOF6Q"
fee = 10
first_round = 322575
last_round = 323576
gh = "SGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiI="
index = 1
amount = -1
to = "<KEY>"
close = "BH55E5RMBD4GYWXGX5W5PJ5JAHPGM5OXKDQH5DC4O2MGI7NW4H6VOE4CP4"
sp = transaction.SuggestedParams(fee, first_round, last_round, gh)
f = lambda: transaction.AssetTransferTxn(
address, sp, to, amount, index, close
)
self.assertRaises(error.WrongAmountType, f)
    def test_group_id(self):
        """Golden-vector test for transaction grouping.

        Builds two payment txns, checks their individual encodings against
        goldens, then verifies that calculate_group_id + manual assignment
        and assign_group_id both reproduce goal's `clerk group` output,
        and that assign_group_id's address filter works.
        """
        address = "UPYAFLHSIPMJOHVXU2MPLQ46GXJKSDCEMZ6RLCQ7GWB5PRDKJUWKKXECXI"
        fromAddress, toAddress = address, address
        fee = 1000
        amount = 2000
        genesisID = "devnet-v1.0"
        genesisHash = "sC3P7e2SdbqKJK0tbiCdK9tdSpbe6XeCGKdoNzmlj0E="
        firstRound1 = 710399
        note1 = base64.b64decode("wRKw5cJ0CMo=")
        sp = transaction.SuggestedParams(
            fee,
            firstRound1,
            firstRound1 + 1000,
            genesisHash,
            genesisID,
            flat_fee=True,
        )
        tx1 = transaction.PaymentTxn(
            fromAddress, sp, toAddress, amount, note=note1
        )
        firstRound2 = 710515
        note2 = base64.b64decode("dBlHI6BdrIg=")
        # Reuse the same SuggestedParams object with a shifted round window.
        sp.first = firstRound2
        sp.last = firstRound2 + 1000
        tx2 = transaction.PaymentTxn(
            fromAddress, sp, toAddress, amount, note=note2
        )
        # goal clerk send dumps unsigned transaction as signed with empty
        # signature in order to save tx type
        stx1 = transaction.SignedTransaction(tx1, None)
        stx2 = transaction.SignedTransaction(tx2, None)
        goldenTx1 = (
            "gaN0eG6Ko2FtdM0H0KNmZWXNA+iiZnbOAArW/6NnZW6rZGV2bmV0LXYxLjCiZ2jEI"
            "LAtz+3tknW6iiStLW4gnSvbXUqW3ul3ghinaDc5pY9Bomx2zgAK2uekbm90ZcQIwR"
            "Kw5cJ0CMqjcmN2xCCj8AKs8kPYlx63ppj1w5410qkMRGZ9FYofNYPXxGpNLKNzbmT"
            "EIKPwAqzyQ9iXHremmPXDnjXSqQxEZn0Vih81g9fEak0spHR5cGWjcGF5"
        )
        goldenTx2 = (
            "gaN0eG6Ko2FtdM0H0KNmZWXNA+iiZnbOAArXc6NnZW6rZGV2bmV0LXYxLjCiZ2jEI"
            "LAtz+3tknW6iiStLW4gnSvbXUqW3ul3ghinaDc5pY9Bomx2zgAK21ukbm90ZcQIdB"
            "lHI6BdrIijcmN2xCCj8AKs8kPYlx63ppj1w5410qkMRGZ9FYofNYPXxGpNLKNzbmT"
            "EIKPwAqzyQ9iXHremmPXDnjXSqQxEZn0Vih81g9fEak0spHR5cGWjcGF5"
        )
        self.assertEqual(goldenTx1, encoding.msgpack_encode(stx1))
        self.assertEqual(goldenTx2, encoding.msgpack_encode(stx2))
        # preserve original tx{1,2} objects
        tx1 = copy.deepcopy(tx1)
        tx2 = copy.deepcopy(tx2)
        gid = transaction.calculate_group_id([tx1, tx2])
        stx1.transaction.group = gid
        stx2.transaction.group = gid
        # goal clerk group sets Group to every transaction and concatenate
        # them in output file
        # simulating that behavior here
        txg = base64.b64encode(
            base64.b64decode(encoding.msgpack_encode(stx1))
            + base64.b64decode(encoding.msgpack_encode(stx2))
        ).decode()
        goldenTxg = (
            "gaN0eG6Lo2FtdM0H0KNmZWXNA+iiZnbOAArW/6NnZW6rZGV2bmV0LXYxLjCiZ2jEI"
            "LAtz+3tknW6iiStLW4gnSvbXUqW3ul3ghinaDc5pY9Bo2dycMQgLiQ9OBup9H/bZL"
            "SfQUH2S6iHUM6FQ3PLuv9FNKyt09SibHbOAAra56Rub3RlxAjBErDlwnQIyqNyY3b"
            "EIKPwAqzyQ9iXHremmPXDnjXSqQxEZn0Vih81g9fEak0so3NuZMQgo/ACrPJD2Jce"
            "t6aY9cOeNdKpDERmfRWKHzWD18RqTSykdHlwZaNwYXmBo3R4boujYW10zQfQo2ZlZ"
            "c0D6KJmds4ACtdzo2dlbqtkZXZuZXQtdjEuMKJnaMQgsC3P7e2SdbqKJK0tbiCdK9"
            "tdSpbe6XeCGKdoNzmlj0GjZ3JwxCAuJD04G6n0f9tktJ9BQfZLqIdQzoVDc8u6/0U"
            "0rK3T1KJsds4ACttbpG5vdGXECHQZRyOgXayIo3JjdsQgo/ACrPJD2Jcet6aY9cOe"
            "NdKpDERmfRWKHzWD18RqTSyjc25kxCCj8AKs8kPYlx63ppj1w5410qkMRGZ9FYofN"
            "YPXxGpNLKR0eXBlo3BheQ=="
        )
        self.assertEqual(goldenTxg, txg)
        # repeat test above for assign_group_id
        txa1 = copy.deepcopy(tx1)
        txa2 = copy.deepcopy(tx2)
        txns = transaction.assign_group_id([txa1, txa2])
        self.assertEqual(len(txns), 2)
        stx1 = transaction.SignedTransaction(txns[0], None)
        stx2 = transaction.SignedTransaction(txns[1], None)
        # goal clerk group sets Group to every transaction and concatenate
        # them in output file
        # simulating that behavior here
        txg = base64.b64encode(
            base64.b64decode(encoding.msgpack_encode(stx1))
            + base64.b64decode(encoding.msgpack_encode(stx2))
        ).decode()
        self.assertEqual(goldenTxg, txg)
        # check filtering
        txns = transaction.assign_group_id([tx1, tx2], address=fromAddress)
        self.assertEqual(len(txns), 2)
        self.assertEqual(stx1.transaction.group, txns[0].group)
        txns = transaction.assign_group_id([tx1, tx2], address="NONEXISTENT")
        self.assertEqual(len(txns), 0)
class TestAssetConfigConveniences(unittest.TestCase):
    """Tests that the simplified Asset{Create,Update,Destroy}Txn wrappers
    are exactly equivalent to the general AssetConfigTxn."""

    sender = "7ZUECA7HFLZTXENRV24SHLU4AVPUTMTTDUFUBNBD64C73F3UHRTHAIOF6Q"
    genesis = "JgsgCaCTqIaLeVhyL6XlRu3n7Rfk2FxMeK+wRSaQ7dI="
    params = transaction.SuggestedParams(0, 1, 100, genesis)

    def _check_pair(self, special, generic, cls):
        """Assert the specialized txn and the generic config txn match:
        same wire dict, equal objects, and the specialized class
        round-trips its dict back to the generic txn."""
        self.assertEqual(special.dictify(), generic.dictify())
        self.assertEqual(generic, special)
        self.assertEqual(cls.undictify(special.dictify()), generic)

    def test_asset_create(self):
        create = transaction.AssetCreateTxn(
            self.sender, self.params, 1000, "2", False,
            manager=None, reserve=None, freeze=None, clawback=None,
            unit_name="NEWCOIN", asset_name="A new kind of coin",
            url="https://newcoin.co/",
        )
        config = transaction.AssetConfigTxn(
            self.sender, self.params,
            index=None, total="1000", decimals=2,
            unit_name="NEWCOIN", asset_name="A new kind of coin",
            url="https://newcoin.co/",
            strict_empty_address_check=False,
        )
        self._check_pair(create, config, transaction.AssetCreateTxn)

    def test_asset_update(self):
        update = transaction.AssetUpdateTxn(
            self.sender, self.params, 6,
            manager=None, reserve=self.sender, freeze=None, clawback=None,
        )
        config = transaction.AssetConfigTxn(
            self.sender, self.params,
            index="6", reserve=self.sender,
            strict_empty_address_check=False,
        )
        self._check_pair(update, config, transaction.AssetUpdateTxn)

    def test_asset_destroy(self):
        destroy = transaction.AssetDestroyTxn(self.sender, self.params, 23)
        config = transaction.AssetConfigTxn(
            self.sender, self.params,
            index="23",
            strict_empty_address_check=False,
        )
        self._check_pair(destroy, config, transaction.AssetDestroyTxn)
class TestAssetTransferConveniences(unittest.TestCase):
    """Checks that AssetOptInTxn/AssetCloseOutTxn are shorthand for AssetTransferTxn."""

    sender = "7ZUECA7HFLZTXENRV24SHLU4AVPUTMTTDUFUBNBD64C73F3UHRTHAIOF6Q"
    receiver = "DOMUC6VGZH7SSY5V332JR5HRLZSOJDWNPBI4OI2IIBU6A3PFLOBOXZ3KFY"
    genesis = "JgsgCaCTqIaLeVhyL6XlRu3n7Rfk2FxMeK+wRSaQ7dI="
    params = transaction.SuggestedParams(0, 1, 100, genesis)

    def test_asset_optin(self):
        """Opting in is a zero-amount transfer to oneself."""
        shorthand = transaction.AssetOptInTxn(self.sender, self.params, "7")
        explicit = transaction.AssetTransferTxn(
            self.sender, self.params, self.sender, 0, index=7
        )
        self.assertEqual(shorthand.dictify(), explicit.dictify())
        self.assertEqual(explicit, shorthand)
        self.assertEqual(
            transaction.AssetOptInTxn.undictify(shorthand.dictify()), explicit
        )

    def test_asset_closeout(self):
        """Closing out is a zero-amount transfer with close_assets_to set."""
        shorthand = transaction.AssetCloseOutTxn(
            self.sender, self.params, self.receiver, "7"
        )
        explicit = transaction.AssetTransferTxn(
            self.sender,
            self.params,
            self.receiver,
            0,
            index=7,
            close_assets_to=self.receiver,
        )
        self.assertEqual(shorthand.dictify(), explicit.dictify())
        self.assertEqual(explicit, shorthand)
        self.assertEqual(
            transaction.AssetCloseOutTxn.undictify(shorthand.dictify()),
            explicit,
        )
class TestApplicationTransactions(unittest.TestCase):
    """Exercises ApplicationCallTxn argument coercion and verifies that the
    ApplicationCreate/Update/Delete convenience classes are pure shorthand
    for an equivalent ApplicationCallTxn."""
    sender = "7ZUECA7HFLZTXENRV24SHLU4AVPUTMTTDUFUBNBD64C73F3UHRTHAIOF6Q"
    genesis = "JgsgCaCTqIaLeVhyL6XlRu3n7Rfk2FxMeK+wRSaQ7dI="
    lschema = transaction.StateSchema(1, 2)
    gschema = transaction.StateSchema(3, 4)
    def test_application_address(self):
        """get_application_address accepts an int app ID; asserts on anything else."""
        appID = 77
        expected = "PCYUFPA2ZTOYWTP43MX2MOX2OWAIAXUDNC2WFCXAGMRUZ3DYD6BWFDL5YM"
        actual = logic.get_application_address(appID)
        self.assertEqual(actual, expected)
        appID = "seventy seven"
        with self.assertRaises(AssertionError):
            logic.get_application_address(appID)
    def test_application_call(self):
        """Checks str/bytes/int coercion of the app ID, app_args, and foreign
        arrays, plus uint64 range enforcement, for every OnComplete value."""
        params = transaction.SuggestedParams(0, 1, 100, self.genesis)
        for oc in transaction.OnComplete:
            b = transaction.ApplicationCallTxn(
                self.sender, params, 10, oc, app_args=[b"hello"]
            )
            s = transaction.ApplicationCallTxn(
                self.sender, params, "10", oc, app_args=["hello"]
            )
            self.assertEqual(
                b, s
            ) # string is encoded same as corresponding bytes
            transaction.ApplicationCallTxn(
                self.sender, params, 10, oc, app_args=[2, 3, 0]
            ) # ints work
            with self.assertRaises(AssertionError):
                transaction.ApplicationCallTxn(
                    self.sender, params, 10, oc, app_args=[3.4]
                ) # floats don't
            with self.assertRaises(OverflowError):
                transaction.ApplicationCallTxn(
                    self.sender, params, 10, oc, app_args=[-10]
                ) # nor negative
            transaction.ApplicationCallTxn(
                self.sender,
                params,
                10,
                oc, # maxuint64
                app_args=[18446744073709551615],
            )
            with self.assertRaises(OverflowError):
                transaction.ApplicationCallTxn(
                    self.sender,
                    params,
                    10,
                    oc, # too big
                    app_args=[18446744073709551616],
                )
            i = transaction.ApplicationCallTxn(
                self.sender,
                params,
                10,
                oc,
                foreign_apps=[4, 3],
                foreign_assets=(2, 1),
            )
            s = transaction.ApplicationCallTxn(
                self.sender,
                params,
                "10",
                oc,
                foreign_apps=["4", 3],
                foreign_assets=[2, "1"],
            )
            self.assertEqual(
                i, s
            ) # string is encoded same as corresponding int
    def test_application_create(self):
        """ApplicationCreateTxn must equal the corresponding ApplicationCallTxn
        with index 0; note the argument-order caveat in the comment below."""
        approve = b"\0"
        clear = b"\1"
        params = transaction.SuggestedParams(0, 1, 100, self.genesis)
        for oc in transaction.OnComplete:
            # We will confirm that the Create is just shorthand for
            # the Call. But note that the programs come before the
            # schemas and the schemas are REVERSED! That's
            # unfortunate, and we should consider adding "*" to the
            # argument list after on_completion, thereby forcing the
            # use of named arguments.
            create = transaction.ApplicationCreateTxn(
                self.sender,
                params,
                oc,
                approve,
                clear,
                self.lschema,
                self.gschema,
            )
            call = transaction.ApplicationCallTxn(
                self.sender,
                params,
                0,
                oc,
                self.gschema,
                self.lschema,
                approve,
                clear,
            )
            # Check the dict first, it's important on it's own, and it
            # also gives more a meaningful error if they're not equal.
            self.assertEqual(create.dictify(), call.dictify())
            self.assertEqual(create, call)
            self.assertEqual(call, create)
    def test_application_create_schema(self):
        """A StateSchema(0, 0) must encode identically to passing None."""
        approve = b"\0"
        clear = b"\1"
        zero_schema = transaction.StateSchema(0, 0)
        params = transaction.SuggestedParams(0, 1, 100, self.genesis)
        for oc in transaction.OnComplete:
            # verify that a schema with 0 uints and 0 bytes behaves the same as no schema
            txn_zero_schema = transaction.ApplicationCreateTxn(
                self.sender,
                params,
                oc,
                approve,
                clear,
                zero_schema,
                zero_schema,
            )
            txn_none_schema = transaction.ApplicationCreateTxn(
                self.sender, params, oc, approve, clear, None, None
            )
            # Check the dict first, it's important on its own, and it
            # also gives more a meaningful error if they're not equal.
            self.assertEqual(
                txn_zero_schema.dictify(), txn_none_schema.dictify()
            )
            self.assertEqual(txn_zero_schema, txn_none_schema)
            self.assertEqual(txn_none_schema, txn_zero_schema)
    def test_application_update(self):
        """ApplicationUpdateTxn is shorthand for a Call with UpdateApplicationOC."""
        empty = b""
        params = transaction.SuggestedParams(0, 1, 100, self.genesis)
        i = transaction.ApplicationUpdateTxn(
            self.sender, params, 10, empty, empty
        )
        s = transaction.ApplicationUpdateTxn(
            self.sender, params, "10", empty, empty
        )
        self.assertEqual(i, s) # int and string encoded same
        call = transaction.ApplicationCallTxn(
            self.sender,
            params,
            10,
            transaction.OnComplete.UpdateApplicationOC,
            None,
            None,
            empty,
            empty,
        )
        self.assertEqual(i.dictify(), call.dictify())
        self.assertEqual(i, call)
    def test_application_delete(self):
        """ApplicationDeleteTxn is shorthand for a Call with DeleteApplicationOC."""
        params = transaction.SuggestedParams(0, 1, 100, self.genesis)
        i = transaction.ApplicationDeleteTxn(self.sender, params, 10)
        s = transaction.ApplicationDeleteTxn(self.sender, params, "10")
        self.assertEqual(i, s) # int and string encoded same
        call = transaction.ApplicationCallTxn(
            self.sender, params, 10, transaction.OnComplete.DeleteApplicationOC
        )
        self.assertEqual(i.dictify(), call.dictify())
        self.assertEqual(i, call)
class TestMnemonic(unittest.TestCase):
zero_bytes = bytes(
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
]
)
def test_mnemonic_private_key(self):
priv_key, _ = account.generate_account()
mn = mnemonic.from_private_key(priv_key)
self.assertEqual(len(mn.split(" ")), constants.mnemonic_len)
self.assertEqual(priv_key, mnemonic.to_private_key(mn))
def test_zero_mnemonic(self):
expected_mnemonic = (
"abandon abandon abandon abandon abandon abandon abandon abandon "
"abandon abandon abandon abandon abandon abandon abandon abandon "
"abandon abandon abandon abandon abandon abandon abandon abandon "
"invest"
)
result = mnemonic._from_key(self.zero_bytes)
self.assertEqual(expected_mnemonic, result)
result = mnemonic._to_key(result)
self.assertEqual(self.zero_bytes, result)
def test_whitespace_irrelevance(self):
padded = """
| |
#!/usr/bin/env python3
def normalize(value):
    """Reduce *value* to its unsigned 32-bit representation (two's-complement wrap)."""
    return value % 0x100000000
def signed(value):
    """Interpret an unsigned 32-bit word as a signed (two's-complement) integer."""
    if value & 0x80000000:
        return value - 0x100000000
    return value
# from http://code.activestate.com/recipes/577977-get-single-keypress/, MIT licensed
# Platform selection for single-keypress input: POSIX systems provide
# tty/termios; when those imports fail we fall back to msvcrt (Windows).
# NOTE(review): msvcrt.getch returns bytes on Python 3 while the POSIX
# getch below returns str — confirm callers (e.g. ord(getch())) accept both.
try:
    import tty, termios
except ImportError:
    # Probably Windows.
    try: import msvcrt
    except ImportError: raise ImportError("getch not available")
    else: getch = msvcrt.getch
else:
    import sys
    def getch():
        """
        getch() -> key character
        Read a single keypress from stdin and return the resulting character. Nothing is echoed to the console. This call will block if a keypress is not already available, but will not wait for Enter to be pressed.
        If the pressed key was a modifier key, nothing will be detected; if it were a special function key, it may return the first character of an escape sequence, leaving additional characters in the buffer.
        """
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            # Raw mode: deliver keystrokes immediately, without echo.
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
class Mippit:
    """Simulator for a small MIPS instruction subset.

    Machine state: 32 general-purpose registers, a program counter (PC),
    the HI/LO pair for mult/div results, and a sparse word-addressed
    memory `MEM` (dict keyed by byte address // 4).  Memory-mapped I/O:
    `lw` from 0xFFFF0004 reads a keypress from stdin, `sw` to 0xFFFF000C
    writes one character to stdout.  Register and memory values are kept
    as normalized (non-negative) 32-bit ints.
    """
    def __init__(self):
        self.registers = [0] * 32
        self.PC = 0
        self.HI, self.LO = 0, 0
        self.MEM = {}
        # Address of the instruction currently being traced.
        self.offset = self.PC
        # When True, trace() prints a disassembly line per instruction.
        self.tracing = False
    def trace(self, instruction, comment = None):
        """Print one trace line: disassembly text plus optional register comment."""
        if not self.tracing: return # tracing disabled
        if comment is None:
            print(instruction)
        else:
            print("[DEBUGGER] {:=#010x} {:<20}; {}".format(self.offset, instruction, comment))
    def decode_execute(self, instruction):
        """Decode one 32-bit instruction word and apply its effects to machine state.

        Raises ValueError for unrecognized encodings.
        """
        r = self.registers
        r[0] = 0 # reset the 0 register
        # Extract the d/s/t register fields and the 16-bit immediate.
        d, s, t = (instruction >> 11) & 0b11111, (instruction >> 21) & 0b11111, (instruction >> 16) & 0b11111
        i = instruction & 0b1111111111111111
        if i & 0x8000: i -= 0x10000 # make sure we interpret the value as a signed 16 bit integer
        if instruction & 0b11111100000000000000011111111111 == 0b00000000000000000000000000100000: # add (add)
            r[d] = normalize(r[s] + r[t])
            self.trace("add ${}, ${}, ${}".format(d, s, t), "${}={}, ${}={}, ${}={}".format(d, r[d], s, r[s], t, r[t]))
        elif instruction & 0b11111100000000000000011111111111 == 0b00000000000000000000000000100010: # subtract (sub)
            r[d] = normalize(r[s] - r[t])
            self.trace("sub ${}, ${}, ${}".format(d, s, t), "${}={}, ${}={}, ${}={}".format(d, r[d], s, r[s], t, r[t]))
        elif instruction & 0b11111100000000001111111111111111 == 0b00000000000000000000000000011000: # multiply (mult)
            result = signed(r[s]) * signed(r[t])
            # 64-bit product split across HI (upper word) and LO (lower word).
            self.HI, self.LO = normalize(result >> 32), normalize(result)
            self.trace("mult ${}, ${}".format(s, t), "${}={}, ${}={}".format(s, r[s], t, r[t]))
        elif instruction & 0b11111100000000001111111111111111 == 0b00000000000000000000000000011001: # multiply unsigned (multu)
            result = r[s] * r[t]
            self.HI, self.LO = normalize(result >> 32), normalize(result)
            self.trace("multu ${}, ${}".format(s, t), "${}={}, ${}={}".format(s, r[s], t, r[t]))
        elif instruction & 0b11111100000000001111111111111111 == 0b00000000000000000000000000011010: # divide (div)
            # HI receives the remainder, LO the quotient (Python floor semantics).
            self.HI, self.LO = normalize(signed(r[s]) % signed(r[t])), normalize(signed(r[s]) // signed(r[t]))
            self.trace("div ${}, ${}".format(s, t), "${}={}, ${}={}".format(s, r[s], t, r[t]))
        elif instruction & 0b11111100000000001111111111111111 == 0b00000000000000000000000000011011: # divide unsigned (divu)
            self.HI, self.LO = r[s] % r[t], r[s] // r[t]
            self.trace("divu ${}, ${}".format(s, t), "${}={}, ${}={}".format(s, r[s], t, r[t]))
        elif instruction & 0b11111111111111110000011111111111 == 0b00000000000000000000000000010000: # move from high/remainder (mfhi)
            r[d] = self.HI
            self.trace("mfhi ${}".format(d), "${}={}".format(d, r[d]))
        elif instruction & 0b11111111111111110000011111111111 == 0b00000000000000000000000000010010: # move from low/quotient (mflo)
            r[d] = self.LO
            self.trace("mflo ${}".format(d), "${}={}".format(d, r[d]))
        elif instruction & 0b11111111111111110000011111111111 == 0b00000000000000000000000000010100: # load immediate and skip (lis)
            # Loads the NEXT word into $d and skips over it.
            assert self.PC % 4 == 0
            r[d] = self.MEM[self.PC // 4] if self.PC // 4 in self.MEM else 0
            self.PC = normalize(self.PC + 4)
            self.trace("lis ${}".format(d), "${}={}".format(d, r[d]))
            self.trace(".word {}".format(r[d]))
        elif instruction & 0b11111100000000000000000000000000 == 0b10001100000000000000000000000000: # load word (lw)
            address = normalize(r[s] + i)
            assert address % 4 == 0
            if address == 0xFFFF0004: # read from stdin
                value = ord(getch())
                assert 0 <= value <= 255, "Invalid character entered - character must be ASCII"
                r[t] = value
            else: r[t] = self.MEM[address // 4] if address // 4 in self.MEM else 0
            self.trace("lw ${}, {}(${})".format(t, i, s), "${}={}, ${}={}".format(t, r[t], s, r[s]))
        elif instruction & 0b11111100000000000000000000000000 == 0b10101100000000000000000000000000: # store word (sw)
            address = normalize(r[s] + i)
            assert address % 4 == 0, "Invalid address - not aligned to word boundary."
            if address == 0xFFFF000C: # write to stdout
                print(chr(r[t] & 0xFF), end="")
            else: self.MEM[address // 4] = r[t]
            self.trace("sw ${}, {}(${})".format(t, i, s), "${}={}, ${}={}".format(t, r[t], s, r[s]))
        elif instruction & 0b11111100000000000000011111111111 == 0b00000000000000000000000000101010: # set less than (slt)
            r[d] = 1 if signed(r[s]) < signed(r[t]) else 0
            self.trace("slt ${}, ${}, ${}".format(d, s, t), "${}={}, ${}={}, ${}={}".format(d, r[d], s, r[s], t, r[t]))
        elif instruction & 0b11111100000000000000011111111111 == 0b00000000000000000000000000101011: # set less than unsigned (sltu)
            r[d] = 1 if r[s] < r[t] else 0
            self.trace("sltu ${}, ${}, ${}".format(d, s, t), "${}={}, ${}={}, ${}={}".format(d, r[d], s, r[s], t, r[t]))
        elif instruction & 0b11111100000000000000000000000000 == 0b00010000000000000000000000000000: # branch on equal (beq)
            # Branch offset is in words, relative to the already-incremented PC.
            if r[s] == r[t]: self.PC = normalize(self.PC + i * 4)
            self.trace("beq ${}, ${}, {}".format(s, t, i), "${}={}, ${}={}".format(s, r[s], t, r[t]))
        elif instruction & 0b11111100000000000000000000000000 == 0b00010100000000000000000000000000: # branch on not equal (bne)
            if r[s] != r[t]: self.PC = normalize(self.PC + i * 4)
            self.trace("bne ${}, ${}, {}".format(s, t, i), "${}={}, ${}={}".format(s, r[s], t, r[t]))
        elif instruction & 0b11111100000111111111111111111111 == 0b00000000000000000000000000001000: # jump register (jr)
            self.PC = r[s]
            self.trace("jr ${}".format(s), "${}={}".format(s, r[s]))
        elif instruction & 0b11111100000111111111111111111111 == 0b00000000000000000000000000001001: # jump and link register (jalr)
            # Read $s before writing $31 so "jalr $31" behaves correctly.
            temp = r[s]
            r[31] = self.PC
            self.PC = temp
            self.trace("jalr ${}".format(s), "${}={}".format(s, r[s]))
        else: raise ValueError("Unknown instruction: {:=#010x}".format(instruction))
    def load(self, code, offset = 0): # load binary code into memory
        """Copy big-endian binary machine code into MEM starting at byte `offset`.

        Also initializes $30 to 0 and $31 to 0xFFFFFFFF; jumping to
        0xFFFFFFFF is how step() detects program termination.
        """
        assert offset % 4 == 0, "Invalid offset - offset must be aligned to 32-bit word boundary"
        offset //= 4 # get the offset in words
        for i, word in enumerate(code_to_words(code)): self.MEM[i + offset] = word # copy the code into memory
        self.registers[30] = 0x00000000
        self.registers[31] = 0xFFFFFFFF
    def load_hex(self, hex_code, offset = 0): # load hex code into memory
        """Copy hex-string machine code into MEM starting at byte `offset`.

        Same register setup as load().
        """
        assert offset % 4 == 0, "Invalid offset - offset must be aligned to 32-bit word boundary"
        offset //= 4
        for i, word in enumerate(hex_to_words(hex_code)): self.MEM[i + offset] = word # copy the code into memory
        self.registers[30] = 0x00000000
        self.registers[31] = 0xFFFFFFFF
    def step(self):
        """Fetch and execute one instruction; return False once the program has ended."""
        if self.PC == 0xFFFFFFFF: return False # jumped past end of memory, program ended
        assert self.PC % 4 == 0, "Program counter must be aligned to word boundaries"
        instruction = self.MEM[self.PC // 4] if self.PC // 4 in self.MEM else 0
        # Remember where this instruction came from (for tracing), then
        # advance PC before executing so branches/jumps see the next address.
        self.offset = self.PC
        self.PC = normalize(self.PC + 4)
        self.decode_execute(instruction)
        return True
    def run(self, offset = 0):
        """Execute from byte address `offset` until the program terminates."""
        self.PC = offset
        while self.step(): pass
def code_to_words(code):
    """Split big-endian binary machine code into a list of unsigned 32-bit words.

    Uses an unsigned unpack (">I") so loaded words match the normalized,
    non-negative representation used everywhere else in the simulator
    (hex_to_words, normalize(), and the values `sw` stores into MEM).
    The previous signed unpack (">i") put negative values into MEM for
    words with the high bit set, which broke signed()/slt/mult once such
    a word was loaded into a register via lis/lw.
    """
    assert len(code) % 4 == 0, "Invalid code length - machine code must be collection of 32-bit words"
    import struct
    return [struct.unpack(">I", code[i * 4:i * 4 + 4])[0] for i in range(0, len(code) // 4)] # load each 4 bytes as a big endian unsigned 32-bit integer
def hex_to_words(hex_code):
    """Split a hex string into unsigned 32-bit words (8 hex digits per word)."""
    assert len(hex_code) % 8 == 0, "Invalid code length - machine code must be collection of 32-bit words"
    words = []
    for start in range(0, len(hex_code), 8):
        words.append(int(hex_code[start:start + 8], 16))
    return words
def decode(instruction):
d, s, t = (instruction >> 11) & 0b11111, (instruction >> 21) & 0b11111, (instruction >> 16) & 0b11111
i = instruction & 0b1111111111111111
if i & 0x8000: i -= 0x10000 # make sure we interpret the value as a signed 16 bit integer
if instruction & 0b11111100000000000000011111111111 == 0b00000000000000000000000000100000: # add (add)
return "add ${}, ${}, ${}".format(d, s, t)
if instruction & 0b11111100000000000000011111111111 == 0b00000000000000000000000000100010: # subtract (sub)
return "sub ${}, ${}, ${}".format(d, s, t)
if instruction & 0b11111100000000001111111111111111 == 0b00000000000000000000000000011000: # multiply (mult)
return "mult ${}, ${}".format(s, t)
if instruction & 0b11111100000000001111111111111111 == 0b00000000000000000000000000011001: # multiply unsigned (multu)
return "multu ${}, ${}".format(s, t)
if instruction & 0b11111100000000001111111111111111 == 0b00000000000000000000000000011010: # divide (div)
return "div ${}, ${}".format(s, t)
if instruction & 0b11111100000000001111111111111111 == 0b00000000000000000000000000011011: # divide unsigned (divu)
return "divu ${}, ${}".format(s, t)
if instruction & 0b11111111111111110000011111111111 == 0b00000000000000000000000000010000: # move from high/remainder (mfhi)
return "mfhi ${}".format(d)
if instruction & 0b11111111111111110000011111111111 == 0b00000000000000000000000000010010: # move from low/quotient (mflo)
return "mflo ${}".format(d)
if instruction & 0b11111111111111110000011111111111 == 0b00000000000000000000000000010100: # load immediate and skip (lis)
return "lis ${}".format(d)
if instruction & 0b11111100000000000000000000000000 == 0b10001100000000000000000000000000: # load word (lw)
return "lw ${}, {}(${})".format(t, i, s)
if | |
%s" % (filename, lineno))
print('------------------------')
print(line, "\n")
print('------------------------')
print("%s: %s" % (str(traceback.error.__class__.__name__), traceback.error))
outstring = '<!-- ERROR in name -->'
return outstring
def generatesvg(self,output=None):
# Generate standard tag sheet (1 type of tag, 1 size)
if (output is None):
output=self.layout.output
if (self.verbose>0):
print("Creating SVG tag sheet '{}'...".format(output))
svgtext = self.render('template_tagsheet.svg')
with open(output, 'w') as out:
out.write(svgtext)
    def customsvg(self,output=None):
        """Render a multi-page custom tag sheet (several families/sizes) to SVG.

        Emits the opening template, then one page per (family, dpp)
        combination according to the hardcoded layout selected by
        self.layout.custom, then the closing template, writing the
        concatenated SVG to `output`.  Page rendering works by mutating
        self.layout between render() calls, so statement order matters.
        """
        # Generate custom tag sheet with multiple types of tags/sizes
        if (output is None):
            output=self.layout.output
        if (self.verbose>0):
            print("Creating custom SVG tag sheet '{}'...".format(output))
        svgtext = self.render('template_opening.svg')
        # Common grid geometry shared by the custom layouts below.
        self.layout.nblocksx = 2
        self.layout.nblocksy = 1
        self.layout.ntagsx = 10
        self.layout.ntagsy = 10
        self.layout.tagmarginx = 5.0 # pix
        self.layout.tagmarginy = 5.0 # pix
        class GridLayout(object):
            # Helper mapping (column, row) page indices to page offsets.
            # setdpp() mutates the shared layout's lengths as a side effect.
            def __init__(self, layout, dpp=10):
                self.layout = layout
                self.setdpp(dpp)
            def setdpp(self,dpp):
                self.dpp = dpp
                self.layout.tagdpp1200 = dpp # Compute lengths for largest page
                self.layout.recompute_lengths()
            def pos(self,i,j):
                # Top-left corner of page (i, j) on the sheet.
                return {
                    'page_left': self.layout.sheet_x0+i*self.layout.pagestep_x,
                    'page_top': self.layout.sheet_y0+j*self.layout.pagestep_y
                }
        gl = GridLayout(self.layout)
        def dpp4mm(tagsize_pix,tagsize_mm):
            # Dots-per-pixel needed for a tag of tagsize_pix to print tagsize_mm wide.
            return tagsize_mm / tagsize_pix * 1200 / 25.4
        def render_page(**kargs):
            # Render one page of tags with the given family/size/position overrides.
            print("Rendering page family={family}, tagdpp1200={tagdpp1200} at (left,top)=({page_left:.1f},{page_top:.1f})".format(**kargs))
            svgtext = self.render('template_page.svg', **kargs)
            return svgtext
        if (self.layout.custom=='custom_tag36h10'):
            tag = dict(family="tag36h10", tagdir='tag36h10/svg',
                       tagcode_pix = 6, style=1)
            taginv = dict(family="tag36h10inv", tagdir='tag36h10inv/svg',
                       tagcode_pix = 6, style=2)
            gl.setdpp(10)
            svgtext += render_page(tagdpp1200=10, **taginv, **gl.pos(0,0))
            svgtext += render_page(tagdpp1200=10, **tag, **gl.pos(1,0))
            svgtext += render_page(tagdpp1200=9, **taginv, **gl.pos(0,1))
            svgtext += render_page(tagdpp1200=9, **tag, **gl.pos(1,1))
            svgtext += render_page(tagdpp1200=8, **taginv, **gl.pos(0,2))
            svgtext += render_page(tagdpp1200=8, **tag, **gl.pos(1,2))
        elif (self.layout.custom=='custom_tag25h6'):
            tag = dict(family="tag25h6", tagdir='tag25h6/svg',
                       tagcode_pix = 5, style=1)
            taginv = dict(family="tag25h6inv", tagdir='tag25h6inv/svg',
                       tagcode_pix = 5, style=2)
            gl.setdpp(11)
            svgtext += render_page(tagdpp1200=11, **taginv, **gl.pos(0,0))
            svgtext += render_page(tagdpp1200=11, **tag, **gl.pos(1,0))
            svgtext += render_page(tagdpp1200=10, **taginv, **gl.pos(0,1))
            svgtext += render_page(tagdpp1200=10, **tag, **gl.pos(1,1))
            svgtext += render_page(tagdpp1200=9, **taginv, **gl.pos(0,2))
            svgtext += render_page(tagdpp1200=9, **tag, **gl.pos(1,2))
        elif (self.layout.custom=='custom_tag25h5'):
            tag = dict(family="tag25h5", tagdir='tag25h5/svg',
                       tagcode_pix = 5, style=1)
            taginv = dict(family="tag25h5inv", tagdir='tag25h5inv/svg',
                       tagcode_pix = 5, style=2)
            gl.setdpp(11)
            svgtext += render_page(tagdpp1200=11, **taginv, **gl.pos(0,0))
            svgtext += render_page(tagdpp1200=11, **tag, **gl.pos(1,0))
            svgtext += render_page(tagdpp1200=10, **taginv, **gl.pos(0,1))
            svgtext += render_page(tagdpp1200=10, **tag, **gl.pos(1,1))
            svgtext += render_page(tagdpp1200=9, **taginv, **gl.pos(0,2))
            svgtext += render_page(tagdpp1200=9, **tag, **gl.pos(1,2))
        elif (self.layout.custom=='custom_test'):
            # NOTE(review): unlike the branches above, this one renders at
            # tagdpp1200=11 without calling gl.setdpp(11) first — confirm
            # whether that is intentional.
            tag = dict(family="tag25h5", tagdir='tag25h5/svg',
                       tagcode_pix = 5, style=1)
            taginv = dict(family="tag25h5inv", tagdir='tag25h5inv/svg',
                       tagcode_pix = 5, style=2)
            svgtext += render_page(tagdpp1200=11, **taginv, **gl.pos(0,0))
        elif (self.layout.custom=='custom_tag25h6_dpp10'):
            tag = dict(family="tag25h6", tagdir='tag25h6/svg', maxid=958,
                       tagcode_pix = 5, style=1)
            taginv = dict(family="tag25h6inv", tagdir='tag25h6inv/svg', maxid=958,
                       tagcode_pix = 5, style=2)
            # This layout overrides the common sheet geometry set above.
            self.layout.sheet_x0 = 25.4*1.00
            self.layout.sheet_y0 = 25.4*0.75
            self.layout.pagemargin = 10
            self.layout.nblocksx = 5
            self.layout.nblocksy = 2
            self.layout.tagmarginx = 5.0 # pix
            self.layout.tagmarginy = 5.0 # pix
            gl.setdpp(10)
            svgtext += render_page(tagdpp1200=10, **taginv, **gl.pos(0,0))
            svgtext += render_page(tagdpp1200=10, **tag, **gl.pos(0,1))
        else:
            print('ERROR: unknown custom layout, custom={}'.format(self.layout.custom))
            return
        self.layout.show_footer=True
        svgtext += self.render('template_closing.svg')
        with open(output, 'w') as out:
            out.write(svgtext)
def topdf(self, output):
if (output is None):
output=self.layout.output
# Command line:
print("Converting {} to PDF...".format(output))
os.system('svg2pdf_inkscape.sh "{}"'.format(output))
# # Alternative: cairosvg produces less efficient PDF than inkscape
#
# print("Converting to PDF...")
#
# import cairosvg
#
# cairosvg.svg2pdf(
# file_obj=open("output_tagsheet.svg", "rb"),
# write_to="output_tagsheet.pdf")
def rasterize(self, output): # FIXME
if (output is None):
output=self.layout.output
# Rasterize at 1200 dpi to check quality
print("Convert to TIFF 1200dpi:")
print(" convert -density 1200 -background white -alpha remove -compress lzw output_tagsheet.svg_output.pdf output_tagsheet_1200dpi.tiff")
os.system("convert -density 1200 -background white -alpha remove -compress lzw output_tagsheet.svg_output.pdf output_tagsheet_1200dpi.tiff")
def cmyk(self, output=None): # FIXME
if (output is None):
output=self.layout.output
if output.endswith('.pdf') or output.endswith('.svg'):
name = output[:-4]
else:
name = output
output_pdf = name+'.pdf'
output_cmyk = name+'_CMYK.pdf'
print("Convert {} to CMYK PDF {}:".format(output,output_cmyk))
os.system(' svg2cmyk.sh "{}"'.format(output))
def cmyk_bw(self, output=None): # FIXME
if (output is None):
output=self.layout.output
if output.endswith('.pdf') or output.endswith('.svg'):
name = output[:-4]
else:
name = output
output_pdf = name+'.pdf'
output_cmyk = name+'_BW.pdf'
print("Convert {} to CMYK (B/W) PDF {}:".format(output,output_cmyk))
os.system(' svg2cmyk_bw.sh "{}"'.format(output))
class CustomPageSizeAction(argparse.Action):
    """argparse action that marks `page_size` as 'custom' whenever any
    explicit page-dimension option appears on the command line."""

    def __init__(self, option_strings=None, dest=None, nargs=None, **kwargs):
        # A dimension option always takes exactly one value.
        if nargs is not None:
            raise ValueError("nargs not allowed")
        super(CustomPageSizeAction, self).__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # Seeing a custom-dimension option overrides any named page size.
        setattr(namespace, 'page_size', 'custom')
        print('Option {} set page_size to custom'.format(option_string))
class TagMarginAction(argparse.Action):
    """argparse action parsing '<pixels>' or '<pixelsx>,<pixelsy>' into the
    tagmarginx / tagmarginy attributes of the namespace."""

    def __init__(self, option_strings=None, dest=None, nargs=None, **kwargs):
        # The margin option always takes exactly one (possibly compound) value.
        if nargs is not None:
            raise ValueError("nargs not allowed")
        super(TagMarginAction, self).__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        parts = values.split(',')
        if len(parts) == 1:
            # A single number applies to both axes.
            margin_x = margin_y = float(parts[0])
        elif len(parts) == 2:
            margin_x, margin_y = float(parts[0]), float(parts[1])
        else:
            raise argparse.ArgumentTypeError('Incorrect format. Should be either <pixels> or <pixelsx>,<pixelsy>')
        setattr(namespace, 'tagmarginx', margin_x)
        setattr(namespace, 'tagmarginy', margin_y)
        print('--tagmargin: set to {},{}'.format(margin_x, margin_y))
class MyArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that splits each @-file line on whitespace into
    separate arguments (the default treats a whole line as one argument)."""

    def convert_arg_line_to_args(self, arg_line):
        return arg_line.split()
if __name__ == "__main__":
layout = Layout()
generator = Generator(layout)
parser = argparse.ArgumentParser(description='Generate SVG tag sheet given a directory <tagdir> of tag images', formatter_class=argparse.RawDescriptionHelpFormatter)
# ,fromfile_prefix_chars='@', convert_arg_line_to_args=MyArgumentParser)
parser.add_argument('--verbose', metavar='<level>',
type=int,
dest='verbose', default=1,
help='Verbosity level (default: %(default)s)')
group = parser.add_argument_group('Input/Output')
# parser.add_argument('-o', '--output', metavar='<tagsheet-TAGS.svg>',
# dest='output', default=layout.output,
# help='basename for output file (default: %(default)s)')
group.add_argument('-ob', '--output_basename', metavar='<tagsheet>',
dest='output_basename', default=layout.output_basename,
help='basename for output file (default: %(default)s)')
group.add_argument('-m', '--mode', metavar='<mode>',
dest='mode', default='tags',
help='sheets to generate, as comma separated list: tags,cuts,view or all (default: %(default)s)')
group.add_argument('-rm', '--removesvg',
dest='removesvg', action='store_true',
help='Delete tmp SVG file on success')
group.add_argument('-r', '--rasterize',
dest='rasterize', action='store_true',
help='rasterize output PDF')
group.add_argument('-pdf', '--to_pdf',
dest='to_pdf', action='store_true',
help='convert SVG to RGB PDF (default: %(default)s)')
group.add_argument('-no_rgb', '--no_rgb',
dest='to_pdf', action='store_false',
help='prevent RGB PDF output (default: %(default)s)')
group.add_argument('-cmyk', '--to_cmyk',
dest='to_cmyk', action='store_true',
help='convert output SVG to CMYK PDF (default: %(default)s)')
group.add_argument('-bw', '--to_cmyk_bw',
dest='to_cmyk_bw', action='store_true',
help='convert output SVG to CMYK pure Black PDF (default: %(default)s)')
group = parser.add_argument_group('Tag family info')
group.add_argument('-f', '--family', metavar='<family>',
dest='family', default=layout.family,
help='tag family name (default: %(default)s)')
group.add_argument('-td', '--tagdir', metavar='<tagdir>',
dest='tagdir', default=layout.tagdir,
help='directory containing tag image files (default: %(default)s)')
group.add_argument('-tf', '--tagfiles', metavar='<tagfiles>',
dest='tagfiles', default=layout.tagfiles,
help='pattern of tag image files, python format style (default: %(default)s)')
group.add_argument('-p', '--tagcode_pix', metavar='<tagcode_pix>',
dest='tagcode_pix', default=layout.tagcode_pix, type=int,
help='width of the code in pixels, without border (default: %(default)s)')
group.add_argument('-u', '--first_id', metavar='<first_id>', type=int,
dest='first_id', default=layout.first_id,
help='first id, inclusive (default: %(default)s)')
group.add_argument('-v', '--maxid', metavar='<max_id>', type=int,
dest='maxid', default=layout.maxid,
help='last id, inclusive (default: %(default)s)')
group = parser.add_argument_group('Style')
group.add_argument('-s', '--style', metavar='<style>',
dest='style', default=layout.style,
help='color style: auto, tag, invtag, tagdebug, invdebug (default: %(default)s)')
group.add_argument('-sc', '--show_colored_corners', action='store_true',
dest='show_colored_corners', default=layout.show_colored_corners,
help='Margin2 is colored (default: %(default)s)')
group.add_argument('-sa', '--show_arrows', action='store_true',
dest='show_arrows', default=layout.show_arrows,
help='Margin2 show contrasting arrows (default: %(default)s)')
group.add_argument('-sb', '--show_bicolor', action='store_true',
dest='show_bicolor', default=layout.show_bicolor,
help='Top/Bottom bicolors (default: %(default)s)')
group.add_argument('-col0', '--tagid_bgcolor',
dest='tagid_bgcolor', default=layout.tagid_bgcolor,
help='tag id color (default: %(default)s)')
group.add_argument('-col1', '--tagcornercolor1',
dest='tagcornercolor1', default=layout.tagcornercolor1,
help='tag color 1(default: %(default)s)')
group.add_argument('-col2', '--tagcornercolor2',
dest='tagcornercolor2', default=layout.tagcornercolor2,
help='tag color 2 (default: %(default)s)')
group.add_argument('-cb', '--codebottom',
dest='codebottom', default=None, type=str,
help='binary code for bottom (default: %(default)s)')
group.add_argument('-cs', '--codesides',
dest='codesides', default=None, type=str,
help='binary code for sides (default: %(default)s)')
group.add_argument('-sbc', '--show_bitcode', action='store_true',
dest='show_bitcode', default=layout.show_bitcode,
help='Show color bitcode instead of id (default: %(default)s)')
group.add_argument('-b1sw', '--border1strokewidth', type=float,
dest='border1strokewidth', default=layout.border1strokewidth,
help='Thickness of border stroke in tag pixels (default: %(default)s)')
group.add_argument('-tc', '--textcolor',
dest='textcolor', default=layout.textcolor,
help='Color of tag ID text (default: %(default)s)')
group.add_argument('-tbc', '--textbgcolor',
dest='textbgcolor', default=layout.textbgcolor,
help='Color of tag ID background (default: %(default)s)')
group.add_argument('-tbm', '--textbg_margin', type=float,
dest='textbg_margin',
default=layout.textbg_margin,
help='Additional margin around text for bg (default: %(default)s)')
group.add_argument('-ff', '--fontfamily',
dest='fontfamily', default=layout.fontfamily,
help='Font family for tag ID text (default: %(default)s)')
group.add_argument('-fls', '--letterspacing',
dest='letterspacing', default=layout.letterspacing,
help='Font letter spacing for tag ID text (default: %(default)s)')
group.add_argument('-fsi', '--fontsize_id',
dest='fontsize_id', default=layout.fontsize_id,
help='Tag ID fontsize in tag pixels (default: %(default)s)')
group.add_argument('-fwi', '--fontweight_id',
dest='fontweight_id', default=layout.fontweight_id,
help='Tag ID fontweight (default: %(default)s)')
group.add_argument('-fsx', '--fontsize_scalex',
dest='fontsize_scalex', default=layout.fontsize_scalex,
help='X scale for tag ID (default: %(default)s)')
group = parser.add_argument_group('Geometry')
group.add_argument('--dpp', '-d', type=int,
dest='tagdpp1200', default=layout.tagdpp1200,
help='number of printer dots per pixel of the tag at 1200 dpi.\nFor 6 pixels code, dpp=8 -> 1.37mm, dpp=9 -> 1.54mm tag, dpp=10 -> 1.71mm tag')
group.add_argument('-udpi', '--use_local_dpi', action='store_true',
dest='use_local_dpi', default=layout.use_local_dpi,
help='Use local DPI if not 1200 (default: %(default)s)')
group.add_argument('-ldpi', '--local_dpi', type=float,
dest='local_dpi', default=layout.local_dpi,
help='Local DPI value (default: %(default)s)')
group.add_argument('-ptsh', '--ptshift', type=float,
dest='ptshift', default=layout.ptshift,
help='Shift in dpi pts from round hint (default: %(default)s)')
group.add_argument('-uc', '--use_color', action='store_true',
dest='use_color', default=layout.use_color,
help='Use color patterns (default: %(default)s)')
group.add_argument('-c', '--custom', metavar='<layout name>',
dest='custom', default=layout.custom,
choices='custom_tag25h5,custom_tag25h6,custom_tag36h10,custom_tag25h6_dpp10,custom_test'.split(','),
help='Use custom layout (hardcoded %(choices)s)')
group.add_argument('-pz','--page_size', metavar='<size_name>',
dest='page_size', default=layout.page_size,
help='page size (options: | |
*args, caller=caller, **kwargs)
print("^"*70,file=sys.stderr)
#- WARNING: The above code may do weird things (specifically,
# print the delimiters in one place and the traceback in
# another) if the console is shutting down and logging is being
# temporarily redirected to somewhere other than sys.stderr.
# End function exception().
def critical(msg:str, *args, **kwargs):
    """Log *msg* at CRITICAL level on the main logger, tagging the real call site."""
    # Resolve the calling frame first so the record points at the code
    # that invoked us rather than at this thin wrapper.
    where = mainLogger.logger.findCaller()
    mainLogger.critical(msg, *args, caller=where, **kwargs)
#==================================================================
# lvlname_to_loglevel() [module public function]
#
# This function simply converts the string name of a
# given logging level to its numeric equivalent.
#
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def lvlname_to_loglevel(lvlname):
    """Convert a logging level name (e.g. 'DEBUG') to its numeric value.

    Logs an error and returns NOTSET if the name is unknown.
    """
    # BUG FIX: logging._levelNames only exists on Python 2.  Python 3
    # split it into _nameToLevel / _levelToName, so the old lookup raises
    # AttributeError on every call under py3.  Prefer the modern table
    # and fall back to the legacy one for py2 compatibility.
    name_map = getattr(logging, '_nameToLevel', None)
    if name_map is None:
        name_map = logging._levelNames      # Python 2 fallback
    if lvlname in name_map:
        return name_map[lvlname]
    localLogger.error("There is no logging level named '%s'." % lvlname)
    return NOTSET
#=====================================================
# byname() [module public function]
#
# This simply generates a log message using
# the main logger and the string name of the
# desired logging level.
#
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def byname(lvlname, msg):
    """Emit *msg* on the main logger at the level named *lvlname*."""
    # Capture the caller up front so the log record identifies the real
    # call site instead of this helper.
    where = mainLogger.logger.findCaller()
    numeric_level = lvlname_to_loglevel(lvlname)
    mainLogger.log(numeric_level, msg, caller=where)
#==================================================================
# testLogging() [module public function]
#
# Tests the logging facility for various message types.
# initLogMaster(), below, should already have been called.
#
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def testLogging():
    """Emit one probe message at each log level to verify handler thresholds."""
    template = 'This message just verifies that %s-level log output is enabled for this stream.'
    # Maybe shouldn't test error/critical b/c they look unnecessarily
    # panicky & are always enabled anyway.
    for levelname in ('debug', 'info', 'normal', 'warning', 'error', 'critical'):
        getattr(moduleLogger, levelname)(template % levelname)
#==================================================================
# updateStderr() [module public function]
#
# In case the definition of sys.stderr changes, this
# function retrieves its new definition for use by the
# console output log handler (if that is defined).
#
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# Handle on the console (stderr) log handler once one is installed by
# initLogMaster(); None until then.
global consHandler
consHandler = None

def updateStderr():
    """Re-point the console log handler at the current sys.stderr.

    Call this after sys.stderr has been rebound so console log output
    follows the new stream.
    """
    # The locking here prevents another thread from putting some more
    # output to the console handler after we flush it, and before we
    # redirect its stream.  Or from nulling out the consHandler global
    # after we check it and before we use it.
    acquire = getattr(logging, '_acquireLock', None)
    if acquire is not None:
        acquire()
        release = logging._releaseLock
    else:
        # Newer CPython removed the private helpers; use the module lock.
        logging._lock.acquire()
        release = logging._lock.release
    try:
        if consHandler:
            consHandler.flush()
            consHandler.stream = sys.stderr
    finally:
        # BUG FIX: release in a finally block so an exception during
        # flush() can never leave the logging-module lock held forever.
        release()
#==================================================================
# setLogLevels() [module public function]
#
# Sets the file and console logging levels based on
# various configuration Booleans.
#
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def setLogLevels(verbose=False):
    """Recompute the console and log-file thresholds from the module flags.

    CONS_DEBUG / CONS_WARN drive console_level; LOG_DEBUG / LOG_INFO
    drive log_level.  Pass verbose=True to echo each decision to stderr.
    """
    global console_level, log_level
    # Set the log level for the console based on whether we want to see warnings or not,
    # and whether the console is in debug mode.
    console_level = logging.WARNING # Default setting.
    if CONS_DEBUG:
        if verbose:
            print("logmaster.setLogLevels(): Turning on detailed debug messages on console...",
                  file=sys.stderr)
        console_level = logging.DEBUG
    else:
        if not CONS_WARN:
            if verbose:
                print("logmaster.setLogLevels(): Suppressing warnings from console...",
                      file=sys.stderr)
            console_level = logging.ERROR
    if verbose:
        print("logmaster.setLogLevels(): Console log level is set to %d (%s)." %
              (console_level, logging.getLevelName(console_level)),
              file=sys.stderr)
    # Set the log level for the log file based on whether we want to log verbose info or not,
    # and whether the main logger is in debug mode.
    log_level = logging.INFO # Default setting.
    if LOG_DEBUG:
        if verbose:
            print("logmaster.setLogLevels(): Turning on detailed debug messages in log file...",
                  file=sys.stderr)
        log_level = logging.DEBUG
    # NOTE(review): unlike the console case above, this is a sibling `if`,
    # not an else-branch, so when LOG_DEBUG and (not LOG_INFO) are both
    # set, NORMAL overrides DEBUG here -- confirm this is intended.
    if not LOG_INFO:
        if verbose:
            print("logmaster.setLogLevels(): Suppressing verbose info from log file...",
                  file=sys.stderr)
        log_level = logging.NORMAL
    if verbose:
        print("logmaster.setLogLevels(): File log level is set to %d (%s)." %
              (log_level, logging.getLevelName(log_level)),
              file=sys.stderr)
    # End setLogLevels().
#=================================================================
# setDefaultRole() [public function]
#
# If the current thread has no "role" attribute assigned,
# or if its role is "None", give it a default role called
# "general". This is appropriate for the main thread.
#
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def setDefaultRole():
    """Give the main thread a default "role" attribute of "general".

    Worker threads (ThreadActor instances) get real roles assigned; this
    just ensures the main thread has one too, for log formatting.
    """
    thread = threading.current_thread()          # Get the current thread.
    if thread.name == "MainThread":              # Only the main thread is touched.
        # FIX: use getattr + "is None" (identity test) instead of
        # hasattr + "== None"; behavior is the same but it is the
        # idiomatic and safer comparison for None.
        if getattr(thread, "role", None) is None:
            thread.role = "general"              # assign it a generic role.
        # The main thread's __str__() method also does something
        # ugly.  Fix it up by replacing it with ThreadActor's method.
        # NOTE(review): assigning __str__ on the *instance* does not
        # change str(thread) -- dunder lookup happens on the type -- but
        # it is kept for any code calling thread.__str__() directly.
        thread.__str__ = lambda: ThreadActor.__str__(thread)
#==================================================================
# initLogMaster() [public function]
#
# Basic initialization of the logmaster facility,
# called whenever this module is first loaded.
#
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def initLogMaster():
global logFormatter, theLoggingContext, mainLogger # In python, we have to declare
global consHandler, moduleLogger, initialized # the globals that we'll reassign.
global sysLogger, appLogger
# Pretend the main thread is a ThreadActor by giving it a "role"
# attribute. This is descriptive, for debugging purposes.
setDefaultRole()
# If we have no stdout/stderr streams yet (which will be the case
# if the program was started by, for example, just double-clicking
# on a script.pyw icon), temporarily assign them to null output
# streams, just to make sure we won't crash if someone tries
# to send log messages to the console before more effective values
# of stdout/stderr have been set up.
if sys.stdout == None: sys.stdout = NullOut()
if sys.stderr == None: sys.stderr = NullOut()
# Define a new logging level for "normal" messages that should be
# echoed (without decoration) to standard output. The level of this
# is in between ERROR and WARNING, so that warnings can be suppressed
# without suppressing normal output.
logging.addLevelName(NORMAL_LEVEL,'NORMAL')
logging.NORMAL = NORMAL_LEVEL
# Create our main log file formatter. This may be useful for
# creating file log handlers in subordinate loggers that use
# the same file format as the main file log handler. This can
# be changed if needed in a later call to configLogMaster().
# (Note: Changing the logFormatter is not yet implemented.)
logFormatter = logging.Formatter(LOG_FORMATSTR)
# - LOG_FORMATSTR will be our normal log message format.
# Set the default console and file logging levels based on the
# module defaults. (Don't print debug info since this is just
# the default setting anyway.)
setLogLevels(verbose=False)
# Set the default log file name, and its log level & format.
# With the initial defaults, the log file will just contain
# messages at INFO level or higher (no DEBUG). To change
# this, use configLogMaster().
logging.basicConfig(filename=LOG_FILENAME,
level=log_level,
format=LOG_FORMATSTR)
# Create the global loggingContext object. This is thread-local, and
# does not get initialized for a given thread until it is actually
# first accessed within that thread.
theLoggingContext = LoggingContext()
# Get the main (root) logger object for this environment. Note this
# uses our special getLogger() method defined above, which actually
# creates a NormalLoggerAdapter wrapped around a Logger that is
# mutated to look like a Normal Logger. We don't use appName as the
# logger name here, because we want modules that are not even
# application-specific to still go through this logger.
mainLogger = getLogger('')
# Add a console log handler for messages (other than NORMAL messages)
# that are (typically) at warning level or higher. This will ensure
# that these messages also appear on the stdout/stderr console.
consHandler = logging.StreamHandler() # The default stream handler uses stderr.
consHandler.addFilter(AbnormalFilter()) # Add a filter to ignore NORMAL-level messages.
consHandler.setLevel(console_level) # Set console to log level we determined earlier.
# Console log messages will just have the simple format showing the log
# level name and the actual log message. Look at the log file to see
# more details such as thread, module, and function.
consHandler.setFormatter(logging.Formatter("%(levelname)8s: %(message)s"))
# Add the console log handler to the main logger adapter's underlying logger.
mainLogger.logger.addHandler(consHandler)
# Create a subordinate logger that is the top-level logger for system components.
sysLogger = getLogger(sysName)
# Create a subordinate logger that is the top-level logger for application components.
appLogger = | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
***************************************************************************
Grid.py
---------------------
Date : May 2010
Copyright : (C) 2010 by <NAME>
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = '<NAME>'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, <NAME>'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import os
import math
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsApplication,
QgsField,
QgsFeatureSink,
QgsFeature,
QgsGeometry,
QgsLineString,
QgsPoint,
QgsPointXY,
QgsWkbTypes,
QgsProcessing,
QgsProcessingException,
QgsProcessingParameterEnum,
QgsProcessingParameterExtent,
QgsProcessingParameterNumber,
QgsProcessingParameterDistance,
QgsProcessingParameterCrs,
QgsProcessingParameterFeatureSink,
QgsFields)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class Grid(QgisAlgorithm):
    """Processing algorithm that builds a regular grid (points, lines,
    rectangles, diamonds or hexagons) covering a user-chosen extent."""

    # Parameter/output key constants used in the parameters dict.
    TYPE = 'TYPE'            # grid geometry type (index into self.types)
    EXTENT = 'EXTENT'        # extent the grid must cover
    HSPACING = 'HSPACING'    # horizontal spacing between elements
    VSPACING = 'VSPACING'    # vertical spacing between elements
    HOVERLAY = 'HOVERLAY'    # horizontal overlap between elements
    VOVERLAY = 'VOVERLAY'    # vertical overlap between elements
    CRS = 'CRS'              # CRS of the generated grid
    OUTPUT = 'OUTPUT'        # output feature sink
def icon(self):
return QgsApplication.getThemeIcon("/algorithms/mAlgorithmCreateGrid.svg")
def svgIconPath(self):
return QgsApplication.iconPath("/algorithms/mAlgorithmCreateGrid.svg")
def tags(self):
return self.tr('grid,lines,polygons,vector,create,fishnet,diamond,hexagon').split(',')
def group(self):
return self.tr('Vector creation')
def groupId(self):
return 'vectorcreation'
    def __init__(self):
        # No algorithm-specific state; just run the base initializer.
        super().__init__()
    def initAlgorithm(self, config=None):
        """Declare the algorithm's inputs and its single sink output.

        Distance parameters are expressed in units of the CRS parameter.
        """
        # Display labels for the TYPE enum; index order must match the
        # dispatch in processAlgorithm().
        self.types = [self.tr('Point'),
                      self.tr('Line'),
                      self.tr('Rectangle (polygon)'),
                      self.tr('Diamond (polygon)'),
                      self.tr('Hexagon (polygon)')]
        self.addParameter(QgsProcessingParameterEnum(self.TYPE,
                                                     self.tr('Grid type'), self.types))
        self.addParameter(QgsProcessingParameterExtent(self.EXTENT, self.tr('Grid extent')))
        self.addParameter(QgsProcessingParameterDistance(self.HSPACING,
                                                         self.tr('Horizontal spacing'),
                                                         1.0, self.CRS, False, 0, 1000000000.0))
        self.addParameter(QgsProcessingParameterDistance(self.VSPACING,
                                                         self.tr('Vertical spacing'),
                                                         1.0, self.CRS, False, 0, 1000000000.0))
        self.addParameter(QgsProcessingParameterDistance(self.HOVERLAY,
                                                         self.tr('Horizontal overlay'),
                                                         0.0, self.CRS, False, 0, 1000000000.0))
        self.addParameter(QgsProcessingParameterDistance(self.VOVERLAY,
                                                         self.tr('Vertical overlay'),
                                                         0.0, self.CRS, False, 0, 1000000000.0))
        self.addParameter(QgsProcessingParameterCrs(self.CRS, 'Grid CRS', 'ProjectCrs'))
        self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Grid'), type=QgsProcessing.TypeVectorPolygon))
def name(self):
return 'creategrid'
def displayName(self):
return self.tr('Create grid')
def processAlgorithm(self, parameters, context, feedback):
idx = self.parameterAsEnum(parameters, self.TYPE, context)
hSpacing = self.parameterAsDouble(parameters, self.HSPACING, context)
vSpacing = self.parameterAsDouble(parameters, self.VSPACING, context)
hOverlay = self.parameterAsDouble(parameters, self.HOVERLAY, context)
vOverlay = self.parameterAsDouble(parameters, self.VOVERLAY, context)
crs = self.parameterAsCrs(parameters, self.CRS, context)
bbox = self.parameterAsExtent(parameters, self.EXTENT, context, crs)
if hSpacing <= 0 or vSpacing <= 0:
raise QgsProcessingException(
self.tr('Invalid grid spacing: {0}/{1}').format(hSpacing, vSpacing))
if bbox.width() < hSpacing:
raise QgsProcessingException(
self.tr('Horizontal spacing is too large for the covered area'))
if hSpacing <= hOverlay or vSpacing <= vOverlay:
raise QgsProcessingException(
self.tr('Invalid overlay: {0}/{1}').format(hOverlay, vOverlay))
if bbox.height() < vSpacing:
raise QgsProcessingException(
self.tr('Vertical spacing is too large for the covered area'))
fields = QgsFields()
fields.append(QgsField('left', QVariant.Double, '', 24, 16))
fields.append(QgsField('top', QVariant.Double, '', 24, 16))
fields.append(QgsField('right', QVariant.Double, '', 24, 16))
fields.append(QgsField('bottom', QVariant.Double, '', 24, 16))
fields.append(QgsField('id', QVariant.Int, '', 10, 0))
if idx == 0:
outputWkb = QgsWkbTypes.Point
elif idx == 1:
outputWkb = QgsWkbTypes.LineString
else:
outputWkb = QgsWkbTypes.Polygon
(sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
fields, outputWkb, crs)
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
if idx == 0:
self._pointGrid(
sink, bbox, hSpacing, vSpacing, hOverlay, vOverlay, feedback)
elif idx == 1:
self._lineGrid(
sink, bbox, hSpacing, vSpacing, hOverlay, vOverlay, feedback)
elif idx == 2:
self._rectangleGrid(
sink, bbox, hSpacing, vSpacing, hOverlay, vOverlay, feedback)
elif idx == 3:
self._diamondGrid(
sink, bbox, hSpacing, vSpacing, hOverlay, vOverlay, feedback)
elif idx == 4:
self._hexagonGrid(
sink, bbox, hSpacing, vSpacing, hOverlay, vOverlay, feedback)
return {self.OUTPUT: dest_id}
def _pointGrid(self, sink, bbox, hSpacing, vSpacing, hOverlay, vOverlay, feedback):
feat = QgsFeature()
columns = int(math.ceil(float(bbox.width()) / (hSpacing - hOverlay)))
rows = int(math.ceil(float(bbox.height()) / (vSpacing - vOverlay)))
cells = rows * columns
count_update = cells * 0.05
id = 1
count = 0
for col in range(columns):
for row in range(rows):
x = bbox.xMinimum() + (col * hSpacing - col * hOverlay)
y = bbox.yMaximum() - (row * vSpacing - row * vOverlay)
feat.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(x, y)))
feat.setAttributes([x, y, x + hSpacing, y + vSpacing, id])
sink.addFeature(feat, QgsFeatureSink.FastInsert)
id += 1
count += 1
if int(math.fmod(count, count_update)) == 0:
feedback.setProgress(int(count / cells * 100))
    def _lineGrid(self, sink, bbox, hSpacing, vSpacing, hOverlay, vOverlay, feedback):
        """Write horizontal then vertical line features covering *bbox*.

        When an overlay is set, line positions alternate between
        (spacing - overlay) and overlay steps, producing paired lines.
        Progress runs 0-50% for horizontal lines, 50-100% for vertical.
        """
        feat = QgsFeature()
        # Alternating step sizes: with overlay > 0 every second line is
        # offset by the overlay; otherwise both steps equal the spacing.
        if hOverlay > 0:
            hSpace = [hSpacing - hOverlay, hOverlay]
        else:
            hSpace = [hSpacing, hSpacing]
        if vOverlay > 0:
            vSpace = [vSpacing - vOverlay, vOverlay]
        else:
            vSpace = [vSpacing, vSpacing]
        count = 0
        id = 1
        # latitude lines
        count_max = bbox.height() / vSpacing
        count_update = count_max * 0.10
        y = bbox.yMaximum()
        while y >= bbox.yMinimum():
            if feedback.isCanceled():
                break
            pt1 = QgsPoint(bbox.xMinimum(), y)
            pt2 = QgsPoint(bbox.xMaximum(), y)
            line = QgsLineString([pt1, pt2])
            feat.setGeometry(QgsGeometry(line))
            # NOTE(review): 6 attributes are set here but only 5 fields
            # (left, top, right, bottom, id) are declared in
            # processAlgorithm -- presumably the trailing coordinate is
            # dropped by the sink; confirm against the field definitions.
            feat.setAttributes([bbox.xMinimum(),
                                y,
                                bbox.xMaximum(),
                                y,
                                id,
                                y])
            sink.addFeature(feat, QgsFeatureSink.FastInsert)
            y = y - vSpace[count % 2]
            id += 1
            count += 1
            if int(math.fmod(count, count_update)) == 0:
                feedback.setProgress(int(count / count_max * 50))
        feedback.setProgress(50)
        # longitude lines
        # counters for progressbar - update every 5%
        count = 0
        count_max = bbox.width() / hSpacing
        count_update = count_max * 0.10
        x = bbox.xMinimum()
        while x <= bbox.xMaximum():
            if feedback.isCanceled():
                break
            pt1 = QgsPoint(x, bbox.yMaximum())
            pt2 = QgsPoint(x, bbox.yMinimum())
            line = QgsLineString([pt1, pt2])
            feat.setGeometry(QgsGeometry(line))
            feat.setAttributes([x,
                                bbox.yMaximum(),
                                x,
                                bbox.yMinimum(),
                                id,
                                x])
            sink.addFeature(feat, QgsFeatureSink.FastInsert)
            x = x + hSpace[count % 2]
            id += 1
            count += 1
            if int(math.fmod(count, count_update)) == 0:
                feedback.setProgress(50 + int(count / count_max * 50))
def _rectangleGrid(self, sink, bbox, hSpacing, vSpacing, hOverlay, vOverlay, feedback):
feat = QgsFeature()
columns = int(math.ceil(float(bbox.width()) / (hSpacing - hOverlay)))
rows = int(math.ceil(float(bbox.height()) / (vSpacing - vOverlay)))
cells = rows * columns
count_update = cells * 0.05
id = 1
count = 0
for col in range(columns):
if feedback.isCanceled():
break
x1 = bbox.xMinimum() + (col * hSpacing - col * hOverlay)
x2 = x1 + hSpacing
for row in range(rows):
y1 = bbox.yMaximum() - (row * vSpacing - row * vOverlay)
y2 = y1 - vSpacing
polyline = []
polyline.append(QgsPointXY(x1, y1))
polyline.append(QgsPointXY(x2, y1))
polyline.append(QgsPointXY(x2, y2))
polyline.append(QgsPointXY(x1, y2))
polyline.append(QgsPointXY(x1, y1))
feat.setGeometry(QgsGeometry.fromPolygonXY([polyline]))
feat.setAttributes([x1, y1, x2, y2, id])
sink.addFeature(feat, QgsFeatureSink.FastInsert)
id += 1
count += 1
if int(math.fmod(count, count_update)) == 0:
feedback.setProgress(int(count / cells * 100))
    def _diamondGrid(self, sink, bbox, hSpacing, vSpacing, hOverlay, vOverlay, feedback):
        """Write one diamond (rotated square) polygon per grid cell.

        Odd and even columns are vertically offset by half a cell so the
        diamonds tessellate.
        """
        feat = QgsFeature()
        halfHSpacing = hSpacing / 2
        halfVSpacing = vSpacing / 2
        halfHOverlay = hOverlay / 2
        halfVOverlay = vOverlay / 2
        # Columns advance by half the horizontal spacing because
        # neighboring columns interlock.
        columns = int(math.ceil(float(bbox.width()) / (halfHSpacing - halfHOverlay)))
        rows = int(math.ceil(float(bbox.height()) / (vSpacing - halfVOverlay)))
        cells = rows * columns
        count_update = cells * 0.05   # progress reported roughly every 5%
        id = 1
        count = 0
        for col in range(columns):
            if feedback.isCanceled():
                break
            # x1/x2/x3 are the left, center and right x of the diamond.
            x = bbox.xMinimum() - (col * halfHOverlay)
            x1 = x + ((col + 0) * halfHSpacing)
            x2 = x + ((col + 1) * halfHSpacing)
            x3 = x + ((col + 2) * halfHSpacing)
            for row in range(rows):
                y = bbox.yMaximum() + (row * halfVOverlay)
                # Odd columns are shifted down by half a cell to tessellate.
                if (col % 2) == 0:
                    y1 = y - (((row * 2) + 0) * halfVSpacing)
                    y2 = y - (((row * 2) + 1) * halfVSpacing)
                    y3 = y - (((row * 2) + 2) * halfVSpacing)
                else:
                    y1 = y - (((row * 2) + 1) * halfVSpacing)
                    y2 = y - (((row * 2) + 2) * halfVSpacing)
                    y3 = y - (((row * 2) + 3) * halfVSpacing)
                # Closed ring through the four diamond vertices.
                polyline = []
                polyline.append(QgsPointXY(x1, y2))
                polyline.append(QgsPointXY(x2, y1))
                polyline.append(QgsPointXY(x3, y2))
                polyline.append(QgsPointXY(x2, y3))
                polyline.append(QgsPointXY(x1, y2))
                feat.setGeometry(QgsGeometry.fromPolygonXY([polyline]))
                feat.setAttributes([x1, y1, x3, y3, id])
                sink.addFeature(feat, QgsFeatureSink.FastInsert)
                id += 1
                count += 1
                if int(math.fmod(count, count_update)) == 0:
                    feedback.setProgress(int(count / cells * 100))
def _hexagonGrid(self, sink, bbox, hSpacing, vSpacing, hOverlay, vOverlay, feedback):
feat = QgsFeature()
# To preserve symmetry, hspacing is fixed relative to vspacing
xVertexLo = 0.288675134594813 * vSpacing
xVertexHi = 0.577350269189626 * vSpacing
hSpacing = xVertexLo + xVertexHi
hOverlay = hSpacing - hOverlay
if hOverlay < 0:
raise QgsProcessingException(
self.tr('To preserve symmetry, hspacing is fixed relative to vspacing\n \
hspacing is fixed at: {0} and hoverlay is fixed at: {1}\n \
hoverlay cannot be negative. Increase hoverlay.').format(hSpacing, hOverlay)
)
halfVSpacing = vSpacing / 2.0
columns = int(math.ceil(float(bbox.width()) / hOverlay))
rows = int(math.ceil(float(bbox.height()) / (vSpacing - vOverlay)))
cells = rows * columns
count_update = cells * 0.05
id = 1
count = 0
for col in range(columns):
if feedback.isCanceled():
break
# (column + 1) and (row + 1) calculation is used to maintain
# topology between adjacent shapes and avoid overlaps/holes
# due to rounding errors
x1 = bbox.xMinimum() + (col * hOverlay) # far left
x2 = x1 + (xVertexHi - xVertexLo) # left
x3 = bbox.xMinimum() | |
in att_dict.keys() ):
if (att_dict['coordinate_defines'] == 'center'):
CENTERS = True
#------------------------------------
# Get user-select minlat and maxlat
#------------------------------------
user_minlon = self.map_minlon.value
user_maxlon = self.map_maxlon.value
#----------------------------------
# Get the array of lons, and info
#----------------------------------
lons = self.dataset[ lon_name ][:].data
if (lons.ndim > 1):
msg1 = 'Sorry, cannot yet restrict longitude indices'
msg2 = ' when lon array has more than 1 dimension.'
self.append_download_log( [msg1, msg2] )
return (None, None)
# print('## type(lons) =', type(lons) )
# print('## lons.shape =', lons.shape )
# print('## lons.ndim =', lons.ndim )
#------------------------------------------
# Compute the longitude spacing, dlon
#------------------------------------------
# This only works if lons are a 1D list.
# If a "list of lists", len() will be for
# the outer list and min() won't work.
# Also, no "size" attribute, etc.
#------------------------------------------
nlons = lons.size
minlon = lons.min()
maxlon = lons.max()
dlon = np.abs(lons[1] - lons[0])
#--------------
# Another way
#--------------
# londif = (maxlon - minlon)
# if (CENTERS):
# dlon = (londif / (nlons - 1))
# else:
# dlon = (londif / nlons)
#-----------------------------------------
# Convert lons to have range [-180,180]?
#-----------------------------------------
# lons = ((lons + 180.0) % 360) - 180
# lons.sort() #####################
# user_maxlon = ((user_maxlon + 180.0) % 360) - 180
# user_minlon = ((user_minlon + 180.0) % 360) - 180
# if (user_minlon > user_maxlon):
# user_minlon -= 180.0
#-------------------------------------------
# Convert user lons to have range [0,360]?
#-------------------------------------------
if (minlon >= 0) and (maxlon <= 360):
user_minlon = (user_minlon + 360.0) % 360
user_maxlon = (user_maxlon + 360.0) % 360
#--------------------------------------
# Compute the new, restricted indices
# New method: (2020-12-12)
#--------------------------------------
all_indices = np.arange( nlons )
w = np.logical_and(lons > user_minlon, lons < user_maxlon) # boolean array
indices = all_indices[w]
if (indices.size > 0):
lon_i1 = indices[0]
lon_i2 = indices[-1]
else:
lon_i1 = 0
lon_i2 = nlons-1
#--------------------------------------
# Compute the new, restricted indices
#--------------------------------------
# Here, int() behaves like "floor()".
# So maybe add 1 to lon_i2 ???
#--------------------------------------
# lon_i1 = int( (user_minlon - minlon) / dlon )
# lon_i2 = int( (user_maxlon - minlon) / dlon )
# lon_i2 = lon_i2 + 1 #######
#---------------------------------
# Make sure indices are in range
#----------------------------------------
# lon_i1 = min( max(lon_i1, 0), nlons-1 )
# lon_i2 = min( max(lon_i2, 0), nlons-1 )
#------------------------------------------
# User region may be smaller than v_dlat,
# as is the case with Puerto Rico, where
# data grid cells are 1 deg x 1 deg or so.
#------------------------------------------
# if (lon_i1 == lon_i2): # (still needed?)
# lon_i2 = lon_i1 + 1
if (REPORT):
print()
print('lon_name =', lon_name)
print('minlon =', minlon, '(var)')
print('maxlon =', maxlon, '(var)')
print('dlon =', dlon)
print('u_minlon =', user_minlon, '(user)')
print('u_maxlon =', user_maxlon, '(user)')
print('lon_i1 =', lon_i1, '(new index)')
print('lon_i2 =', lon_i2, '(new index)')
# print('nlons =', nlons)
# print('New longitude indices =', lon_i1, ',', lon_i2 )
# print()
#--------------------------------------------------
i1s = str(lon_i1)
i2s = str(lon_i2)
msg1 = 'lon_name = ' + lon_name
msg2 = 'dlon = ' + str(dlon)
msg3 = 'nlons = ' + str(nlons)
msg4 = 'min, max = ' + str(minlon) + ', ' + str(maxlon) + ' (data)'
msg5 = 'min, max = ' + str(user_minlon) + ', ' + str(user_maxlon) + ' (user)'
msg6 = 'New longitude indices = ' + i1s + ', ' + i2s
self.append_download_log([msg1, msg2, msg3, msg4, msg5, msg6, ' '])
return (lon_i1, lon_i2)
# get_new_lon_index_range()
#--------------------------------------------------------------------
def get_duration(self, start_date=None, start_time=None,
end_date=None, end_time=None,
dur_units=None, REPORT=False):
#------------------------------------------------
# Note: Compute time span between 2 datetimes.
#------------------------------------------------
## date_sep = '/'
date_sep = '-'
time_sep = ':'
#-------------------------------------
# Get parts of the start date & time
#-------------------------------------
(y1, m1, d1) = self.split_date_str( start_date )
(h1, mm1, s1) = self.split_time_str( start_time )
#-----------------------------------
# Get parts of the end date & time
#-----------------------------------
(y2, m2, d2) = self.split_date_str( end_date )
(h2, mm2, s2) = self.split_time_str( end_time )
#------------------------------
# Convert to datetime objects
#------------------------------
start_obj = datetime.datetime(y1, m1, d1, h1, mm1, s1)
end_obj = datetime.datetime(y2, m2, d2, h2, mm2, s2)
#---------------------------------------------
# Comput time duration between start and end
#---------------------------------------------
duration_obj = (end_obj - start_obj)
duration_secs = duration_obj.total_seconds()
#-----------------------------------------
# Convert duration to dur_units provided
#-----------------------------------------
if (dur_units == 'seconds'):
duration = duration_secs
elif (dur_units == 'minutes'):
duration = (duration_secs / 60.0)
elif (dur_units == 'hours'):
duration = (duration_secs / 3600.0)
elif (dur_units == 'days'):
duration = (duration_secs / 86400.0)
elif (dur_units == 'years'):
duration = (duration_secs / 31536000.0)
else:
print('Unknown duration units = ' + dur_units + '.')
print('Returning duration in hours.')
duration = (duration_secs / 3600.0)
if (REPORT):
print( 'duration =', duration, '[' + dur_units + ']' )
return duration
#-----------------------------------------
# Alternate approach, where dur_units is
# determined and then returned
#-----------------------------------------
# if (duration_secs < 60):
# duration = duration_secs
# dur_units = 'seconds'
# elif (duration_secs < 3600):
# duration = divmod( duration_secs, 60 )[0]
# dur_units = 'minutes'
# elif (duration_secs < 86400):
# duration = divmod( duration_secs, 3600 )[0]
# dur_units = 'hours'
# elif (duration_secs < 31536000):
# duration = divmod( duration_secs, 86400 )[0]
# dur_units = 'days'
# else:
# duration = divmod( duration_secs, 86400 )[0]
# dur_units = 'days'
#
# return (duration, dur_units)
# get_duration()
#--------------------------------------------------------------------
def get_download_format(self):
return self.download_format.value
# get_download_format()
#--------------------------------------------------------------------
    def clear_download_log(self):
        """Erase all text from the download-log widget."""
        # Assigning the empty string clears the textarea contents.
        self.download_log.value = ''
    #   clear_download_log()
#--------------------------------------------------------------------
def append_download_log(self, msg):
## type_str = str( type(msg) )
## if (type_str == "<class 'list'>"):
if (isinstance( msg, list)):
for string in msg:
self.download_log.value += (string + '\n')
else:
self.download_log.value += (msg + '\n')
# append_download_log()
#--------------------------------------------------------------------
    def print_user_choices(self):
        """Summarize the user's current download settings in the log box.

        Logs an error and returns early if no dataset has been opened.
        """
        if not(hasattr(self, 'dataset')):
            msg = 'ERROR: No dataset has been selected.'
            self.append_download_log( msg )
            return ############
        # Fall back to 'unknown' when either datetime can't be determined.
        start_datetime_obj = self.get_start_datetime_obj()
        if (start_datetime_obj is not None):
            start_date = str( start_datetime_obj.date() )
            start_time = str( start_datetime_obj.time() )
        else:
            start_date = 'unknown'
            start_time = 'unknown'
        end_datetime_obj = self.get_end_datetime_obj()
        if (end_datetime_obj is not None):
            end_date = str( end_datetime_obj.date() )
            end_time = str( end_datetime_obj.time() )
        else:
            end_date = 'unknown'
            end_time = 'unknown'
        #------------------------------------------
        # Show message in downloads panel log box
        #------------------------------------------
        msg1 = 'var short name = ' + self.get_var_shortname()
        msg2 = 'download format = ' + self.get_download_format()
        msg3 = 'map bounds = ' + str(self.get_map_bounds( FROM_MAP=False ))
        msg4 = 'start date and time = ' + start_date + ' ' + start_time
        msg5 = 'end date and time = ' + end_date + ' ' + end_time
        ## msg6 = 'opendap package = ' + self.get_opendap_package()
        msgs = [msg1, msg2, msg3, msg4, msg5]
        self.append_download_log( msgs )
    #   print_user_choices()
#--------------------------------------------------------------------
def download_data(self, caller_obj=None):
#-------------------------------------------------
# Note: After a reset, self still has a dataset,
# but short_name was reset to ''.
#-------------------------------------------------
short_name = self.get_var_shortname()
if (short_name == ''):
msg = 'Sorry, no variable has been selected.'
self.download_log.value = msg
return
#----------------------------------------------------
# Note: This is called by the "on_click" method of
# the "Go" button beside the Dropdown of filenames.
# In this case, type(caller_obj) =
# <class 'ipywidgets.widgets.widget_button.Button'>
#----------------------------------------------------
## status = self.download_status
self.print_user_choices()
#--------------------------------------------------
# print_user_choices() already displayed error msg
#--------------------------------------------------
if not(hasattr(self, 'dataset')):
return
#----------------------------------------
# Get names of the variables dimensions
#----------------------------------------
dim_list = self.dataset[ short_name ].dimensions
#--------------------------------------
# Uncomment to test other time_deltas
#------------------------------------------
# If test time_delta is too small, we'll
# get a start_index that is out of range.
# Next 3 worked in some SST tests.
#------------------------------------------
# self.time_delta = '0000-02-00 00:00:00'
# self.time_delta = '0000-00-30 12:00:00'
# self.time_delta = '0001-00-00 00:00:00'
#----------------------------------------------
# Is there a time variable ? If so, use time
# range selected in GUI to clip the data.
#----------------------------------------------
(t_i1, t_i2) = self.get_new_time_index_range( REPORT=True)
#--------------------------------------------
# Is there a lat variable ? If so, use lat
# range selected in GUI | |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=line-too-long
from .. import try_manual
# EXAMPLE: /Workspaces/put/Create or update a workspace
@try_manual
def step_workspace_create(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12, rg_13, rg_14,
                          rg_15, rg_16, rg_17, checks=None):
    """Scenario step: create a Synapse workspace in {rg_2}, then wait
    until provisioning completes (the wait applies *checks*)."""
    if checks is None:
        checks = []
    test.cmd('az synapse workspace create '
             '--resource-group "{rg_2}" '
             '--type "SystemAssigned" '
             '--location "East US" '
             '--default-data-lake-storage account-url="https://accountname.dfs.core.windows.net" filesystem="default" '
             '--managed-resource-group-name "workspaceManagedResourceGroupUnique" '
             '--managed-virtual-network "default" '
             '--sql-administrator-login "login" '
             '--sql-administrator-login-password "password" '
             '--tags key="value" '
             '--name "{myWorkspace2}"',
             checks=[])
    test.cmd('az synapse workspace wait --created '
             '--resource-group "{rg_2}" '
             '--name "{myWorkspace2}"',
             checks=checks)
# EXAMPLE: /Workspaces/get/Get a workspace
@try_manual
def step_workspace_show(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12, rg_13, rg_14,
                        rg_15, rg_16, rg_17, checks=None):
    """Test step for /Workspaces/get/Get a workspace."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse workspace show',
        '--resource-group "{rg_2}"',
        '--name "{myWorkspace2}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /Workspaces/get/List workspaces in resource group
@try_manual
def step_workspace_list(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12, rg_13, rg_14,
                        rg_15, rg_16, rg_17, checks=None):
    """Test step for /Workspaces/get/List workspaces in resource group."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse workspace list',
        '--resource-group "{rg_2}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /Workspaces/get/List workspaces in subscription
@try_manual
def step_workspace_list2(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12, rg_13, rg_14,
                         rg_15, rg_16, rg_17, checks=None):
    """Test step for /Workspaces/get/List workspaces in subscription (empty -g lists subscription-wide)."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse workspace list',
        '-g ""',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /Workspaces/patch/Update a workspace
@try_manual
def step_workspace_update(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12, rg_13, rg_14,
                          rg_15, rg_16, rg_17, checks=None):
    """Test step for /Workspaces/patch/Update a workspace."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse workspace update',
        '--resource-group "{rg_2}"',
        '--name "{myWorkspace2}"',
        '--type "SystemAssigned"',
        '--sql-administrator-login-password "password"',
        '--tags key="value"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /BigDataPools/put/Create or update a Big Data pool
@try_manual
def step_big_data_pool_create(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12, rg_13,
                              rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /BigDataPools/put/Create or update a Big Data pool, then wait for creation."""
    checks = [] if checks is None else checks
    create_cmd = ' '.join([
        'az synapse big-data-pool create',
        '--location "West US 2"',
        '--auto-pause delay-in-minutes=15 enabled=true',
        '--auto-scale enabled=true max-node-count=50 min-node-count=3',
        '--default-spark-log-folder "/logs"',
        '--library-requirements content="" filename="requirements.txt"',
        '--node-count 4',
        '--node-size "Medium"',
        '--node-size-family "MemoryOptimized"',
        '--spark-events-folder "/events"',
        '--spark-version "2.4"',
        '--tags key="value"',
        '--name "{myBigDataPool}"',
        '--resource-group "{rg}"',
        '--workspace-name "{myWorkspace}"',
    ])
    test.cmd(create_cmd, checks=[])
    # NOTE(review): the generated wait command omits --workspace-name, unlike the
    # create command above — confirm against the `az synapse big-data-pool wait`
    # contract before changing generated code.
    wait_cmd = ' '.join([
        'az synapse big-data-pool wait --created',
        '--name "{myBigDataPool}"',
        '--resource-group "{rg}"',
    ])
    test.cmd(wait_cmd, checks=checks)
# EXAMPLE: /BigDataPools/get/Get a Big Data pool
@try_manual
def step_big_data_pool_show(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12, rg_13,
                            rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /BigDataPools/get/Get a Big Data pool."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse big-data-pool show',
        '--name "{myBigDataPool}"',
        '--resource-group "{rg}"',
        '--workspace-name "{myWorkspace}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /BigDataPools/get/List Big Data pools in a workspace
@try_manual
def step_big_data_pool_list(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12, rg_13,
                            rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /BigDataPools/get/List Big Data pools in a workspace."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse big-data-pool list',
        '--resource-group "{rg}"',
        '--workspace-name "{myWorkspace}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /BigDataPools/patch/Update a Big Data pool
@try_manual
def step_big_data_pool_update(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12, rg_13,
                              rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /BigDataPools/patch/Update a Big Data pool."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse big-data-pool update',
        '--name "{myBigDataPool}"',
        '--tags key="value"',
        '--resource-group "{rg}"',
        '--workspace-name "{myWorkspace}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /BigDataPools/delete/Delete a Big Data pool
@try_manual
def step_big_data_pool_delete(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12, rg_13,
                              rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /BigDataPools/delete/Delete a Big Data pool (-y skips confirmation)."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse big-data-pool delete -y',
        '--name "{myBigDataPool}"',
        '--resource-group "{rg}"',
        '--workspace-name "{myWorkspace}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /IntegrationRuntimes/put/Create integration runtime
@try_manual
def step_integration_runtime_create(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12,
                                    rg_13, rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /IntegrationRuntimes/put/Create integration runtime."""
    checks = [] if checks is None else checks
    # The doubled braces and escaped quotes pass a literal JSON payload through
    # the CLI test framework's format() expansion.
    cmd = ' '.join([
        'az synapse integration-runtime create',
        '--properties "{{\\"type\\":\\"SelfHosted\\",\\"description\\":\\"A selfhosted integration runtime\\"}}"',
        '--name "{myIntegrationRuntime}"',
        '--resource-group "{rg_17}"',
        '--workspace-name "{myWorkspace20}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /IntegrationRuntimes/get/Get integration runtime
@try_manual
def step_integration_runtime_show(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12, rg_13,
                                  rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /IntegrationRuntimes/get/Get integration runtime."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse integration-runtime show',
        '--name "{myIntegrationRuntime}"',
        '--resource-group "{rg_17}"',
        '--workspace-name "{myWorkspace20}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /IntegrationRuntimes/get/List integration runtimes
@try_manual
def step_integration_runtime_list(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12, rg_13,
                                  rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /IntegrationRuntimes/get/List integration runtimes."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse integration-runtime list',
        '--resource-group "{rg_17}"',
        '--workspace-name "{myWorkspace20}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /IntegrationRuntimes/patch/Update integration runtime
@try_manual
def step_integration_runtime_update(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12,
                                    rg_13, rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /IntegrationRuntimes/patch/Update integration runtime."""
    checks = [] if checks is None else checks
    # The escaped inner quotes send a quoted ISO-8601 duration string to the service.
    cmd = ' '.join([
        'az synapse integration-runtime update',
        '--name "{myIntegrationRuntime}"',
        '--resource-group "{rg_17}"',
        '--auto-update "Off"',
        '--update-delay-offset "\\"PT3H\\""',
        '--workspace-name "{myWorkspace20}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /IntegrationRuntimes/post/Start integration runtime
@try_manual
def step_integration_runtime_start(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12,
                                   rg_13, rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /IntegrationRuntimes/post/Start integration runtime."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse integration-runtime start',
        '--name "{myIntegrationRuntime2}"',
        '--resource-group "{rg_17}"',
        '--workspace-name "{myWorkspace20}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /IntegrationRuntimes/post/Stop integration runtime
@try_manual
def step_integration_runtime_stop(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12, rg_13,
                                  rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /IntegrationRuntimes/post/Stop integration runtime."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse integration-runtime stop',
        '--name "{myIntegrationRuntime2}"',
        '--resource-group "{rg_17}"',
        '--workspace-name "{myWorkspace20}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /IntegrationRuntimes/post/Upgrade integration runtime
@try_manual
def step_integration_runtime_upgrade(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12,
                                     rg_13, rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /IntegrationRuntimes/post/Upgrade integration runtime."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse integration-runtime upgrade',
        '--name "{myIntegrationRuntime}"',
        '--resource-group "{rg_17}"',
        '--workspace-name "{myWorkspace20}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /IntegrationRuntimeAuthKeys/post/List auth keys
@try_manual
def step_integration_runtime_auth_key_list(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11,
                                           rg_12, rg_13, rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /IntegrationRuntimeAuthKeys/post/List auth keys."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse integration-runtime-auth-key list',
        '--integration-runtime-name "{myIntegrationRuntime}"',
        '--resource-group "{rg_17}"',
        '--workspace-name "{myWorkspace20}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /IntegrationRuntimeAuthKeys/post/Regenerate auth key
@try_manual
def step_integration_runtime_auth_key_regenerate(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10,
                                                 rg_11, rg_12, rg_13, rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /IntegrationRuntimeAuthKeys/post/Regenerate auth key."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse integration-runtime-auth-key regenerate',
        '--integration-runtime-name "{myIntegrationRuntime}"',
        '--key-name "authKey2"',
        '--resource-group "{rg_17}"',
        '--workspace-name "{myWorkspace20}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /IntegrationRuntimeConnectionInfos/post/Get connection info
@try_manual
def step_integration_runtime_connection_info_get(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10,
                                                 rg_11, rg_12, rg_13, rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /IntegrationRuntimeConnectionInfos/post/Get connection info."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse integration-runtime-connection-info get',
        '--integration-runtime-name "{myIntegrationRuntime}"',
        '--resource-group "{rg_17}"',
        '--workspace-name "{myWorkspace20}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /IntegrationRuntimeCredentials/post/Sync credentials
@try_manual
def step_integration_runtime_credentials_sync(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11,
                                              rg_12, rg_13, rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /IntegrationRuntimeCredentials/post/Sync credentials."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse integration-runtime-credentials sync',
        '--integration-runtime-name "{myIntegrationRuntime}"',
        '--resource-group "{rg_17}"',
        '--workspace-name "{myWorkspace20}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /IntegrationRuntimeMonitoringData/post/Get monitoring data
@try_manual
def step_integration_runtime_monitoring_data_get(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10,
                                                 rg_11, rg_12, rg_13, rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /IntegrationRuntimeMonitoringData/post/Get monitoring data."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse integration-runtime-monitoring-data get',
        '--integration-runtime-name "{myIntegrationRuntime}"',
        '--resource-group "{rg_17}"',
        '--workspace-name "{myWorkspace20}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /IntegrationRuntimeNodeIpAddress/post/Get integration runtime node IP address
@try_manual
def step_integration_runtime_node_ip_address_get(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10,
                                                 rg_11, rg_12, rg_13, rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /IntegrationRuntimeNodeIpAddress/post/Get integration runtime node IP address."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse integration-runtime-node-ip-address get',
        '--integration-runtime-name "{myIntegrationRuntime}"',
        '--node-name "Node_1"',
        '--resource-group "{rg_17}"',
        '--workspace-name "{myWorkspace20}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /IntegrationRuntimeNodes/get/Get integration runtime node
@try_manual
def step_integration_runtime_node_show(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12,
                                       rg_13, rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /IntegrationRuntimeNodes/get/Get integration runtime node."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse integration-runtime-node show',
        '--integration-runtime-name "{myIntegrationRuntime}"',
        '--node-name "Node_1"',
        '--resource-group "{rg_17}"',
        '--workspace-name "{myWorkspace20}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /IntegrationRuntimeNodes/patch/Update integration runtime node
@try_manual
def step_integration_runtime_node_update(test, rg_5, rg, rg_2, rg_3, rg_4, rg_6, rg_7, rg_8, rg_9, rg_10, rg_11, rg_12,
                                         rg_13, rg_14, rg_15, rg_16, rg_17, checks=None):
    """Test step for /IntegrationRuntimeNodes/patch/Update integration runtime node."""
    checks = [] if checks is None else checks
    cmd = ' '.join([
        'az synapse integration-runtime-node update',
        '--integration-runtime-name "{myIntegrationRuntime}"',
        '--node-name "Node_1"',
        '--resource-group "{rg_17}"',
        '--concurrent-jobs-limit 2',
        '--workspace-name "{myWorkspace20}"',
    ])
    test.cmd(cmd, checks=checks)
# EXAMPLE: /IntegrationRuntimeNodes/delete/Delete integration runtime | |
# Packs/AzureFirewall/Integrations/AzureFirewall/AzureFirewall_test.py
import copy
import pytest
from unittest.mock import Mock
from CommonServerPython import *
# Fixed identifiers used to build the mocked Azure management endpoint.
SUBSCRIPTION_ID = "sub_id"
RESOURCE_GROUP_NAME = "group_name"
# All firewall/policy endpoints in these tests hang off this Microsoft.Network base.
BASE_URL = f'https://management.azure.com/subscriptions/{SUBSCRIPTION_ID}' \
           f'/resourceGroups/{RESOURCE_GROUP_NAME}/providers/Microsoft.Network'
CLIENT_ID = "XXXX"

# Disable the "scheduled commands unsupported" guard so commands can run under pytest.
ScheduledCommand.raise_error_if_not_supported = Mock()
def load_mock_response(file_path: str) -> str:
    """
    Read a mock API-response file from disk.

    Args:
        file_path (str): Path of the mock response JSON file to return.

    Returns:
        str: Raw file content.
    """
    with open(file_path, mode='r', encoding='utf-8') as handle:
        content = handle.read()
    return content
def get_azure_access_token_mock() -> dict:
    """
    Build a fake Azure OAuth2 token payload for authorization mocking.

    Returns:
        dict: Azure access token mock.
    """
    token_payload = {}
    token_payload['access_token'] = '<PASSWORD>'
    token_payload['expires_in'] = 3595
    token_payload['refresh_token'] = '<PASSWORD>'
    return token_payload
def get_client_mock():
    """
    Build an AzureFirewallClient wired to the module-level test constants.

    Returns:
        AzureFirewallClient: API client.
    """
    from AzureFirewall import AzureFirewallClient

    client_settings = {
        'subscription_id': SUBSCRIPTION_ID,
        'resource_group': RESOURCE_GROUP_NAME,
        'client_id': CLIENT_ID,
        'api_version': '2021-03-01',
        'verify': False,
        'proxy': False,
    }
    return AzureFirewallClient(**client_settings)
def authorization_mock(requests_mock):
    """
    Register a POST mock for the Azure OAuth2 token endpoint so the client can
    "authenticate" without network access.
    """
    token_endpoint = 'https://login.microsoftonline.com/organizations/oauth2/v2.0/token'
    requests_mock.post(token_endpoint, json=get_azure_access_token_mock())
def test_azure_firewall_list_command(requests_mock):
    """
    Scenario: List azure firewalls in resource group or subscription.

    Given:
     - User has provided valid credentials.
    When:
     - azure-firewall-list called.
    Then:
     - Ensure 1 result is returned.
     - Ensure outputs prefix is correct.
     - Ensure the firewall name expected is the same as in the context returned.
    """
    from AzureFirewall import azure_firewall_list_command

    authorization_mock(requests_mock)
    client = get_client_mock()
    requests_mock.get(f'{BASE_URL}/azureFirewalls',
                      json=json.loads(load_mock_response('test_data/firewall/firewall_list.json')))

    result = azure_firewall_list_command(client, {'resource': 'resource_group'})

    assert len(result.outputs) == 1
    assert result.outputs_prefix == 'AzureFirewall.Firewall'
    assert result.outputs[0].get('name') == 'xsoar-firewall'
def test_azure_firewall_get_command(requests_mock):
    """
    Scenario: Retrieve azure firewall information.

    Given:
     - User has provided valid credentials.
    When:
     - azure-firewall-get called.
    Then:
     - Ensure 1 result is returned.
     - Ensure outputs prefix is correct.
     - Ensure the firewall name searched is the same as in the context returned.
    """
    from AzureFirewall import azure_firewall_get_command

    authorization_mock(requests_mock)
    client = get_client_mock()

    firewall_name = 'xsoar-firewall'
    requests_mock.get(f'{BASE_URL}/azureFirewalls/{firewall_name}',
                      json=json.loads(load_mock_response('test_data/firewall/firewall_get.json')))

    result = azure_firewall_get_command(client, {'firewall_names': firewall_name})

    assert len(result[0].outputs) == 1
    assert result[0].outputs_prefix == 'AzureFirewall.Firewall'
    assert result[0].outputs[0].get('name') == firewall_name
def test_azure_firewall_rules_collection_list_command_for_firewall(requests_mock):
    """
    Scenario: List collection rules in firewall.

    Given:
     - User has provided valid credentials.
    When:
     - azure-firewall-rule-collection-list called.
    Then:
     - Ensure 1 result is returned.
     - Ensure outputs prefix is correct.
     - Ensure the firewall name updated is the same as in the context returned.
    """
    from AzureFirewall import azure_firewall_rules_collection_list_command

    authorization_mock(requests_mock)
    client = get_client_mock()

    firewall_name = 'xsoar-firewall'
    requests_mock.get(f'{BASE_URL}/azureFirewalls/{firewall_name}',
                      json=json.loads(load_mock_response('test_data/firewall/firewall_get.json')))

    result = azure_firewall_rules_collection_list_command(
        client, {'firewall_name': firewall_name, 'rule_type': "application_rule"})

    assert len(result.outputs) == 1
    assert result.outputs_key_field == 'id'
    assert result.outputs_prefix == 'AzureFirewall.RuleCollection'
    assert result.outputs[0].get('name') == "my-app-collection"
    assert dict_safe_get(result.outputs[0], ["properties", "rules"])[0].get("name") == "my-app-rule-1"
def test_azure_firewall_rules_collection_list_command_for_policy(requests_mock):
    """
    Scenario: List collection rules in policy.

    Given:
     - User has provided valid credentials.
    When:
     - azure-firewall-rule-collection-list called.
    Then:
     - Ensure 1 result is returned.
     - Ensure outputs prefix is correct.
     - Ensure the rule collection name searched is the same as in the context returned.
     - Ensure the rule collection key (type) searched is the same as in the context returned.
     - Ensure the rule type (type) searched is the same as in the context returned.
    """
    from AzureFirewall import azure_firewall_rules_collection_list_command, get_policy_rule_collection_name, \
        get_policy_rule_name

    authorization_mock(requests_mock)
    client = get_client_mock()

    policy_name = 'xsoar-firewall'
    requests_mock.get(f'{BASE_URL}/firewallPolicies/{policy_name}/ruleCollectionGroups',
                      json=json.loads(load_mock_response('test_data/policy/policy_rule_collection_list.json')))

    rule_type = "application_rule"
    result = azure_firewall_rules_collection_list_command(client, {'policy': policy_name, 'rule_type': rule_type})

    expected_collection_type = get_policy_rule_collection_name(rule_type=rule_type)
    expected_rule_type = get_policy_rule_name(rule_type=rule_type)

    assert len(result.outputs) == 1
    assert result.outputs_key_field == 'id'
    assert result.outputs_prefix == 'AzureFirewall.RuleCollection'
    assert result.outputs[0].get('name') == "DefaultApplicationRuleCollectionGroup"
    rule_collections = dict_safe_get(result.outputs[0], ["properties", "ruleCollections"])
    assert rule_collections[0].get("rules")[0].get('ruleType') == expected_rule_type
    assert rule_collections[0].get("ruleCollectionType") == expected_collection_type
def test_azure_firewall_rules_list_command_for_policy(requests_mock):
    """
    Scenario: List rules in policy.

    Given:
     - User has provided valid credentials.
    When:
     - azure-firewall-rule-list called.
    Then:
     - Ensure 1 result is returned.
     - Ensure outputs prefix is correct.
     - Ensure the rule name searched is the same as in the context returned.
    """
    from AzureFirewall import azure_firewall_rules_list_command

    authorization_mock(requests_mock)
    client = get_client_mock()

    policy_name = 'xsoar-firewall'
    collection_name = "DefaultApplicationRuleCollectionGroup"
    requests_mock.get(f'{BASE_URL}/firewallPolicies/{policy_name}/ruleCollectionGroups/{collection_name}',
                      json=json.loads(load_mock_response('test_data/policy/policy_rule_list.json')))

    result = azure_firewall_rules_list_command(client, {'policy': policy_name, 'collection_name': collection_name})

    assert len(result.outputs) == 1
    assert result.outputs_key_field == 'name'
    assert result.outputs_prefix == 'AzureFirewall.Rule'
    assert result.outputs[0].get('name') == "my-app-rule-1"
def test_azure_firewall_rules_list_command_for_firewall(requests_mock):
    """
    Scenario: List rules in firewall.

    Given:
     - User has provided valid credentials.
    When:
     - azure-firewall-rule-list called.
    Then:
     - Ensure 1 result is returned.
     - Ensure outputs prefix is correct.
     - Ensure the rule name searched is the same as in the context returned.
    """
    from AzureFirewall import azure_firewall_rules_list_command

    authorization_mock(requests_mock)
    client = get_client_mock()

    firewall_name = 'xsoar-firewall'
    requests_mock.get(f'{BASE_URL}/azureFirewalls/{firewall_name}',
                      json=json.loads(load_mock_response('test_data/firewall/firewall_get.json')))

    result = azure_firewall_rules_list_command(
        client, {'firewall_name': firewall_name, 'collection_name': "my-app-collection",
                 'rule_type': "application_rule"})

    assert len(result.outputs) == 1
    assert result.outputs_key_field == 'name'
    assert result.outputs_prefix == 'AzureFirewall.Rule'
    assert result.outputs[0].get('name') == "my-app-rule-1"
def test_azure_firewall_rules_get_command_for_firewall(requests_mock):
    """
    Scenario: Retrieve rule information in firewall.

    Given:
     - User has provided valid credentials.
    When:
     - azure-firewall-rule-get called.
    Then:
     - Ensure outputs prefix is correct.
     - Ensure the rule name searched is the same as in the context returned.
    """
    from AzureFirewall import azure_firewall_rule_get_command

    authorization_mock(requests_mock)
    client = get_client_mock()

    firewall_name = 'xsoar-firewall'
    rule_name = "my-app-rule-1"
    requests_mock.get(f'{BASE_URL}/azureFirewalls/{firewall_name}',
                      json=json.loads(load_mock_response('test_data/firewall/firewall_get.json')))

    result = azure_firewall_rule_get_command(
        client, {'firewall_name': firewall_name, 'collection_name': "my-app-collection",
                 'rule_type': "application_rule", 'rule_name': rule_name})

    assert result.outputs_key_field == 'name'
    assert result.outputs_prefix == 'AzureFirewall.Rule'
    assert result.outputs.get('name') == rule_name
def test_azure_firewall_rule_get_command_for_policy(requests_mock):
    """
    Scenario: Retrieve rule information in policy.

    Given:
     - User has provided valid credentials.
    When:
     - azure-firewall-rule-get called.
    Then:
     - Ensure outputs prefix is correct.
     - Ensure the rule name searched is the same as in the context returned.
    """
    from AzureFirewall import azure_firewall_rule_get_command

    authorization_mock(requests_mock)
    client = get_client_mock()

    policy_name = 'xsoar-firewall'
    collection_name = "DefaultApplicationRuleCollectionGroup"
    rule_name = "my-app-rule-1"
    requests_mock.get(f'{BASE_URL}/firewallPolicies/{policy_name}/ruleCollectionGroups/{collection_name}',
                      json=json.loads(load_mock_response('test_data/policy/policy_rule_list.json')))

    result = azure_firewall_rule_get_command(
        client, {'policy': policy_name, 'collection_name': collection_name, 'rule_name': rule_name})

    assert result.outputs_key_field == 'name'
    assert result.outputs_prefix == 'AzureFirewall.Rule'
    assert result.outputs.get('name') == "my-app-rule-1"
def test_azure_firewall_policy_create_command(requests_mock):
    """
    Scenario: Create firewall policy.

    Given:
     - User has provided valid credentials.
    When:
     - azure-firewall-policy-create called.
    Then:
     - Ensure 1 result is returned.
     - Ensure outputs prefix is correct.
     - Ensure the policy name searched is the same as in the context returned.
    """
    from AzureFirewall import azure_firewall_policy_create_command

    authorization_mock(requests_mock)
    client = get_client_mock()

    policy_name = 'xsoar-policy'
    requests_mock.put(f'{BASE_URL}/firewallPolicies/{policy_name}',
                      json=json.loads(load_mock_response('test_data/policy/policy_create.json')))

    result = azure_firewall_policy_create_command(
        client, {'policy_name': policy_name, 'threat_intelligence_mode': "Turned-off", 'location': "eastus",
                 'tier': "Standard", 'enable_proxy': "False"})

    assert len(result.outputs) == 1
    assert result.outputs_prefix == 'AzureFirewall.Policy'
    assert result.outputs[0].get('name') == policy_name
def test_azure_firewall_policy_update_command(requests_mock):
    """
    Scenario: Update firewall policy.

    Given:
     - User has provided valid credentials.
    When:
     - azure-firewall-policy-update called.
    Then:
     - Ensure 1 result is returned.
     - Ensure outputs prefix is correct.
     - Ensure the policy name searched is the same as in the context returned.
    """
    from AzureFirewall import azure_firewall_policy_update_command

    authorization_mock(requests_mock)
    client = get_client_mock()

    policy_name = 'xsoar-policy'
    policy_url = f'{BASE_URL}/firewallPolicies/{policy_name}'
    # The update command first reads the current policy (GET), then writes it back (PUT).
    requests_mock.get(policy_url, json=json.loads(load_mock_response('test_data/policy/policy_get.json')))
    requests_mock.put(policy_url, json=json.loads(load_mock_response('test_data/policy/policy_update.json')))

    command_arguments = {
        'base_policy_id': '/firewallPolicies/my-policy',
        'domains': 'microsoft.com',
        'enable_proxy': 'True',
        'ips': '192.168.127.12',
        'policy_name': policy_name,
        'threat_intelligence_mode': 'Alert',
    }
    result = azure_firewall_policy_update_command(client, command_arguments)

    assert len(result.outputs) == 1
    assert result.outputs_prefix == 'AzureFirewall.Policy'
    assert result.outputs[0].get('name') == policy_name
def test_azure_firewall_policy_list_command(requests_mock):
    """
    Scenario: List policy in resource group or subscription.

    Given:
     - User has provided valid credentials.
    When:
     - azure-firewall-policy-list called.
    Then:
     - Ensure 1 result is returned.
     - Ensure outputs prefix is correct.
     - Ensure the policy name searched is the same as in the context returned.
    """
    from AzureFirewall import azure_firewall_policy_list_command

    authorization_mock(requests_mock)
    client = get_client_mock()
    requests_mock.get(f'{BASE_URL}/firewallPolicies',
                      json=json.loads(load_mock_response('test_data/policy/policy_list.json')))

    result = azure_firewall_policy_list_command(client, {})

    assert len(result.outputs) == 1
    assert result.outputs_prefix == 'AzureFirewall.Policy'
    assert result.outputs[0].get('name') == "xsoar-policy"
def test_azure_firewall_policy_get_command(requests_mock):
    """
    Scenario: Retrieve policy information.

    Given:
     - User has provided valid credentials.
    When:
     - azure-firewall-policy-get called.
    Then:
     - Ensure 1 result is returned.
     - Ensure outputs prefix is correct.
     - Ensure the policy name searched is the same as in the context returned.
    """
    from AzureFirewall import azure_firewall_policy_get_command

    authorization_mock(requests_mock)
    client = get_client_mock()

    policy_name = 'xsoar-policy'
    requests_mock.get(f'{BASE_URL}/firewallPolicies/{policy_name}',
                      json=json.loads(load_mock_response('test_data/policy/policy_get.json')))

    result = azure_firewall_policy_get_command(client, {'policy_names': policy_name})

    assert len(result) == 1
    assert len(result[0].outputs) == 1
    assert result[0].outputs_prefix == 'AzureFirewall.Policy'
    assert result[0].outputs[0].get('name') == policy_name
def test_azure_firewall_policy_delete_command(requests_mock):
"""
Scenario: Delete policy resource.
Given:
- User has provided valid credentials.
When:
- azure-firewall-policy-delete called.
Then:
- Ensure that the output is empty (None).
- Ensure readable output message content.
"""
from | |
# gh_stars: 1-10
# ----------------------------------------------------------------------------
# Gimel Studio Copyright 2019-2021 by <NAME> and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file includes code from wx.lib.agw.flatmenu and the wxPython demo
# ----------------------------------------------------------------------------
import wx
import wx.lib.agw.flatmenu as flatmenu
from wx.lib.agw.artmanager import ArtManager, RendererBase, DCSaver
from wx.lib.agw.fmresources import ControlFocus, ControlPressed
def switchRGBtoBGR(colour):
    """Return a new wx.Colour with the red and blue channels swapped."""
    red, green, blue = colour.Red(), colour.Green(), colour.Blue()
    return wx.Colour(blue, green, red)
class UIMenuBarRenderer(flatmenu.FMRenderer):
def __init__(self):
flatmenu.FMRenderer.__init__(self)
self.highlightCheckAndRadio = True
self.menuFaceColour = wx.Colour("#252525")
self.menuBarFaceColour = wx.Colour("#252525")
self.menuBarFocusFaceColour = wx.Colour("#5874C5")
self.menuBarFocusBorderColour = wx.Colour("#5874C5")
self.menuBarPressedFaceColour = wx.Colour("#5874C5")
self.menuBarPressedBorderColour = wx.Colour("#5874C5")
self.menuFocusFaceColour = wx.Colour("#5874C5")
self.menuFocusBorderColour = wx.Colour("#5874C5")
self.menuPressedFaceColour = wx.Colour("#5874C5")
self.menuPressedBorderColour = wx.Colour("#5874C5")
self.buttonFaceColour = wx.Colour("#5874C5")
self.buttonBorderColour = wx.Colour("#5874C5")
self.buttonFocusFaceColour = wx.Colour("#5874C5")
self.buttonFocusBorderColour = wx.Colour("#5874C5")
self.buttonPressedFaceColour = wx.Colour("#5874C5")
self.buttonPressedBorderColour = wx.Colour("#5874C5")
def DrawMenuItem(self, item, dc, xCoord, yCoord, imageMarginX, markerMarginX, textX, rightMarginX, selected=False, backgroundImage=None):
"""
Draws the menu item.
:param `item`: a :class:`FlatMenuItem` instance;
:param `dc`: an instance of :class:`wx.DC`;
:param integer `xCoord`: the current x position where to draw the menu;
:param integer `yCoord`: the current y position where to draw the menu;
:param integer `imageMarginX`: the spacing between the image and the menu border;
:param integer `markerMarginX`: the spacing between the checkbox/radio marker and
the menu border;
:param integer `textX`: the menu item label x position;
:param integer `rightMarginX`: the right margin between the text and the menu border;
    :param bool `selected`: ``True`` if this menu item is currently hovered by the mouse,
``False`` otherwise.
:param `backgroundImage`: if not ``None``, an instance of :class:`wx.Bitmap` which will
become the background image for this :class:`FlatMenu`.
"""
borderXSize = item._parentMenu.GetBorderXWidth()
itemHeight = item._parentMenu.GetItemHeight()
menuWidth = item._parentMenu.GetMenuWidth()
# Define the item actual rectangle area
itemRect = wx.Rect(xCoord, yCoord, menuWidth, itemHeight)
# Define the drawing area
rect = wx.Rect(xCoord + 2, yCoord, menuWidth - 4, itemHeight)
# Draw the background
backColour = self.menuFaceColour
penColour = backColour
backBrush = wx.Brush(backColour)
leftMarginWidth = item._parentMenu.GetLeftMarginWidth()
if backgroundImage is None:
pen = wx.Pen(penColour)
dc.SetPen(pen)
dc.SetBrush(backBrush)
dc.DrawRectangle(rect)
# Draw the left margin gradient
if self.drawLeftMargin:
self.DrawLeftMargin(item, dc, itemRect)
# check if separator
if item.IsSeparator():
# Separator is a small grey line separating between menu items.
sepWidth = xCoord + menuWidth - textX - 1
self.DrawSeparator(dc, xCoord, yCoord, textX, sepWidth)
return
# Keep the item rect
item._rect = itemRect
# Get the bitmap base on the item state (disabled, selected ..)
bmp = item.GetSuitableBitmap(selected)
# First we draw the selection rectangle
if selected:
self.DrawMenuButton(dc, rect.Deflate(1, 0), ControlFocus)
#copy.Inflate(0, menubar._spacer)
if bmp.IsOk():
# Calculate the position to place the image
imgHeight = bmp.GetHeight()
imgWidth = bmp.GetWidth()
if imageMarginX == 0:
xx = rect.x + (leftMarginWidth - imgWidth) / 2
else:
xx = rect.x + ((leftMarginWidth - rect.height) - imgWidth) / 2 + rect.height
yy = rect.y + (rect.height - imgHeight) / 2
dc.DrawBitmap(bmp, xx, yy, True)
if item.GetKind() == wx.ITEM_CHECK:
# Checkable item
if item.IsChecked():
# Draw surrounding rectangle around the selection box
xx = rect.x + 1
yy = rect.y + 1
rr = wx.Rect(xx, yy, rect.height - 2, rect.height - 2)
if not selected and self.highlightCheckAndRadio:
self.DrawButton(dc, rr, ControlFocus)
dc.DrawBitmap(item._checkMarkBmp, rr.x + (rr.width - 16) / 2, rr.y + (rr.height - 16) / 2, True)
if item.GetKind() == wx.ITEM_RADIO:
# Checkable item
if item.IsChecked():
# Draw surrounding rectangle around the selection box
xx = rect.x + 1
yy = rect.y + 1
rr = wx.Rect(xx, yy, rect.height - 2, rect.height - 2)
if not selected and self.highlightCheckAndRadio:
self.DrawButton(dc, rr, ControlFocus)
dc.DrawBitmap(item._radioMarkBmp, rr.x + (rr.width - 16) / 2, rr.y + (rr.height - 16) / 2, True)
# Draw text - without accelerators
text = item.GetLabel()
if text:
font = item.GetFont()
if font is None:
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
# EDITED - This is my edit to always have the font color white:
enabledTxtColour = wx.Colour("#fff")
disabledTxtColour = self.itemTextColourDisabled
textColour = (item.IsEnabled() and [enabledTxtColour] or [disabledTxtColour])[0]
if item.IsEnabled() and item.GetTextColour():
textColour = item.GetTextColour()
dc.SetFont(font)
w, h = dc.GetTextExtent(text)
dc.SetTextForeground(textColour)
if item._mnemonicIdx != wx.NOT_FOUND:
# We divide the drawing to 3 parts
text1 = text[0:item._mnemonicIdx]
text2 = text[item._mnemonicIdx]
text3 = text[item._mnemonicIdx + 1:]
w1, dummy = dc.GetTextExtent(text1)
w2, dummy = dc.GetTextExtent(text2)
w3, dummy = dc.GetTextExtent(text3)
posx = xCoord + textX + borderXSize
posy = (itemHeight - h) / 2 + yCoord
# Draw first part
dc.DrawText(text1, posx, posy)
# mnemonic
if "__WXGTK__" not in wx.Platform:
font.SetUnderlined(True)
dc.SetFont(font)
posx += w1
dc.DrawText(text2, posx, posy)
# last part
font.SetUnderlined(False)
dc.SetFont(font)
posx += w2
dc.DrawText(text3, posx, posy)
else:
w, h = dc.GetTextExtent(text)
dc.DrawText(text, xCoord + textX + borderXSize, (itemHeight - h) / 2 + yCoord)
# Now draw accelerator
# Accelerators are aligned to the right
if item.GetAccelString():
accelWidth, accelHeight = dc.GetTextExtent(item.GetAccelString())
dc.DrawText(item.GetAccelString(), xCoord + rightMarginX -
accelWidth, (itemHeight - accelHeight) / 2 + yCoord)
# Check if this item has sub-menu - if it does, draw
# right arrow on the right margin
if item.GetSubMenu():
# Draw arrow
rightArrowBmp = wx.Bitmap(menu_right_arrow_xpm)
rightArrowBmp.SetMask(wx.Mask(rightArrowBmp, wx.WHITE))
xx = xCoord + rightMarginX + borderXSize
rr = wx.Rect(xx, rect.y + 1, rect.height - 2, rect.height - 2)
dc.DrawBitmap(rightArrowBmp, rr.x + 4, rr.y + (rr.height - 16) / 2, True)
def DrawMenuBar(self, menubar, dc):
"""
Draws everything for :class:`FlatMenuBar`.
:param `menubar`: an instance of :class:`FlatMenuBar`.
:param `dc`: an instance of :class:`wx.DC`.
"""
#artMgr = ArtManager.Get()
fnt = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
# EDITED - This is my edit to always make the font color white
textColour = wx.Colour("#fff")
highlightTextColour = wx.Colour("#fff")
dc.SetFont(fnt)
dc.SetTextForeground(textColour)
clientRect = menubar.GetClientRect()
self.DrawMenuBarBackground(dc, clientRect)
padding, dummy = dc.GetTextExtent("W")
posx = 0
posy = menubar._margin
# Monkey-patch padding between menus
padding += 11
# ---------------------------------------------------------------------------
# Draw as much items as we can if the screen is not wide enough, add all
# missing items to a drop down menu
# ---------------------------------------------------------------------------
menuBarRect = menubar.GetClientRect()
# mark all items as non-visibles at first
for item in menubar._items:
item.SetRect(wx.Rect())
for item in menubar._items:
# Handle accelerator ('&')
title = item.GetTitle()
fixedText = title
location, labelOnly = flatmenu.GetAccelIndex(fixedText)
# Get the menu item rect
textWidth, textHeight = dc.GetTextExtent(fixedText)
#rect = wx.Rect(posx+menubar._spacer/2, posy, textWidth, textHeight)
rect = wx.Rect(posx + padding / 2, posy, textWidth, textHeight)
# Can we draw more??
# the +DROP_DOWN_ARROW_WIDTH is the width of the drop down arrow
if posx + rect.width + flatmenu.DROP_DOWN_ARROW_WIDTH >= menuBarRect.width:
break
# In this style the button highlight includes the menubar margin
button_rect = wx.Rect(*rect)
button_rect.height = menubar._menuBarHeight
#button_rect.width = rect.width + menubar._spacer
button_rect.width = rect.width + padding
button_rect.x = posx
button_rect.y = 0
# Keep the item rectangle, will be used later in functions such
# as 'OnLeftDown', 'OnMouseMove'
copy = wx.Rect(*button_rect)
#copy.Inflate(0, menubar._spacer)
item.SetRect(copy)
selected = False
if item.GetState() == ControlFocus:
self.DrawMenuBarButton(dc, button_rect, ControlFocus)
dc.SetTextForeground(highlightTextColour)
selected = True
else:
dc.SetTextForeground(textColour)
ww, hh = dc.GetTextExtent(labelOnly)
textOffset = (rect.width - ww) / 2
if not menubar._isLCD and item.GetTextBitmap().IsOk() and not selected:
dc.DrawBitmap(item.GetTextBitmap(), rect.x, rect.y, True)
elif not menubar._isLCD and item.GetSelectedTextBitmap().IsOk() and selected:
dc.DrawBitmap(item.GetSelectedTextBitmap(), rect.x, rect.y, True)
else:
if not menubar._isLCD:
# Draw the text on a bitmap using memory dc,
# so on following calls we will use this bitmap instead
# of calculating everything from scratch
bmp = wx.Bitmap(rect.width, rect.height)
memDc = wx.MemoryDC()
memDc.SelectObject(bmp)
if selected:
memDc.SetTextForeground(highlightTextColour)
else:
memDc.SetTextForeground(textColour)
# Fill the bitmap with the masking colour
memDc.SetPen(wx.Pen(wx.Colour(255, 0, 0)))
memDc.SetBrush(wx.Brush(wx.Colour(255, 0, 0)))
memDc.DrawRectangle(0, 0, rect.width, rect.height)
memDc.SetFont(fnt)
if location == wx.NOT_FOUND or location >= len(fixedText):
# draw the text
if not menubar._isLCD:
memDc.DrawText(title, textOffset, 0)
dc.DrawText(title, rect.x + textOffset, rect.y)
else:
# underline the first '&'
before = labelOnly[0:location]
underlineLetter = labelOnly[location]
after = labelOnly[location + 1:]
# before
if not | |
temptype.lower().startswith(x):
if is_lat: return -1
else: return -1
for x in ('infobox pittsburgh neighborhood', 'info/assentamento/madeira',
'info/localidade da madeira',
'info/assentamento/marrocos', 'info/localidade dos eua', 'info/pousadapc',
'info/antigas freguesias de portugal'):
if temptype.lower().startswith(x):
if is_lat: return 1
else: return -1
return 1
# Get an argument (ARGSEARCH) by name from a hash table (ARGS). Multiple
# synonymous names can be looked up by giving a list or tuple for ARGSEARCH.
# Other parameters control warning messages.
def getarg(argsearch, temptype, args, rawargs, warnifnot=True):
    """Fetch a template argument by name from the hash table ARGS.

    ARGSEARCH may be a single name or a list/tuple of synonymous names,
    tried in order.  TEMPTYPE and RAWARGS are used only to build warning
    messages.  When nothing is found, a warning is emitted (if WARNIFNOT
    or the 'some' debug flag is set) and None is returned.
    """
    if isinstance(argsearch, (tuple, list)):
        for candidate in argsearch:
            found = args.get(candidate, None)
            if found is not None:
                return found
        if warnifnot or debug['some']:
            wikiwarning("None of params %s seen in template {{%s|%s}}" % (
                ','.join(argsearch), temptype, bound_string_length('|'.join(rawargs))))
    else:
        found = args.get(argsearch, None)
        if found is not None:
            return found
        if warnifnot or debug['some']:
            wikiwarning("Param %s not seen in template {{%s|%s}}" % (
                argsearch, temptype, bound_string_length('|'.join(rawargs))))
    return None
# Utility function for get_latd_coord().
# Extract out either latitude or longitude from a template of type
# TEMPTYPE with arguments ARGS. LATD/LATM/LATS are lists or tuples of
# parameters to look up to retrieve the appropriate value. OFFPARAM is the
# list of possible parameters indicating the offset to the N, S, E or W.
# IS_LAT is True if a latitude is being extracted, False for longitude.
def get_lat_long_1(temptype, args, rawargs, latd, latm, lats, offparam, is_lat):
    """Extract one coordinate (latitude if IS_LAT, else longitude) from a
    template of type TEMPTYPE with arguments ARGS (raw form in RAWARGS).

    LATD/LATM/LATS are the candidate parameter names for degrees, minutes
    and seconds; OFFPARAM lists the parameters naming the N/S/E/W offset.
    Returns a signed decimal degree value, or None on a bad offset value.
    """
    degs = getarg(latd, temptype, args, rawargs)
    mins = getarg(latm, temptype, args, rawargs, warnifnot=False)
    secs = getarg(lats, temptype, args, rawargs, warnifnot=False)
    hemis = getarg(offparam, temptype, args, rawargs)
    if hemis is None:
        # No explicit hemisphere parameter; guess from the template type.
        sign = get_hemisphere(temptype, is_lat)
    else:
        if is_lat:
            lookup = convert_ns
        elif Opts.lang == 'de':
            lookup = convert_ew_german
        else:
            lookup = convert_ew
        sign = lookup.get(hemis, None)
        if sign is None:
            wikiwarning("%s for template type %s has bad value: [%s]" %
                        (offparam, temptype, hemis))
            return None
    return convert_dms(sign, degs, mins, secs)
# Parameter names (lowercased, spaces/underscores already stripped) that can
# carry the latitude-degrees value across the various per-language templates.
# BUG FIX: the list previously ended with 'breddegrad' twice; the duplicate
# entry was redundant (lookups scan the tuple in order) and has been removed.
latd_arguments = ('latd', 'latg', 'latdeg', 'latdegrees', 'latitudedegrees',
                  'mouthlatd', # Mouth of rivers, used as coordinate
                  'latitudinegradi', 'latgradi', 'latitudined', 'latitudegraden',
                  'breitengrad', 'koordinatebreitengrad', 'breddegrad')
def get_latd_coord(temptype, args, rawargs):
    '''Given a template of type TEMPTYPE with arguments ARGS (converted into
    a hash table; also available in raw form as RAWARGS), assumed to have
    a latitude/longitude specification in it using latd/lat_deg/etc. and
    longd/lon_deg/etc., extract out and return a tuple of decimal
    (latitude, longitude) values.'''
    # Note that these already have spaces and underscores removed, so should
    # not be present.
    lat = get_lat_long_1(temptype, args, rawargs,
                         latd_arguments,
                         ('latm', 'latmin', 'latminutes', 'latitudeminutes',
                          'mouthlatm',
                          'latitudineprimi', 'latprimi',
                          'latitudineminuti', 'latminuti', 'latitudinem', 'latitudeminuten',
                          'breitenminute', 'koordinatebreitenminute', 'breddemin'),
                         ('lats', 'latsec', 'latseconds', 'latitudeseconds',
                          # BUG FIX: the river-mouth latitude-seconds param
                          # ('mouth_lat_s' with underscores stripped) was missing
                          'mouthlats',
                          'latitudinesecondi', 'latsecondi', 'latitudines', 'latitudeseconden',
                          'breitensekunde', 'koordinatebreitensekunde'),
                         ('latns', 'mouthlatns', 'latp', 'lap', 'latdir', 'latdirection', 'latitudinens',
                          'koordinatebreite'),
                         is_lat=True)
    long = get_lat_long_1(temptype, args, rawargs,
                          # Typos like Longtitude do occur in the Spanish Wikipedia at least
                          ('longd', 'lond', 'longg', 'long', 'longdeg', 'londeg',
                           'longdegrees', 'londegrees',
                           # BUG FIX: was 'mouthlats' (a latitude-seconds name);
                           # river-mouth longitude degrees is 'mouth_long_d'
                           'mouthlongd',
                           'longitudinegradi', 'longgradi', 'longitudined',
                           'longitudedegrees', 'longtitudedegrees',
                           'longitudegraden',
                           u'längengrad', u'koordinatelängengrad',
                           'laengengrad', 'lengdegrad', u'længdegrad'),
                          ('longm', 'lonm', 'longmin', 'lonmin',
                           'longminutes', 'lonminutes',
                           # BUG FIX: was 'mouthlatm' (latitude minutes)
                           'mouthlongm',
                           'longitudineprimi', 'longprimi',
                           'longitudineminuti', 'longminuti', 'longitudinem',
                           'longitudeminutes', 'longtitudeminutes',
                           'longitudeminuten',
                           u'längenminute', u'koordinatelängenminute', u'længdemin'),
                          ('longs', 'lons', 'longsec', 'lonsec',
                           'longseconds', 'lonseconds',
                           # BUG FIX: was 'mouthlats' (latitude seconds)
                           'mouthlongs',
                           'longitudinesecondi', 'longsecondi', 'longitudines',
                           'longitudeseconds', 'longtitudeseconds',
                           'longitudeseconden',
                           u'längensekunde', u'koordinatelängensekunde'),
                          ('longew', 'mouthlongew', 'lonew', 'longp', 'lonp', 'longdir', 'londir',
                           'longdirection', 'londirection', 'longitudineew',
                           u'koordinatelänge'),
                          is_lat=False)
    return (lat, long)
def get_built_in_lat_long_1(temptype, args, rawargs, latd, latm, lats, mult):
    """Fetch degrees/minutes/seconds using the parameter-name lists LATD,
    LATM and LATS and combine them into a signed decimal value with MULT
    (+1 or -1).  Only a missing degrees component triggers a warning."""
    specs = ((latd, True), (latm, False), (lats, False))
    degs, mins, secs = [getarg(params, temptype, args, rawargs, warnifnot=warn)
                        for params, warn in specs]
    return convert_dms(mult, degs, mins, secs)
# stopniN, stopniS, stopniE, stopniW occur in the Polish Wikipedia
# Parameter names whose presence encodes the hemisphere directly in the
# argument name (all names lowercased/normalized).
built_in_latd_north_arguments = ('stopnin', 'degn')
built_in_latd_south_arguments = ('stopnis', 'degs')
# NOTE(review): despite the "north"/"south" names below, these two hold the
# east/west longitude parameters ('stopnie'/'dege' = E, 'stopniw'/'degw' = W);
# see how get_built_in_lat_coord maps them to +1/-1 longitude signs.
built_in_longd_north_arguments = ('stopnie', 'dege')
built_in_longd_south_arguments = ('stopniw', 'degw')
def get_built_in_lat_coord(temptype, args, rawargs):
    '''Given a template of type TEMPTYPE with arguments ARGS (converted into
    a hash table; also available in raw form as RAWARGS), assumed to have
    a latitude/longitude specification in it using stopniN/etc. (where the
    direction NSEW is built into the argument name), extract out and return a
    tuple of decimal (latitude, longitude) values.'''
    # Determine the latitude sign from whichever direction-bearing
    # parameter is present (N -> +1, S -> -1).
    if getarg(built_in_latd_north_arguments, temptype, args, rawargs) is not None:
        lat_sign = 1
    elif getarg(built_in_latd_south_arguments, temptype, args, rawargs) is not None:
        lat_sign = -1
    else:
        wikiwarning("Didn't see any appropriate stopniN/stopniS param")
        lat_sign = 1 # Arbitrarily set to N, probably accurate in Poland
    lat = get_built_in_lat_long_1(temptype, args, rawargs,
                                  ('stopnin', 'stopnis', 'degn', 'degs'),
                                  ('minutn', 'minuts', 'minn', 'mins'),
                                  ('sekundn', 'sekunds', 'secn', 'secs'),
                                  lat_sign)
    # Same idea for longitude (E -> +1, W -> -1).
    if getarg(built_in_longd_north_arguments, temptype, args, rawargs) is not None:
        long_sign = 1
    elif getarg(built_in_longd_south_arguments, temptype, args, rawargs) is not None:
        long_sign = -1
    else:
        wikiwarning("Didn't see any appropriate stopniE/stopniW param")
        long_sign = 1 # Arbitrarily set to E, probably accurate in Poland
    long = get_built_in_lat_long_1(temptype, args, rawargs,
                                   ('stopnie', 'stopniw', 'dege', 'degw'),
                                   ('minute', 'minutw', 'mine', 'minw'),
                                   ('sekunde', 'sekundw', 'sece', 'secw'),
                                   long_sign)
    return (lat, long)
# Parameter names (lowercased, spaces/underscores stripped) that carry a full
# latitude value; several candidates are deliberately commented out below
# because they are ambiguous or tied to non-article/non-Earth coordinates.
latitude_arguments = ('latitude', 'latitud', 'latitudine',
                      'breitengrad',
                      u'mündunglatgrad', # Mouth of rivers in German Wikipedia
                      # 'breite', Sometimes used for latitudes but also for other types of width
                      #'lat' # Appears in non-article coordinates
                      #'latdec' # Appears to be associated with non-Earth coordinates
                      )
# Corresponding parameter names for a full longitude value.
longitude_arguments = ('longitude', 'longitud', 'longitudine',
                       u'längengrad', u'laengengrad',
                       u'mündunglonggrad', # Mouth of rivers in German Wikipedia
                       # u'länge', u'laenge', Sometimes used for longitudes but also for other lengths
                       #'long' # Appears in non-article coordinates
                       #'longdec' # Appears to be associated with non-Earth coordinates
                       )
def get_latitude_coord(temptype, args, rawargs):
    '''Given a template of type TEMPTYPE with arguments ARGS, assumed to have
    a latitude/longitude specification in it, extract out and return a tuple of
    decimal (latitude, longitude) values.'''
    # German-style values (e.g. 72/53/15/E) also occur with 'latitude' and
    # friends, so every raw value is run through the German-style parser.
    def fetch(paramnames):
        return get_german_style_coord(
            getarg(paramnames, temptype, args, rawargs))
    return (fetch(latitude_arguments), fetch(longitude_arguments))
def get_infobox_ort_coord(temptype, args, rawargs):
    '''Given a template 'Infobox Ort' with arguments ARGS, assumed to have
    a latitude/longitude specification in it, extract out and return a tuple of
    decimal (latitude, longitude) values.'''
    # This template is special-cased rather than matched anywhere because
    # 'breite' ("width") and 'länge/laenge' ("length") show up in many
    # unrelated templates where they have nothing to do with coordinates.
    #
    # German-style values (e.g. 72/53/15/E) are handled, as in
    # get_latitude_coord().
    def fetch(paramnames):
        return get_german_style_coord(
            getarg(paramnames, temptype, args, rawargs))
    return (fetch((u'breite',)), fetch((u'länge', u'laenge')))
# Utility function for get_coord(). Extract out the latitude or longitude
# values out of a Coord structure. Return a tuple (OFFSET, VAL) for decimal
# latitude or longitude VAL and OFFSET indicating the offset of the next
# argument after the arguments used to produce the value.
def get_coord_1(args, convert_nsew):
    """Extract one decimal latitude or longitude from the front of ARGS.

    CONVERT_NSEW maps hemisphere letters (N/S/E/W) to +1/-1.  Returns a
    tuple (OFFSET, VAL): VAL is the decimal value and OFFSET is the index
    of the next argument after those consumed here.

    ROBUSTNESS FIX: the original indexed args[1..3] unconditionally and
    raised IndexError on argument lists shorter than four entries; each
    probe is now guarded by a length check.
    """
    nargs = len(args)
    if nargs > 1 and args[1] in convert_nsew:
        # {{Coord|D|N|...}}
        d = args[0]; m = 0; s = 0; i = 1
    elif nargs > 2 and args[2] in convert_nsew:
        # {{Coord|D|M|N|...}}
        d = args[0]; m = args[1]; s = 0; i = 2
    elif nargs > 3 and args[3] in convert_nsew:
        # {{Coord|D|M|S|N|...}}
        d = args[0]; m = args[1]; s = args[2]; i = 3
    else:
        # Will happen e.g. in the style where only positive/negative are given
        return (1, convert_dms(1, args[0], 0, 0))
    return (i+1, convert_dms(convert_nsew[args[i]], d, m, s))
# FIXME! To be more accurate, we need to look at the template parameters,
# which, despite the claim below, ARE quite interesting. In fact, if the
# parameter 'display=title' is seen (or variant like 'display=inline,title'),
# then we have *THE* correct coordinate for the article. So we need to
# return this fact if known, as an additional argument. See comments
# below at extract_coordinates_from_article().
def get_coord(temptype, args):
'''Parse a Coord template and return a tuple (lat,long) for latitude and
longitude. TEMPTYPE is the template name. ARGS is the raw arguments for
the template. Coord templates are one of four types:
{{Coord|44.112|-87.913}}
{{Coord|44.112|N|87.913|W}}
{{Coord|44|6.72|N|87|54.78|W}}
{{Coord|44|6|43.2|N|87|54|46.8|W}}
Note that all four of the above are equivalent.
In addition, extra "template" or "coordinate" parameters can be given.
The template parameters mostly control display and are basically
uninteresting. (FIXME: Not true, see above.) However, the coordinate
parameters contain lots of potentially useful information that can be
used as features or whatever. See
http://en.wikipedia.org/wiki/Template:Coord for more information.
The types of coordinate parameters are:
type: country, city, city(###) where ### is the population, isle, river, etc.
Very useful feature; can also be used to filter uninteresting info as
some articles will have multiple coordinates in them.
scale: indicates the map scale (note that type: also specifies a default scale)
dim: diameter of viewing circle centered on coordinate (gives some sense | |
+ " :: " + \
mapped + ", " + sequence + ")"
def visit_SetComp(self, exp: ast.SetComp) -> str:
    """Translate a Python set comprehension into a Dafny set comprehension.

    Only a single generator with a single bound name is supported.

    BUG FIX: the original referenced `condition` in the returned string even
    when the generator had no `if` clauses, raising NameError; a filterless
    comprehension now emits no condition.
    """
    # FIXME maybe we can visit generators
    # to merge common part in SetComp and ListComp
    assert exp.generators
    assert len(exp.generators) == 1  # TODO support multiple generators
    gen = exp.generators[0]
    assert isinstance(gen.target, ast.Name)  # TODO support multiple bounded variables
    var = self.visit(gen.target)
    sequence = self.visit(gen.iter)
    condition = ""
    if gen.ifs:
        # The bound variable is only in scope while visiting the filters.
        self._current_namespace.enter_quantification([var])
        condition = "&&" + "&&".join(map(self.visit, gen.ifs))
        self._current_namespace.exit()
    # Likewise for the mapped element expression.
    self._current_namespace.enter_quantification([var])
    mapped = self.visit(exp.elt)
    self._current_namespace.exit()
    return "(set " + var + " | " + var + " in " + sequence + condition + \
           " :: " + mapped + ")"
# The following expression forms have no Dafny counterpart: dict
# comprehensions are merely not implemented yet, while generators and
# coroutine expressions are deliberately rejected.
def visit_DictComp(self, exp):
    raise NotImplementedError("Dictionary comprehension expression is not supported yet")

def visit_GeneratorExp(self, exp):
    raise RuntimeError("Generator expression will not be supported")

def visit_Await(self, exp):
    raise RuntimeError("\"await\" expression will not be supported")

def visit_Yield(self, exp):
    raise RuntimeError("\"yield\" expression will not be supported")

def visit_YieldFrom(self, exp):
    raise RuntimeError("\"yield from\" expression will not be supported")
def visit_Compare(self, exp) -> str:
    """Translate a (possibly chained) comparison, e.g. ``a < b <= c``,
    into a parenthesized Dafny expression."""
    pieces = [self.visit(exp.left)]
    for op, comparator in zip(exp.ops, exp.comparators):
        pieces.append(self.visit(op))
        pieces.append(self.visit(comparator))
    return "(" + "".join(pieces) + ")"
def visit_Call(self, exp) -> str:
    # Delegate call translation to the shared base-class implementation.
    return super().visit_Call(exp)

def visit_Num(self, exp) -> str:
    # Numeric literals print identically in Dafny.
    return str(exp.n)

def visit_Str(self, exp) -> str:
    # In an IOA spec a string literal is inline documentation; emit it as
    # a Dafny block comment. Elsewhere strings are unsupported.
    if self._scope == IOA.IOA_SPEC:
        return "/*" + exp.s + "*/"
    raise NotImplementedError

def visit_FormattedValue(self, exp):
    raise NotImplementedError

def visit_JoinedStr(self, exp):
    raise NotImplementedError

def visit_Bytes(self, exp):
    raise NotImplementedError

def visit_NameConstant(self, exp) -> str:
    # Map Python's True/False to Dafny's lowercase boolean literals.
    if exp.value is True:
        return "true"
    if exp.value is False:
        return "false"
    # else: any other constant (e.g. None) has no Dafny counterpart
    raise RuntimeError("Unsupported Python constant" + str(exp.value))

def visit_Ellipsis(self, exp):
    raise NotImplementedError

def visit_Constant(self, exp):
    raise NotImplementedError
def visit_Attribute(self, exp) -> str:
    """Translate attribute access to dotted notation.

    An integer literal receiver is parenthesized: printing 3.__abs__()
    is a syntax error, so we need (3).__abs__().
    """
    base = self.visit(exp.value)
    if isinstance(exp.value, ast.Num) and isinstance(exp.value.n, int):
        base = "(" + base + ")"
    return base + "." + exp.attr
def visit_Subscript(self, exp) -> str:
    # Delegate subscript translation to the shared base-class implementation.
    return super().visit_Subscript(exp)

def visit_Select(self, exp):
    # A subscript used as an assignment target must be rewritten at the
    # statement level (visit_StmtAssign), never rendered here.
    if isinstance(exp.ctx, ast.Store) or \
            isinstance(exp.ctx, ast.AugStore):
        assert isinstance(exp.value, ast.Name)
        raise RuntimeError(
            "Subscript expression as L-value should've been handled by"
            "visit_StmtAssign")
    # else: ordinary read access maps to Dafny's bracket indexing.
    return self.visit(exp.value) + "[" + self.visit(exp.slice) + "]"
def visit_TypeHint(self, exp) -> str:
    """Render a generic collection type hint using Dafny's angle-bracket
    syntax, e.g. ``seq<int>``."""
    constructor = self.visit(exp.value)
    assert constructor in ['seq', 'set', 'map', 'multiset']
    parameter = self.visit(exp.slice)
    # Angle brackets are used for generic type in Dafny
    return constructor + "<" + parameter + ">"
def visit_Starred(self, exp):
    raise NotImplementedError

def visit_Name(self, exp):
    # Name resolution is shared with the base translator.
    return super().visit_Name(exp)

def visit_List(self, exp) -> str:
    # Python list displays map to Dafny sequence displays.
    return "[" + ", ".join(map(self.visit, exp.elts)) + "]"

def visit_Tuple(self, exp) -> str:
    # Tuples keep their parenthesized form in Dafny.
    return "(" + ", ".join(map(self.visit, exp.elts)) + ")"
def visit_Slice(self, slc):
    """Translate a Python slice into a Dafny subsequence range ``lo..hi``.

    Either bound may be omitted; a step is rejected because Dafny
    subsequences have no stride.

    BUG FIX: the upper-bound branch previously visited ``slc.lower``
    again, so ``x[a:b]`` rendered as ``a..a`` instead of ``a..b``.
    """
    ret = ""
    if slc.lower:
        ret += self.visit(slc.lower)
    ret += ".."
    if slc.upper:
        ret += self.visit(slc.upper)
    if slc.step:
        raise RuntimeError(
            "Dafny does not support step size in taking sub-sequences")
    return ret
def visit_ExtSlice(self, slc):
    # Only fully-bounded dimensions without steps are supported; each is
    # rendered as a "lower: upper" pair, joined by commas.
    assert all(map(lambda d: d.lower and d.upper and not d.step, slc.dims))
    return ', '.join([self.visit(d.lower) + ": " + self.visit(d.upper) for d in slc.dims])

def visit_Index(self, slc):
    # An Index node simply wraps the subscript expression.
    return self.visit(slc.value)
# ---------------------------------------------------------------------
# Operator-token visitors: each maps a Python AST operator node to the
# corresponding Dafny operator token (the node argument is unused, hence
# the `_` parameter). Operators with no Dafny counterpart raise instead.
# ---------------------------------------------------------------------
def visit_And(self, _) -> str:
    return "&&"

def visit_Or(self, _) -> str:
    return "||"

def visit_Add(self, _) -> str:
    return "+"

def visit_Sub(self, _) -> str:
    return "-"

def visit_Mult(self, _) -> str:
    return "*"

def visit_MatMult(self, _):
    raise RuntimeError("Matrix multiplication will not be supported")

def visit_Div(self, _) -> str:
    return "/"

def visit_Mod(self, _) -> str:
    return "%"

def visit_Pow(self, _):
    raise RuntimeError("Exponentiation will not be supported")

def visit_LShift(self, _) -> str:
    return "<<"

def visit_RShift(self, _) -> str:
    return ">>"

def visit_BitOr(self, _) -> str:
    return "|"

def visit_BitXor(self, _) -> str:
    return "^"

def visit_BitAnd(self, _) -> str:
    return "&"

def visit_FloorDiv(self, _):
    raise RuntimeError("Floor division will not be supported")

def visit_Invert(self, _) -> str:
    """ Bitwise invert"""
    import warnings
    warnings.warn("Bitwise invert is only applicable to bit-vectors in Dafny", RuntimeWarning)
    return "!"

def visit_Not(self, _) -> str:
    return "!"

def visit_UAdd(self, _) -> str:
    # Unary plus is a no-op; warn and emit nothing.
    import warnings
    warnings.warn("Unary plus sign is ignored", RuntimeWarning)
    return ""

def visit_USub(self, _) -> str:
    return "-"

def visit_Eq(self, _) -> str:
    return "=="

def visit_NotEq(self, _) -> str:
    return "!="

def visit_Lt(self, _) -> str:
    return "<"

def visit_LtE(self, _) -> str:
    return "<="

def visit_Gt(self, _) -> str:
    return ">"

def visit_GtE(self, _) -> str:
    return ">="

def visit_Is(self, _) -> str:
    raise RuntimeError("\"is\" operator will not be supported")

def visit_IsNot(self, _) -> str:
    raise RuntimeError("\"is not\" operator will not be supported")

def visit_In(self, _) -> str:
    # Surrounding spaces are required: operands are concatenated directly
    # by visit_Compare.
    return " in "

def visit_NotIn(self, _) -> str:
    return " !in "
# endregion
# region IOA specific language constructs visitors
def visit_ioa_spec(self, spec: ast.Module) -> str:
    """Translate a whole IOA specification module into Dafny source text.

    Type definitions are hoisted into a `module Types` block, followed by
    the remaining translated statements.
    """
    stmt_list = list(map(self.visit, spec.body))
    type_def_list, rem_list = [], []
    for s in stmt_list:
        # FIXME using prefix of returned string feels unsafe
        if any(s.startswith(ty) for ty in ["type", "newtype", "datatype"]):
            type_def_list.append(s)
        else:
            rem_list.append(s)
    # TODO Group type definitions together and create a module for types
    # Collect every action constructor gathered during translation into a
    # single Dafny datatype.
    action_type = "datatype Action = " + \
                  " | ".join(self.__global_signature.values())
    type_def_list += [
        action_type,
        # Helper needed by generated code; see TODO above about a prelude.
        "function max(a: nat, b: nat, c: nat): nat\n"
        "{ var tmp := if a >= b then a else b; if tmp >= c then tmp else c }\n"
    ]
    mod_types = "module Types" + \
                self.__body_block("\n".join(type_def_list))
    return mod_types + "\n".join(rem_list)
def visit_ioa_type_def(self, lhs: ast.expr, rhs: ast.expr) -> str:
    """Emit a Dafny type definition for an IOA type declaration.

    Shorthand expansions arrive with the placeholder name "shorthand'",
    which is substituted with the declared name here.
    """
    assert isinstance(lhs, ast.Name)
    declared_name = self.visit(lhs)
    rendered_rhs = self.visit(rhs)
    # FIXME hacky way to rename type
    if "shorthand'" in rendered_rhs:
        return rendered_rhs.replace("shorthand'", declared_name)
    return "type " + declared_name + " = " + rendered_rhs
def visit_ioa_shorthand(self, typ: ast.Subscript) -> str:
    """Expand an IOA shorthand type (Enum, IntEnum, IntRange, NamedTuple,
    seq/set/iset) into the corresponding Dafny type-definition text.

    The emitted definition uses the placeholder name "shorthand'", which
    visit_ioa_type_def later replaces with the declared type name.
    """
    assert isinstance(typ.value, ast.Name)
    cons = self.visit(typ.value)
    name = "shorthand'"
    # NOTE(review): the counter is incremented but never folded into `name`,
    # so two shorthands in one definition would collide -- confirm intent.
    self.__tmp_id_count += 1
    if cons == "Enum":
        # Enum of names -> Dafny datatype with one constructor per name.
        assert isinstance(typ.slice, ast.Index)
        assert isinstance(typ.slice.value, ast.Tuple)
        assert all(isinstance(e, ast.Name) for e in typ.slice.value.elts)
        arg_iter = map(self.visit, typ.slice.value.elts)
        shorthand = "datatype " + name + " = " + " | ".join(arg_iter)
    elif cons == "IntEnum":
        # Enum of numbers -> newtype constrained to the listed values.
        assert isinstance(typ.slice, ast.Index)
        assert isinstance(typ.slice.value, ast.Tuple)
        assert all(isinstance(e, ast.Num) for e in typ.slice.value.elts)
        arg_iter = map(self.visit, typ.slice.value.elts)
        shorthand = "newtype " + name + " = n: int | " + "||".join(map(lambda v: "n==" + v, arg_iter))
    elif cons == "IntRange":
        # Half-open integer range [lower, upper) -> constrained newtype.
        assert isinstance(typ.slice, ast.Slice)
        assert typ.slice.upper and typ.slice.lower and not typ.slice.step
        upper = self.visit(typ.slice.upper)
        lower = self.visit(typ.slice.lower)
        shorthand = "newtype " + name + " = n: int | " + lower + "<=n<" + upper + "\n"
        # TODO move these functions to a prelude file
        # Wrap-around increment/decrement helpers for the bounded range.
        shorthand += \
            "function incre(n: " + name + "): " + name + \
            self.__body_block(
                "if n==" + str(ast.literal_eval(upper)-1) +
                " then " + lower + " else n+1",
                one_line=True
            )
        shorthand += \
            "function decre(n: " + name + "): " + name + \
            self.__body_block(
                "if n==" + lower +
                " then " + str(ast.literal_eval(upper)-1) + " else n-1",
                one_line=True
            )
    elif cons == 'NamedTuple':
        # NamedTuple -> single-constructor datatype with named fields.
        arg_list = self.visit(typ.slice)
        shorthand = "datatype " + name + " = " + name + "(" + arg_list + ")"
    elif cons in ['seq', 'set', 'iset']:
        # Collection shorthands -> type synonym over the Dafny generic.
        para_typ = self.visit(typ.slice.value)
        shorthand = "type " + name + " = " + cons + '<' + para_typ + '>'
    else:
        raise ValueError("Unexpected shorthand type constructor \"" + cons + "\"")
    return shorthand + '\n'
def visit_ioa_composite_automaton(self, comp: ast.FunctionDef) -> str:
    """Translate a composite automaton definition into a Dafny module."""
    # Scope name lookups to this automaton while translating its body.
    self._current_namespace.enter_automaton(comp.name)
    module_text = self.__automaton_module(comp)
    self._current_namespace.exit()
    return module_text
def visit_ioa_automaton_where(self, cond: ast.expr):
    """Render an automaton's `where` predicate as a one-line Dafny block."""
    header = self.__func_name_args(IOA.WHERE)
    body = self.__body_block(self.visit(cond), True)
    return header + body
def visit_ioa_component_list(self, comps: ast.ClassDef) -> str:
# FIXME This assumes self.visit returns a different type than str
# This prevents, for example, a pass statement
comp_list = list(map(self.visit, comps.body))
import_comps = ""
for comp_tag, comp_def, comp_actual in comp_list:
import_comps += "import | |
with close to
symmetric on either side. If False, padding is done on the
right side. Default = True
Output:
window [Numpy array] window containing the required shape and padding
if pad_width > 0
-----------------------------------------------------------------------------
"""
try:
N_window
except NameError:
raise NameError('Window size undefined. Aborting windowing().')
if not isinstance(area_normalize, bool):
raise TypeError('area_normalize should be a boolean value. Aborting windowing().')
if not isinstance(power_normalize, bool):
raise TypeError('power_normalize should be a boolean value. Aborting windowing().')
if peak is not None:
if not isinstance(peak, (int, float)):
raise ValueError('Peak should be a scalar value. Aborting windowing().')
num_norms = area_normalize + power_normalize + (peak is not None)
if num_norms > 1:
raise ValueError('Only one of peak, area_normalize or power_normalize can be set at the same time in windowing().')
# if (area_normalize) and (peak is not None):
# raise ValueError('Both area_normalize and peak cannot be set at the same time in windowing().')
if not isinstance(N_window, (int, float)):
raise TypeError('N_window should be a positive integer. Aborting windowing().')
else:
N_window = int(N_window)
if N_window < 1:
raise ValueError('N_window should be a positive integer. Aborting windowing().')
if isinstance(pad_width, (int, float)):
if pad_width < 0.0:
raise ValueError('pad_width must be non-negative')
pad_width = int(pad_width)
else:
raise TypeError('pad_width must be an integer.')
if (shape == 'rect') or (shape == 'RECT'):
if not centering:
window = NP.pad(NP.ones(N_window), (0, pad_width), mode='constant', constant_values=(pad_value, pad_value))
else:
window = NP.pad(NP.ones(N_window), (int(NP.ceil(0.5*pad_width)), int(NP.floor(0.5*pad_width))), mode='constant', constant_values=(pad_value, pad_value))
elif (shape == 'bnw') or (shape == 'BNW'):
a = [0.3635819, -0.4891775, 0.1365995, -0.0106411]
if (N_window % 2 == 1):
win = a[0]*NP.ones(N_window) + a[1]*NP.cos(2*NP.pi*NP.arange(N_window)/(N_window-1)) + a[2]*NP.cos(4*NP.pi*NP.arange(N_window)/(N_window-1)) + a[3]*NP.cos(6*NP.pi*NP.arange(N_window)/(N_window-1))
if not centering:
if pad_width >= 1:
window = NP.pad(win, (1, pad_width-1), mode='constant', constant_values=(pad_value, pad_value))
else:
window = win
else:
window = NP.pad(win, (int(NP.ceil(0.5*pad_width)), int(NP.floor(0.5*pad_width))), mode='constant', constant_values=(pad_value, pad_value))
else:
win = a[0]*NP.ones(N_window-1) + a[1]*NP.cos(2*NP.pi*NP.arange(N_window-1)/(N_window-2)) + a[2]*NP.cos(4*NP.pi*NP.arange(N_window-1)/(N_window-2)) + a[3]*NP.cos(6*NP.pi*NP.arange(N_window-1)/(N_window-2))
if not centering:
window = NP.pad(win, (1, pad_width), mode='constant', constant_values=(pad_value, pad_value))
else:
window = NP.pad(win, (int(NP.ceil(0.5*(pad_width+1))), int(NP.floor(0.5*(pad_width+1)))), mode='constant', constant_values=(pad_value, pad_value))
elif (shape == 'bhw') or (shape == 'BHW'):
a = [0.35875, -0.48829, 0.14128, -0.01168]
if (N_window % 2 == 1):
win = a[0]*NP.ones(N_window) + a[1]*NP.cos(2*NP.pi*NP.arange(N_window)/(N_window-1)) + a[2]*NP.cos(4*NP.pi*NP.arange(N_window)/(N_window-1)) + a[3]*NP.cos(6*NP.pi*NP.arange(N_window)/(N_window-1))
if not centering:
if pad_width >= 1:
window = NP.pad(win, (1, pad_width-1), mode='constant', constant_values=(pad_value, pad_value))
else:
window = win
else:
window = NP.pad(win, (int(NP.ceil(0.5*pad_width)), int(NP.floor(0.5*pad_width))), mode='constant', constant_values=(pad_value, pad_value))
else:
win = a[0]*NP.ones(N_window-1) + a[1]*NP.cos(2*NP.pi*NP.arange(N_window-1)/(N_window-2)) + a[2]*NP.cos(4*NP.pi*NP.arange(N_window-1)/(N_window-2)) + a[3]*NP.cos(6*NP.pi*NP.arange(N_window-1)/(N_window-2))
if not centering:
window = NP.pad(win, (1, pad_width), mode='constant', constant_values=(pad_value, pad_value))
else:
window = NP.pad(win, (int(NP.ceil(0.5*(pad_width+1))), int(NP.floor(0.5*(pad_width+1)))), mode='constant', constant_values=(pad_value, pad_value))
if peak is not None:
window *= peak/NP.amax(NP.abs(window))
if verbose:
print '\tRescaled the shaping window to peak value.'
elif area_normalize:
# area = NP.trapz(window) # Beware that NP.trapz could differ from NP.cumsum due to edge effects. Sufficient padding will make them converge
area = NP.sum(window) # Using sum is preferable to using trapz although less accurate especially when FFT is going to be involved later on.
window /= area
if verbose:
print '\tRenormalized the shaping window to unit area.'
elif power_normalize:
powr = NP.sum(NP.abs(window)**2)
window /= NP.sqrt(powr)
if verbose:
print '\tRenormalized the shaping window to unit power.'
return window
#################################################################################
def window_fftpow(N_window, shape='rect', pad_width=0, pad_value=0.0,
fftpow=1.0, area_normalize=False, peak=None,
power_normalize=False, verbose=True, centering=True):
"""
-----------------------------------------------------------------------------
Routine to produce window functions including ability to raise the FFT to a
given power
Inputs:
N_window [Integer] Number of samples in the actual window. Should be
positive
Keyword inputs:
shape [string] Shape type. Currently allowed values are 'rect',
'bnw' and 'bhw' for rectangular, Blackman-Nuttall and
Blackman-Harris windows respectively
pad_width [scalar integer] Number of padding samples. it has to be
non-negative. Padding values are provided in pad_values.
fftpow [scalar] The FFT of the window will be raised to this power.
Must be positive. Default = 1.0
area_normalize
[Boolean] True means re-normalize the window to have unit
area. False means no re-normalization is performed. Cannot be
set simulataneously if peak or power_normalize is set.
peak [Float] If set, rescale the window so the peak is set to the
specified value. Only one of peak, area_normalize or
power_normalize can be set
power_normalize
[Boolean] True means re-normalize the window to have unit
power. False means no re-normalization is performed. Cannot be
set simulataneously if peak or area_normalize is set.
verbose [Boolean] If set, print progress and/or diagnostic messages.
centering [Boolean] If set to True, centers the window with close to
symmetric on either side. If False, padding is done on the
right side. Default = True
Output:
window [Numpy array] window containing the required shape and padding
if pad_width > 0
-----------------------------------------------------------------------------
"""
try:
N_window
except NameError:
raise NameError('Window size undefined. Aborting windowing().')
if not isinstance(area_normalize, bool):
raise TypeError('area_normalize should be a boolean value. Aborting windowing().')
if not isinstance(power_normalize, bool):
raise TypeError('power_normalize should be a boolean value. Aborting windowing().')
if peak is not None:
if not isinstance(peak, (int, float)):
raise ValueError('Peak should be a scalar value. Aborting windowing().')
num_norms = area_normalize + power_normalize + (peak is not None)
if num_norms > 1:
raise ValueError('Only one of peak, area_normalize or power_normalize can be set at the same time in windowing().')
# if (area_normalize) and (peak is not None):
# raise ValueError('Both area_normalize and peak cannot be set at the same time in windowing().')
if not isinstance(N_window, (int, float)):
raise TypeError('N_window should be a positive integer. Aborting windowing().')
else:
N_window = int(N_window)
if N_window < 1:
raise ValueError('N_window should be a positive integer. Aborting windowing().')
if not isinstance(pad_width, (int, float)):
raise TypeError('pad_width must be an integer.')
else:
pad_width = int(pad_width)
if pad_width < 0:
raise ValueError('pad_width should be non-negative. Aborting windowing().')
if not isinstance(fftpow, (int,float)):
raise TypeError('Input fftpow must be a scalar')
else:
fftpow = float(fftpow)
if fftpow < 0.0:
raise ValueError('Input fftpow must be non-negative')
eps = 1e-10
if (shape == 'rect') or (shape == 'RECT'):
if fftpow != 1.0:
nwin = int(NP.ceil(N_window/NP.float(fftpow)))
else:
nwin = N_window
win = NP.zeros(N_window, dtype=NP.float_)
if fftpow != 1.0:
win[:nwin] = 1.0
fftwin = NP.fft.fft(win)
fftwin = fftwin ** fftpow
win = NP.fft.ifft(fftwin)
if NP.abs(win.imag).max()/NP.abs(win).max() >= eps:
raise ValueError('Significant imaginary component found in FFT-based window generation. Need to investigate. Aborting...')
else:
win = win.real
nshift = 0
if NP.abs(fftpow % 1.0) < 1e-6:
nzeros = max(N_window - (fftpow * nwin - (fftpow - 1)), 0)
if nzeros > 0:
win[-int(nzeros):] = 0.0
nshift = int(NP.ceil(0.5*nzeros))
win = NP.roll(win, nshift)
else:
win = NP.ones(N_window, dtype=NP.float_)
if not centering:
window = NP.pad(win, (0, pad_width), mode='constant', constant_values=(pad_value, pad_value))
else:
window = NP.pad(win, (int(NP.ceil(0.5*pad_width)), int(NP.floor(0.5*pad_width))), mode='constant', constant_values=(pad_value, pad_value))
elif (shape == 'bnw') or (shape == 'BNW'):
a = [0.3635819, -0.4891775, 0.1365995, -0.0106411]
if fftpow != 1.0:
nwin = int(NP.ceil(N_window/NP.float(fftpow)))
else:
nwin = N_window
win = NP.zeros(N_window, dtype=NP.float_)
if (nwin % 2 == 1):
win[:nwin] = a[0]*NP.ones(nwin) + a[1]*NP.cos(2*NP.pi*NP.arange(nwin)/(nwin-1)) + a[2]*NP.cos(4*NP.pi*NP.arange(nwin)/(nwin-1)) + a[3]*NP.cos(6*NP.pi*NP.arange(nwin)/(nwin-1))
if fftpow != 1.0:
fftwin = NP.fft.fft(win)
fftwin = fftwin ** fftpow
win = NP.fft.ifft(fftwin)
if NP.abs(win.imag).max()/NP.abs(win).max() >= eps:
raise ValueError('Significant imaginary component found in FFT-based window generation. Need to investigate. Aborting...')
else:
win = win.real
nshift = 0
if NP.abs(fftpow % 1.0) < 1e-6:
nzeros = max(N_window - (fftpow * nwin - (fftpow - 1)), 0)
if nzeros > 0:
win[-int(nzeros):] = 0.0
nshift = int(NP.ceil(0.5*nzeros))
win = NP.roll(win, nshift)
if not centering:
if pad_width >= 1:
window = NP.pad(win, (1, pad_width-1), mode='constant', constant_values=(pad_value, pad_value))
else:
window = win
else:
window = NP.pad(win, (int(NP.ceil(0.5*pad_width)), int(NP.floor(0.5*pad_width))), mode='constant', constant_values=(pad_value, pad_value))
else:
if nwin == N_window:
win[1:] = a[0]*NP.ones(nwin-1) + a[1]*NP.cos(2*NP.pi*NP.arange(nwin-1)/(nwin-2)) + a[2]*NP.cos(4*NP.pi*NP.arange(nwin-1)/(nwin-2)) + a[3]*NP.cos(6*NP.pi*NP.arange(nwin-1)/(nwin-2))
else:
win[:nwin] = a[0]*NP.ones(nwin) + a[1]*NP.cos(2*NP.pi*NP.arange(nwin)/(nwin-1)) + a[2]*NP.cos(4*NP.pi*NP.arange(nwin)/(nwin-1)) + a[3]*NP.cos(6*NP.pi*NP.arange(nwin)/(nwin-1))
if fftpow != 1.0:
fftwin = NP.fft.fft(win)
fftwin = fftwin ** fftpow
win = NP.fft.ifft(fftwin)
if NP.abs(win.imag).max()/NP.abs(win).max() >= eps:
raise ValueError('Significant imaginary component found in FFT-based window generation. Need to investigate. Aborting...')
else:
win = win.real
nshift = 0
if NP.abs(fftpow % 1.0) < 1e-6:
nzeros = max(N_window - (fftpow * nwin | |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, cast
import numpy as np
import torch
from ax.exceptions.model import ModelError
from ax.models.model_utils import filter_constraints_and_fixed_features, get_observed
from ax.models.random.sobol import SobolGenerator
from ax.models.types import TConfig
from ax.utils.common.constants import Keys
from ax.utils.common.logger import get_logger
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.analytic import PosteriorMean
from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
from botorch.acquisition.monte_carlo import qSimpleRegret
from botorch.acquisition.multi_objective.objective import WeightedMCMultiOutputObjective
from botorch.acquisition.objective import ConstrainedMCObjective, MCAcquisitionObjective
from botorch.acquisition.objective import (
PosteriorTransform,
ScalarizedPosteriorTransform,
)
from botorch.acquisition.utils import get_infeasible_cost
from botorch.exceptions.errors import UnsupportedError
from botorch.models import ModelListGP, SaasFullyBayesianSingleTaskGP, SingleTaskGP
from botorch.models.model import Model
from botorch.sampling.samplers import IIDNormalSampler, SobolQMCNormalSampler
from botorch.utils.constraints import get_outcome_constraint_transforms
from botorch.utils.objective import get_objective_weights_transform
from botorch.utils.sampling import sample_hypersphere, sample_simplex
from torch import Tensor
logger = get_logger(__name__)
# Single-task model classes treated as having no inferred observation noise
# (checked via `is_noiseless` below).
NOISELESS_MODELS = {SingleTaskGP}
# Distribution identifiers; presumably select between the imported
# sample_simplex / sample_hypersphere helpers — TODO confirm at call sites.
SIMPLEX = "simplex"
HYPERSPHERE = "hypersphere"
@dataclass
class SubsetModelData:
    """Result bundle returned by `subset_model`: the model and its
    optimization inputs restricted to the outputs actually used."""

    model: Model  # possibly output-subset BoTorch model
    objective_weights: Tensor  # weights restricted to the retained outputs
    outcome_constraints: Optional[Tuple[Tensor, Tensor]]  # (A, b) with A columns restricted
    objective_thresholds: Optional[Tensor]  # thresholds restricted to the retained outputs
    indices: Tensor  # original output indices that were kept
def is_noiseless(model: Model) -> bool:
    """Check if a given (single-task) botorch model is noiseless.

    Args:
        model: A single-task BoTorch ``Model``. Sub-models of a
            ``ModelListGP`` must be checked individually.

    Returns:
        True if the model's class is registered in ``NOISELESS_MODELS``.

    Raises:
        ModelError: If ``model`` is a ``ModelListGP`` (the check only
            applies to its sub-models).
    """
    if isinstance(model, ModelListGP):
        # Fixed typo in the original message ("noisless").
        raise ModelError(
            "Checking for noiseless models only applies to sub-models of ModelListGP"
        )
    # Exact-class membership (not isinstance): subclasses may model noise.
    return model.__class__ in NOISELESS_MODELS
def _filter_X_observed(
    Xs: List[Tensor],
    objective_weights: Tensor,
    bounds: List[Tuple[float, float]],
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
) -> Optional[Tensor]:
    r"""Select observed points appearing in the objective or constraints that
    also satisfy all feasibility filters.

    Args:
        Xs: The input tensors of a model.
        objective_weights: Weights of the linear objective over the columns
            of f(x).
        bounds: A list of (lower, upper) tuples for each column of X.
        outcome_constraints: Optional (A, b) encoding A f(x) <= b over the
            m outputs (k x m and k x 1 respectively).
        linear_constraints: Optional (A, b) encoding A x <= b over the
            d input dimensions (k x d and k x 1 respectively).
        fixed_features: A map {feature_index: value} of features frozen
            during generation.

    Returns:
        A tensor of the feasible observed points, or None if none remain.
    """
    # Points observed for all objective and constraint outcomes.
    candidates = get_observed(
        Xs=Xs,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
    )
    # Drop anything violating bounds, linear constraints, or fixed features.
    feasible = filter_constraints_and_fixed_features(
        X=candidates,
        bounds=bounds,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
    )
    if len(feasible) == 0:
        return None
    return torch.as_tensor(feasible)  # please the linter
def _get_X_pending_and_observed(
    Xs: List[Tensor],
    objective_weights: Tensor,
    bounds: List[Tuple[float, float]],
    pending_observations: Optional[List[Tensor]] = None,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
) -> Tuple[Optional[Tensor], Optional[Tensor]]:
    r"""Return the (pending, observed) point tensors for acquisition setup.

    Observed points are filtered to those that are feasible and appear in
    the objective or constraints. If that filter removes every observed
    point, the `linear_constraints` / `fixed_features` filters are dropped
    and the filtering is retried.

    Args:
        Xs: The input tensors of a model.
        objective_weights: Weights of the linear objective over the columns
            of f(x).
        bounds: A list of (lower, upper) tuples for each column of X.
        pending_observations: A list of m (k_i x d) feature tensors X for m
            outcomes and k_i pending observations for outcome i.
        outcome_constraints: Optional (A, b) encoding A f(x) <= b.
        linear_constraints: Optional (A, b) encoding A x <= b.
        fixed_features: A map {feature_index: value} of frozen features.

    Returns:
        A tuple of the filtered pending points (or None) and the filtered
        observed points (or None).
    """
    X_pending = None
    if pending_observations is not None:
        X_pending = _filter_X_observed(
            Xs=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )
    X_observed = _filter_X_observed(
        Xs=Xs,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        bounds=bounds,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
    )
    if X_observed is not None and len(X_observed) > 0:
        return X_pending, X_observed
    # Everything was filtered out — retry without the input-side filters so
    # callers still receive the raw observed set.
    relaxed_X_observed = _filter_X_observed(
        Xs=Xs,
        objective_weights=objective_weights,
        bounds=bounds,
        outcome_constraints=outcome_constraints,
    )
    return X_pending, relaxed_X_observed
def _generate_sobol_points(
    n_sobol: int,
    bounds: List[Tuple[float, float]],
    device: torch.device,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
    model_gen_options: Optional[TConfig] = None,
) -> Tensor:
    """Generate ``n_sobol`` quasi-random points within ``bounds`` via Ax's
    SobolGenerator and return them as a tensor on ``device``.

    Constraints and the rounding function are converted to numpy form
    because SobolGenerator operates on arrays. NOTE(review): the generator
    is seeded from ``np.random.randint(10000)``, so results depend on the
    global numpy RNG state.
    """
    linear_constraints_array = None
    if linear_constraints is not None:
        # SobolGenerator expects numpy constraint matrices, not tensors.
        linear_constraints_array = (
            linear_constraints[0].detach().numpy(),
            linear_constraints[1].detach().numpy(),
        )
    array_rounding_func = None
    if rounding_func is not None:
        # Wrap the tensor-based rounding function for array inputs.
        array_rounding_func = tensor_callable_to_array_callable(
            tensor_func=rounding_func, device=device
        )
    sobol = SobolGenerator(deduplicate=False, seed=np.random.randint(10000))
    array_X, _ = sobol.gen(
        n=n_sobol,
        bounds=bounds,
        linear_constraints=linear_constraints_array,
        fixed_features=fixed_features,
        rounding_func=array_rounding_func,
        model_gen_options=model_gen_options,
    )
    # Generation weights are discarded; only the points are returned.
    return torch.from_numpy(array_X).to(device)
def normalize_indices(indices: List[int], d: int) -> List[int]:
    r"""Normalize a list of indices to ensure that they are positive.

    Args:
        indices: A list of indices (may contain negative indices for indexing
            "from the back").
        d: The dimension of the tensor to index.

    Returns:
        A normalized list of indices such that each index is between `0` and
        `d-1`.

    Raises:
        ValueError: If any index falls outside ``[-d, d - 1]``.
    """
    normalized_indices = []
    for i in indices:
        if i < 0:
            i = i + d
        if i < 0 or i > d - 1:
            # Fixed typo in the original message ("tensor or length").
            raise ValueError(f"Index {i} out of bounds for tensor of length {d}.")
        normalized_indices.append(i)
    return normalized_indices
def subset_model(
    model: Model,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    objective_thresholds: Optional[Tensor] = None,
) -> SubsetModelData:
    """Subset a botorch model to the outputs used in the optimization.

    Args:
        model: A BoTorch Model. If the model does not implement the
            `subset_outputs` method, this function is a null-op and returns the
            input arguments.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        objective_thresholds: The `m`-dim tensor of objective thresholds. There
            is one for each modeled metric.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)

    Returns:
        A SubsetModelData dataclass containing the model, objective_weights,
        outcome_constraints, objective thresholds, all subset to only those
        outputs that appear in either the objective weights or the outcome
        constraints, along with the indices of the outputs.

    Raises:
        RuntimeError: If more outputs are referenced than the model has.
    """
    # An output is "used" if it has a nonzero objective weight OR appears in
    # any outcome-constraint row.
    nonzero = objective_weights != 0
    if outcome_constraints is not None:
        A, _ = outcome_constraints
        nonzero = nonzero | torch.any(A != 0, dim=0)
    idcs_t = torch.arange(nonzero.size(0), device=objective_weights.device)[nonzero]
    idcs = idcs_t.tolist()
    if len(idcs) == model.num_outputs:
        # if we use all model outputs, just return the inputs
        return SubsetModelData(
            model=model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            objective_thresholds=objective_thresholds,
            indices=torch.arange(
                model.num_outputs,
                device=objective_weights.device,
            ),
        )
    elif len(idcs) > model.num_outputs:
        # Fixed typo in the original message ("Tryting").
        raise RuntimeError(
            "Model size inconsistency. Trying to subset a model with "
            f"{model.num_outputs} outputs to {len(idcs)} outputs"
        )
    try:
        model = model.subset_output(idcs=idcs)
        objective_weights = objective_weights[nonzero]
        if outcome_constraints is not None:
            A, b = outcome_constraints
            # Only the constraint matrix columns are subset; b is per-row.
            outcome_constraints = A[:, nonzero], b
        if objective_thresholds is not None:
            objective_thresholds = objective_thresholds[nonzero]
    except NotImplementedError:
        # Model does not support subsetting — fall back to the full output set.
        idcs_t = torch.arange(
            model.num_outputs,
            device=objective_weights.device,
        )
    return SubsetModelData(
        model=model,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        objective_thresholds=objective_thresholds,
        indices=idcs_t,
    )
def _to_inequality_constraints(
linear_constraints: Optional[Tuple[Tensor, Tensor]] = None
) -> Optional[List[Tuple[Tensor, Tensor, float]]]:
if linear_constraints is not None:
A, b = linear_constraints
inequality_constraints = []
k, d = A.shape
for i in range(k):
indicies = A[i, :].nonzero(as_tuple=False).squeeze()
coefficients = -A[i, indicies]
rhs = -b[i, 0]
inequality_constraints.append((indicies, coefficients, rhs))
else:
inequality_constraints = None
return inequality_constraints
def tensor_callable_to_array_callable(
    tensor_func: Callable[[Tensor], Tensor], device: torch.device
) -> Callable[[np.ndarray], np.ndarray]:
    """Wrap a tensor-to-tensor callable so it accepts and returns numpy arrays.

    The input array is moved to ``device`` before calling ``tensor_func``;
    the result is detached and converted back to a numpy array.
    """
    # TODO: move this reuseable function and its equivalent reverse functions
    # to some utils files

    def array_func(x: np.ndarray) -> np.ndarray:
        as_tensor = torch.from_numpy(x).to(device)
        return tensor_func(as_tensor).detach().numpy()

    return array_func
def get_botorch_objective_and_transform(
model: Model,
objective_weights: Tensor,
outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
objective_thresholds: Optional[Tensor] = None,
X_observed: Optional[Tensor] = None,
) -> Tuple[Optional[MCAcquisitionObjective], Optional[PosteriorTransform]]:
"""Constructs a BoTorch `AcquisitionObjective` object.
Args:
model: A BoTorch Model
| |
# parse each line
positives = {} #[list () for _ in range (0, len (steps))]
bar = util.ProgressBar ('Syntatic Distance and Strategy Analysis (' + strategy + ')', metadata ['misses'])
try: fin = open (os.path.join (args.input, 'predict'), 'r')
except: fin = gzip.open (os.path.join (args.input, 'predict.gz'), 'r')
for progress in range (0, metadata ['misses']):
bar.update (progress)
line = fin.readline ()
if len (line) == 0: break
misses = model.split (json.loads (line))
for miss in misses:
X, P, I, idx, x, y = miss ['X'], miss ['P'], miss ['I'], miss ['idx'], miss ['x'], miss ['y']
obj = {
'label': I ['label'],
'evt_idx': idx,
'seq_idx': I ['idx'],
'subject': I ['blk'],
'len': I ['len'],}
subject = obj ['subject']
if subject not in positives: positives [subject] = [list () for _ in range (0, len (steps))]
sprob = sorted (P, reverse=True)
rank = sprob.index (P [x]) / len (sprob)
# strategy: rank
if strategy == 'rank':
for i, s in enumerate (steps):
if rank <= s: continue
else: positives [subject][i].append (obj)
# strategy : distance or complete
else: # distance analysis
# sort by prob, but if probs are the same then compare distance
spred = sorted ([i for i in range (0, len(P))], reverse=True, key=lambda i: P [i] * 1000000 - dij [x][i])
sdist = [dij [x][i] for i in spred]
j, pred, reported = 0, spred [0], False
jrank = sprob.index (P [pred]) / len (sprob)
for i, s in enumerate (steps):
if strategy == 'complete' and rank <= s: continue # check rank and continue if hit
while jrank <= s and j < len (list (spred)): pred = spred [j]; jrank = sprob.index (P [pred]) / len (sprob); j += 1
dist = min (sdist [0: j + 1])
if dist > s + gapdist: positives [subject][i].append (obj)
# add positive cases to stats
stats = dablog.util.Statistics (steps, metadata ['seqlen'] + 1, metadata ['test-normal'], metadata ['test-abnormal'])
for subject in positives:
for i in range (0, len (positives [subject])):
# extra check on adjecent sequences if applicable
seqs = set ([obj ['seq_idx'] for obj in positives [subject][i]])
# if len (seqs) > 1: print (seqs)
for obj in positives [subject][i]:
idx = obj ['seq_idx']
if obj ['len'] < args.seqlen: stats.add (i, obj)
elif not any ([idx + adj not in seqs for adj in range (0, args.check_adjacent + 1)]): stats.add (i, obj)
fin.close ()
bar.finish ()
stats.output (os.path.join (args.output, '.'.join ([strategy, str (gapdist)])))
###############################################################
## List Key And Sequence Statistics ##
###############################################################
def ListKeyAndSequenceStatistics (args):
    """Scan labelled log blocks and dump per-event and per-sequence statistics.

    Reads the label file (one "<label> <block>" pair per line), loads each
    block's log via readSequence, and accumulates:
      * evtset: per-event occurrence counts split by Normal/Abnormal label
      * seqset: per-window (length args.seqlen) counts split by label
    Results are written as JSON lines to "<output>.<logkeys>.<N>.keys" and
    "<output>.<logkeys>.<seqlen>.seqs".
    """
    # read labels
    import dablog.util
    # NOTE(review): 575139 looks like a hard-coded total-line count for the
    # progress bar (dataset size?) — confirm against the label file.
    progress, bar = 0, util.ProgressBar ('List Sequences and Labels', 575139)
    seqset, evtset = {}, {}
    # parse each line from data
    for line in open (args.label, 'r'):
        progress += 1
        bar.update (progress)
        label, block = line.split ()
        # label "0" means Normal; anything else is Abnormal
        label = 'Normal' if int (label) == 0 else 'Abnormal'
        try: sequence = readSequence (os.path.join (args.input, block + '.log'), args)
        except KeyboardInterrupt: exit (0)
        # NOTE(review): bare except silently skips unreadable blocks
        except: print (block, 'is broken'); continue
        for evt in sequence: # parse event
            if evt not in evtset: evtset [evt] = {'occurrence': 0, 'Normal': 0, 'Abnormal': 0}
            evtset [evt]['occurrence'] += 1
            evtset [evt][label] += 1
        # Slide a window of length args.seqlen over the sequence; windows
        # shorter than args.seqlen are left-padded with the PAD token.
        for i in range (0, max (1, len (sequence) - args.seqlen)): # parse sequence
            seq = sequence [i: min (i + args.seqlen, len (sequence))]
            if len (seq) < args.seqlen: seq = [dablog.util.Codebook.PAD] * (args.seqlen - len (seq)) + seq
            seqstr = json.dumps (seq)
            if seqstr not in seqset: seqset [seqstr] = {'Normal': 0, 'Abnormal': 0}
            seqset [seqstr][label] += 1
    bar.finish ()
    # write to key file (events sorted by descending occurrence)
    count = sum ([evtset [evt]['occurrence'] for evt in evtset])
    sort = sorted (list (evtset.keys ()), reverse=True, key=lambda k: evtset [k]['occurrence'])
    with open ('.'.join ([args.output, str (args.logkeys), str (len (sort)), 'keys']), 'w') as fout:
        for key in sort:
            obj = evtset [key]
            occ = evtset [key]['occurrence']
            fout.write (json.dumps ({'key': key, 'occurrence': occ, 'percentage': occ / count,
                                     'normals': obj ['Normal'], 'abnormals': obj ['Abnormal']}) + '\n')
    # write to seq file (unsorted; one JSON object per distinct window)
    count = sum ([seqset [seq]['Normal'] + seqset [seq]['Abnormal'] for seq in seqset])
    with open ('.'.join ([args.output, str (args.logkeys), str (args.seqlen), 'seqs']), 'w') as fout:
        for seqstr in seqset:
            seq = seqset [seqstr]
            normals = seq ['Normal']
            abnormals = seq ['Abnormal']
            occurrence = normals + abnormals
            obj = {'sequence': json.loads (seqstr),
                   'occurrence': occurrence, 'percentage': occurrence / count,
                   'normals': normals, 'abnormals': abnormals,}
            fout.write (json.dumps (obj) + '\n')
def PrintKeyAndStatistics (args):
    """Print per-key statistics and sweep a rank-percentile detector.

    First echoes each key record from ``args.output`` with a running
    cumulative percentage. Then, for every labelled block, increments the
    TP/FP counter of each percentile bucket below the rank percentile of
    the block's rarest key, and finally prints F1/accuracy/FPR per bucket
    and dumps the confusion counts to "<output>__rank.0.0.metric".
    """
    keys = []
    for line in open (args.output, 'r'):
        keys.append (json.loads (line))
    for i in range (0, len (keys)):
        print (i+1, end=', ')
        print (keys [i]['percentage'], end=', ')
        print (keys [i]['normals'], end=', ')
        print (keys [i]['abnormals'], end=', ')
        # cumulative share of occurrences covered by the top i+1 keys
        print (sum ([obj ['percentage'] for obj in keys [0: i+1]]), end=', ')
        print (keys [i]['key'])
    import dablog.util
    # key string -> rank index (keys file is sorted by descending occurrence)
    ktoi = {keys [i]['key']: i for i in range (0, len (keys))}
    # one TP/FP counter per percentile bucket
    TP, FP = [0] * 100, [0] * 100
    # TP, FP = [0] * len (keys), [0] * len (keys)
    # NOTE(review): 575139 appears to be the hard-coded number of blocks in
    # the label file — confirm against the dataset.
    progress, bar = 0, util.ProgressBar ('List Sequences and Labels', 575139)
    for line in open (args.label, 'r'):
        progress += 1
        bar.update (progress)
        label, block = line.split ()
        label = 'Normal' if int (label) == 0 else 'Abnormal'
        try: sequence = readSequence (os.path.join (args.input, block + '.log'), args)
        except KeyboardInterrupt: exit (0)
        # NOTE(review): bare except silently skips unreadable blocks
        except: print (block, 'is broken'); continue
        # rank of the block's rarest (highest-index) key, as a percentile
        maxindex = max ([ktoi [evt] for evt in sequence])
        maxpercentile = int ((maxindex * len (TP))/ len (keys))
        for i in range (0, maxpercentile):
        # for i in range (0, maxindex):
            if label == 'Normal': FP [i] += 1
            else: TP [i] += 1
    bar.finish ()
    TN, FN = [0] * 100, [0] * 100
    for i in range (0, len (TP)):
        print (i+1, end=', ')
        tp, fp = TP [i], FP [i]
        # NOTE(review): 575139 / 16838 look like hard-coded totals of normal
        # and abnormal blocks — confirm against the dataset.
        tn, fn = 575139 - fp, 16838 - tp
        TN [i], FN [i] = tn, fn
        fpr = fp / (fp + tn)
        fnr = fn / (fn + tp)  # NOTE(review): computed but never used
        precision = tp / (tp + fp) if tp + fp > 0 else 0.0
        recall = tp / (tp + fn)
        f1 = 2 * precision * recall / (precision + recall) if precision + recall > 0 else 0.0
        accuracy = (tp + tn) / (tp + fn + fp + tn)
        print ('f1:' + str (f1), end=', ')
        print ('acc:' + str (accuracy), end=', ')
        print ('fpr:' + str (fpr))
    # NOTE: writes four concatenated JSON objects, not a single document
    with open (args.output + '__rank.0.0.metric', 'w') as fout:
        fout.write (json.dumps ({'TP': TP}))
        fout.write (json.dumps ({'FP': FP}))
        fout.write (json.dumps ({'TN': TN}))
        fout.write (json.dumps ({'FN': FN}))
###############################################################
## Hyper Dimentional Ploter ##
###############################################################
def PlotSyntacticDistance (args):
import hypertools, random
embed = json.load (open (args.input, 'r'))
tags = ['<pad>', '<sequence>', '</sequence>', '<unknown>']
if args.output == 'embed':
data = [embed [key] for key in embed]
# labels = [key if key in tags else None for key in embed]
labels = [key for key in embed]
# hue = [tags.index (key) + 1 if key in tags else 0 for key in embed]
# hypertools.plot (numpy.array (data), '.', hue=numpy.array (hue), labels=labels)
# hypertools.plot (numpy.array (data), '.', labels=labels, n_clusters=2)
hypertools.plot (numpy.array (data), '.', labels=labels, n_clusters=10, explore=True)
elif args.output == 'seq':
# redefine unknown embeddings
dists, unknown = {}, numpy.array ([1.0] * len (embed [tags [-1]]))
# unknown = numpy.mean ([embed [evt] for evt in embed])
normals, abnormals, uncertains = [], [], []
data, labels = [], []
# calculate pair wise distance and universe unit
for key in embed:
if key in embed: vec = numpy.array (embed [key])
else: vec = unknown
dists [key] = numpy.linalg.norm (vec - unknown)
# parse each anomalous seqeuences
for line in open (args.label, 'r'):
obj = json.loads (line)
if obj ['normal_ratio'] == 1.0: normals.append (obj)
elif obj ['abnormal_ratio'] == 1.0: abnormals.append (obj)
else: uncertains.append (obj)
# generate plotting data and plot
random.shuffle (normals)
random.shuffle (abnormals)
random.shuffle (uncertains)
for hue, dataset in [['Abnormal', abnormals], ['Normals', normals], | |
# (removed GitHub-scrape artifact "<gh_stars>0" — it is not valid Python)
# coding=UTF-8
"""All the REST enums used by the FCO REST API."""
from enum import Enum
class PrintableEnum(Enum):
    """Base enum whose members format as their raw wire value.

    Subclasses model FCO REST API enums; both ``str()`` and
    :meth:`untype` yield the underlying string value, making members
    directly usable in parameter substitution.
    """

    def __str__(self):
        """Return the member's value for string formatting."""
        return self.value

    def untype(self):
        """Return the raw value suitable for a REST API query."""
        return self.value
class DeploymentInstanceStatus(PrintableEnum):
    """FCO REST API DeploymentInstanceStatus enum.

    The DeploymentInstanceStatus enum represents the status of a
    deployment instance. Member values are the literal strings used on
    the REST wire.

    BUILDING: Building
    RECOVERY: Undergoing live recovery
    STARTING: Starting
    REBOOTING: Rebooting
    INSTALLING: Installing
    RUNNING: Running
    STOPPED: Stopped
    ERROR: Internal error state
    STOPPING: Stopping
    MIGRATING: Migrating
    DELETING: Deleting
    """

    BUILDING = 'BUILDING'
    RECOVERY = 'RECOVERY'
    STARTING = 'STARTING'
    REBOOTING = 'REBOOTING'
    INSTALLING = 'INSTALLING'
    RUNNING = 'RUNNING'
    STOPPED = 'STOPPED'
    ERROR = 'ERROR'
    STOPPING = 'STOPPING'
    MIGRATING = 'MIGRATING'
    DELETING = 'DELETING'
class Networking(PrintableEnum):
    """FCO REST API Networking enum.

    The SystemCapability Networking enum enumerates the different
    networking capabilities of a system or cluster. Member values are the
    literal strings used on the REST wire.

    VLAN: VLAN networking modes are permitted
    PVIP: PVIP networking modes are permitted
    """

    VLAN = 'VLAN'
    PVIP = 'PVIP'
class HypervisorType(PrintableEnum):
    """FCO REST API HypervisorType enum.

    An enum that represents the hypervisor type. Member values are the
    literal strings used on the REST wire.

    HYPERV: Hyper-V Hypervisor Type
    XEN4: XEN4 Hypervisor Type
    PCS: PCS Hypervisor Type
    KVM: KVM Hypervisor Type
    XEN3: XEN3 Hypervisor Type
    VMWARE: VMware Hypervisor Type
    """

    HYPERV = 'HYPERV'
    XEN4 = 'XEN4'
    PCS = 'PCS'
    KVM = 'KVM'
    XEN3 = 'XEN3'
    VMWARE = 'VMWARE'
class FirewallConnectionState(PrintableEnum):
    """FCO REST API FirewallConnectionState enum.

    The FirewallConnectionState enum enumerates the possible connection
    states used within a FirewallTemplate. Member values are the literal
    strings used on the REST wire.

    NEW: New connections
    ALL: Any state
    EXISTING: Existing connections
    """

    NEW = 'NEW'
    ALL = 'ALL'
    EXISTING = 'EXISTING'
class EmailType(PrintableEnum):
    """FCO REST API EmailType enum.

    The EmailType enum enumerates the different types of email sent by
    the system. Member values are the literal strings used on the REST
    wire.

    GENERAL_EMAIL: The general email template where subject and
        message is set by customer
    NEW_PASSWORD_DETAILS: Sent to a user with a new password after a
        password reset
    ACCOUNT_ACTIVATION: Sent when an account has been activated
        automatically
    REVOKE_USER: Sent when a user's access is revoked from a
        customer account
    CREDIT_NOTE: Sent with an attached credit note.
    AUTO_TOP_UP_FAIL: Sent to inform the customer of a failed
        autotopup
    PASSWORD_RESET_LINK: Sent to a user who requests a password reset
    LOW_BALANCE: Sent to a customer whose balance reaches the
        low balance warning threshold
    ZERO_BALANCE: Sent to a customer who reaches a zero unit
        balance
    INVITE_USER: Sent when a user is invited to join a customer
        account
    PAID_INVOICE: Email template when an invoice that has been
        emailed to the customer has been paid.
    ACCOUNT_APPROVAL: Sent when an account has been activated
        manually
    AUTO_TOP_UP_SUCCESS: Sent to inform the customer of a successful
        autotopup
    ACCOUNT_CANCELLATION: Sent when a customer's account is cancelled
    INVOICE: Sent with an attached invoice
    """

    GENERAL_EMAIL = 'GENERAL_EMAIL'
    # Fixed: the value had been mangled to '<PASSWORD>DETAILS' (an
    # anonymization artifact); every member's value is its own name.
    NEW_PASSWORD_DETAILS = 'NEW_PASSWORD_DETAILS'
    ACCOUNT_ACTIVATION = 'ACCOUNT_ACTIVATION'
    REVOKE_USER = 'REVOKE_USER'
    CREDIT_NOTE = 'CREDIT_NOTE'
    AUTO_TOP_UP_FAIL = 'AUTO_TOP_UP_FAIL'
    PASSWORD_RESET_LINK = 'PASSWORD_RESET_LINK'
    LOW_BALANCE = 'LOW_BALANCE'
    ZERO_BALANCE = 'ZERO_BALANCE'
    INVITE_USER = 'INVITE_USER'
    PAID_INVOICE = 'PAID_INVOICE'
    ACCOUNT_APPROVAL = 'ACCOUNT_APPROVAL'
    AUTO_TOP_UP_SUCCESS = 'AUTO_TOP_UP_SUCCESS'
    ACCOUNT_CANCELLATION = 'ACCOUNT_CANCELLATION'
    INVOICE = 'INVOICE'
class InvoiceStatus(PrintableEnum):
    """FCO REST API InvoiceStatus enum.

    The InvoiceStatus enum enumerates the different statuses that an
    invoice can take. Member values are the literal strings used on the
    REST wire.

    LOCKED: Locked, for processing
    VOID: Void (i.e. the invoice was never finalised because its
        creation was cancelled)
    PAID: Valid, finalised and paid
    UNPAID: Valid, finalised but unpaid
    CLOSED: Closed, after processing
    PENDING: Pending (i.e. under construction)
    """

    LOCKED = 'LOCKED'
    VOID = 'VOID'
    PAID = 'PAID'
    UNPAID = 'UNPAID'
    CLOSED = 'CLOSED'
    PENDING = 'PENDING'
class Publish(PrintableEnum):
    """FCO REST API Publish enum.

    The SystemCapability Publish enum enumerates the different image
    publishing capabilities of a system. Member values are the literal
    strings used on the REST wire.

    BE_ADMIN: Billing entity admins are permitted to publish
    ANY: Any customer is permitted to publish
    VALIDATED_CUSTOMER: Validated customers are permitted to publish
    """

    BE_ADMIN = 'BE_ADMIN'
    ANY = 'ANY'
    VALIDATED_CUSTOMER = 'VALIDATED_CUSTOMER'
class BillingPeriod(PrintableEnum):
    """FCO REST API BillingPeriod enum.

    Enumerates the different types of billing or rating period (named
    PeriodType in the upstream API documentation). Member values are the
    literal strings used on the REST wire.

    HOURLY: Hourly charge
    MONTHLY: Monthly charge
    DAILY: Daily charge
    ANNUALLY: Annual charge
    ONE_OFF: One off charge
    WEEKLY: Weekly charge
    """

    HOURLY = 'HOURLY'
    MONTHLY = 'MONTHLY'
    DAILY = 'DAILY'
    ANNUALLY = 'ANNUALLY'
    ONE_OFF = 'ONE_OFF'
    WEEKLY = 'WEEKLY'
class ResourceState(PrintableEnum):
    """FCO REST API ResourceState enum.

    The ResourceState enum enumerates the different states of resources.
    Member values are the literal strings used on the REST wire.

    TO_BE_DELETED: Deletion pending on deletion of child objects
    LOCKED: Locked
    CREATING: Creating
    DELETED: Deleted
    ACTIVE: Active
    HIDDEN: Hidden
    """

    TO_BE_DELETED = 'TO_BE_DELETED'
    LOCKED = 'LOCKED'
    CREATING = 'CREATING'
    DELETED = 'DELETED'
    ACTIVE = 'ACTIVE'
    HIDDEN = 'HIDDEN'
class UserType(PrintableEnum):
    """FCO REST API UserType enum.

    An enum that represents the type of user. Member values are the
    literal strings used on the REST wire.

    USER: A traditional user type, that exists across a single
        Billing Entity
    API_KEY_USER: An API Key user type, that exists within a single
        Customer
    """

    USER = 'USER'
    API_KEY_USER = 'API_KEY_USER'
class PdfPageSize(PrintableEnum):
    """FCO REST API PdfPageSize enum.

    An enum that represents the size of a PDF page. Member values are the
    literal strings used on the REST wire.

    A4: A4 Page Size
    LETTER: Letter Page Size
    """

    A4 = 'A4'
    LETTER = 'LETTER'
class FirewallProtocol(PrintableEnum):
    """FCO REST API FirewallProtocol enum.

    Enumerates the permissible types of IP protocol within a
    FirewallTemplate.

    UDP: UDP
    L2TP: L2TP
    IPSEC_ESP: IPsec ESP
    ICMP6: ICMPv6
    GRE: GRE
    TCP: TCP
    IPSEC_AH: IPsec AH
    ICMP: ICMP
    ANY: any protocol

    Member values mirror the member names, matching the REST API wire
    representation.
    """
    UDP = 'UDP'
    L2TP = 'L2TP'
    IPSEC_ESP = 'IPSEC_ESP'
    ICMP6 = 'ICMP6'
    GRE = 'GRE'
    TCP = 'TCP'
    IPSEC_AH = 'IPSEC_AH'
    ICMP = 'ICMP'
    ANY = 'ANY'
class Email(PrintableEnum):
    """FCO REST API Email enum.

    SystemCapability flag describing whether a billing entity (BE) is
    able to send email.

    YES: the BE can send email
    NO: the BE cannot send email
    """
    YES = 'YES'
    NO = 'NO'
class Status(PrintableEnum):
    """FCO REST API Status enum.

    Enumerates the possible statuses of a customer account.

    ACTIVE: the account is active and a normal customer
    ADMIN: the account is active and an admin customer
    DISABLED: the customer has been disabled
    DELETED: the customer has been deleted
    CLOSED: the account has been closed

    Member values mirror the member names, matching the REST API wire
    representation.
    """
    ACTIVE = 'ACTIVE'
    ADMIN = 'ADMIN'
    DISABLED = 'DISABLED'
    DELETED = 'DELETED'
    CLOSED = 'CLOSED'
class VNCHandler(PrintableEnum):
    """FCO REST API VNCHandler enum.

    Enumerates the type of VNC handler requested.

    RAW: raw protocol (i.e. RFB)
    ANY: any protocol the cluster will support
    GUACAMOLE: Guacamole protocol
    """
    RAW = 'RAW'
    ANY = 'ANY'
    GUACAMOLE = 'GUACAMOLE'
class Vnc(PrintableEnum):
    """FCO REST API Vnc enum.

    SystemCapability flag indicating which VNC handlers a system
    supports.

    RAW: supports raw (RFB) handlers
    GUACAMOLE: supports Guacamole handlers
    """
    RAW = 'RAW'
    GUACAMOLE = 'GUACAMOLE'
class IPType(PrintableEnum):
    """FCO REST API IPType enum.

    Enumerates the different types of IP address.

    IPV4: IPv4 address
    INVALID: invalid IP address
    IPV6: IPv6 address
    """
    IPV4 = 'IPV4'
    INVALID = 'INVALID'
    IPV6 = 'IPV6'
class TriggerType(PrintableEnum):
"""FCO REST API TriggerType enum.
The TriggerType enum described the type of the trigger being
executed.
PRE_USER_API_CALL: Trigger which is called before a User
API call.
POST_CREATE: Trigger which is called after the
creation of a resource.
PRE_SERVER_STATE_CHANGE: Trigger which is called before a state
change on a server.
PRE_MODIFY: Trigger which is called before a
resource has been modified.
PRE_SERVER_METADATA_UPDATE: Trigger which is called before the
server metadata is set.
POST_JOB_STATE_CHANGE: Trigger which is called after a state
change on a job.
PRE_CREATE: Trigger which is called before the
creation of a resource.
POST_PURCHASE: Trigger which is called after a purchase
has been made.
POST_USER_API_CALL: Trigger which is called after a User API
call.
PRE_AUTH: Trigger which is called before a user is
authenticated.
POST_PAYMENT: Trigger which is called after a payment
or refund is attempted.
POST_EXCEPTION: Trigger which is called after an
exception is thrown.
PRE_PURCHASE: Trigger which is called before a
purchase has been made.
PRE_PAYMENT: Trigger which is called before a payment
or refund is attempted.
POST_COLLECTION: Trigger which is called after the VPS
billing collection loop.
POST_DELETE: Trigger which is called after the
deletion of a resource.
PRE_DELETE: Trigger which is called before the
deletion of a resource.
TAX_CALCULATION: Trigger which is called before tax is
calculated for | |
# <gh_stars>1-10  (repository-scrape marker; commented out so the module parses)
from random import randrange
from enum import Enum
class Colors:
    """ANSI terminal escape sequences used to colorize printed results."""
    HEADER = '\033[95m'  # bright magenta
    OKBLUE = '\033[94m'  # bright blue
    OKCYAN = '\033[96m'  # bright cyan
    OKGREEN = '\033[92m'  # bright green
    WARNING = '\033[93m'  # bright yellow
    FAIL = '\033[91m'  # bright red
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class State(Enum):
    """Terminal outcome of the A* search."""
    Failure = 0  # fringe exhausted without reaching the goal
    Victory = 1  # goal node reached
class Direction(Enum):
    """The four cardinal moves on the grid, named for readability.

    Note: right/left move along the y (column) axis while up/down move
    along the x (row) axis — see Node.block_around.
    """
    right = 0
    left = 1
    up = 2
    down = 3
class Position:
    """A 2-D grid coordinate; the start and goal cells are Positions."""
    def __init__(self, x, y):
        self.x = x  # row index
        self.y = y  # column index
    def __str__(self):
        # BUG FIX: the original read bare names `x`/`y` (NameError at
        # runtime); the coordinates must come from the instance.
        return "x: " + str(self.x) + ", y: " + str(self.y)
class Node:
    """One cell of the 10x10 maze grid used by the A* search.

    Holds the A* bookkeeping (g, f, parent) together with the walls
    around the cell (`blocked_directions`) and the `goto` direction used
    when drawing the final path.

    NOTE(review): methods reference the module-level globals `maze` and
    `goal`, which must exist before walls are placed or f-scores are
    computed.
    """
    def __init__(self, x, y):
        self.x = x  # row index (0..9)
        self.y = y  # column index (0..9)
        self.f = None  # total estimated cost g + h
        self.g = None  # cost of the best known path from the start
        self.blocked_directions = []  # walls around this cell
        self.parent = None  # predecessor on the best known path
        self.goto = None  # next move along the solved path (for drawing)
        self.block_around()
    def block_around(self):
        """If the node sits on the grid border, wall off the outward edges."""
        if self.y + 1 > 9:
            self.blocked_directions.append(Direction.right)
        if self.y - 1 < 0:
            self.blocked_directions.append(Direction.left)
        if self.x + 1 > 9:
            self.blocked_directions.append(Direction.down)
        if self.x - 1 < 0:
            self.blocked_directions.append(Direction.up)
    def block_it(self, direction):
        """Try to wall off `direction`; return True when the wall was placed."""
        if not self.sizecheck() or not self.has_permission(direction):
            return False
        self.perform_block(direction)
        return True
    def has_permission(self, direction):
        """Check that the neighbour on `direction` side can accept a wall.

        BUG FIX: in the original the left/down/up branches guarded the
        neighbour's sizecheck() behind the same out-of-bounds test that
        had already returned False, so the neighbour check never ran.
        The branches now mirror the (correct) `right` branch.
        """
        if direction == Direction.right and direction not in self.blocked_directions:
            if self.y + 1 > 9:
                return False
            if not maze.nodes[self.x, self.y + 1].sizecheck():
                return False
        elif direction == Direction.left and direction not in self.blocked_directions:
            if self.y - 1 < 0:
                return False
            if not maze.nodes[self.x, self.y - 1].sizecheck():
                return False
        elif direction == Direction.down and direction not in self.blocked_directions:
            if self.x + 1 > 9:
                return False
            if not maze.nodes[self.x + 1, self.y].sizecheck():
                return False
        elif direction == Direction.up and direction not in self.blocked_directions:
            if self.x - 1 < 0:
                return False
            if not maze.nodes[self.x - 1, self.y].sizecheck():
                return False
        return True
    def perform_block(self, direction):
        """Record the wall on this node and mirror it on the neighbour node."""
        if direction == Direction.right and direction not in self.blocked_directions:
            self.blocked_directions.append(Direction.right)
            maze.nodes[self.x, self.y + 1].blocked_directions.append(Direction.left)
        elif direction == Direction.left and direction not in self.blocked_directions:
            self.blocked_directions.append(Direction.left)
            maze.nodes[self.x, self.y - 1].blocked_directions.append(Direction.right)
        elif direction == Direction.down and direction not in self.blocked_directions:
            self.blocked_directions.append(Direction.down)
            maze.nodes[self.x + 1, self.y].blocked_directions.append(Direction.up)
        elif direction == Direction.up and direction not in self.blocked_directions:
            self.blocked_directions.append(Direction.up)
            maze.nodes[self.x - 1, self.y].blocked_directions.append(Direction.down)
    def sizecheck(self):
        """A node may carry at most 3 walls; return False once that limit is hit."""
        return len(self.blocked_directions) < 3
    def set_f(self, g):
        """Set g and recompute f = g + h(); return the new f value."""
        self.g = g
        self.f = g + self.h()
        return self.f
    def h(self):
        """Manhattan-distance heuristic from this node to the global goal."""
        return abs(self.x - goal.x) + abs(self.y - goal.y)
    def is_this_allowed(self, direction):
        """Return True when moving toward `direction` is not walled off."""
        return direction not in self.blocked_directions
    def __str__(self):
        base = "x: " + str(self.x) + ", y: " + str(self.y) + " - blocked = " + str(self.blocked_directions)
        if self.parent:
            return base + " - parent = x: " + str(self.parent.x) + ", y: " + str(self.parent.y)
        return base
class Maze:
    """The maze of the problem, generates random blocks and random field"""
    # NOTE(review): relies on module-level globals `yellow`, `green`, `red`,
    # `end` (color codes), `get_dir` (index -> Direction), `me` and `goal`
    # that are defined elsewhere in this file — confirm before reuse.
    def __init__(self):
        # Build the 10x10 grid; keys are (row, column) tuples.
        self.nodes = {}
        for i in range(10):
            for j in range(10):
                self.nodes[i, j] = Node(i, j)
    def blocks(self):
        """Making random blocks for maze"""
        # maximum possible number of blocks in a 10x10 nodes is 90
        blocknum = randrange(60, 90)
        print(f'{yellow}generating {blocknum} blocks...{end}')
        for num in range(blocknum):
            # Retry random cells/directions until a wall is legally placed.
            blocked = False
            while(not blocked):
                x = randrange(10)
                y = randrange(10)
                randrlud = randrange(4)
                direction = get_dir[randrlud]
                blocked = self.nodes[x, y].block_it(direction)
        print(f'{green}generating the world is done.{end}')
    def __str__(self):
        """Draw the maze"""
        # rl[i] accumulates the printable text of row i.  Every cell renders
        # as exactly three characters: optional left wall '|', the cell glyph
        # (G = goal, * = current position, blank otherwise) and an optional
        # right wall or '>'/'<' path arrow.
        rl = [''] * 10
        for i in range(10):
            for j in range(10):
                # Case 1: walls on both sides of the cell.
                if Direction.left in self.nodes[i, j].blocked_directions and Direction.right in self.nodes[i, j].blocked_directions:
                    if goal.x == i and goal.y == j:
                        rl[i] = rl[i] + f'{red}|{green}G{end}{red}|{end}'
                    elif me.x == i and me.y == j:
                        rl[i] = rl[i] + f'{red}|{green}*{end}{red}|{end}'
                    else:
                        rl[i] = rl[i] + f'{red}| |{end}'
                # Case 2: left wall only; may draw '>' when the path exits right.
                elif Direction.left in self.nodes[i, j].blocked_directions:
                    if goal.x == i and goal.y == j:
                        if Direction.right == self.nodes[i, j].goto:
                            rl[i] = rl[i] + f'{red}|{green}G{end}{end}{green}>{end}'
                        else:
                            rl[i] = rl[i] + f'{red}|{green}G{end}{end} '
                    elif me.x == i and me.y == j:
                        if Direction.right == self.nodes[i, j].goto:
                            rl[i] = rl[i] + f'{red}|{green}*{end}{end}{green}>{end}'
                        else:
                            rl[i] = rl[i] + f'{red}|{green}*{end}{end} '
                    else:
                        if Direction.right == self.nodes[i, j].goto:
                            rl[i] = rl[i] + f'{red}|{green} {end}{end}{green}>{end}'
                        else:
                            rl[i] = rl[i] + f'{red}|{end}  '
                # Case 3: right wall only; may draw '<' when the path exits left.
                elif Direction.right in self.nodes[i, j].blocked_directions:
                    if goal.x == i and goal.y == j:
                        if Direction.left == self.nodes[i, j].goto:
                            rl[i] = rl[i] + f'{green}<{end}{green}G{end}{red}|{end}'
                        else:
                            rl[i] = rl[i] + f' {green}G{end}{red}|{end}'
                    elif me.x == i and me.y == j:
                        if Direction.left == self.nodes[i, j].goto:
                            rl[i] = rl[i] + f'{green}<{end}{green}*{end}{red}|{end}'
                        else:
                            rl[i] = rl[i] + f' {green}*{end}{red}|{end}'
                    else:
                        if Direction.left == self.nodes[i, j].goto:
                            rl[i] = rl[i] + f'{green}<{end}{green} {end}{red}|{end}'
                        else:
                            rl[i] = rl[i] + f'  {red}|{end}'
                # Case 4: no side walls; arrows only.
                else:
                    if goal.x == i and goal.y == j:
                        if Direction.left == self.nodes[i, j].goto:
                            rl[i] = rl[i] + f'{green}<{end}{green}G{end} '
                        elif Direction.right == self.nodes[i, j].goto:
                            rl[i] = rl[i] + f' {green}G{end}{green}>{end}'
                        else:
                            rl[i] = rl[i] + f' {green}G{end} '
                    elif me.x == i and me.y == j:
                        if Direction.left == self.nodes[i, j].goto:
                            rl[i] = rl[i] + f'{green}<{end}{green}*{end} '
                        elif Direction.right == self.nodes[i, j].goto:
                            rl[i] = rl[i] + f' {green}*{end}{green}>{end}'
                        else:
                            rl[i] = rl[i] + f' {green}*{end} '
                    else:
                        if Direction.left == self.nodes[i, j].goto:
                            rl[i] = rl[i] + f'{green}<{end}{green} {end} '
                        elif Direction.right == self.nodes[i, j].goto:
                            rl[i] = rl[i] + f' {green} {end}{green}>{end}'
                        else:
                            rl[i] = rl[i] + f' {green} {end} '
        # down[i] holds the horizontal separator printed under row i:
        # '---' for a wall, 'v'/'^' for a vertical path move, blank otherwise.
        # Note: the i+1 lookup below is safe because bottom-row nodes always
        # have Direction.down blocked (see Node.block_around), so the first
        # branch wins when i == 9.
        down = [''] * 10
        for i in range(10):
            for j in range(10):
                if Direction.down in self.nodes[i, j].blocked_directions:
                    if j == 0:
                        down[i] = down[i] + f'{red}|--{end}'
                    elif j == 9:
                        down[i] = down[i] + f'{red}--|{end}'
                    else:
                        down[i] = down[i] + f'{red}---{end}'
                elif Direction.down == self.nodes[i, j].goto:
                    if j == 0:
                        down[i] = down[i] + f'{red}|{end}{green}v{end} '
                    elif j == 9:
                        down[i] = down[i] + f' {green}v{end}{red}|{end}'
                    else:
                        down[i] = down[i] + f' {green}v{end} '
                elif Direction.up == self.nodes[i + 1, j].goto:
                    if j == 0:
                        down[i] = down[i] + f'|{green}^{end} '
                    elif j == 9:
                        down[i] = down[i] + f' {green}^{end}|'
                    else:
                        down[i] = down[i] + f' {green}^{end} '
                else:
                    if j == 0:
                        down[i] = down[i] + f'{red}| {end}'
                    elif j == 9:
                        down[i] = down[i] + f'{red} |{end}'
                    else:
                        down[i] = down[i] + f'   '
        # Top border, then alternate cell rows and separator rows.
        output = f'{red}|--{end}' + f'{red}---{end}'*8 + f'{red}--|{end}' + '\n'
        for i in range(10):
            output = output + rl[i] + '\n' + down[i] + '\n'
        return output
class Environment:
"""My Main usable, direct access class in main that everything is in it"""
def __init__(self):
maze.blocks()
self.maze = maze
self.me = me
self.goal = goal
self.fringe = []
self.closed = []
def start_solve(self):
"""Initialization for A* Algorithm"""
self.fringe = []
self.closed = []
node = maze.nodes[me.x, me.y]
node.set_f(0)
self.fringe.append(node)
situation = self.solve()
if situation == State.Victory:
self.set_gotos()
return situation
def solve(self):
"""Actually solve the algorithm"""
while(True):
if not self.fringe:
return State.Failure
node = self.lowest_node()
if node.x == goal.x and node.y == goal.y:
return State.Victory
if node.is_this_allowed(Direction.up):
temp_node = maze.nodes[node.x - 1, node.y]
if temp_node in self.closed:
tempf = temp_node.f
if tempf > temp_node.set_f(node.g + 1):
temp_node.parent = node
self.closed.remove(temp_node)
self.fringe.append(temp_node)
elif temp_node in self.fringe:
tempf = temp_node.f
tempg = temp_node.g
if tempf < temp_node.set_f(node.g + 1):
temp_node.f = tempf
temp_node.g = tempg
elif tempf > temp_node.f:
temp_node.parent = node
else:
temp_node.set_f(node.g + 1)
temp_node.parent = node
self.fringe.append(temp_node)
| |
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __next__(self, *args, **kwargs): # real signature unknown
""" Implement next(self). """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Return state information for pickling. """
pass
def __setstate__(self, *args, **kwargs): # real signature unknown
""" Set state information for unpickling. """
pass
# Auto-generated IDE skeleton for itertools.filterfalse: signatures only,
# every body is a placeholder `pass`; the real implementation is in the
# C-level `itertools` module.
class filterfalse(object):
    """
    filterfalse(function or None, sequence) --> filterfalse object
    Return those items of sequence for which function(item) is false.
    If function is None, return the items that are false.
    """
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __init__(self, function_or_None, sequence): # real signature unknown; restored from __doc__
        pass
    def __iter__(self, *args, **kwargs): # real signature unknown
        """ Implement iter(self). """
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    def __next__(self, *args, **kwargs): # real signature unknown
        """ Implement next(self). """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Return state information for pickling. """
        pass
# Auto-generated IDE skeleton for itertools.groupby: signatures only,
# every body is a placeholder `pass`; the real implementation is in the
# C-level `itertools` module.
class groupby(object):
    """
    groupby(iterable[, keyfunc]) -> create an iterator which returns
    (key, sub-iterator) grouped by each value of key(value).
    """
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __init__(self, iterable, key=None): # known case of itertools.groupby.__init__
        """ Initialize self. See help(type(self)) for accurate signature. """
        pass
    def __iter__(self, *args, **kwargs): # real signature unknown
        """ Implement iter(self). """
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    def __next__(self, *args, **kwargs): # real signature unknown
        """ Implement next(self). """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Return state information for pickling. """
        pass
    def __setstate__(self, *args, **kwargs): # real signature unknown
        """ Set state information for unpickling. """
        pass
# Auto-generated IDE skeleton for itertools.islice: signatures only,
# every body is a placeholder `pass`; the real implementation is in the
# C-level `itertools` module.
class islice(object):
    """
    islice(iterable, stop) --> islice object
    islice(iterable, start, stop[, step]) --> islice object
    Return an iterator whose next() method returns selected values from an
    iterable.  If start is specified, will skip all preceding elements;
    otherwise, start defaults to zero.  Step defaults to one.  If
    specified as another value, step determines how many values are
    skipped between successive calls.  Works like a slice() on a list
    but returns an iterator.
    """
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __init__(self, iterable, stop): # real signature unknown; restored from __doc__
        pass
    def __iter__(self, *args, **kwargs): # real signature unknown
        """ Implement iter(self). """
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    def __next__(self, *args, **kwargs): # real signature unknown
        """ Implement next(self). """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Return state information for pickling. """
        pass
    def __setstate__(self, *args, **kwargs): # real signature unknown
        """ Set state information for unpickling. """
        pass
# Auto-generated IDE skeleton for itertools.permutations: signatures only,
# every body is a placeholder `pass`; the real implementation is in the
# C-level `itertools` module.
class permutations(object):
    """
    permutations(iterable[, r]) --> permutations object
    Return successive r-length permutations of elements in the iterable.
    permutations(range(3), 2) --> (0,1), (0,2), (1,0), (1,2), (2,0), (2,1)
    """
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __init__(self, iterable, r=None): # real signature unknown; restored from __doc__
        pass
    def __iter__(self, *args, **kwargs): # real signature unknown
        """ Implement iter(self). """
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    def __next__(self, *args, **kwargs): # real signature unknown
        """ Implement next(self). """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Return state information for pickling. """
        pass
    def __setstate__(self, *args, **kwargs): # real signature unknown
        """ Set state information for unpickling. """
        pass
    def __sizeof__(self, *args, **kwargs): # real signature unknown
        """ Returns size in memory, in bytes. """
        pass
# Auto-generated IDE skeleton for itertools.product: signatures only,
# every body is a placeholder `pass`; the real implementation is in the
# C-level `itertools` module.
class product(object):
    """
    product(*iterables, repeat=1) --> product object
    Cartesian product of input iterables.  Equivalent to nested for-loops.
    For example, product(A, B) returns the same as: ((x,y) for x in A for y in B).
    The leftmost iterators are in the outermost for-loop, so the output tuples
    cycle in a manner similar to an odometer (with the rightmost element changing
    on every iteration).
    To compute the product of an iterable with itself, specify the number
    of repetitions with the optional repeat keyword argument. For example,
    product(A, repeat=4) means the same as product(A, A, A, A).
    product('ab', range(3)) --> ('a',0) ('a',1) ('a',2) ('b',0) ('b',1) ('b',2)
    product((0,1), (0,1), (0,1)) --> (0,0,0) (0,0,1) (0,1,0) (0,1,1) (1,0,0) ...
    """
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __init__(self, *iterables, repeat=1): # known case of itertools.product.__init__
        """ Initialize self. See help(type(self)) for accurate signature. """
        # BUG FIX: the original stub ended with `return []`; __init__ must
        # return None (a non-None return raises TypeError on instantiation),
        # and every sibling stub in this file uses `pass`.
        pass
    def __iter__(self, *args, **kwargs): # real signature unknown
        """ Implement iter(self). """
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    def __next__(self, *args, **kwargs): # real signature unknown
        """ Implement next(self). """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Return state information for pickling. """
        pass
    def __setstate__(self, *args, **kwargs): # real signature unknown
        """ Set state information for unpickling. """
        pass
    def __sizeof__(self, *args, **kwargs): # real signature unknown
        """ Returns size in memory, in bytes. """
        pass
# Auto-generated IDE skeleton for itertools.repeat: signatures only,
# every body is a placeholder `pass`; the real implementation is in the
# C-level `itertools` module.
class repeat(object):
    """
    repeat(object [,times]) -> create an iterator which returns the object
    for the specified number of times.  If not specified, returns the object
    endlessly.
    """
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __init__(self, p_object, times=None): # real signature unknown; restored from __doc__
        pass
    def __iter__(self, *args, **kwargs): # real signature unknown
        """ Implement iter(self). """
        pass
    def __length_hint__(self, *args, **kwargs): # real signature unknown
        """ Private method returning an estimate of len(list(it)). """
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    def __next__(self, *args, **kwargs): # real signature unknown
        """ Implement next(self). """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Return state information for pickling. """
        pass
    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass
# Auto-generated IDE skeleton for itertools.starmap: signatures only,
# every body is a placeholder `pass`; the real implementation is in the
# C-level `itertools` module.
class starmap(object):
    """
    starmap(function, sequence) --> starmap object
    Return an iterator whose values are returned from the function evaluated
    with an argument tuple taken from the given sequence.
    """
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __init__(self, function, sequence): # real signature unknown; restored from __doc__
        pass
    def __iter__(self, *args, **kwargs): # real signature unknown
        """ Implement iter(self). """
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    def __next__(self, *args, **kwargs): # real signature unknown
        """ Implement next(self). """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Return state information for pickling. """
        pass
class takewhile(object):
"""
takewhile(predicate, iterable) --> takewhile object
Return successive entries from an iterable as long as the
predicate evaluates to true for each entry.
"""
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __init__(self, predicate, iterable): # real signature unknown; restored from __doc__
pass
def __iter__(self, *args, **kwargs): # real signature unknown
""" Implement iter(self). """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # | |
##
# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of OSM
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: <EMAIL>
##
import abc
import asyncio
import time
from n2vc.loggable import Loggable
class K8sConnector(abc.ABC, Loggable):
"""
####################################################################################
################################### P U B L I C ####################################
####################################################################################
"""
    def __init__(self, db: object, log: object = None, on_update_db=None):
        """Initialize the generic K8s connector.

        :param db: database object to write current operation status
        :param log: logger for tracing
        :param on_update_db: callback called when k8s connector updates database
        """
        # parent class: set up logging with a "K8S" prefix on every line
        Loggable.__init__(self, log=log, log_to_console=True, prefix="\nK8S")
        # self.log.info('Initializing generic K8S connector')
        # the database and update callback
        self.db = db
        self.on_update_db = on_update_db
        # self.log.info('K8S generic connector initialized')
    @abc.abstractmethod
    async def init_env(
        self, k8s_creds: str, namespace: str = "kube-system", reuse_cluster_uuid=None
    ) -> (str, bool):
        """
        It prepares a given K8s cluster environment to run Charts or juju Bundles on
        both sides:
            client (OSM)
            server (Tiller/Charm)

        :param k8s_creds: credentials to access a given K8s cluster, i.e. a valid
            '.kube/config'
        :param namespace: optional namespace to be used for the K8s engine (helm
            tiller, juju). By default, 'kube-system' will be used
        :param reuse_cluster_uuid: existing cluster uuid for reuse
        :return: 2-tuple of the uuid of the K8s cluster and True if the connector
            has installed some software in the cluster (on error, an exception
            will be raised)
        """
    @abc.abstractmethod
    async def repo_add(
        self, cluster_uuid: str, name: str, url: str, repo_type: str = "chart"
    ):
        """
        Add a new repository to OSM database

        :param cluster_uuid: the cluster
        :param name: name for the repo in OSM
        :param url: URL of the repo
        :param repo_type: either "chart" (helm) or "bundle" (juju)
        :return: True if successful
        """
    @abc.abstractmethod
    async def repo_list(self, cluster_uuid: str):
        """
        Get the list of registered repositories

        :param cluster_uuid: the cluster
        :return: list of registered repositories: [ (name, url) .... ]
        """
    @abc.abstractmethod
    async def repo_remove(self, cluster_uuid: str, name: str):
        """
        Remove a repository from OSM

        :param cluster_uuid: the cluster
        :param name: repo name in OSM
        :return: True if successful
        """
    @abc.abstractmethod
    async def synchronize_repos(self, cluster_uuid: str, name: str):
        """
        Synchronizes the list of repositories created in the cluster with
        the repositories added by the NBI

        :param cluster_uuid: the cluster
        :param name: name of the cluster (as registered in OSM)
        :return: List of repositories deleted from the cluster and dictionary with
            repos added
        """
    @abc.abstractmethod
    async def reset(
        self, cluster_uuid: str, force: bool = False, uninstall_sw: bool = False
    ) -> bool:
        """
        Uninstalls Tiller/Charm from a known K8s cluster and removes it from the list
        of known K8s clusters. Intended to be used e.g. when the NS instance is deleted.

        :param cluster_uuid: UUID of a K8s cluster known by OSM.
        :param force: force deletion, even in case there are deployed releases
        :param uninstall_sw: flag to indicate that uninstallation of the installed
            software (Tiller/Charm) from the cluster is needed
        :return: True if successful
        """
    @abc.abstractmethod
    async def install(
        self,
        cluster_uuid: str,
        kdu_model: str,
        atomic: bool = True,
        timeout: float = 300,
        params: dict = None,
        db_dict: dict = None,
        kdu_name: str = None,
        namespace: str = None,
    ):
        """
        Deploys a new KDU instance. It would implicitly rely on the `install` call
        to deploy the Chart/Bundle properly parametrized (in practice, this call would
        happen before any _initial-config-primitive_ of the VNF is called).

        :param cluster_uuid: UUID of a K8s cluster known by OSM
        :param kdu_model: chart/bundle:version reference (string), which can be either
            of these options:
            - a name of chart/bundle available via the repos known by OSM
            - a path to a packaged chart/bundle
            - a path to an unpacked chart/bundle directory or a URL
        :param atomic: If set, installation process purges chart/bundle on fail, also
            will wait until all the K8s objects are active
        :param timeout: Time in seconds to wait for the install of the chart/bundle
            (defaults to Helm default timeout: 300s)
        :param params: dictionary of key-value pairs for instantiation parameters
            (overriding default values)
        :param dict db_dict: where to write into database when the status changes.
                        It contains a dict with {collection: <str>, filter: {},
                        path: <str>},
                            e.g. {collection: "nsrs", filter: {_id: <nsd-id>},
                            path: "_admin.deployed.K8S.3"}
        :param kdu_name: Name of the KDU instance to be installed
        :param namespace: K8s namespace to use for the KDU instance
        :return: True if successful
        """
    @abc.abstractmethod
    async def upgrade(
        self,
        cluster_uuid: str,
        kdu_instance: str,
        kdu_model: str = None,
        atomic: bool = True,
        timeout: float = 300,
        params: dict = None,
        db_dict: dict = None,
    ):
        """
        Upgrades an existing KDU instance. It would implicitly use the `upgrade` call
        over an existing Chart/Bundle. It can be used both to upgrade the chart or to
        reconfigure it. This would be exposed as Day-2 primitive.

        :param cluster_uuid: UUID of a K8s cluster known by OSM
        :param kdu_instance: unique name for the KDU instance to be updated
        :param kdu_model: new chart/bundle:version reference
        :param atomic: rollback in case of fail and wait for pods and services are
            available
        :param timeout: Time in seconds to wait for the install of the chart/bundle
            (defaults to Helm default timeout: 300s)
        :param params: new dictionary of key-value pairs for instantiation parameters
        :param dict db_dict: where to write into database when the status changes.
                        It contains a dict with {collection: <str>, filter: {},
                        path: <str>},
                            e.g. {collection: "nsrs", filter: {_id: <nsd-id>},
                            path: "_admin.deployed.K8S.3"}
        :return: reference to the new revision number of the KDU instance
        """
    @abc.abstractmethod
    async def rollback(
        self, cluster_uuid: str, kdu_instance: str, revision=0, db_dict: dict = None
    ):
        """
        Rolls back a previous update of a KDU instance. It would implicitly use the
        `rollback` call. It can be used both to rollback from a Chart/Bundle version
        update or from a reconfiguration. This would be exposed as Day-2 primitive.

        :param cluster_uuid: UUID of a K8s cluster known by OSM
        :param kdu_instance: unique name for the KDU instance
        :param revision: revision to which revert changes. If omitted, it will revert
            the last update only
        :param dict db_dict: where to write into database when the status changes.
                        It contains a dict with {collection: <str>, filter: {},
                        path: <str>},
                            e.g. {collection: "nsrs", filter: {_id: <nsd-id>},
                            path: "_admin.deployed.K8S.3"}
        :return: If successful, reference to the current active revision of the KDU
            instance after the rollback
        """
    @abc.abstractmethod
    async def uninstall(self, cluster_uuid: str, kdu_instance: str):
        """
        Removes an existing KDU instance. It would implicitly use the `delete` call
        (this call would happen after all _terminate-config-primitive_ of the VNF are
        invoked).

        :param cluster_uuid: UUID of a K8s cluster known by OSM
        :param kdu_instance: unique name for the KDU instance to be deleted
        :return: True if successful
        """
    @abc.abstractmethod
    async def exec_primitive(
        self,
        cluster_uuid: str = None,
        kdu_instance: str = None,
        primitive_name: str = None,
        timeout: float = 300,
        params: dict = None,
        db_dict: dict = None,
    ) -> str:
        """Exec primitive (Juju action)

        :param cluster_uuid: The UUID of the cluster
        :param kdu_instance: The unique name of the KDU instance
        :param primitive_name: Name of action that will be executed
        :param timeout: Timeout for action execution
        :param params: Dictionary of all the parameters needed for the action
        :param db_dict: Dictionary for any additional data
        :return: Returns the output of the action
        """
@abc.abstractmethod
async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str:
"""
These calls will retrieve from the Chart/Bundle:
- The list of configurable values and their defaults (e.g. in Charts,
it would retrieve the contents of `values.yaml`).
- If available, any embedded help file (e.g. `readme.md`) embedded | |
DataSourceInterface()
self.data_source_interface_info = temp_model.from_map(m['data_source_interface_info'])
return self
class AuthPersonEnterpriseInfo(TeaModel):
    """Enterprise identity details of the authorising party.

    Attribute names double as the wire keys consumed/produced by
    ``to_map``/``from_map``, so the field plumbing is table-driven.
    """

    # Declaration order matters: validate() and to_map() follow it.
    _FIELDS = (
        'enterprise_name',                    # enterprise name
        'enterprise_credit_num',              # unified social credit code
        'enterprise_legal_person_name',       # legal representative's name
        'enterprise_legal_person_id',         # legal representative's ID number
        'enterprise_legal_person_phone_num',  # legal representative's phone number
    )

    def __init__(
        self,
        enterprise_name: str = None,
        enterprise_credit_num: str = None,
        enterprise_legal_person_name: str = None,
        enterprise_legal_person_id: str = None,
        enterprise_legal_person_phone_num: int = None,
    ):
        self.enterprise_name = enterprise_name
        self.enterprise_credit_num = enterprise_credit_num
        self.enterprise_legal_person_name = enterprise_legal_person_name
        self.enterprise_legal_person_id = enterprise_legal_person_id
        self.enterprise_legal_person_phone_num = enterprise_legal_person_phone_num

    def validate(self):
        # Every field is mandatory.
        for field_name in self._FIELDS:
            self.validate_required(getattr(self, field_name), field_name)

    def to_map(self):
        # Serialize only the fields that are set, preserving declaration order.
        return {
            field_name: getattr(self, field_name)
            for field_name in self._FIELDS
            if getattr(self, field_name) is not None
        }

    def from_map(self, m: dict = None):
        m = m or dict()
        for field_name in self._FIELDS:
            if m.get(field_name) is not None:
                setattr(self, field_name, m.get(field_name))
        return self
class BeAuthedPersonInfo(TeaModel):
    """Identity details of the enterprise being authorized (be-authed party)."""

    def __init__(
        self,
        enterprise_name: str = None,
        enterprise_credit_num: str = None,
        enterprise_legal_person_name: str = None,
        enterprise_legal_person_id: str = None,
    ):
        # Enterprise name.
        self.enterprise_name = enterprise_name
        # Unified social credit code of the enterprise.
        self.enterprise_credit_num = enterprise_credit_num
        # Name of the enterprise's legal representative.
        self.enterprise_legal_person_name = enterprise_legal_person_name
        # ID-card number of the legal representative.
        self.enterprise_legal_person_id = enterprise_legal_person_id

    def validate(self):
        # All four fields are mandatory.
        for name in ('enterprise_name', 'enterprise_credit_num',
                     'enterprise_legal_person_name', 'enterprise_legal_person_id'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        # Serialize only populated fields, preserving declaration order.
        result = dict()
        for name in ('enterprise_name', 'enterprise_credit_num',
                     'enterprise_legal_person_name', 'enterprise_legal_person_id'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        # Populate fields from a dict; return self for chaining.
        m = m or dict()
        for name in ('enterprise_name', 'enterprise_credit_num',
                     'enterprise_legal_person_name', 'enterprise_legal_person_id'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class DataSource(TeaModel):
    """A registered data source: its id and interface address."""

    def __init__(
        self,
        id: str = None,
        address: str = None,
    ):
        # Data source ID.
        self.id = id
        # Data source interface address.
        self.address = address

    def validate(self):
        # Both fields are mandatory.
        self.validate_required(self.id, 'id')
        self.validate_required(self.address, 'address')

    def to_map(self):
        # Serialize only populated fields.
        return {
            key: value
            for key, value in (('id', self.id), ('address', self.address))
            if value is not None
        }

    def from_map(self, m: dict = None):
        # Populate fields from a dict; return self for chaining.
        m = m or dict()
        for key in ('id', 'address'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class GetDasLinkRequest(TeaModel):
    """Request for obtaining a DAS H5 link for a be-authed person's app."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        be_authed_person_app_biz_uuid: str = None,
    ):
        # Authorization token used in OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # biz_uuid of the application accessed by the be-authed person.
        self.be_authed_person_app_biz_uuid = be_authed_person_app_biz_uuid

    def validate(self):
        # Only the application biz_uuid is mandatory.
        self.validate_required(self.be_authed_person_app_biz_uuid, 'be_authed_person_app_biz_uuid')

    def to_map(self):
        # Serialize only populated fields.
        return {
            key: value
            for key, value in (
                ('auth_token', self.auth_token),
                ('product_instance_id', self.product_instance_id),
                ('be_authed_person_app_biz_uuid', self.be_authed_person_app_biz_uuid),
            )
            if value is not None
        }

    def from_map(self, m: dict = None):
        # Populate fields from a dict; return self for chaining.
        m = m or dict()
        for key in ('auth_token', 'product_instance_id', 'be_authed_person_app_biz_uuid'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class GetDasLinkResponse(TeaModel):
    """Response carrying the tokenized H5 link for the DAS flow."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        token: str = None,
        link: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Human-readable description of any error.
        self.result_msg = result_msg
        # H5 application identity token.
        self.token = token
        # H5 page link carrying the token information.
        self.link = link

    def validate(self):
        # Responses carry no required fields.
        pass

    def to_map(self):
        # Serialize only populated fields.
        return {
            key: value
            for key, value in (
                ('req_msg_id', self.req_msg_id),
                ('result_code', self.result_code),
                ('result_msg', self.result_msg),
                ('token', self.token),
                ('link', self.link),
            )
            if value is not None
        }

    def from_map(self, m: dict = None):
        # Populate fields from a dict; return self for chaining.
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'token', 'link'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class GetDasEnterprisevcRequest(TeaModel):
    """Request model for fetching an enterprise verifiable credential (VC)."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        auth_instance_biz_uuid: str = None,
        auth_person_enterprise_info: AuthPersonEnterpriseInfo = None,
    ):
        # Authorization token used in OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Business UUID of the authorization instance.
        self.auth_instance_biz_uuid = auth_instance_biz_uuid
        # Identity information of the authorizing enterprise.
        self.auth_person_enterprise_info = auth_person_enterprise_info

    def validate(self):
        """Check required fields; delegates to the nested model as well."""
        self.validate_required(self.auth_instance_biz_uuid, 'auth_instance_biz_uuid')
        self.validate_required(self.auth_person_enterprise_info, 'auth_person_enterprise_info')
        if self.auth_person_enterprise_info:
            self.auth_person_enterprise_info.validate()

    def to_map(self):
        """Serialize set fields (recursing into the nested model) to a dict."""
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.auth_instance_biz_uuid is not None:
            result['auth_instance_biz_uuid'] = self.auth_instance_biz_uuid
        if self.auth_person_enterprise_info is not None:
            result['auth_person_enterprise_info'] = self.auth_person_enterprise_info.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict; returns self for chaining."""
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('auth_instance_biz_uuid') is not None:
            self.auth_instance_biz_uuid = m.get('auth_instance_biz_uuid')
        if m.get('auth_person_enterprise_info') is not None:
            # Nested payload is deserialized through its own model class.
            temp_model = AuthPersonEnterpriseInfo()
            self.auth_person_enterprise_info = temp_model.from_map(m['auth_person_enterprise_info'])
        return self
class GetDasEnterprisevcResponse(TeaModel):
    """Response carrying an enterprise verifiable credential (VC)."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        vc: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Human-readable description of any error.
        self.result_msg = result_msg
        # VC content.
        self.vc = vc

    def validate(self):
        # Responses carry no required fields.
        pass

    def to_map(self):
        # Serialize only populated fields.
        return {
            key: value
            for key, value in (
                ('req_msg_id', self.req_msg_id),
                ('result_code', self.result_code),
                ('result_msg', self.result_msg),
                ('vc', self.vc),
            )
            if value is not None
        }

    def from_map(self, m: dict = None):
        # Populate fields from a dict; return self for chaining.
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'vc'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class GetDasIndividualvcRequest(TeaModel):
    """Request model for fetching an individual's verifiable credential (VC)."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        auth_instance_biz_uuid: str = None,
        authed_person_individual_info: AuthPersonIndividualInfo = None,
    ):
        # Authorization token used in OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Business UUID of the authorization instance.
        self.auth_instance_biz_uuid = auth_instance_biz_uuid
        # Personal information of the authorizing individual.
        self.authed_person_individual_info = authed_person_individual_info

    def validate(self):
        """Check required fields; delegates to the nested model as well."""
        self.validate_required(self.auth_instance_biz_uuid, 'auth_instance_biz_uuid')
        self.validate_required(self.authed_person_individual_info, 'authed_person_individual_info')
        if self.authed_person_individual_info:
            self.authed_person_individual_info.validate()

    def to_map(self):
        """Serialize set fields (recursing into the nested model) to a dict."""
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.auth_instance_biz_uuid is not None:
            result['auth_instance_biz_uuid'] = self.auth_instance_biz_uuid
        if self.authed_person_individual_info is not None:
            result['authed_person_individual_info'] = self.authed_person_individual_info.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict; returns self for chaining."""
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('auth_instance_biz_uuid') is not None:
            self.auth_instance_biz_uuid = m.get('auth_instance_biz_uuid')
        if m.get('authed_person_individual_info') is not None:
            # Nested payload is deserialized through its own model class.
            temp_model = AuthPersonIndividualInfo()
            self.authed_person_individual_info = temp_model.from_map(m['authed_person_individual_info'])
        return self
class GetDasIndividualvcResponse(TeaModel):
    """Response carrying an individual's verifiable credential (VC)."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        vc: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Human-readable description of any error.
        self.result_msg = result_msg
        # VC.
        self.vc = vc

    def validate(self):
        # Responses carry no required fields.
        pass

    def to_map(self):
        # Serialize only populated fields.
        return {
            key: value
            for key, value in (
                ('req_msg_id', self.req_msg_id),
                ('result_code', self.result_code),
                ('result_msg', self.result_msg),
                ('vc', self.vc),
            )
            if value is not None
        }

    def from_map(self, m: dict = None):
        # Populate fields from a dict; return self for chaining.
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'vc'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class SendDasSmsRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_instance_id: str = None,
receive_phone_num: int = None,
auth_instance_biz_uuid: str = None,
):
# OAuth模式下的授权token
self.auth_token = auth_token
self.product_instance_id = product_instance_id
# 接收方电话号码
self.receive_phone_num = receive_phone_num
# 授权实例biz_uuid
self.auth_instance_biz_uuid = auth_instance_biz_uuid
def validate(self):
self.validate_required(self.receive_phone_num, 'receive_phone_num')
self.validate_required(self.auth_instance_biz_uuid, 'auth_instance_biz_uuid')
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.product_instance_id is not None:
result['product_instance_id'] = self.product_instance_id
if self.receive_phone_num is not None:
result['receive_phone_num'] = self.receive_phone_num
if self.auth_instance_biz_uuid is not None:
result['auth_instance_biz_uuid'] = self.auth_instance_biz_uuid
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('product_instance_id') is not None:
self.product_instance_id = m.get('product_instance_id')
if m.get('receive_phone_num') | |
<reponame>IFCA/keystoneauth-oidc
# coding=utf-8
# Copyright 2016 Spanish National Research Council
# Copyright 2016 INDIGO-DataCloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pkce
import socket
import webbrowser
from keystoneauth1 import _utils as utils
from keystoneauth1 import access
from keystoneauth1.identity.v3 import federation
from keystoneauth1.identity.v3 import oidc
from positional import positional
from six.moves import BaseHTTPServer
from six.moves import urllib
from six.moves.urllib import parse as urlparse
from keystoneauth_oidc import exceptions
_logger = utils.get_logger(__name__)
class _ClientCallbackServer(BaseHTTPServer.HTTPServer):
    """HTTP server to handle the OpenID Connect callback to localhost.

    This server will wait for a single request, storing the authorization
    code and state obtained from the incoming request into the 'code' and
    'state' attributes.
    """

    # Populated by _ClientCallbackHandler when the callback request arrives.
    code = None
    # Fix: the handler also assigns ``self.server.state``, but only when a
    # request is actually handled. Declare a class-level default so reading
    # ``httpd.state`` can never raise AttributeError (e.g. after a timeout).
    state = None

    def server_bind(self):
        """Override original bind and set a timeout.

        Authentication may fail and we could get stuck here forever, so this
        method sets up a sane timeout.
        """
        # NOTE(aloga): cannot call super here, as HTTPServer does not have
        # object as an ancestor
        BaseHTTPServer.HTTPServer.server_bind(self)
        self.socket.settimeout(60)
class _ClientCallbackHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP request handler for the OpenID Connect redirect callback.

    The OpenID Connect authorization code grant type is a redirection based
    flow where the client needs to be capable of receiving incoming requests
    (via redirection), where the access code will be obtained.

    This class implements a request handler that will process a single
    request and store the obtained code into the server's 'code' attribute.
    """

    def do_GET(self):
        """Handle a GET request and obtain an authorization code.

        Renders a small confirmation page, then extracts the 'code' and
        'state' query parameters (if present) and stores them on the server.
        """
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        page = (
            b"<html><head><title>Authentication Status OK</title></head>"
            b"<body><p>The authentication flow has been completed.</p>"
            b"<p>You can close this window.</p>"
            b"</body></html>"
        )
        self.wfile.write(page)

        params = urlparse.parse_qs(urlparse.urlparse(self.path).query)
        self.server.code = params.get("code", [None])[0]
        self.server.state = params.get("state", [None])[0]

    def log_message(self, format, *args):
        """Do not log messages to stdout."""
def _wait_for_code(redirect_host, redirect_port):
    """Spawn an HTTP server and wait for the auth code.

    :param redirect_host: The hostname where the authorization request will
                          be redirected. This normally is localhost. This
                          indicates the hostname where the callback http
                          server will listen.
    :type redirect_host: string
    :param redirect_port: The port where the authorization request will
                          be redirected. This indicates the port where the
                          callback http server will bind to.
    :type redirect_port: int
    :returns: a ``(code, state)`` tuple captured from the callback request.
    :raises exceptions.MissingOidcAuthorizationCode: if no code was obtained.
    """
    server_address = (redirect_host, redirect_port)
    try:
        httpd = _ClientCallbackServer(server_address,
                                      _ClientCallbackHandler)
    except socket.error:
        # Fix: use lazy %-style logging arguments instead of eagerly
        # formatting the message with the % operator.
        _logger.error("Cannot spawn the callback server on port "
                      "%s, please specify a different port.",
                      redirect_port)
        raise

    # Handle exactly one request; the server socket has a 60s timeout
    # (see _ClientCallbackServer.server_bind), so this cannot hang forever.
    httpd.handle_request()
    if httpd.code is not None:
        return httpd.code, httpd.state
    else:
        raise exceptions.MissingOidcAuthorizationCode()
class OidcAuthorizationCode(oidc._OidcBase):
    """Implementation for OpenID Connect Authorization Code.

    Drives the OAuth 2.0 authorization code grant: a browser window is
    opened for the user to authorize the request, the resulting code is
    captured by a local callback HTTP server, and the code is then
    exchanged for an access token. PKCE is used when no client secret is
    configured (public client).
    """

    grant_type = 'authorization_code'

    @positional(4)
    def __init__(self, auth_url, identity_provider, protocol, client_id,
                 client_secret=None,
                 access_token_endpoint=None,
                 authorization_endpoint=None,
                 discovery_endpoint=None,
                 access_token_type='access_token',
                 redirect_host="localhost", redirect_port=8080,
                 **kwargs):
        """The OpenID Authorization Code plugin expects the following.

        :param redirect_host: The hostname where the authorization request will
                              be redirected. This normally is localhost. This
                              indicates the hostname where the callback http
                              server will listen.
        :type redirect_host: string

        :param redirect_port: The port where the authorization request will
                              be redirected. This indicates the port where the
                              callback http server will bind to.
        :type redirect_port: int
        """
        super(OidcAuthorizationCode, self).__init__(
            auth_url=auth_url,
            identity_provider=identity_provider,
            protocol=protocol,
            client_id=client_id,
            client_secret=client_secret,
            access_token_endpoint=access_token_endpoint,
            discovery_endpoint=discovery_endpoint,
            access_token_type=access_token_type,
            **kwargs)
        self.authorization_endpoint = authorization_endpoint
        self.redirect_host = redirect_host
        self.redirect_port = int(redirect_port)
        self.redirect_uri = "http://%s:%s" % (self.redirect_host, self.redirect_port)
        # PKCE material; generated only for public clients, i.e. when no
        # client secret is configured (empty string or None).
        self.code_verifier = None
        self.code_challenge = None
        if client_secret in ['', None]:
            self.code_verifier, self.code_challenge = pkce.generate_pkce_pair()

    def _get_authorization_endpoint(self, session):
        """Get the "authorization_endpoint" for the OpenID Connect flow.

        This method will return the correct authorization endpoint to be used.
        If the user has explicitly passed an authorization_endpoint to the
        constructor that will be returned. If there is no explicit endpoint and
        a discovery url is provided, it will try to get it from the discovery
        document. If nothing is found, an exception will be raised.

        :param session: a session object to send out HTTP requests.
        :type session: keystoneauth1.session.Session

        :return: the endpoint to use
        :rtype: string or None if no endpoint is found
        """
        if self.authorization_endpoint is not None:
            return self.authorization_endpoint

        discovery = self._get_discovery_document(session)
        endpoint = discovery.get("authorization_endpoint")
        if endpoint is None:
            raise exceptions.OidcAuthorizationEndpointNotFound()
        return endpoint

    def _get_authorization_code(self, session):
        """Get an authorization code from the authorization endpoint.

        Opens the user's browser at the authorization URL and waits for the
        redirect carrying the code on the local callback server.

        :param session: a session object to send out HTTP requests.
        :type session: keystoneauth1.session.Session
        """
        payload = {"client_id": self.client_id,
                   "response_type": "code",
                   "scope": self.scope,
                   "redirect_uri": self.redirect_uri}
        # Public clients attach the PKCE challenge (S256 method).
        if self.code_challenge is not None:
            payload.update({
                'code_challenge': self.code_challenge,
                'code_challenge_method': 'S256'
            })
        url = "%s?%s" % (self._get_authorization_endpoint(session),
                         urllib.parse.urlencode(payload))
        webbrowser.open(url, new=1, autoraise=True)
        # The returned state is not needed for this grant; discard it.
        code, _ = _wait_for_code(self.redirect_host, self.redirect_port)
        return code

    def _get_access_token(self, session, payload):
        """Exchange a variety of user supplied values for an access token.

        :param session: a session object to send out HTTP requests.
        :type session: keystoneauth1.session.Session

        :param payload: a dict containing various OpenID Connect values, for
                        example::

                            {'grant_type': 'password', 'username': self.username,
                             'password': self.password, 'scope': self.scope}
        :type payload: dict
        """
        access_token_endpoint = self._get_access_token_endpoint(session)
        # PKCE clients have no secret, so no HTTP basic client auth is sent;
        # confidential clients authenticate with (client_id, client_secret).
        if self.code_verifier is not None:
            client_auth = None
        else:
            client_auth = (self.client_id, self.client_secret)
        op_response = session.post(access_token_endpoint,
                                   requests_auth=client_auth,
                                   data=payload,
                                   authenticated=False)
        access_token = op_response.json()[self.access_token_type]
        return access_token

    def get_payload(self, session):
        """Get an authorization grant for the "authorization_code" grant type.

        :param session: a session object to send out HTTP requests.
        :type session: keystoneauth1.session.Session

        :returns: a python dictionary containing the payload to be exchanged
        :rtype: dict
        """
        code = self._get_authorization_code(session)
        payload = {'redirect_uri': self.redirect_uri, 'code': code,
                   'scope': self.scope}
        # PKCE clients must present the verifier matching the challenge sent
        # with the authorization request.
        if self.code_verifier is not None:
            payload.update({
                'client_id': self.client_id,
                'code_verifier': self.code_verifier
            })
        return payload
class OpenIDConnect(federation.FederationBaseAuth):
"""Implementation for OpenID Connect authentication."""
    @positional(3)
    def __init__(self, auth_url, identity_provider, protocol,
                 redirect_host="localhost", redirect_port=8080,
                 **kwargs):
        """The OpenID Connect plugin expects the following arguments.

        :param redirect_host: The hostname where the authorization request will
                              be redirected. This normally is localhost. This
                              indicates the hostname where the callback http
                              server will listen.
        :type redirect_host: string

        :param redirect_port: The port where the authorization request will
                              be redirected. This indicates the port where the
                              callback http server will bind to.
        :type redirect_port: int
        """
        super(OpenIDConnect, self).__init__(
            auth_url=auth_url,
            identity_provider=identity_provider,
            protocol=protocol,
            **kwargs)
        self.redirect_host = redirect_host
        self.redirect_port = int(redirect_port)
        # URI of the local callback server that captures the OIDC redirect.
        self.redirect_uri = "http://%s:%s" % (self.redirect_host,
                                              self.redirect_port)
    def _get_keystone_token(self, session):
        """Run the out-of-band OIDC flow against Keystone's federation URL.

        Initiates the auth request with ``oscli=1``, opens the resulting
        redirect in a browser, captures code/state on the local callback
        server, and sends them back to finish the authentication.

        :param session: a session object to send out HTTP requests.
        :type session: keystoneauth1.session.Session
        :returns: the HTTP response of the final federation token request.
        """
        # We initiate the auth request to Keystone. We indicate the oscli=1
        # query param so as to start and out-of-bound authentication.
        auth_response = session.post(self.federated_token_url + "?oscli=1",
                                     redirect=False,
                                     authenticated=False)

        # Keystone will return a 302 redirect. We need to point the user to
        # that URL so that the auth request is authorized.
        redirect_url = auth_response.headers["location"]
        webbrowser.open(redirect_url, new=1, autoraise=True)

        code, state = _wait_for_code(self.redirect_host, self.redirect_port)

        # Now that we have the code and state, we can finish the token request
        # by sending them back to the Keystone auth endpoing, finishing the
        # out-of-bound authentication
        params = {
            "code": code,
            "state": state,
            "oscli": 1,
        }
        auth_response = session.get(self.federated_token_url,
                                    params=params,
                                    authenticated=False)
        return auth_response
def get_unscoped_auth_ref(self, session):
"""Authenticate with a Keystone server with OpenID Connect.
This plugin expects that the OpenID Connect Client is the Keystone
server, not the OpenStack or Keystone client. No OpenID credentials
need to be configured.
This plugin initiates an auth request to Keystone (to the federation
endpoint) with an special query parameter (oscli=1), indicating that
the OpenID Connect redirection should not be made to the Keystone
server configured URL, but to http://localhost:8080. This way we can
perform out-of-bound authentication as follows: When the Keystone
server returns the redirection, we intercept it and, instead of
following it, we open a web browser, so that the user can authorize the
request, spawning a web server on locahost:8080. After the user has
authorized the auth request, the web browser is redirected to
http://localhost:8080 where we can get the auth code, and send it back
to Keystone to complete the authentication.
:param session: a session object to send | |
"""A set of WT related Girder tasks."""
from datetime import datetime, timedelta
import os
import shutil
import socket
import json
import time
import tempfile
import docker
import requests
import subprocess
from docker.errors import DockerException
import girder_client
from dateutil.parser import parse
import logging
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from girder_worker.utils import girder_job
from girder_worker.app import app
# from girder_worker.plugins.docker.executor import _pull_image
from .utils import \
HOSTDIR, REGISTRY_USER, REGISTRY_PASS, \
new_user, _safe_mkdir, _get_api_key, \
_get_container_config, _launch_container, _get_user_and_instance, \
_build_image, _recorded_run, DEPLOYMENT
from .lib.dataone.publish import DataONEPublishProvider
from .lib.zenodo import ZenodoPublishProvider
from .constants import GIRDER_API_URL, InstanceStatus, ENABLE_WORKSPACES, \
DEFAULT_USER, DEFAULT_GROUP, MOUNTPOINTS, REPO2DOCKER_VERSION, TaleStatus, \
RunStatus
# Total number of progress-reporting steps for each task type; passed as
# ``total=`` to job_manager.updateProgress() in the tasks below.
CREATE_VOLUME_STEP_TOTAL = 2
LAUNCH_CONTAINER_STEP_TOTAL = 2
UPDATE_CONTAINER_STEP_TOTAL = 2
BUILD_TALE_IMAGE_STEP_TOTAL = 2
IMPORT_TALE_STEP_TOTAL = 2
RECORDED_RUN_STEP_TOTAL = 4
@girder_job(title='Create Tale Data Volume')
@app.task(bind=True)
def create_volume(self, instance_id):
    """Create a mountpoint and compose WT-fs.

    Creates a docker volume for the instance and mounts the girderfs
    filesystems (data, home and — when workspaces are enabled — workspace,
    versions, runs) under its mountpoint.

    :param instance_id: Girder id of the instance to create the volume for.
    :returns: dict describing the created volume (node, mountpoint, volume
        name, session id, instance id), consumed by launch_container.
    """
    user, instance = _get_user_and_instance(self.girder_client, instance_id)
    tale = self.girder_client.get('/tale/{taleId}'.format(**instance))

    vol_name = "%s_%s_%s" % (tale['_id'], user['login'], new_user(6))
    cli = docker.from_env(version='1.28')

    self.job_manager.updateProgress(
        message='Creating volume', total=CREATE_VOLUME_STEP_TOTAL,
        current=1, forceFlush=True)

    mountpoint = _create_docker_volume(cli, vol_name)

    homeDir = self.girder_client.loadOrCreateFolder(
        'Home', user['_id'], 'user')

    # Fix: dropped the unused data_dir/versions_dir/runs_dir/work_dir locals
    # that were computed here but never referenced.
    _make_fuse_dirs(mountpoint, MOUNTPOINTS)

    api_key = _get_api_key(self.girder_client)

    session = _get_session(self.girder_client, tale=tale)
    if session['_id'] is not None:
        _mount_girderfs(mountpoint, 'data', 'wt_dms', session['_id'], api_key, hostns=True)

    # webdav relies on mount.c module, don't use hostns for now
    _mount_girderfs(mountpoint, 'home', 'wt_home', homeDir['_id'], api_key)

    if ENABLE_WORKSPACES:
        _mount_girderfs(mountpoint, 'workspace', 'wt_work', tale['_id'], api_key)
        _mount_girderfs(mountpoint, 'versions', 'wt_versions', tale['_id'], api_key, hostns=True)
        _mount_girderfs(mountpoint, 'runs', 'wt_runs', tale['_id'], api_key, hostns=True)

    self.job_manager.updateProgress(
        message='Volume created', total=CREATE_VOLUME_STEP_TOTAL,
        current=CREATE_VOLUME_STEP_TOTAL, forceFlush=True)
    print("WT Filesystem created successfully.")

    return dict(
        nodeId=cli.info()['Swarm']['NodeID'],
        mountPoint=mountpoint,
        volumeName=vol_name,
        sessionId=session['_id'],
        instanceId=instance_id,
    )
@girder_job(title='Spawn Instance')
@app.task(bind=True)
def launch_container(self, payload):
    """Launch a container using a Tale object.

    Waits for the Tale image build to finish (if needed), starts a docker
    swarm service for the instance and blocks until the service task is
    running.

    :param payload: dict produced by create_volume (volumeName, nodeId,
        instanceId, ...); updated in place with container attributes.
    :returns: the augmented payload, including the service name.
    :raises ValueError: if the service fails/was rejected or does not start
        within the timeout.
    """
    user, instance = _get_user_and_instance(
        self.girder_client, payload['instanceId'])
    tale = self.girder_client.get('/tale/{taleId}'.format(**instance))

    self.job_manager.updateProgress(
        message='Starting container', total=LAUNCH_CONTAINER_STEP_TOTAL,
        current=1, forceFlush=True)

    print("Launching container for a Tale...")
    if 'imageInfo' not in tale:
        # Wait for image to be built
        tic = time.time()
        timeout = 180.0
        time_interval = 5

        while time.time() - tic < timeout:
            tale = self.girder_client.get('/tale/{taleId}'.format(**instance))
            if 'imageInfo' in tale and 'digest' in tale['imageInfo']:
                break
            msg = f"Waiting for image build to complete. ({time_interval}s)"
            logging.info(msg)
            print(msg)
            # Fix: sleep for time_interval (the advertised wait) instead of a
            # hard-coded 5 s that would silently diverge if the interval changes.
            time.sleep(time_interval)

    # _pull_image() #FIXME
    container_config = _get_container_config(self.girder_client, tale)
    service, attrs = _launch_container(
        payload['volumeName'], payload['nodeId'],
        container_config,
        tale_id=tale['_id'], instance_id=payload['instanceId'])
    print(
        f"Started a container using volume: {payload['volumeName']} "
        f"on node: {payload['nodeId']}"
    )

    # wait until task is started
    tic = time.time()
    timeout = 300.0
    started = False
    print("Waiting for the environment to be accessible...")
    while time.time() - tic < timeout:
        try:
            status = service.tasks()[0]['Status']

            if status['State'] in {"failed", "rejected"}:
                raise ValueError("Failed to start environment: %s" % status['Err'])
            elif status['State'] == "running":
                started = True
                break

        except IndexError:
            # No task scheduled yet; keep polling.
            started = False

        time.sleep(0.2)

    if not started:
        raise ValueError("Tale did not start before timeout exceeded")
    print("Environment is up and running.")

    self.job_manager.updateProgress(
        message='Container started', total=LAUNCH_CONTAINER_STEP_TOTAL,
        current=LAUNCH_CONTAINER_STEP_TOTAL, forceFlush=True)

    payload.update(attrs)
    payload['name'] = service.name
    return payload
@girder_job(title='Update Instance')
@app.task(bind=True)
def update_container(task, instanceId, digest=None):
    """Restart a running Tale instance's swarm service with a new image.

    No-op (returns None) when the instance has no container info or the
    docker service is gone; short-circuits when the service already runs
    *digest*.

    :param task: the bound celery task (provides girder_client/job_manager).
    :param instanceId: Girder id of the instance to update.
    :param digest: image digest the service should be updated to.
    :returns: ``{'image_digest': digest}`` on success or no-change.
    :raises RuntimeError: if the update pauses/fails, times out, or the
        task is cancelled.
    """
    user, instance = _get_user_and_instance(task.girder_client, instanceId)

    cli = docker.from_env(version='1.28')
    if 'containerInfo' not in instance:
        return
    containerInfo = instance['containerInfo']  # VALIDATE
    try:
        service = cli.services.get(containerInfo['name'])
    except docker.errors.NotFound:
        logging.info("Service not present [%s].", containerInfo['name'])
        return

    task.job_manager.updateProgress(
        message='Restarting the Tale with a new image',
        total=UPDATE_CONTAINER_STEP_TOTAL,
        current=1, forceFlush=True)

    # Don't try to restart if the image hasn't changed
    try:
        previous_image = service.attrs['Spec']['TaskTemplate']['ContainerSpec']['Image']
    except KeyError:
        previous_image = ''

    if (previous_image == digest):
        task.job_manager.updateProgress(
            message='Image has not changed',
            total=UPDATE_CONTAINER_STEP_TOTAL,
            current=UPDATE_CONTAINER_STEP_TOTAL)
        return {'image_digest': digest}

    try:
        # NOTE: Only "image" passed currently, but this can be easily extended
        logging.info("Restarting container [%s].", service.name)
        service.update(image=digest)
        logging.info("Restart command has been sent to Container [%s].",
                     service.name)
    except Exception as e:
        # NOTE(review): failure to send the update is only logged; the poll
        # loop below still runs and will eventually time out — confirm this
        # best-effort behavior is intended.
        logging.error("Unable to send restart command to container [%s]: %s",
                      service.id, e)

    updated = False
    expired = False
    # Poll the service's rolling-update status until it completes, the
    # 3-minute deadline passes, or the celery task is cancelled.
    timeout = datetime.now() + timedelta(minutes=3)
    while not (updated or expired or task.canceled):
        service = cli.services.get(containerInfo['name'])

        try:
            state = service.attrs['UpdateStatus']['State']
        except KeyError:
            state = ''

        if state == 'paused':
            raise RuntimeError(
                'Restarting the Tale failed with "{}"'.format(
                    service.attrs['UpdateStatus']['Message'])
            )

        updated = state == 'completed'
        expired = datetime.now() > timeout
        time.sleep(1.0)

    if task.canceled:
        raise RuntimeError('Tale restart cancelled')
    elif expired:
        raise RuntimeError('Tale update timed out')

    task.job_manager.updateProgress(
        message='Tale restarted with the new image',
        total=UPDATE_CONTAINER_STEP_TOTAL,
        current=UPDATE_CONTAINER_STEP_TOTAL)

    return {'image_digest': digest}
@girder_job(title='Shutdown Instance')
@app.task(bind=True)
def shutdown_container(self, instanceId):
    """Shutdown a running Tale.

    Looks up the instance's swarm service and removes it; a no-op when the
    instance has no container info or the service no longer exists.
    """
    user, instance = _get_user_and_instance(self.girder_client, instanceId)

    cli = docker.from_env(version='1.28')
    if 'containerInfo' not in instance:
        return
    containerInfo = instance['containerInfo']  # VALIDATE

    service_name = containerInfo['name']
    try:
        service = cli.services.get(service_name)
    except docker.errors.NotFound:
        logging.info("Service not present [%s].", service_name)
        return

    try:
        logging.info("Releasing container [%s].", service.name)
        service.remove()
        logging.info("Container [%s] has been released.", service.name)
    except Exception as e:
        # Best-effort removal: log and carry on.
        logging.error("Unable to release container [%s]: %s", service.id, e)
@girder_job(title='Remove Tale Data Volume')
@app.task(bind=True)
def remove_volume(self, instanceId):
    """Unmount WT-fs and remove mountpoint.

    Unmounts every girderfs mountpoint of the instance, deletes the DMS
    session and removes the docker volume. Every step is best-effort:
    failures are logged and the remaining cleanup continues.
    """
    user, instance = _get_user_and_instance(self.girder_client, instanceId)
    if 'containerInfo' not in instance:
        return
    containerInfo = instance['containerInfo']  # VALIDATE

    cli = docker.from_env(version='1.28')
    # TODO: _remove_volumes()
    for suffix in MOUNTPOINTS:
        dest = os.path.join(containerInfo['mountPoint'], suffix)
        logging.info("Unmounting %s", dest)
        # NOTE(review): shell=True with an interpolated path — mountPoint is
        # presumed to be server-controlled, not user input; confirm.
        subprocess.call("umount %s" % dest, shell=True)
    logging.info("Unmounting licenses")
    subprocess.call("umount /licenses", shell=True)

    try:
        self.girder_client.delete('/dm/session/{sessionId}'.format(**instance))
    except Exception as e:
        logging.error("Unable to remove session. %s", e)
        pass

    try:
        volume = cli.volumes.get(containerInfo['volumeName'])
    except docker.errors.NotFound:
        logging.info("Volume not present [%s].", containerInfo['volumeName'])
        return
    try:
        logging.info("Removing volume: %s", volume.id)
        volume.remove()
    except Exception as e:
        logging.error("Unable to remove volume [%s]: %s", volume.id, e)
        pass
@girder_job(title='Build Tale Image')
@app.task(bind=True)
def build_tale_image(task, tale_id, force=False):
    """
    Build docker image from Tale workspace using repo2docker and push to Whole Tale registry.

    Waits (up to 5 minutes) for the Tale to leave the preparing state, skips
    the build when the workspace is unchanged since the last build and the
    base image did not change, otherwise copies the workspace to a temp dir,
    runs repo2docker over it and pushes the tagged result to the registry.

    :param task: bound celery task (provides girder_client and job_manager)
    :param tale_id: id of the Tale to build
    :param force: when True, rebuild even if the workspace is unmodified
    :returns: dict with image digest, repo2docker version and build time
    """
    logging.info('Building image for Tale %s', tale_id)
    task.job_manager.updateProgress(
        message='Building image', total=BUILD_TALE_IMAGE_STEP_TOTAL,
        current=1, forceFlush=True)
    tic = time.time()
    tale = task.girder_client.get('/tale/%s' % tale_id)
    # Poll until the Tale is READY; bail out on ERROR or after 5 minutes.
    while tale["status"] != TaleStatus.READY:
        time.sleep(2)
        tale = task.girder_client.get('/tale/{_id}'.format(**tale))
        if tale["status"] == TaleStatus.ERROR:
            raise ValueError("Cannot build image for a Tale in error state.")
        if time.time() - tic > 5 * 60.0:
            raise ValueError("Cannot build image. Tale preparing for more than 5 minutes.")
    last_build_time = -1
    try:
        last_build_time = tale['imageInfo']['last_build']
    except KeyError:
        # Never built before; keep the -1 sentinel.
        pass
    logging.info('Last build time {}'.format(last_build_time))
    image_changed = tale["imageId"] != tale["imageInfo"].get("imageId")
    if image_changed:
        logging.info("Base image has changed. Forcing rebuild.")
        force = True
    # TODO: Move this check to the model?
    # Only rebuild if files have changed since last build or base image was changed
    if last_build_time > 0:
        workspace_folder = task.girder_client.get('/folder/{workspaceId}'.format(**tale))
        workspace_mtime = int(parse(workspace_folder['updated']).strftime('%s'))
        if not force and last_build_time > 0 and workspace_mtime < last_build_time:
            print('Workspace not modified since last build. Skipping.')
            task.job_manager.updateProgress(
                message='Workspace not modified, no need to build', total=BUILD_TALE_IMAGE_STEP_TOTAL,
                current=BUILD_TALE_IMAGE_STEP_TOTAL, forceFlush=True)
            return {
                'image_digest': tale['imageInfo']['digest'],
                'repo2docker_version': tale['imageInfo']['repo2docker_version'],
                'last_build': last_build_time
            }
    # Workspace modified so try to build.
    # NOTE(review): 'except Exception' below precedes the more specific
    # KeyError / girder_client.HttpError handlers, which are therefore
    # unreachable dead code -- the specific handlers should come first.
    try:
        temp_dir = tempfile.mkdtemp(dir=HOSTDIR + '/tmp')
        logging.info('Copying workspace contents to %s (%s)', temp_dir, tale_id)
        workspace = task.girder_client.get('/folder/{workspaceId}'.format(**tale))
        task.girder_client.downloadFolderRecursive(workspace['_id'], temp_dir)
    except Exception as e:
        raise ValueError('Error accessing Girder: {}'.format(e))
    except KeyError:
        logging.info('KeyError')
        pass  # no workspace folderId
    except girder_client.HttpError:
        # NOTE(review): logging.warn is a deprecated alias of logging.warning.
        logging.warn("Workspace folder not found for tale: %s", tale_id)
        pass
    cli = docker.from_env(version='1.28')
    container_config = _get_container_config(task.girder_client, tale)
    # Ensure that we have proper version of r2d
    try:
        cli.images.pull(container_config.repo2docker_version)
    except docker.errors.NotFound:
        raise ValueError(
            f"Requested r2d image '{container_config.repo2docker_version}' not found."
        )
    # NOTE(review): '<PASSWORD>' is a redacted-credentials placeholder (not
    # valid Python) -- restore the real registry secret reference here.
    cli.login(username=REGISTRY_USER, password=<PASSWORD>,
              registry=DEPLOYMENT.registry_url)
    # Use the current time as the image build time and tag
    build_time = int(time.time())
    tag = '{}/{}/{}'.format(urlparse(DEPLOYMENT.registry_url).netloc,
                            tale_id, str(build_time))
    # Image is required for config information
    image = task.girder_client.get('/image/%s' % tale['imageId'])
    # Write the environment.json to the workspace
    with open(os.path.join(temp_dir, 'environment.json'), 'w') as fp:
        json.dump(image, fp)
    # Build the image from the workspace
    ret = _build_image(
        cli, tale_id, image, tag, temp_dir, container_config.repo2docker_version
    )
    # Remove the temporary directory whether the build succeeded or not
    shutil.rmtree(temp_dir, ignore_errors=True)
    if ret['StatusCode'] != 0:
        # repo2docker build failed
        raise ValueError('Error building tale {}'.format(tale_id))
    # If the repo2docker build succeeded, push the image to our registry
    apicli = docker.APIClient(base_url='unix://var/run/docker.sock')
    # NOTE(review): second redacted '<PASSWORD>' placeholder -- see above.
    apicli.login(username=REGISTRY_USER, password=<PASSWORD>,
                 registry=DEPLOYMENT.registry_url)
    # remove clone
    # NOTE(review): temp_dir was already removed above; this second rmtree is
    # a harmless no-op and could be dropped.
    shutil.rmtree(temp_dir, ignore_errors=True)
    for line in apicli.push(tag, stream=True):
        print(line.decode('utf-8'))
    # TODO: if push succeeded, delete old image?
    # Get the built image digest
    image = cli.images.get(tag)
    digest = next((_ for _ in image.attrs['RepoDigests']
                   if _.startswith(urlparse(DEPLOYMENT.registry_url).netloc)), None)
    task.job_manager.updateProgress(
        message='Image build succeeded', total=BUILD_TALE_IMAGE_STEP_TOTAL,
        current=BUILD_TALE_IMAGE_STEP_TOTAL, forceFlush=True)
    logging.info('Successfully built image %s' % image.attrs['RepoDigests'][0])
    # Image digest used by updateBuildStatus handler
    return {
        'image_digest': digest,
        'repo2docker_version': container_config.repo2docker_version,
        'last_build': build_time
    }
@girder_job(title='Publish Tale')
@app.task(bind=True)
def publish(self,
tale_id,
token,
version_id,
repository=None,
draft=False):
"""
Publish a tale.
:param tale_id: The tale id
:param token: An access token for a given repository.
:param version_id: The version of the Tale being published
:param repository: Target repository.
:param draft: If True, don't mint DOI.
:type tale_id: str
| |
<gh_stars>0
import functools
import itertools
from dataclasses import dataclass
from operator import mul
from typing import Callable, Tuple, List, Optional, Union, Sequence
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
from scipy.linalg import logm, pinv, eigh
import forest_benchmarking.distance_measures as dm
import forest_benchmarking.operator_estimation as est
from forest_benchmarking.superop_conversion import vec, unvec
from forest_benchmarking.utils import prepare_prod_sic_state, n_qubit_pauli_basis, partial_trace
from pyquil import Program
from pyquil.api import QuantumComputer
from pyquil.operator_estimation import ExperimentSetting, \
TomographyExperiment as PyQuilTomographyExperiment, ExperimentResult, SIC0, SIC1, SIC2, SIC3, \
plusX, minusX, plusY, minusY, plusZ, minusZ, TensorProductState, zeros_state
from pyquil.paulis import sI, sX, sY, sZ, PauliSum, PauliTerm, is_identity
from pyquil.unitary_tools import lifted_pauli, lifted_state_operator
MAXITER = "maxiter"
OPTIMAL = "optimal"
FRO = 'fro'
@dataclass
class TomographyExperiment:
    """
    A description of tomography experiments, i.e. preparation then operations then measurements, but not
    the results of experiments.
    """
    # NOTE(review): typed List[str] but described as preparation operations --
    # confirm the intended element type against callers.
    in_ops: List[str]
    """The (optional) state preparation operations that precede execution of the `program`"""
    program: Program
    """The pyquil Program to perform tomography on"""
    out_ops: List[PauliTerm]
    """The output Pauli operators measured after the action (by conjugation in the Heisenberg picture) of the `program'
    on the `in_op`"""
def _state_tomo_settings(qubits: Sequence[int]):
    """Yield one ExperimentSetting per element of the n-qubit Pauli basis.

    The input state is always |0...0> on ``qubits``; the output operator
    ranges over every tensor product drawn from {I, X, Y, Z}.

    Helper for generate_state_tomography_experiment.

    :param qubits: The qubits to tomographize.
    """
    single_qubit_paulis = (sI, sX, sY, sZ)
    for pauli_choice in itertools.product(single_qubit_paulis, repeat=len(qubits)):
        observable = sI()
        for make_pauli, qubit in zip(pauli_choice, qubits):
            observable = observable * make_pauli(qubit)
        yield ExperimentSetting(
            in_state=zeros_state(qubits),
            out_operator=observable,
        )
def generate_state_tomography_experiment(program: Program, qubits: List[int]):
    """Generate a (pyQuil) TomographyExperiment with the settings needed to
    characterize the quantum state prepared by ``program``.

    To collect data, try::

        from pyquil.operator_estimation import measure_observables
        results = list(measure_observables(qc=qc, tomo_experiment=experiment, n_shots=100_000))

    :param program: The program to prepare a state to tomographize
    :param qubits: The qubits to tomographize
    """
    all_settings = [setting for setting in _state_tomo_settings(qubits)]
    return PyQuilTomographyExperiment(program=program, qubits=qubits,
                                      settings=all_settings)
def _sic_process_tomo_settings(qubits: Sequence[int]):
    """Yield settings pairing each SIC-basis product preparation with each
    non-identity I/X/Y/Z product observable.

    Helper for generate_process_tomography_experiment.

    :param qubits: The qubits to tomographize.
    """
    n = len(qubits)
    for prep_choice in itertools.product((SIC0, SIC1, SIC2, SIC3), repeat=n):
        preparation = TensorProductState()
        for make_state, qubit in zip(prep_choice, qubits):
            preparation = preparation * make_state(qubit)
        for meas_choice in itertools.product((sI, sX, sY, sZ), repeat=n):
            observable = sI()
            for make_pauli, qubit in zip(meas_choice, qubits):
                observable = observable * make_pauli(qubit)
            if is_identity(observable):
                # The identity observable carries no information; skip it.
                continue
            yield ExperimentSetting(
                in_state=preparation,
                out_operator=observable,
            )
def _pauli_process_tomo_settings(qubits):
    """Yield settings pairing each +/-X, +/-Y, +/-Z product preparation with
    each non-identity I/X/Y/Z product observable.

    Helper for generate_process_tomography_experiment.

    :param qubits: The qubits to tomographize.
    """
    n = len(qubits)
    eigenstates = (plusX, minusX, plusY, minusY, plusZ, minusZ)
    for prep_choice in itertools.product(eigenstates, repeat=n):
        preparation = TensorProductState()
        for make_state, qubit in zip(prep_choice, qubits):
            preparation = preparation * make_state(qubit)
        for meas_choice in itertools.product((sI, sX, sY, sZ), repeat=n):
            observable = sI()
            for make_pauli, qubit in zip(meas_choice, qubits):
                observable = observable * make_pauli(qubit)
            if is_identity(observable):
                # The identity observable carries no information; skip it.
                continue
            yield ExperimentSetting(
                in_state=preparation,
                out_operator=observable,
            )
def generate_process_tomography_experiment(program: Program, qubits: List[int], in_basis='sic'):
    """
    Generate a (pyQuil) TomographyExperiment containing the experiment settings required to
    characterize a quantum process.

    To collect data, try::

        from pyquil.operator_estimation import measure_observables
        results = list(measure_observables(qc=qc, tomo_experiment=experiment, n_shots=100_000))

    :param program: The program describing the process to tomographize.
    :param qubits: The qubits to tomographize.
    :param in_basis: A string identifying the input basis. Either "sic" or "pauli". SIC requires
        a smaller number of experiment settings to be run.
    """
    settings_generators = {
        'sic': _sic_process_tomo_settings,
        'pauli': _pauli_process_tomo_settings,
    }
    if in_basis not in settings_generators:
        raise ValueError(f"Unknown basis {in_basis}")
    settings = list(settings_generators[in_basis](qubits))
    return PyQuilTomographyExperiment(settings=settings, program=program, qubits=qubits)
@dataclass
class TomographyData:
    """Experimental data from a tomography experiment.

    For state tomography ``in_ops`` is None; for process tomography each
    preparation in ``in_ops`` is paired with each observable in ``out_ops``.
    """
    in_ops: Optional[List[str]]
    """The (optional) state preparation operations that precede execution of the `program`"""
    program: Program
    """The pyquil Program to perform tomography on"""
    out_ops: List[PauliTerm]
    """The output Pauli operators measured after the action (by conjugation in the Heisenberg picture) of the `program'
    on the `in_op`"""
    dimension: int
    """Dimension of the Hilbert space"""
    number_qubits: int
    """number of qubits"""
    expectations: List[float]
    """expectation values as reported from the QPU"""
    variances: List[float]
    """variances associated with the `expectation`"""
    counts: List[int]
    """number of shots used to calculate the `expectation`"""
def shim_pyquil_results_to_TomographyData(program, qubits, results: List[ExperimentResult]):
    """Adapt a list of pyQuil ExperimentResults to the local TomographyData type.

    NOTE(review): results[0] is dropped in every field below -- presumably the
    identity-observable setting; confirm against how the results were produced.
    """
    return TomographyData(
        # NOTE(review): reads `setting.in_operator`; some pyquil versions name
        # this field `in_state` -- verify against the pinned pyquil version.
        in_ops=[r.setting.in_operator for r in results[1:]],
        out_ops=[r.setting.out_operator for r in results[1:]],
        expectations=[r.expectation for r in results[1:]],
        variances=[r.stddev ** 2 for r in results[1:]],  # variance = stddev^2
        program=program,
        number_qubits=len(qubits),
        dimension=2 ** len(qubits),
        counts=[r.total_counts for r in results[1:]],
    )
def acquire_tomography_data(experiment: TomographyExperiment, qc: QuantumComputer, var: float = 0.01,
                            symmetrize=False) -> TomographyData:
    """
    Acquire tomographic data used to estimate a quantum state or process. If the experiment has no input operators
    then state tomography is assumed.

    :param symmetrize: dictates whether to symmetrize readout when estimating the Pauli expectations.
    :param experiment: TomographyExperiment for the desired state or process
    :param qc: quantum device used to collect data
    :param float var: maximum tolerable variance per observable
    :return: The "TomographyData" corresponding to the TomographyExperiment
    """
    # get qubit information
    qubits = experiment.program.get_qubits()
    n_qubits = len(qubits)
    dimension = 2 ** len(qubits)
    expectations = []
    variances = []
    counts = []
    if experiment.in_ops is None:
        # state tomography: measure each output observable on the bare program
        for op in experiment.out_ops:
            # data acquisition
            expectation, variance, count = est.estimate_locally_commuting_operator(experiment.program, PauliSum([op]),
                                                                                   var, qc, symmetrize=symmetrize)
            expectations.append(np.real(expectation[0]))
            variances.append(variance[0, 0].real)
            counts.append(count)
    else:
        # process tomography: prepend each preparation, then measure each
        # observable (Cartesian product of in_ops x out_ops)
        for in_op in experiment.in_ops:
            for op in experiment.out_ops:
                # data acquisition
                tot_prog = prepare_prod_sic_state(in_op) + experiment.program
                expectation, variance, count = est.estimate_locally_commuting_operator(tot_prog, PauliSum([op]), var,
                                                                                       qc, symmetrize=symmetrize)
                expectations.append(np.real(expectation[0]))
                variances.append(variance[0, 0].real)
                counts.append(count)
    exp_data = TomographyData(
        in_ops=experiment.in_ops,
        program=experiment.program,
        out_ops=experiment.out_ops,
        dimension=dimension,
        number_qubits=n_qubits,
        expectations=expectations,
        variances=variances,
        counts=counts
    )
    return exp_data
@dataclass
class StateTomographyEstimate:
    """State estimate from a tomography experiment, with the estimator's
    hyperparameters recorded alongside the point estimate."""
    state_point_est: np.ndarray
    """A point estimate of the quantum state rho output from the program being tomographed"""
    type: str
    """Type of estimator used e.g. 'linear inversion' or 'hedged_MLE'"""
    beta: Optional[float]
    """The Hedging parameter"""
    entropy: Optional[float]
    """The entropy penalty parameter"""
    dilution: Optional[float]
    """A dilution parameter"""
    loglike: Optional[float]
    """The log likelihood at the current estimate"""
@dataclass
class ProcessTomographyEstimate:
    """Process estimate from a tomography experiment."""
    process_choi_est: np.ndarray
    """A point estimate of the quantum process being tomographed represented as a choi matrix"""
    type: str
    """Type of estimator used e.g. 'pgdb'"""
@dataclass
class TomographyEstimate:
    """State/Process estimate from tomography experiment"""
    in_ops: Optional[List[str]]
    """The (optional) state preparation operations that precede execution of the `program`"""
    program: Program
    """The pyquil Program to perform tomography on"""
    out_ops: List[PauliTerm]
    """The output Pauli operators measured after the action (by conjugation in the Heisenberg picture) of the `program'
    on the `in_op`"""
    dimension: int
    """Dimension of the Hilbert space"""
    number_qubits: int
    """number of qubits"""
    expectations: List[float]
    """expectation values as reported from the QPU"""
    variances: List[float]
    """variances associated with the `expectation`"""
    estimate: Union[StateTomographyEstimate, ProcessTomographyEstimate]
    """State or process estimate from tomography experiment"""
def linear_inv_state_estimate(results: List[ExperimentResult],
                              qubits: List[int]) -> np.ndarray:
    """
    Estimate a quantum state using linear inversion.

    This is the simplest state tomography post processing. To use this function,
    collect state tomography data with :py:func:`generate_state_tomography_experiment`
    and :py:func:`~pyquil.operator_estimation.measure_observables`.

    For more details on this post-processing technique,
    see https://en.wikipedia.org/wiki/Quantum_tomography#Linear_inversion or
    see section 3.4 of
    Initialization and characterization of open quantum systems
    <NAME>, PhD thesis from University of Waterloo, (2015).
    http://hdl.handle.net/10012/9557

    :param results: A tomographically complete list of results.
    :param qubits: All qubits that were tomographized. This specifies the order in
        which qubits will be kron'ed together.
    :return: A point estimate of the quantum state rho.
    """
    # One row per measured observable: the conjugated, vectorized Pauli matrix.
    rows = []
    for result in results:
        pauli_matrix = lifted_pauli(result.setting.out_operator, qubits=qubits)
        rows.append(vec(pauli_matrix).T.conj())
    measurement_matrix = np.vstack(rows)
    expectations = np.array([result.expectation for result in results])
    # Solve the (generally overdetermined) linear system via pseudo-inverse.
    rho_vec = pinv(measurement_matrix) @ expectations
    return unvec(rho_vec)
def construct_projection_operators_on_n_qubits(num_qubits) -> List[np.ndarray]:
    """Return the pair of effects (I + P)/2 and (I - P)/2 for every
    non-identity element P of the n-qubit Pauli basis.

    :param num_qubits: number of qubits the effects act on
    :return: list of 2 * (4**n - 1) matrices of shape (2**n, 2**n)
    """
    # Identity proportional to the size of the Hilbert space
    identity = np.eye(2 ** num_qubits, 2 ** num_qubits)
    pauli_ops = iter(n_qubit_pauli_basis(num_qubits).ops)
    next(pauli_ops)  # drop the identity element
    effects = []
    for operator in pauli_ops:
        # NOTE(review): original carried a "might need to change for >1Q" note;
        # confirm multi-qubit behavior before relying on it.
        effects.append((identity + operator) / 2)
        effects.append((identity - operator) / 2)
    return effects
def iterative_mle_state_estimate(results: List[ExperimentResult], qubits: List[int], dilution=.005,
entropy_penalty=0.0, beta=0.0, tol=1e-9, maxiter=100_000) \
-> TomographyEstimate:
"""
Given tomography data, use one of three iterative algorithms to return an estimate of the
state.
"... [The iterative] algorithm is characterized by a very high convergence rate and features a
simple adaptive procedure that ensures likelihood increase in every iteration and
convergence to the maximum-likelihood state." [DIMLE1]
For MLE only option, set: entropy_penalty=0.0 and beta=0.0.
For MLE + maximum entropy, set: entropy_penalty=(non-zero) and beta=0.0.
For MLE + hedging, set: entropy_penalty=0.0 and beta=(non-zero).
The basic algorithm is due to
[DIMLE1] Diluted maximum-likelihood algorithm for quantum tomography
Řeháček et al., PRA 75, 042108 (2007)
https://doi.org/10.1103/PhysRevA.75.042108
with improvements from
[DIMLE2] Quantum-State Reconstruction by Maximizing Likelihood and Entropy
Teo | |
use_slave=False):
return _instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join, use_slave=use_slave)
def _instance_get_by_uuid(context, uuid, session=None,
                          columns_to_join=None, use_slave=False):
    """Fetch a single instance by UUID or raise InstanceNotFound."""
    query = _build_instance_get(context, session=session,
                                columns_to_join=columns_to_join,
                                use_slave=use_slave)
    result = query.filter_by(uuid=uuid).first()
    if not result:
        raise exception.InstanceNotFound(instance_id=uuid)
    return result
@require_context
def instance_get(context, instance_id, columns_to_join=None):
    """Fetch an instance by its integer primary key.

    :param context: security context (project-scoped for non-admins)
    :param instance_id: integer id of the instance
    :param columns_to_join: optional list of columns to eagerly join
    :raises InstanceNotFound: no instance with that id is visible
    :raises InvalidID: the id is malformed/too large for the db engine
    """
    try:
        result = _build_instance_get(context, columns_to_join=columns_to_join
                                     ).filter_by(id=instance_id).first()
        if not result:
            raise exception.InstanceNotFound(instance_id=instance_id)
        return result
    except db_exc.DBError:
        # NOTE(sdague): catch all in case the db engine chokes on the
        # id because it's too long of an int to store.
        msg = _("Invalid instance id %s in request") % instance_id
        # LOG.warn is a deprecated alias; use LOG.warning.
        LOG.warning(msg)
        raise exception.InvalidID(id=instance_id)
def _build_instance_get(context, session=None,
                        columns_to_join=None, use_slave=False):
    """Return the base Instance query with eager-load options applied.

    info_cache and security_groups (with rules) are always joined; metadata
    and system_metadata are joined by default unless the caller names other
    columns.  Columns under 'extra.' are undeferred rather than joined.
    """
    query = model_query(context, models.Instance, session=session,
                        project_only=True, use_slave=use_slave).\
            options(joinedload_all('security_groups.rules')).\
            options(joinedload('info_cache'))
    if columns_to_join is None:
        columns_to_join = ['metadata', 'system_metadata']
    for column in columns_to_join:
        if column in ['info_cache', 'security_groups']:
            # Already always joined above
            continue
        if 'extra.' in column:
            # 'extra.' columns are deferred by default; force-load them.
            query = query.options(undefer(column))
        else:
            query = query.options(joinedload(column))
    # NOTE(alaski) Stop lazy loading of columns not needed.
    for col in ['metadata', 'system_metadata']:
        if col not in columns_to_join:
            query = query.options(noload(col))
    return query
def _instances_fill_metadata(context, instances,
                             manual_joins=None, use_slave=False):
    """Selectively fill instances with manually-joined metadata. Note that
    instance will be converted to a dict.

    :param context: security context
    :param instances: list of instances to fill
    :param manual_joins: list of tables to manually join (can be any
                         combination of 'metadata' and 'system_metadata' or
                         None to take the default of both)
    """
    uuids = [instance['uuid'] for instance in instances]
    if manual_joins is None:
        manual_joins = ['metadata', 'system_metadata']

    def _group_by_instance(rows):
        # Bucket rows by their owning instance uuid.
        grouped = collections.defaultdict(list)
        for row in rows:
            grouped[row['instance_uuid']].append(row)
        return grouped

    meta = collections.defaultdict(list)
    if 'metadata' in manual_joins:
        meta = _group_by_instance(
            _instance_metadata_get_multi(context, uuids, use_slave=use_slave))
    sys_meta = collections.defaultdict(list)
    if 'system_metadata' in manual_joins:
        sys_meta = _group_by_instance(
            _instance_system_metadata_get_multi(context, uuids,
                                                use_slave=use_slave))
    pcidevs = collections.defaultdict(list)
    if 'pci_devices' in manual_joins:
        pcidevs = _group_by_instance(_instance_pcidevs_get_multi(context, uuids))

    want_pci = 'pci_devices' in manual_joins
    filled_instances = []
    for instance in instances:
        # Convert the model object to a plain dict (py2 codebase: iteritems).
        filled = dict(instance.iteritems())
        filled['system_metadata'] = sys_meta[filled['uuid']]
        filled['metadata'] = meta[filled['uuid']]
        if want_pci:
            filled['pci_devices'] = pcidevs[filled['uuid']]
        filled_instances.append(filled)
    return filled_instances
def _manual_join_columns(columns_to_join):
    """Separate manually joined columns from columns_to_join.

    'metadata', 'system_metadata' and 'pci_devices' cannot be handled with
    joinedload; any of them present in ``columns_to_join`` is moved into a
    ``manual_joins`` list consumed by _instances_fill_metadata.  The formal
    parameter is copied, never modified.

    :param columns_to_join: List of columns to join in a model query.
    :return: tuple of (manual_joins, columns_to_join)
    """
    MANUAL_COLUMNS = ('metadata', 'system_metadata', 'pci_devices')
    remaining = copy.copy(columns_to_join)
    manual_joins = [column for column in MANUAL_COLUMNS if column in remaining]
    for column in manual_joins:
        remaining.remove(column)
    return manual_joins, remaining
@require_context
def instance_get_all(context, columns_to_join=None):
    """Return all instances visible in this context, with metadata and
    system_metadata manually joined onto each returned dict.

    Non-admin contexts are restricted to their own project (or to their own
    user when the context carries no project).
    """
    if columns_to_join is None:
        columns_to_join_new = ['info_cache', 'security_groups']
        manual_joins = ['metadata', 'system_metadata']
    else:
        manual_joins, columns_to_join_new = (
            _manual_join_columns(columns_to_join))
    query = model_query(context, models.Instance)
    for column in columns_to_join_new:
        query = query.options(joinedload(column))
    if not context.is_admin:
        # If we're not admin context, add appropriate filter..
        if context.project_id:
            query = query.filter_by(project_id=context.project_id)
        else:
            query = query.filter_by(user_id=context.user_id)
    instances = query.all()
    return _instances_fill_metadata(context, instances, manual_joins)
@require_context
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
                                limit=None, marker=None, columns_to_join=None,
                                use_slave=False):
    """Return instances matching all filters sorted by the primary key.

    Thin wrapper over instance_get_all_by_filters_sort that lifts the single
    sort key/direction into the multi-key API; see that function for the
    filter semantics.
    """
    return instance_get_all_by_filters_sort(
        context, filters, limit=limit, marker=marker,
        columns_to_join=columns_to_join, use_slave=use_slave,
        sort_keys=[sort_key], sort_dirs=[sort_dir])
@require_context
def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
                                     columns_to_join=None, use_slave=False,
                                     sort_keys=None, sort_dirs=None):
    """Return instances that match all filters sorted by the given keys.
    Deleted instances will be returned by default, unless there's a filter that
    says otherwise.

    Depending on the name of a filter, matching for that filter is
    performed using either exact matching or as regular expression
    matching. Exact matching is applied for the following filters::

    | ['project_id', 'user_id', 'image_ref',
    |  'vm_state', 'instance_type_id', 'uuid',
    |  'metadata', 'host', 'system_metadata']

    A third type of filter (also using exact matching), filters
    based on instance metadata tags when supplied under a special
    key named 'filter'::

    | filters = {
    |     'filter': [
    |         {'name': 'tag-key', 'value': '<metakey>'},
    |         {'name': 'tag-value', 'value': '<metaval>'},
    |         {'name': 'tag:<metakey>', 'value': '<metaval>'}
    |     ]
    | }

    Special keys are used to tweak the query further::

    | 'changes-since' - only return instances updated after
    | 'deleted' - only return (or exclude) deleted instances
    | 'soft_deleted' - modify behavior of 'deleted' to either
    |                  include or exclude instances whose
    |                  vm_state is SOFT_DELETED.

    A fourth type of filter (also using exact matching), filters
    based on instance tags (not metadata tags). There are two types
    of these tags:

    `tag` -- One or more strings that will be used to filter results
            in an AND expression.

    `tag-any` -- One or more strings that will be used to filter results in
            an OR expression.

    Tags should be represented as list::

    | filters = {
    |     'tag': [some-tag, some-another-tag],
    |     'tag-any: [some-any-tag, some-another-any-tag]
    | }
    """
    # NOTE(mriedem): If the limit is 0 there is no point in even going
    # to the database since nothing is going to be returned anyway.
    if limit == 0:
        return []
    sort_keys, sort_dirs = process_sort_params(sort_keys,
                                               sort_dirs,
                                               default_dir='desc')
    if CONF.database.slave_connection == '':
        use_slave = False
    session = get_session(use_slave=use_slave)
    if columns_to_join is None:
        columns_to_join_new = ['info_cache', 'security_groups']
        manual_joins = ['metadata', 'system_metadata']
    else:
        manual_joins, columns_to_join_new = (
            _manual_join_columns(columns_to_join))
    query_prefix = session.query(models.Instance)
    for column in columns_to_join_new:
        if 'extra.' in column:
            # 'extra.' columns are deferred by default; force-load them.
            query_prefix = query_prefix.options(undefer(column))
        else:
            query_prefix = query_prefix.options(joinedload(column))
    # Note: order_by is done in the sqlalchemy.utils.py paginate_query(),
    # no need to do it here as well
    # Make a copy of the filters dictionary to use going forward, as we'll
    # be modifying it and we shouldn't affect the caller's use of it.
    filters = filters.copy()
    if 'changes-since' in filters:
        changes_since = timeutils.normalize_time(filters['changes-since'])
        query_prefix = query_prefix.\
            filter(models.Instance.updated_at >= changes_since)
    deleted = False
    if 'deleted' in filters:
        # Instances can be soft or hard deleted and the query needs to
        # include or exclude both
        deleted = filters.pop('deleted')
        if deleted:
            if filters.pop('soft_deleted', True):
                delete = or_(
                    models.Instance.deleted == models.Instance.id,
                    models.Instance.vm_state == vm_states.SOFT_DELETED
                    )
                query_prefix = query_prefix.\
                    filter(delete)
            else:
                query_prefix = query_prefix.\
                    filter(models.Instance.deleted == models.Instance.id)
        else:
            query_prefix = query_prefix.\
                filter_by(deleted=0)
            if not filters.pop('soft_deleted', False):
                # It would be better to have vm_state not be nullable
                # but until then we test it explicitly as a workaround.
                not_soft_deleted = or_(
                    models.Instance.vm_state != vm_states.SOFT_DELETED,
                    models.Instance.vm_state == null()
                    )
                query_prefix = query_prefix.filter(not_soft_deleted)
    if 'cleaned' in filters:
        if filters.pop('cleaned'):
            query_prefix = query_prefix.filter(models.Instance.cleaned == 1)
        else:
            query_prefix = query_prefix.filter(models.Instance.cleaned == 0)
    if 'tag' in filters:
        tags = filters.pop('tag')
        # We build a JOIN ladder expression for each tag, JOIN'ing
        # the first tag to the instances table, and each subsequent
        # tag to the last JOIN'd tags table
        first_tag = tags.pop(0)
        query_prefix = query_prefix.join(models.Instance.tags)
        query_prefix = query_prefix.filter(models.Tag.tag == first_tag)
        for tag in tags:
            tag_alias = aliased(models.Tag)
            query_prefix = query_prefix.join(tag_alias,
                                             models.Instance.tags)
            query_prefix = query_prefix.filter(tag_alias.tag == tag)
    if 'tag-any' in filters:
        tags = filters.pop('tag-any')
        tag_alias = aliased(models.Tag)
        query_prefix = query_prefix.join(tag_alias, models.Instance.tags)
        query_prefix = query_prefix.filter(tag_alias.tag.in_(tags))
    if not context.is_admin:
        # If we're not admin context, add appropriate filter..
        if context.project_id:
            filters['project_id'] = context.project_id
        else:
            filters['user_id'] = context.user_id
    # Filters for exact matches that we can do along with the SQL query...
    # For other filters that don't match this, we will do regexp matching
    exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
                                'vm_state', 'instance_type_id', 'uuid',
                                'metadata', 'host', 'task_state',
                                'system_metadata']
    # Filter the query
    query_prefix = _exact_instance_filter(query_prefix,
                                          filters, exact_match_filter_names)
    query_prefix = _regex_instance_filter(query_prefix, filters)
    query_prefix = _tag_instance_filter(context, query_prefix, filters)
    # paginate query
    if marker is not None:
        try:
            # Resolve the marker uuid to a row; elevate when listing deleted
            # instances so the marker itself can be a deleted row.
            if deleted:
                marker = _instance_get_by_uuid(
                    context.elevated(read_deleted='yes'), marker,
                    session=session)
            else:
                marker = _instance_get_by_uuid(context,
                                               marker, session=session)
        except exception.InstanceNotFound:
            raise exception.MarkerNotFound(marker)
    try:
        query_prefix = sqlalchemyutils.paginate_query(query_prefix,
                                                      models.Instance, limit,
                                                      sort_keys,
                                                      marker=marker,
                                                      sort_dirs=sort_dirs)
    except db_exc.InvalidSortKey:
        raise exception.InvalidSortKey()
    return _instances_fill_metadata(context, query_prefix.all(), manual_joins)
def _tag_instance_filter(context, query, filters):
"""Applies tag filtering to an Instance query.
Returns the updated query. This method alters filters to remove
keys that are tags. This filters on resources by tags - this
method assumes that the caller will take care of access control
:param context: request context object
:param query: query to apply filters to
:param filters: dictionary of filters
"""
if filters.get('filter') is None:
return query
model = models.Instance
model_metadata = models.InstanceMetadata
model_uuid = model_metadata.instance_uuid
| |
<filename>nnef_tools/io/tensorflow/tf_py/tf_py_definitions.py
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
import typing
from collections import OrderedDict
import six
import tensorflow as tf
from nnef_tools.core import utils
from nnef_tools.io.tensorflow.tf_graph import *
def _getattr(module, *names):
    """Return the first attribute of ``module`` found among ``names``, trying
    each name and then its underscore-prefixed variant; None if nothing
    matches."""
    for name in names:
        for candidate in (name, "_" + name):
            if hasattr(module, candidate):
                return getattr(module, candidate)
    return None
class ArgProto(object):
    """Prototype of one argument of a traced TF op.

    ``arg_names`` holds the accepted aliases for the argument; the first one
    is canonical (see ``primary_arg_name``).
    """
    def __init__(self, arg_names, is_tensor, is_array, is_optional):
        self.arg_names = arg_names      # list of accepted names; [0] is canonical
        self.is_tensor = is_tensor      # argument is a tensor (':T' in the proto)
        self.is_array = is_array        # argument is a list ('[]' suffix)
        self.is_optional = is_optional  # argument may be absent ('?' suffix)
    @property
    def primary_arg_name(self):
        # Canonical name, used as the key when collecting op arguments.
        return self.arg_names[0]
    def __repr__(self):
        return "ArgProto({})".format(self.arg_names)
class OpProto(object):
    """Prototype of a traced TF op: its name plus the ordered argument protos."""

    def __init__(self, op_name, arg_protos):
        self.op_name = op_name
        self.arg_protos = arg_protos  # type: typing.List[ArgProto]

    def list_tensor_arg_protos(self):
        """Argument protos describing tensor arguments."""
        return list(filter(lambda proto: proto.is_tensor, self.arg_protos))

    def list_nontensor_arg_protos(self):
        """Argument protos describing non-tensor (attribute) arguments."""
        return list(filter(lambda proto: not proto.is_tensor, self.arg_protos))

    def __repr__(self):
        return "OpProto({})".format(self.op_name)
def parse_arg_proto(s):
    """Parse one argument spec such as "name/alias:T[]?" into an ArgProto.

    Suffixes: '?' marks the argument optional, '[]' marks it a list, and a
    ':T' type tag marks it a tensor.  '/' separates accepted name aliases.
    """
    is_optional = s.endswith('?')
    if is_optional:
        s = s[:-1]
    is_array = s.endswith('[]')
    if is_array:
        s = s[:-2]
    is_tensor = False
    if ':' in s:
        s, type_tag = s.split(':', 1)
        # Only the tensor tag 'T' is understood.
        assert type_tag.strip() == "T"
        is_tensor = True
    arg_names = [alias.strip() for alias in s.split('/')]
    assert all(utils.is_identifier(n) for n in arg_names), "{}".format(arg_names)
    return ArgProto(arg_names=arg_names, is_tensor=is_tensor, is_array=is_array, is_optional=is_optional)
def parse_op_proto(s):
    """Parse an op spec such as "tf.nn.conv2d(input:T, filter:T, strides)"
    into an OpProto."""
    assert s and s[-1] == ')'
    op_name, _, arg_list = s[:-1].partition('(')
    arg_protos = []
    for raw_arg in arg_list.split(','):
        raw_arg = raw_arg.strip()
        assert raw_arg
        arg_protos.append(parse_arg_proto(raw_arg))
    # Dotted op names are allowed; each component must be an identifier.
    assert utils.is_identifier(op_name.replace('.', '_'))
    return OpProto(op_name=op_name, arg_protos=arg_protos)
def args_from_tfop(op, op_proto, allow_missing=False):
    # type: (TFOperation, OpProto, bool)->OrderedDict[str, typing.Any]
    """Reconstruct the keyword arguments of ``op`` from its inputs and
    attribs, guided by ``op_proto``.

    Tensor args consume ``op.inputs`` in order (an array arg swallows all
    remaining inputs); non-tensor args are looked up in ``op.attribs``.
    """
    args = OrderedDict()
    next_input = 0
    for proto in op_proto.arg_protos:
        key = proto.primary_arg_name
        if proto.is_tensor:
            if proto.is_array:
                # A tensor-list argument takes every remaining input.
                args[key] = list(op.inputs[next_input:])
                next_input = len(op.inputs)
            else:
                assert next_input < len(op.inputs) or proto.is_optional
                if next_input < len(op.inputs):
                    args[key] = op.inputs[next_input]
                    next_input += 1
        else:
            skippable = ((allow_missing or proto.is_optional)
                         and key not in op.attribs)
            if not skippable:
                args[key] = op.attribs[key]
    return args
class TraceableFunction(object):
    """Associates an op prototype with the concrete callables that implement it."""

    def __init__(self, proto, fun):
        # type: (typing.Union[str, OpProto], typing.Union[typing.Callable, typing.List[typing.Callable]])->None
        if isinstance(fun, (list, tuple)):
            candidates = list(fun)
        else:
            candidates = [fun]
        self.op_proto = proto if isinstance(proto, OpProto) else parse_op_proto(proto)
        # Entry points absent from the installed TF version are passed in as
        # None; drop them so tracing only sees real callables.
        self.functions = [f for f in candidates if f is not None]

    def __repr__(self):
        return "TraceableFunction({})".format(self.op_proto.op_name)
class _Functions(object):
    """Public tf entry points that may be missing in older TF releases.

    Each attribute is the resolved function, or None when this TF version
    does not provide it (resolution is delegated to _getattr).
    """

    def __init__(self):
        for name in ("sinh", "cosh"):
            setattr(self, name, _getattr(tf, name))
        self.leaky_relu = _getattr(tf.nn, "leaky_relu")
class _InternalFunctions(object):
    """Non-public TF ops resolved by name from the internal op modules.

    Every attribute keeps exactly the name it has in its source module and
    is None when this TF build does not provide it (per _getattr).
    """

    def __init__(self):
        from tensorflow.python.ops import gen_array_ops as tf_gen_array_ops
        from tensorflow.python.ops import array_grad as tf_array_grad
        from tensorflow.python.ops import gen_math_ops as tf_gen_math_ops
        from tensorflow.python.ops import math_grad as tf_math_grad
        from tensorflow.python.ops import gen_nn_ops as tf_gen_nn_ops
        from tensorflow.python.ops import gen_image_ops as tf_gen_image_ops
        from tensorflow.python.ops import variables as tf_variables

        # (module, names-to-resolve) pairs; attribute name == source name.
        lookup_plan = [
            (tf_variables, ("RefVariable",)),
            (tf_gen_math_ops, (
                "add", "div", "pow", "logical_and", "logical_or", "reciprocal",
                "logical_not", "abs", "sign", "exp", "log", "square", "floor",
                "ceil", "round", "greater", "greater_equal", "less",
                "less_equal", "equal", "not_equal", "sqrt", "rsqrt", "range",
                "sub", "mul", "real_div", "neg", "mat_mul", "sqrt_grad",
                "rsqrt_grad", "sigmoid_grad", "tanh_grad", "reciprocal_grad")),
            (tf_gen_array_ops, (
                "rank", "transpose", "strided_slice_grad", "concat_offset",
                "mirror_pad_grad", "broadcast_gradient_args")),
            (tf_gen_nn_ops, (
                "conv3d_backprop_input_v2", "fused_batch_norm",
                "bias_add_grad", "fused_batch_norm_grad",
                "fused_batch_norm_grad_v2", "softmax", "fused_batch_norm_v2",
                "max_pool_grad", "max_pool_grad_with_argmax", "avg_pool_grad",
                "elu_grad", "relu_grad", "relu6_grad", "softplus_grad",
                "lrn_grad")),
            (tf_gen_image_ops, (
                "resize_nearest_neighbor_grad", "resize_bilinear_grad",
                "resize_bicubic_grad")),
            (tf_array_grad, ("TransposeGrad",)),
            (tf_math_grad, ("MinOrMaxGrad",)),
        ]
        for module, names in lookup_plan:
            for name in names:
                setattr(self, name, _getattr(module, name))
# Module-level singletons: resolve the traceable TF entry points once at
# import time so the tables below can reference them directly.
tf_functions = _Functions()
tf_internal = _InternalFunctions()
DefaultTraceableFunctions = [
TraceableFunction("tf.gradients(xs:T[], ys:T[])", [tf.gradients]),
TraceableFunction("tf.constant(value, dtype, shape, name)", [tf.constant]),
TraceableFunction("tf.placeholder(shape, dtype, name)", [tf.placeholder]),
TraceableFunction("tf.get_variable(shape, dtype, name)", [tf.get_variable]),
TraceableFunction("tf.Variable(initial_value, dtype, name)", [tf.Variable, tf_internal.RefVariable]),
TraceableFunction("tf.assign(ref:T, value:T)", [tf.assign]),
TraceableFunction("tf.concat(values:T[], axis)", [tf.concat]),
TraceableFunction("tf.split(value:T, num_or_size_splits, axis)", [tf.split]),
TraceableFunction("tf.reshape(tensor:T, shape)", [tf.reshape]),
TraceableFunction("tf.squeeze(input:T, axis/squeeze_dims[])", [tf.squeeze]),
TraceableFunction("tf.expand_dims(input:T, axis/dim)", [tf.expand_dims]),
TraceableFunction("tf.transpose(a/x:T, perm)", [tf.transpose, tf_internal.transpose]),
TraceableFunction("tf.pad(tensor:T, paddings, mode, constant_values)", [tf.pad]),
TraceableFunction("tf.add(x:T, y:T)", [tf.add, tf_internal.add]),
TraceableFunction("tf.subtract(x:T, y:T)", [tf.subtract, tf_internal.sub]),
TraceableFunction("tf.multiply(x:T, y:T)", [tf.multiply, tf_internal.mul]),
TraceableFunction("tf.divide(x:T, y:T)", [tf.divide, tf_internal.div, tf_internal.real_div]),
TraceableFunction("tf.floor_div(x:T, y:T)", [tf.floor_div]),
TraceableFunction("tf.mod(x:T, y:T)", [tf.mod]),
TraceableFunction("tf.pow(x:T, y:T)", [tf.pow, tf_internal.pow]),
TraceableFunction("tf.logical_and(x:T, y:T)", [tf.logical_and, tf_internal.logical_and]),
TraceableFunction("tf.logical_or(x:T, y:T)", [tf.logical_or, tf_internal.logical_or]),
TraceableFunction("tf.greater(x:T, y:T)", [tf.greater, tf_internal.greater]),
TraceableFunction("tf.greater_equal(x:T, y:T)", [tf.greater_equal, tf_internal.greater_equal]),
TraceableFunction("tf.less(x:T, y:T)", [tf.less, tf_internal.less]),
TraceableFunction("tf.less_equal(x:T, y:T)", [tf.less_equal, tf_internal.less_equal]),
TraceableFunction("tf.equal(x:T, y:T)", [tf.equal, tf_internal.equal]),
TraceableFunction("tf.not_equal(x:T, y:T)", [tf.not_equal, tf_internal.not_equal]),
TraceableFunction("tf.squared_difference(x:T, y:T)", [tf.squared_difference]),
TraceableFunction("tf.minimum(x:T, y:T)", [tf.minimum]),
TraceableFunction("tf.maximum(x:T, y:T)", [tf.maximum]),
TraceableFunction("tf.reciprocal(x:T)", [tf.reciprocal, tf_internal.reciprocal]),
TraceableFunction("tf.negative(x:T)", [tf.negative, tf_internal.neg]),
TraceableFunction("tf.logical_not(x:T)", [tf.logical_not, tf_internal.logical_not]),
TraceableFunction("tf.abs(x:T)", [tf.abs, tf_internal.abs]),
TraceableFunction("tf.sign(x:T)", [tf.sign, tf_internal.sign]),
TraceableFunction("tf.exp(x:T)", [tf.exp, tf_internal.exp]),
TraceableFunction("tf.log(x:T)", [tf.log, tf_internal.log]),
TraceableFunction("tf.sqrt(x:T)", [tf.sqrt, tf_internal.sqrt]),
TraceableFunction("tf.rsqrt(x:T)", [tf.rsqrt, tf_internal.rsqrt]),
TraceableFunction("tf.square(x:T)", [tf.square, tf_internal.square]),
TraceableFunction("tf.floor(x:T)", [tf.floor, tf_internal.floor]),
TraceableFunction("tf.ceil(x:T)", [tf.ceil, tf_internal.ceil]),
TraceableFunction("tf.round(x:T)", [tf.round, tf_internal.round]),
# TraceableFunction("tf.sin(x:T)", [tf.sin]),
# TraceableFunction("tf.cos(x:T)", [tf.cos]),
TraceableFunction("tf.sinh(x:T)", [tf_functions.sinh]),
TraceableFunction("tf.cosh(x:T)", [tf_functions.cosh]),
TraceableFunction("tf.nn.sigmoid(x:T)", [tf.sigmoid, tf.nn.sigmoid]),
TraceableFunction("tf.nn.tanh(x:T)", [tf.tanh, tf.nn.tanh]),
TraceableFunction("tf.where(condition:T, x:T, y:T)", [tf.where]),
TraceableFunction("tf.reduce_sum(input_tensor:T, axis/reduction_indices[], keepdims/keep_dims)", [tf.reduce_sum]),
TraceableFunction("tf.reduce_mean(input_tensor:T, axis/reduction_indices[], keepdims/keep_dims)", [tf.reduce_mean]),
TraceableFunction("tf.reduce_max(input_tensor:T, axis/reduction_indices[], keepdims/keep_dims)", [tf.reduce_max]),
TraceableFunction("tf.reduce_min(input_tensor:T, axis/reduction_indices[], keepdims/keep_dims)", [tf.reduce_min]),
TraceableFunction("tf.reduce_any(input_tensor:T, axis/reduction_indices[], keepdims/keep_dims)", [tf.reduce_any]),
TraceableFunction("tf.reduce_all(input_tensor:T, axis/reduction_indices[], keepdims/keep_dims)", [tf.reduce_all]),
TraceableFunction("tf.argmax(input:T, axis/dimension)", [tf.argmax]),
TraceableFunction("tf.argmin(input:T, axis/dimension)", [tf.argmin]),
TraceableFunction("tf.matmul(a:T, b:T, transpose_a, transpose_b, adjoint_a?, adjoint_b?)",
[tf.matmul, tf_internal.mat_mul]),
TraceableFunction("tf.add_n(inputs:T[])", [tf.add_n]),
TraceableFunction("tf.nn.elu(features:T)", [tf.nn.elu]),
TraceableFunction("tf.nn.relu(features:T)", [tf.nn.relu]),
TraceableFunction("tf.nn.relu6(features:T)", [tf.nn.relu6]),
TraceableFunction("tf.nn.softsign(features:T)", [tf.nn.softsign]),
TraceableFunction("tf.nn.softplus(features:T)", [tf.nn.softplus]),
TraceableFunction("tf.nn.leaky_relu(features:T, alpha:T)", [tf_functions.leaky_relu]),
TraceableFunction("tf.nn.conv1d(value:T, filters:T, stride[], padding, data_format?)", [tf.nn.conv1d]),
TraceableFunction("tf.nn.conv2d(input:T, filter:T, strides, padding, data_format?, dilations?)", [tf.nn.conv2d]),
TraceableFunction("tf.nn.conv3d(input:T, filter:T, strides, padding, data_format?, dilations?)", [tf.nn.conv3d]),
TraceableFunction("tf.nn.convolution(input:T, filter:T, padding, strides, dilation_rate, data_format?)",
[tf.nn.convolution]),
TraceableFunction("tf.nn.atrous_conv2d(value:T, filters:T, rate, padding)", [tf.nn.atrous_conv2d]),
TraceableFunction("tf.nn.conv2d_transpose(value:T, filter:T, output_shape, strides, padding, data_format?)",
[tf.nn.conv2d_transpose]),
TraceableFunction("tf.nn.conv3d_transpose(value:T, filter:T, output_shape, strides, padding, data_format?)",
[tf.nn.conv3d_transpose]),
TraceableFunction("tf.nn.atrous_conv2d_transpose(value:T, filters:T, output_shape, rate, padding)",
[tf.nn.atrous_conv2d_transpose]),
TraceableFunction("tf.nn.depthwise_conv2d(input:T, filter:T, strides, padding, rate, data_format?)",
[tf.nn.depthwise_conv2d]),
TraceableFunction("tf.nn.depthwise_conv2d_native(input:T, filter:T, strides, padding, data_format?, dilations?)",
[tf.nn.depthwise_conv2d_native]),
TraceableFunction("tf.nn.separable_conv2d(input:T, depthwise_filter:T, pointwise_filter:T, strides, padding, "
"rate, data_format?)", [tf.nn.separable_conv2d]),
TraceableFunction("tf.nn.conv2d_backprop_input(input_sizes, filter:T, out_backprop:T, strides, padding, "
"data_format?, dilations?)", [tf.nn.conv2d_backprop_input]),
TraceableFunction("tf.nn.depthwise_conv2d_native_backprop_input(input_sizes, filter:T, out_backprop:T, strides, "
"padding, data_format?, dilations?)", [tf.nn.depthwise_conv2d_native_backprop_input]),
TraceableFunction("tf.nn.conv2d_backprop_filter(input:T, filter_sizes, out_backprop:T, strides, padding, "
"data_format?, dilations?)", [tf.nn.conv2d_backprop_filter]),
TraceableFunction("tf.nn.conv3d_backprop_filter_v2(input:T, filter_sizes, out_backprop:T, strides, padding, "
"data_format?, dilations?)", [tf.nn.conv3d_backprop_filter_v2]),
TraceableFunction("tf.nn.depthwise_conv2d_native_backprop_filter(input:T, filter_sizes, out_backprop:T, strides, "
"padding, data_format?, dilations?)", [tf.nn.depthwise_conv2d_native_backprop_filter]),
TraceableFunction("tf.nn.max_pool(value:T, ksize, strides, padding, data_format?)", [tf.nn.max_pool]),
TraceableFunction("tf.nn.avg_pool(value:T, ksize, strides, padding, data_format?)", [tf.nn.avg_pool]),
TraceableFunction("tf.nn.max_pool_with_argmax(input:T, ksize, strides, padding, data_format?)",
[tf.nn.max_pool_with_argmax]),
TraceableFunction("tf.nn.bias_add(value:T, bias:T, data_format?)", [tf.nn.bias_add]),
TraceableFunction("tf.nn.lrn(input:T, depth_radius, bias, alpha, beta)", [tf.nn.lrn]),
TraceableFunction("tf.nn.batch_normalization(x:T, mean:T, variance:T, offset:T, scale:T, variance_epsilon)",
[tf.nn.batch_normalization]),
TraceableFunction("tf.nn.fused_batch_norm(x:T, scale:T, offset:T, mean:T?, variance:T?, epsilon, data_format?, "
"is_training)",
[tf.nn.fused_batch_norm, tf_internal.fused_batch_norm, tf_internal.fused_batch_norm_v2]),
TraceableFunction("tf.nn.l2_normalize(x:T, axis/dim[], epsilon)", [tf.nn.l2_normalize]),
TraceableFunction("tf.nn.softmax(logits:T, axis/dim?)",
[tf.nn.softmax, tf.contrib.layers.softmax, tf_internal.softmax]),
TraceableFunction("tf.nn.moments(x:T, axes[], keep_dims)", [tf.nn.moments]),
TraceableFunction("tf.image.resize_images(images:T, size, method, align_corners)", [tf.image.resize_images]),
TraceableFunction("tf.image.resize_bilinear(images:T, size, align_corners)", [tf.image.resize_bilinear]),
TraceableFunction("tf.image.resize_nearest_neighbor(images:T, size, align_corners)",
[tf.image.resize_nearest_neighbor]),
TraceableFunction("tf.image.resize_bicubic(images:T, size, align_corners)", [tf.image.resize_bicubic]),
TraceableFunction("tf.image.resize_area(images:T, size, align_corners)", [tf.image.resize_area]),
TraceableFunction("tf.layers.flatten(inputs:T)", [tf.layers.flatten, tf.contrib.layers.flatten]),
TraceableFunction("tf.clip_by_value(t:T, clip_value_min:T, clip_value_max:T)", [tf.clip_by_value]),
TraceableFunction("tf.slice(input_:T, begin, size)", [tf.slice]),
TraceableFunction("tf.strided_slice(input_:T, begin, end, strides, begin_mask, end_mask, "
"ellipsis_mask, new_axis_mask, shrink_axis_mask, var)", [tf.strided_slice]),
TraceableFunction("tf.stack(values:T[], axis)", [tf.stack]),
TraceableFunction("tf.unstack(value:T, num, axis)", [tf.unstack]),
TraceableFunction("tf.identity(input:T)", [tf.identity]),
TraceableFunction("tf.stop_gradient(input:T)", [tf.stop_gradient]),
TraceableFunction("tf.cast(x:T, dtype)", [tf.cast]),
TraceableFunction("tf.nn.dropout(x:T, keep_prob)", [tf.nn.dropout]),
TraceableFunction("tf.space_to_batch(input:T, paddings, block_size)", [tf.space_to_batch]),
TraceableFunction("tf.space_to_batch_nd(input:T, block_shape, paddings)", [tf.space_to_batch_nd]),
TraceableFunction("tf.batch_to_space(input:T, crops, block_size)", [tf.batch_to_space]),
TraceableFunction("tf.batch_to_space_nd(input:T, block_shape, crops)", [tf.batch_to_space_nd]),
TraceableFunction("tf.zeros(shape, dtype)", [tf.zeros]),
TraceableFunction("tf.ones(shape, dtype)", [tf.ones]),
TraceableFunction("tf.zeros_like(tensor:T, dtype)", [tf.zeros_like]),
TraceableFunction("tf.ones_like(tensor:T, dtype)", [tf.ones_like]),
TraceableFunction("tf.tile(input:T, multiples)", [tf.tile]),
TraceableFunction("tf.dynamic_stitch(indices, data)", [tf.dynamic_stitch]),
TraceableFunction("tf.range(start, limit, delta, dtype)", [tf.range, tf_internal.range]),
TraceableFunction("tf.rank(input:T)", [tf.rank, tf_internal.rank]),
TraceableFunction("tf.shape(input:T)", [tf.shape]),
TraceableFunction("tf.shape_n(input:T[])", [tf.shape_n]),
TraceableFunction("tf.invert_permutation(x:T)", [tf.invert_permutation]),
TraceableFunction("tf.fill(dims, value)", [tf.fill]),
TraceableFunction("tf.random_uniform(shape, minval, maxval, dtype, seed)", [tf.random_uniform]),
TraceableFunction("_tf.conv3d_backprop_input_v2(input_sizes, filter:T, out_backprop:T, strides, padding, "
"data_format?, dilations?)", [tf_internal.conv3d_backprop_input_v2]),
TraceableFunction("_tf.concat_offset(concat_dim, shape)", [tf_internal.concat_offset]),
TraceableFunction("_tf.broadcast_gradient_args(s0, s1)", [tf_internal.broadcast_gradient_args]),
TraceableFunction("_tf.sqrt_grad(y:T, dy:T)", [tf_internal.sqrt_grad]),
TraceableFunction("_tf.rsqrt_grad(y:T, dy:T)", [tf_internal.rsqrt_grad]),
TraceableFunction("_tf.sigmoid_grad(y:T, dy:T)", [tf_internal.sigmoid_grad]),
TraceableFunction("_tf.tanh_grad(y:T, dy:T)", [tf_internal.tanh_grad]),
TraceableFunction("_tf.reciprocal_grad(y:T, | |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import unittest
from datetime import timedelta
from trac.perm import DefaultPermissionPolicy, DefaultPermissionStore,\
PermissionCache, PermissionSystem
from trac.test import EnvironmentStub, MockRequest
from trac.ticket import default_workflow, api, web_ui
from trac.ticket.batch import BatchModifyModule
from trac.ticket.model import Ticket
from trac.util.datefmt import datetime_now, utc
from trac.web.api import HTTPBadRequest, RequestDone
from trac.web.chrome import web_context
from trac.web.session import DetachedSession
class BatchModifyTestCase(unittest.TestCase):
    """Tests for BatchModifyModule: request-value parsing, list-field
    merging (+ / - / +- / = modes) and saving batch changes across
    several tickets."""

    def setUp(self):
        self.env = EnvironmentStub(default_data=True,
                                   enable=[default_workflow.ConfigurableTicketWorkflow,
                                           DefaultPermissionPolicy,
                                           DefaultPermissionStore,
                                           web_ui.TicketModule])
        self.env.config.set('trac', 'permission_policies',
                            'DefaultPermissionPolicy')
        self.req = MockRequest(self.env)
        self.req.session = {}
        self.req.perm = PermissionCache(self.env)

    def assertCommentAdded(self, ticket_id, comment):
        """Assert that the most recent comment on `ticket_id` is `comment`."""
        ticket = Ticket(self.env, int(ticket_id))
        changes = ticket.get_changelog()
        comment_change = [c for c in changes if c[2] == 'comment'][-1]
        # A changelog row is (time, author, field, oldvalue, newvalue,
        # permanent): the comment text is the new value at index 4. The
        # previous code compared index 2, which is always the literal field
        # name 'comment', making the assertion vacuous.
        self.assertEqual(comment_change[4], comment)

    def assertFieldChanged(self, ticket_id, field, new_value):
        """Assert that the last change of `field` set it to `new_value`."""
        ticket = Ticket(self.env, int(ticket_id))
        changes = ticket.get_changelog()
        field_change = [c for c in changes if c[2] == field][-1]
        self.assertEqual(field_change[4], new_value)

    def _change_list_test_helper(self, original, new, new2, mode):
        batch = BatchModifyModule(self.env)
        return batch._change_list(original, new, new2, mode)

    def _add_list_test_helper(self, original, to_add):
        return self._change_list_test_helper(original, to_add, '', '+')

    def _remove_list_test_helper(self, original, to_remove):
        return self._change_list_test_helper(original, to_remove, '', '-')

    def _add_remove_list_test_helper(self, original, to_add, to_remove):
        return self._change_list_test_helper(original, to_add, to_remove,
                                             '+-')

    def _assign_list_test_helper(self, original, new):
        return self._change_list_test_helper(original, new, '', '=')

    def _insert_ticket(self, summary, **kw):
        """Helper for inserting a ticket into the database"""
        ticket = Ticket(self.env)
        # Store the summary too; it was previously accepted but ignored
        # (the sibling ProcessRequestTestCase helper does store it).
        ticket['summary'] = summary
        for k, v in kw.items():
            ticket[k] = v
        return ticket.insert()

    def test_ignore_summary_reporter_and_description(self):
        """These cannot be added through the UI, but if somebody tries
        to build their own POST data they will be ignored."""
        batch = BatchModifyModule(self.env)
        self.req.args = {
            'batchmod_value_summary': 'test ticket',
            'batchmod_value_reporter': 'anonymous',
            'batchmod_value_description': 'synergize the widgets'
        }
        values = batch._get_new_ticket_values(self.req)
        self.assertEqual(len(values), 0)

    def test_add_batchmod_value_data_from_request(self):
        batch = BatchModifyModule(self.env)
        self.req.args = {'batchmod_value_milestone': 'milestone1'}
        values = batch._get_new_ticket_values(self.req)
        self.assertEqual(values['milestone'], 'milestone1')

    def test_selected_tickets(self):
        self.req.args = {'selected_tickets': '1,2,3'}
        batch = BatchModifyModule(self.env)
        selected_tickets = batch._get_selected_tickets(self.req)
        self.assertEqual(selected_tickets, ['1', '2', '3'])

    def test_no_selected_tickets(self):
        """If nothing is selected, the return value is the empty list."""
        self.req.args = {'selected_tickets': ''}
        batch = BatchModifyModule(self.env)
        selected_tickets = batch._get_selected_tickets(self.req)
        self.assertEqual(selected_tickets, [])

    def test_require_post_method(self):
        batch = BatchModifyModule(self.env)
        req = MockRequest(self.env, method='GET', path_info='/batchmodify')
        req.session['query_href'] = req.href.query()
        self.assertTrue(batch.match_request(req))
        self.assertRaises(HTTPBadRequest, batch.process_request, req)
        req = MockRequest(self.env, method='POST', path_info='/batchmodify',
                          args={'selected_tickets': ''})
        req.session['query_href'] = req.href.query()
        self.assertTrue(batch.match_request(req))
        # A successful POST ends in a redirect, which raises RequestDone.
        self.assertRaises(RequestDone, batch.process_request, req)

    def test_redirect_to_query_href_in_req_args(self):
        redirect_listener_args = []

        def redirect_listener(req, url, permanent):
            redirect_listener_args[:] = (url, permanent)

        batch = BatchModifyModule(self.env)
        req = MockRequest(self.env, method='POST', path_info='/batchmodify')
        query_opened_tickets = req.href.query(status='!closed')
        query_default = req.href.query()
        # The query_href in the request args wins over the session default.
        req.args = {'selected_tickets': '', 'query_href': query_opened_tickets}
        req.session['query_href'] = query_default
        req.add_redirect_listener(redirect_listener)
        self.assertTrue(batch.match_request(req))
        self.assertRaises(RequestDone, batch.process_request, req)
        self.assertEqual([query_opened_tickets, False], redirect_listener_args)

    # Assign list items

    def test_change_list_replace_empty_with_single(self):
        """Replace empty field with single item."""
        changed = self._assign_list_test_helper('', 'alice')
        self.assertEqual(changed, 'alice')

    def test_change_list_replace_empty_with_items(self):
        """Replace empty field with items."""
        changed = self._assign_list_test_helper('', 'alice bob')
        self.assertEqual(changed, 'alice, bob')

    def test_change_list_replace_item(self):
        """Replace item with a different item."""
        changed = self._assign_list_test_helper('alice', 'bob')
        self.assertEqual(changed, 'bob')

    def test_change_list_replace_item_with_items(self):
        """Replace item with different items."""
        changed = self._assign_list_test_helper('alice', 'bob carol')
        self.assertEqual(changed, 'bob, carol')

    def test_change_list_replace_items_with_item(self):
        """Replace items with a different item."""
        changed = self._assign_list_test_helper('alice bob', 'carol')
        self.assertEqual(changed, 'carol')

    def test_change_list_replace_items(self):
        """Replace items with different items."""
        changed = self._assign_list_test_helper('alice, bob', 'carol dave')
        self.assertEqual(changed, 'carol, dave')

    def test_change_list_replace_items_partial(self):
        """Replace items with different (or not) items."""
        changed = self._assign_list_test_helper('alice, bob', 'bob dave')
        self.assertEqual(changed, 'bob, dave')

    def test_change_list_clear(self):
        """Clear field."""
        changed = self._assign_list_test_helper('alice bob', '')
        self.assertEqual(changed, '')

    # Add / remove list items

    def test_change_list_add_item(self):
        """Append additional item."""
        changed = self._add_list_test_helper('alice', 'bob')
        self.assertEqual(changed, 'alice, bob')

    def test_change_list_add_items(self):
        """Append additional items."""
        changed = self._add_list_test_helper('alice, bob', 'carol dave')
        self.assertEqual(changed, 'alice, bob, carol, dave')

    def test_change_list_remove_item(self):
        """Remove existing item."""
        changed = self._remove_list_test_helper('alice, bob', 'bob')
        self.assertEqual(changed, 'alice')

    def test_change_list_remove_items(self):
        """Remove existing items."""
        changed = self._remove_list_test_helper('alice, bob, carol',
                                                'alice, carol')
        self.assertEqual(changed, 'bob')

    def test_change_list_remove_idempotent(self):
        """Ignore missing item to be removed."""
        changed = self._remove_list_test_helper('alice', 'bob')
        self.assertEqual(changed, 'alice')

    def test_change_list_remove_mixed(self):
        """Ignore only missing item to be removed."""
        changed = self._remove_list_test_helper('alice, bob', 'bob carol')
        self.assertEqual(changed, 'alice')

    def test_change_list_add_remove(self):
        """Remove existing item and append additional item."""
        changed = self._add_remove_list_test_helper('alice, bob', 'carol',
                                                    'alice')
        self.assertEqual(changed, 'bob, carol')

    def test_change_list_add_no_duplicates(self):
        """Existing items are not duplicated."""
        changed = self._add_list_test_helper('alice, bob', 'bob carol')
        self.assertEqual(changed, 'alice, bob, carol')

    def test_change_list_remove_all_duplicates(self):
        """Remove all duplicates."""
        changed = self._remove_list_test_helper('alice, bob, alice', 'alice')
        self.assertEqual(changed, 'bob')

    # Save

    def test_save_comment(self):
        """Comments are saved to all selected tickets."""
        first_ticket_id = self._insert_ticket('Test 1', reporter='joe')
        second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
        selected_tickets = [first_ticket_id, second_ticket_id]
        batch = BatchModifyModule(self.env)
        batch._save_ticket_changes(self.req, selected_tickets, {}, 'comment',
                                   'leave')
        self.assertCommentAdded(first_ticket_id, 'comment')
        self.assertCommentAdded(second_ticket_id, 'comment')

    def test_save_values(self):
        """Changed values are saved to all tickets."""
        first_ticket_id = self._insert_ticket('Test 1', reporter='joe',
                                              component='foo')
        second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
        selected_tickets = [first_ticket_id, second_ticket_id]
        new_values = {'component': 'bar'}
        batch = BatchModifyModule(self.env)
        batch._save_ticket_changes(self.req, selected_tickets, new_values, '',
                                   'leave')
        self.assertFieldChanged(first_ticket_id, 'component', 'bar')
        self.assertFieldChanged(second_ticket_id, 'component', 'bar')

    def test_save_list_fields(self):
        batch = BatchModifyModule(self.env)
        with self.env.db_transaction:
            ticket_ids = [
                self._insert_ticket('Test 1', reporter='joe', keywords='foo'),
                self._insert_ticket('Test 2', reporter='joe', keywords='baz'),
            ]
        # The secondary value is only read in '+-' mode; elsewhere it is
        # ignored, so its content does not matter for '+', '-' and '='.
        self.req.args = {'action': 'leave',
                         'batchmod_mode_keywords': '+',  # add
                         'batchmod_primary_keywords': 'baz new',
                         'batchmod_secondary_keywords': '*****'}
        batch._save_ticket_changes(self.req, ticket_ids, {}, '', 'leave')
        self.assertFieldChanged(ticket_ids[0], 'keywords', 'foo, baz, new')
        self.assertFieldChanged(ticket_ids[1], 'keywords', 'baz, new')
        self.req.args = {'action': 'leave',
                         'batchmod_mode_keywords': '+-',  # add / remove
                         'batchmod_primary_keywords': 'one two three',
                         'batchmod_secondary_keywords': 'baz missing'}
        batch._save_ticket_changes(self.req, ticket_ids, {}, '', 'leave')
        self.assertFieldChanged(ticket_ids[0], 'keywords',
                                'foo, new, one, two, three')
        self.assertFieldChanged(ticket_ids[1], 'keywords',
                                'new, one, two, three')
        self.req.args = {'action': 'leave',
                         'batchmod_mode_keywords': '-',  # remove
                         'batchmod_primary_keywords': 'new two',
                         'batchmod_secondary_keywords': '*****'}
        batch._save_ticket_changes(self.req, ticket_ids, {}, '', 'leave')
        self.assertFieldChanged(ticket_ids[0], 'keywords', 'foo, one, three')
        self.assertFieldChanged(ticket_ids[1], 'keywords', 'one, three')
        self.req.args = {'action': 'leave',
                         'batchmod_mode_keywords': '=',  # set
                         'batchmod_primary_keywords': 'orange',
                         'batchmod_secondary_keywords': '*****'}
        batch._save_ticket_changes(self.req, ticket_ids, {}, '', 'leave')
        self.assertFieldChanged(ticket_ids[0], 'keywords', 'orange')
        self.assertFieldChanged(ticket_ids[1], 'keywords', 'orange')

    def test_action_with_state_change(self):
        """Actions can have change status."""
        self.env.config.set('ticket-workflow', 'embiggen', '* -> big')
        first_ticket_id = self._insert_ticket('Test 1', reporter='joe',
                                              status='small')
        second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
        selected_tickets = [first_ticket_id, second_ticket_id]
        batch = BatchModifyModule(self.env)
        batch._save_ticket_changes(self.req, selected_tickets, {}, '',
                                   'embiggen')
        self.assertFieldChanged(first_ticket_id, 'status', 'big')
        self.assertFieldChanged(second_ticket_id, 'status', 'big')

    def test_action_with_side_effects(self):
        """Actions can have operations with side effects."""
        self.env.config.set('ticket-workflow', 'buckify', '* -> *')
        self.env.config.set('ticket-workflow', 'buckify.operations',
                            'set_owner')
        self.req.args = {'action_buckify_reassign_owner': 'buck'}
        first_ticket_id = self._insert_ticket('Test 1', reporter='joe',
                                              owner='foo')
        second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
        selected_tickets = [first_ticket_id, second_ticket_id]
        batch = BatchModifyModule(self.env)
        batch._save_ticket_changes(self.req, selected_tickets, {}, '',
                                   'buckify')
        self.assertFieldChanged(first_ticket_id, 'owner', 'buck')
        self.assertFieldChanged(second_ticket_id, 'owner', 'buck')

    def test_timeline_events(self):
        """Regression test for #11288"""
        tktmod = web_ui.TicketModule(self.env)
        now = datetime_now(utc)
        start = now - timedelta(hours=1)
        stop = now + timedelta(hours=1)
        # Without any batch change there must be no batchmodify events.
        events = tktmod.get_timeline_events(self.req, start, stop,
                                            ['ticket_details'])
        self.assertEqual(True, all(ev[0] != 'batchmodify' for ev in events))
        prio_ids = {}
        for i in xrange(20):
            t = Ticket(self.env)
            t['summary'] = 'Ticket %d' % i
            t['priority'] = ('', 'minor', 'major', 'critical')[i % 4]
            tktid = t.insert()
            prio_ids.setdefault(t['priority'], []).append(tktid)
        tktids = prio_ids['critical'] + prio_ids['major'] + \
                 prio_ids['minor'] + prio_ids['']
        new_values = {'summary': 'batch updated ticket',
                      'owner': 'ticket11288', 'reporter': 'ticket11288'}
        batch = BatchModifyModule(self.env)
        batch._save_ticket_changes(self.req, tktids, new_values, '', 'leave')
        # Shuffle the ticket_change records so the single batch event does
        # not depend on database insertion order (the bug in #11288).
        with self.env.db_transaction as db:
            rows = db('SELECT * FROM ticket_change')
            db.execute('DELETE FROM ticket_change')
            rows = rows[0::4] + rows[1::4] + rows[2::4] + rows[3::4]
            db.executemany('INSERT INTO ticket_change VALUES (%s)' %
                           ','.join(('%s',) * len(rows[0])),
                           rows)
        events = tktmod.get_timeline_events(self.req, start, stop,
                                            ['ticket_details'])
        events = [ev for ev in events if ev[0] == 'batchmodify']
        self.assertEqual(1, len(events))
        batch_ev = events[0]
        self.assertEqual('anonymous', batch_ev[2])
        self.assertEqual(tktids, batch_ev[3][0])
        self.assertEqual('updated', batch_ev[3][1])
        context = web_context(self.req)
        self.assertEqual(
            self.req.href.query(id=','.join(str(t) for t in tktids)),
            tktmod.render_timeline_event(context, 'url', batch_ev))
class ProcessRequestTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True, enable=[
default_workflow.ConfigurableTicketWorkflow,
DefaultPermissionPolicy, DefaultPermissionStore,
BatchModifyModule, api.TicketSystem, web_ui.TicketModule
])
self.env.config.set('trac', 'permission_policies',
'DefaultPermissionPolicy')
ps = PermissionSystem(self.env)
ps.grant_permission('has_ta_&_bm', 'TICKET_ADMIN')
ps.grant_permission('has_bm', 'TICKET_BATCH_MODIFY')
ps.grant_permission('has_ta_&_bm', 'TICKET_BATCH_MODIFY')
session = DetachedSession(self.env, 'has_ta_&_bm')
session.set('query_href', '')
session.save()
session = DetachedSession(self.env, 'has_bm')
session.set('query_href', '')
session.save()
    def tearDown(self):
        # Reset the stub database so tickets from one test do not leak into
        # the next.
        self.env.reset_db()
def assertFieldChanged(self, ticket_id, field, new_value):
ticket = Ticket(self.env, int(ticket_id))
self.assertEqual(ticket[field], new_value)
def _insert_ticket(self, summary, **kw):
"""Helper for inserting a ticket into the database"""
ticket = Ticket(self.env)
ticket['summary'] = summary
for k, v in kw.items():
ticket[k] = v
return ticket.insert()
def test_modify_reporter_with_ticket_admin(self):
"""User with TICKET_ADMIN can batch modify the reporter."""
self._insert_ticket('Ticket 1', reporter='user1')
self._insert_ticket('Ticket 2', reporter='user1')
req = MockRequest(self.env, method='POST', authname='has_ta_&_bm',
args={
'batchmod_value_reporter': 'user2',
'batchmod_value_comment': '',
'action': 'leave',
'selected_tickets': '1,2',
})
bmm = BatchModifyModule(self.env)
self.assertRaises(RequestDone, bmm.process_request, req)
self.assertFieldChanged(1, 'reporter', 'user2')
self.assertFieldChanged(2, 'reporter', 'user2')
def test_modify_reporter_without_ticket_admin(self):
"""User without TICKET_ADMIN cannot batch modify the reporter."""
self._insert_ticket('Ticket | |
# Source repository: luizschmall/tce_siconfi_inconsistencies
import pandas
import string
import math
import csv
import os
from unicodedata import normalize
def remover_acentos(txt):
    """Strip accents: decompose to NFKD, then drop all non-ASCII code points."""
    decomposed = normalize('NFKD', txt)
    return decomposed.encode('ASCII', 'ignore').decode('ASCII')
def containsNumber(line):
    """Report whether *line* contains a digit and parse its last numeric token.

    The string form of *line* is split on spaces; every token containing a
    digit is interpreted as a Brazilian-format amount: '.' (thousands) and
    ',' (decimal) separators are stripped, then the final two characters are
    treated as cents.  Returns ``(has_digit, value)`` where *value* is the
    float parsed from the LAST such token, or 0 if no token parses cleanly.
    """
    res = False
    numero = 0
    if any(ch.isdigit() for ch in str(line)):
        res = True
        for token in str(line).split(" "):
            if any(ch.isdigit() for ch in token):
                token = token.replace(".", "")
                token = token.replace(",", "")
                # Re-insert the decimal point before the last two digits
                # (values arrive as e.g. '1.234,56').
                token = token[:-2] + "." + token[-2:]
                try:
                    numero = float(token)
                except ValueError:
                    # Token mixes digits with other text (e.g. '12a');
                    # previously a bare `except:` swallowed *all* exceptions.
                    numero = 0
    return res, numero
def buscaKeyParts(diretorio, file, key):
    """Search the CSV *diretorio*+*file* for rows containing *key*
    (accent- and case-insensitive) and collect up to six numeric values
    from the columns at/after the key column of the first matching row.

    Returns a list of six numbers; unfilled positions stay 0.
    """
    df = pandas.read_csv(diretorio + file, names=list(range(0, 10)))
    # Cell-by-cell accent/case-insensitive containment test.
    mask = df.applymap(lambda x: key.upper() in remover_acentos(str(x).upper()))
    df1 = df[mask.any(axis=1)]
    print(df1)
    i = 0
    j = 0
    resultado = [0, 0, 0, 0, 0, 0]
    if not df1.empty:
        # DataFrame.iteritems() was removed in pandas 2.0; items() is the
        # long-standing equivalent and iterates (column name, Series) pairs.
        for (columnName, columnData) in df1.items():
            if key.upper() in remover_acentos(str(columnData.values[0]).upper()):
                # Found the key column; start harvesting numbers from here on.
                j = 1
                print('Colunm Name : ', columnName)
                print('Column Contents : ', columnData.values)
            if j == 1 and columnData.values[0] and not (
                    isinstance(columnData.values[0], float) and math.isnan(columnData.values[0])):
                containnumber1, containnumber2 = containsNumber(columnData.values[0])
                print('contain number : ', containnumber1, containnumber2)
                if containnumber1 == True and i < 6:
                    resultado[i] = containnumber2
                    i += 1
    return resultado
def main():
diretorio = "C:\\Users\\schmall\\Documents\\FGV\\Tese\\Balanços_PI\\BALORC\\ORIG\\RESULT2_despesas\\"
files = os.listdir(diretorio)
csv_files = [f for f in files if f.endswith('.csv')]
files2 = [d for d in csv_files if 'tables' in d]
new = ""
despesas_correntes = [" ", " ", " ", " ", " ", " "]
pessoal_encargos_sociais = [" ", " ", " ", " ", " ", " "]
juros_encargos_divida = [" ", " ", " ", " ", " ", " "]
outras_despesas_correntes = [" ", " ", " ", " ", " ", " "]
despesas_capital = [" ", " ", " ", " ", " ", " "]
investimentos = [" ", " ", " ", " ", " ", " "]
inversoes_financeiras = [" ", " ", " ", " ", " ", " "]
amortizacao_divida = [" ", " ", " ", " ", " ", " "]
reserva_contingencia = [" ", " ", " ", " ", " ", " "]
reserva_rpps = [" ", " ", " ", " ", " ", " "]
subtotal_despesas = [" ", " ", " ", " ", " ", " "]
amortizacao_divida_refinanciamento = [" ", " ", " ", " ", " ", " "]
subtotal_refinanciamento_d = [" ", " ", " ", " ", " ", " "]
for file in files2:
print(file)
file_parts = file.split(".")
if file_parts[0] != new:
with open(diretorio + new + "_tratado.csv", mode='a+') as balorc_file:
balorc_writer = csv.writer(balorc_file, delimiter=';', quoting=csv.QUOTE_NONNUMERIC)
if despesas_correntes[0] == 0 and despesas_correntes[1] == 0:
balorc_writer.writerow(['DESPESAS CORRENTES', despesas_correntes[0], despesas_correntes[1], despesas_correntes[2], despesas_correntes[3], despesas_correntes[4], despesas_correntes[5]])
if pessoal_encargos_sociais[0] == 0 and pessoal_encargos_sociais[1] == 0:
balorc_writer.writerow(['PESSOAL E ENCARGOS SOCIAIS', pessoal_encargos_sociais[0], pessoal_encargos_sociais[1], pessoal_encargos_sociais[2], pessoal_encargos_sociais[3], pessoal_encargos_sociais[4], pessoal_encargos_sociais[5]])
if juros_encargos_divida[0] == 0 and juros_encargos_divida[1] == 0:
balorc_writer.writerow(['JUROS E ENCARGOS DA DIVIDA', juros_encargos_divida[0], juros_encargos_divida[1], juros_encargos_divida[2], juros_encargos_divida[3], juros_encargos_divida[4], juros_encargos_divida[5]])
if outras_despesas_correntes[0] == 0 and outras_despesas_correntes[1] == 0:
balorc_writer.writerow(['OUTRAS DESPESAS CORRENTES', outras_despesas_correntes[0], outras_despesas_correntes[1], outras_despesas_correntes[2], outras_despesas_correntes[3], outras_despesas_correntes[4], outras_despesas_correntes[5]])
if despesas_capital[0] == 0 and despesas_capital[1] == 0:
balorc_writer.writerow(['DESPESAS DE CAPITAL', despesas_capital[0], despesas_capital[1], despesas_capital[2], despesas_capital[3], despesas_capital[4], despesas_capital[5]])
if investimentos[0] == 0 and investimentos[1] == 0:
balorc_writer.writerow(['INVESTIMENTOS', investimentos[0], investimentos[1], investimentos[2], investimentos[3], investimentos[4], investimentos[5]])
if inversoes_financeiras[0] == 0 and inversoes_financeiras[1] == 0:
balorc_writer.writerow(
['INVERSOES FINANCEIRAS', inversoes_financeiras[0], inversoes_financeiras[1], inversoes_financeiras[2], inversoes_financeiras[3], inversoes_financeiras[4], inversoes_financeiras[5]])
if amortizacao_divida[0] == 0 and amortizacao_divida[1] == 0:
balorc_writer.writerow(['AMORTIZACAO DA DIVIDA', amortizacao_divida[0], amortizacao_divida[1], amortizacao_divida[2], amortizacao_divida[3], amortizacao_divida[4], amortizacao_divida[5]])
if reserva_contingencia[0] == 0 and reserva_contingencia[1] == 0:
balorc_writer.writerow(['RESERVA DE CONTINGENCIA', reserva_contingencia[0], reserva_contingencia[1], reserva_contingencia[2], reserva_contingencia[3], reserva_contingencia[4], reserva_contingencia[5]])
if reserva_rpps[0] == 0 and reserva_rpps[1] == 0:
balorc_writer.writerow(['RESERVA DO RPPS', reserva_rpps[0], reserva_rpps[1], reserva_rpps[2], reserva_rpps[3], reserva_rpps[4], reserva_rpps[5]])
if subtotal_despesas[0] == 0 and subtotal_despesas[1] == 0:
balorc_writer.writerow(['SUBTOTAL DAS DESPESAS', subtotal_despesas[0], subtotal_despesas[1], subtotal_despesas[2], subtotal_despesas[3], subtotal_despesas[4], subtotal_despesas[5]])
if amortizacao_divida_refinanciamento[0] == 0 and amortizacao_divida_refinanciamento[1] == 0:
balorc_writer.writerow(
['AMORTIZACAO DA DIVIDA - REFINANCIAMENTO', amortizacao_divida_refinanciamento[0], amortizacao_divida_refinanciamento[1], amortizacao_divida_refinanciamento[2], amortizacao_divida_refinanciamento[3], amortizacao_divida_refinanciamento[4], amortizacao_divida_refinanciamento[5]])
if subtotal_refinanciamento_d[0] == 0 and subtotal_refinanciamento_d[1] == 0:
balorc_writer.writerow(['SUBTOTAL COM REFINANCIAMENTO (XV)', subtotal_refinanciamento_d[0], subtotal_refinanciamento_d[1], subtotal_refinanciamento_d[2], subtotal_refinanciamento_d[3], subtotal_refinanciamento_d[4], subtotal_refinanciamento_d[5]])
new = file_parts[0]
with open(diretorio + file_parts[0] + "_tratado.csv", mode='w+') as balorc_file:
balorc_writer = csv.writer(balorc_file, delimiter=';', quoting=csv.QUOTE_NONNUMERIC)
balorc_writer.writerow(["Key", "1", "2", "3", "4", "5", "6"])
despesas_correntes = buscaKeyParts(diretorio, file, 'DESPESAS CORRENTES')
print("despesas_correntes", despesas_correntes)
if despesas_correntes[0] != 0 or despesas_correntes[1] != 0:
balorc_writer.writerow(['DESPESAS CORRENTES', despesas_correntes[0], despesas_correntes[1], despesas_correntes[2], despesas_correntes[3], despesas_correntes[4], despesas_correntes[5]])
pessoal_encargos_sociais = buscaKeyParts(diretorio, file, 'PESSOAL E ENCARGOS SOCIAIS')
print("pessoal_encargos_sociais", pessoal_encargos_sociais)
if pessoal_encargos_sociais[0] != 0 or pessoal_encargos_sociais[1] != 0:
balorc_writer.writerow(['PESSOAL E ENCARGOS SOCIAIS', pessoal_encargos_sociais[0], pessoal_encargos_sociais[1], pessoal_encargos_sociais[2], pessoal_encargos_sociais[3], pessoal_encargos_sociais[4], pessoal_encargos_sociais[5]])
juros_encargos_divida = buscaKeyParts(diretorio, file, 'JUROS E ENCARGOS DA DIVIDA')
print("juros_encargos_divida", juros_encargos_divida)
if juros_encargos_divida[0] != 0 or juros_encargos_divida[1] != 0:
balorc_writer.writerow(['JUROS E ENCARGOS DA DIVIDA', juros_encargos_divida[0], juros_encargos_divida[1], juros_encargos_divida[2], juros_encargos_divida[3], juros_encargos_divida[4], juros_encargos_divida[5]])
outras_despesas_correntes = buscaKeyParts(diretorio, file, 'OUTRAS DESPESAS CORRENTES')
print("outras_despesas_correntes", outras_despesas_correntes)
if outras_despesas_correntes[0] != 0 or outras_despesas_correntes[1] != 0:
balorc_writer.writerow(['OUTRAS DESPESAS CORRENTES', outras_despesas_correntes[0], outras_despesas_correntes[1], outras_despesas_correntes[2], outras_despesas_correntes[3], outras_despesas_correntes[4], outras_despesas_correntes[5]])
despesas_capital = buscaKeyParts(diretorio, file, 'DESPESAS DE CAPITAL')
print("despesas_capital", despesas_capital)
if despesas_capital[0] != 0 or despesas_capital[1] != 0:
balorc_writer.writerow(['DESPESAS DE CAPITAL', despesas_capital[0], despesas_capital[1], despesas_capital[2], despesas_capital[3], despesas_capital[4], despesas_capital[5]])
investimentos = buscaKeyParts(diretorio, file, 'INVESTIMENTOS')
print("investimentos", investimentos)
if investimentos[0] != 0 or investimentos[1] != 0:
balorc_writer.writerow(['INVESTIMENTOS', investimentos[0], investimentos[1], investimentos[2], investimentos[3], investimentos[4], investimentos[5]])
inversoes_financeiras = buscaKeyParts(diretorio, file, 'INVERSOES FINANCEIRAS')
print("inversoes_financeiras", inversoes_financeiras)
if inversoes_financeiras[0] != 0 or inversoes_financeiras[1] != 0:
balorc_writer.writerow(
['INVERSOES FINANCEIRAS', inversoes_financeiras[0], inversoes_financeiras[1], inversoes_financeiras[2], inversoes_financeiras[3], inversoes_financeiras[4], inversoes_financeiras[5]])
amortizacao_divida = buscaKeyParts(diretorio, file, 'AMORTIZACAO DA DIVIDA')
print("amortizacao_divida", amortizacao_divida)
if amortizacao_divida[0] != 0 or amortizacao_divida[1] != 0:
balorc_writer.writerow(['AMORTIZACAO DA DIVIDA', amortizacao_divida[0], amortizacao_divida[1], amortizacao_divida[2], amortizacao_divida[3], amortizacao_divida[4], amortizacao_divida[5]])
reserva_contingencia = buscaKeyParts(diretorio, file, 'RESERVA DE CONTINGENCIA')
print("reserva_contingencia", reserva_contingencia)
if reserva_contingencia[0] != 0 or reserva_contingencia[1] != 0:
balorc_writer.writerow(['RESERVA DE CONTINGENCIA', reserva_contingencia[0], reserva_contingencia[1], reserva_contingencia[2], reserva_contingencia[3], reserva_contingencia[4], reserva_contingencia[5]])
reserva_rpps = buscaKeyParts(diretorio, file, 'RESERVA DO RPPS')
print("reserva_rpps", reserva_rpps)
if reserva_rpps[0] != 0 or reserva_rpps[1] != 0:
balorc_writer.writerow(['RESERVA DO RPPS', reserva_rpps[0], reserva_rpps[1], reserva_rpps[2], reserva_rpps[3], reserva_rpps[4], reserva_rpps[5]])
subtotal_despesas = buscaKeyParts(diretorio, file, 'SUBTOTAL DAS DESPESAS')
print("subtotal_despesas", subtotal_despesas)
if subtotal_despesas[0] != 0 or subtotal_despesas[1] != 0:
balorc_writer.writerow(['SUBTOTAL DAS DESPESAS', subtotal_despesas[0], subtotal_despesas[1], subtotal_despesas[2], subtotal_despesas[3], subtotal_despesas[4], subtotal_despesas[5]])
amortizacao_divida_refinanciamento = buscaKeyParts(diretorio, file, 'AMORTIZACAO DA DIVIDA - REFINANCIAMENTO')
print("amortizacao_divida_refinanciamento", amortizacao_divida_refinanciamento)
if amortizacao_divida_refinanciamento[0] != 0 or amortizacao_divida_refinanciamento[1] != 0:
balorc_writer.writerow(
['AMORTIZACAO DA DIVIDA - REFINANCIAMENTO', amortizacao_divida_refinanciamento[0], amortizacao_divida_refinanciamento[1], amortizacao_divida_refinanciamento[2], amortizacao_divida_refinanciamento[3], amortizacao_divida_refinanciamento[4], amortizacao_divida_refinanciamento[5]])
subtotal_refinanciamento_d = buscaKeyParts(diretorio, file, 'SUBTOTAL COM REFINANCIAMENTO (XV)')
print("subtotal_refinanciamento_d", subtotal_refinanciamento_d)
if subtotal_refinanciamento_d[0] != 0 or subtotal_refinanciamento_d[1] != 0:
balorc_writer.writerow(['SUBTOTAL COM REFINANCIAMENTO (XV)', subtotal_refinanciamento_d[0], subtotal_refinanciamento_d[1], subtotal_refinanciamento_d[2], subtotal_refinanciamento_d[3], subtotal_refinanciamento_d[4], subtotal_refinanciamento_d[5]])
else:
with open(diretorio + file_parts[0] + "_tratado.csv", mode='a+') as balorc_file:
balorc_writer = csv.writer(balorc_file, delimiter=';', quoting=csv.QUOTE_NONNUMERIC)
if despesas_correntes[0] == 0 and despesas_correntes[1] == 0:
despesas_correntes = buscaKeyParts(diretorio, file, '<NAME>')
print("despesas_correntes", despesas_correntes)
if despesas_correntes[0] != 0 or despesas_correntes[1] != 0:
balorc_writer.writerow(['DESPESAS CORRENTES', despesas_correntes[0], despesas_correntes[1], despesas_correntes[2], despesas_correntes[3], despesas_correntes[4], despesas_correntes[5]])
if pessoal_encargos_sociais[0] == 0 and pessoal_encargos_sociais[1] == 0:
pessoal_encargos_sociais = buscaKeyParts(diretorio, file, 'PESSOAL E ENCARGOS SOCIAIS')
print("pessoal_encargos_sociais", pessoal_encargos_sociais)
if pessoal_encargos_sociais[0] != 0 or pessoal_encargos_sociais[1] != 0:
balorc_writer.writerow(['PESSOAL E ENCARGOS SOCIAIS', pessoal_encargos_sociais[0], pessoal_encargos_sociais[1], pessoal_encargos_sociais[2], pessoal_encargos_sociais[3], pessoal_encargos_sociais[4], pessoal_encargos_sociais[5]])
if juros_encargos_divida[0] == 0 and juros_encargos_divida[1] == 0:
juros_encargos_divida = buscaKeyParts(diretorio, file, 'JUROS E ENCARGOS DA DIVIDA')
print("juros_encargos_divida", juros_encargos_divida)
if juros_encargos_divida[0] != 0 or juros_encargos_divida[1] != 0:
balorc_writer.writerow(['JUROS E ENCARGOS DA DIVIDA', juros_encargos_divida[0], juros_encargos_divida[1], juros_encargos_divida[2], juros_encargos_divida[3], juros_encargos_divida[4], juros_encargos_divida[5]])
if outras_despesas_correntes[0] == 0 and outras_despesas_correntes[1] == 0:
outras_despesas_correntes = buscaKeyParts(diretorio, file, 'OUTRAS DESPESAS CORRENTES')
print("outras_despesas_correntes", outras_despesas_correntes)
if outras_despesas_correntes[0] != 0 or outras_despesas_correntes[1] != 0:
balorc_writer.writerow(['OUTRAS DESPESAS CORRENTES', outras_despesas_correntes[0], outras_despesas_correntes[1], outras_despesas_correntes[2], outras_despesas_correntes[3], outras_despesas_correntes[4], outras_despesas_correntes[5]])
if despesas_capital[0] == 0 and despesas_capital[1] == 0:
despesas_capital = buscaKeyParts(diretorio, file, 'DESPESAS DE CAPITAL')
print("despesas_capital", despesas_capital)
if despesas_capital[0] != 0 or despesas_capital[1] != 0:
balorc_writer.writerow(['DESPESAS DE CAPITAL', despesas_capital[0], despesas_capital[1], despesas_capital[2], despesas_capital[3], despesas_capital[4], despesas_capital[5]])
if investimentos[0] == 0 and investimentos[1] == 0:
investimentos = buscaKeyParts(diretorio, file, 'INVESTIMENTOS')
print("investimentos", investimentos)
if investimentos[0] != 0 or investimentos[1] != 0:
balorc_writer.writerow(['INVESTIMENTOS', investimentos[0], investimentos[1], investimentos[2], investimentos[3], investimentos[4], investimentos[5]])
if inversoes_financeiras[0] == 0 and inversoes_financeiras[1] == 0:
inversoes_financeiras = buscaKeyParts(diretorio, file, 'INVERSOES FINANCEIRAS')
print("inversoes_financeiras", inversoes_financeiras)
if inversoes_financeiras[0] != 0 or inversoes_financeiras[1] != 0:
balorc_writer.writerow(
['INVERSOES FINANCEIRAS', inversoes_financeiras[0], inversoes_financeiras[1], inversoes_financeiras[2], inversoes_financeiras[3], inversoes_financeiras[4], inversoes_financeiras[5]])
if amortizacao_divida[0] == 0 and amortizacao_divida[1] == 0:
amortizacao_divida = buscaKeyParts(diretorio, file, 'AMORTIZACAO DA DIVIDA')
print("amortizacao_divida", amortizacao_divida)
if amortizacao_divida[0] != 0 or amortizacao_divida[1] != 0:
| |
# gh_stars: 1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import division
'''MFEprimer: Multiple factor evaluation of the specificity of PCR primers'''
Program = 'MFEprimer'
Author = '<NAME>, <NAME> & <NAME>, BIRM, China'
AuthorEmail = '<NAME> <<EMAIL>>'
Date = 'Nov-8-2011'
License = 'Please contact <NAME> <<EMAIL>>'
Version = 'V2.0'
import sys, os
import time
import math
import textwrap
import subprocess
import platform
import argparse
from operator import itemgetter
import sqlite3
from pprint import pprint
import shutil
from MFEprimer.chilli import chilli
from MFEprimer.chilli import Seq
from MFEprimer.chilli import SeqCheck
from MFEprimer.chilli import TmDeltaG
from MFEprimer.chilli import chilli
from MFEprimer.chilli import FastaFormatParser
from MFEprimer.chilli import DegenerateSeqConvetor
def get_os():
    """Return 'mac' when running on Darwin, otherwise 'linux'."""
    return 'mac' if platform.system() == 'Darwin' else 'linux'
def get_bit():
    """Return the interpreter's pointer width as a string, e.g. '64'."""
    arch, _linkage = platform.architecture()
    return arch.replace('bit', '')
# Directory holding this script, and the bundled platform-specific
# executables under bin/<os>/<bitness>/.
src_path = os.path.split(os.path.realpath(sys.argv[0]))[0]
bin_path = os.path.join(src_path, 'bin', get_os(), get_bit())
# NOTE(review): `global` at module level is a no-op; `degenerate` is simply a
# module-level flag defaulting to 'no' that other code may overwrite.
global degenerate
degenerate = 'no'
# Lookup table keyed on dinucleotides -- presumably the nearest-neighbour
# mismatch pairs tolerated by the thermodynamic model; confirm against the
# TmDeltaG module before relying on it.
nn_mm_data = {
    'GA' : ['GC'],
    'GG' : ['GT', 'GC'],
    'GT' : ['GC'],
    'CG' : ['CC', 'CA'],
    'CT' : ['CC', 'CA'],
    'AG' : ['AC'],
}
def get_opt():
    '''Check and parse the command-line options.

    Returns the populated argparse namespace.  Exits with a message and a
    nonzero status when a filter range is inconsistent.

    Fixes: Python-2-only `print` statements and `sys.maxint` made this
    function a syntax/attribute error under Python 3; parenthesized prints
    and `sys.maxsize` behave identically on Python 2 as well.
    '''
    parser = argparse.ArgumentParser(prog='MFEprimer', description='MFEprimer: A fast and thermodynamics-based PCR primer specificity checking program.', usage='%(prog)s.py [options] -i primers.fasta -d Human.genomic.fasta')
    parser.add_argument('-i', '--infile', nargs='?', type=argparse.FileType('r'),
            default=sys.stdin, help='[Required] Primer sequences for specificity checking. [File]', required=True)
    parser.add_argument('-d', '--database', nargs='+', type=str,
            help='[Required] Database name for specificity checking.', required=True)
    parser.add_argument('-o', '--outfile', nargs='?', type=argparse.FileType('w'),
            default=sys.stdout, help='[Optional] Output file name for storing the results, default is screen. [File]', required=False)
    parser.add_argument('-k', '--k_value', nargs='?', type=int,
            default=9, help='[Optional] K value, must be identical to the k value when indexing the database. [Integer]', required=False)
    parser.add_argument('--amplicon', action='store_true',
            help='[Optional] Produce the amplicons sequence in Fasta format, only works for normal output format (not tabular).')
    parser.add_argument('--tab', action='store_true',
            help='[Optional] Output in tabular format.')
    parser.add_argument('-v', '--version', action='version', version='%(prog)s 2.0')
    filter_group = parser.add_argument_group('Results filter settings:' ,'set these arguments for filtering the results.')
    filter_group.add_argument('--ppc', nargs='?', type=float, default=30, help='[Optional] Lower limit of the PPC. Legal value: [0~99]. Default = 30. [Float]', required=False)
    filter_group.add_argument('--size_start', nargs='?', help='[Optional] Lower limit of the expected amplicon size range in bp, default = 50. [Integer]', default=50, type=int)
    filter_group.add_argument('--size_stop', nargs='?', help='[Optional] Upper limit of the expected amplicon size range in bp, default = 5000. [Integer]', default=2000, type=int)
    filter_group.add_argument('--tm_start', nargs='?', help='[Optional] Lower limit of the Melting temperature (Tm) [Celsius degree], default = 0. [Float]', default=0, type=float)
    filter_group.add_argument('--tm_stop', nargs='?', help='[Optional] Upper limit of the Melting temperature (Tm) [Celsius degree], default = unlimited. [Float]', default=sys.maxsize, type=float)
    filter_group.add_argument('--dg_start', nargs='?', help='[Optional] Lower limit of the Gibbs free energy for the last 5 resides at 3\' end of the primer [kcal/mol], default = unlimited. [Float]', default=-sys.maxsize, type=float)
    filter_group.add_argument('--dg_stop', nargs='?', help='[Optional] Upper limit of the Gibbs free energy for the last 5 resides at 3\' end of the primer [kcal/mol], default = 0. [Float]', default=0, type=float)
    thermo_group = parser.add_argument_group('Experimental settings:' ,'set these arguments for your real experimental values.')
    thermo_group.add_argument('--mono_conc', nargs='?', type=float,
            default=50, help='[Optional] Concentration of monovalent cations [mM], default = 50.0 [Float]', required=False)
    thermo_group.add_argument('--diva_conc', nargs='?', type=float,
            default=1.5, help='[Optional] Concentration of divalent cations [mM], default = 1.5 [Float]', required=False)
    thermo_group.add_argument('--oligo_conc', nargs='?', type=float,
            default=50, help='[Optional] Concentration of annealing oligos [nM], default = 50.0 [Float]', required=False)
    thermo_group.add_argument('--dntp_conc', nargs='?', type=float,
            default=0.25, help='[Optional] Concentration of dNTPs [nM], default = 0.25 [Float]', required=False)
    options = parser.parse_args()
    # Sanity-check the filter ranges and bail out early on nonsense input.
    if options.ppc < 0 or options.ppc > 100:
        print('Error: Illegal value for ppc')
        sys.exit(1)
    if options.size_start > options.size_stop or options.size_start < 0:
        print('Illegal value for size_start or size_stop')
        sys.exit(1)
    if options.tm_start > options.tm_stop or options.tm_start < 0:
        print('Illegal value for tm_start or tm_stop')
        sys.exit(1)
    if options.dg_start > options.dg_stop or options.dg_start > 0:
        print('Illegal value for dg_start or DeltagG_stop')
        sys.exit(1)
    return options
def print_head(out, options):
    '''Append the program banner and the citation line to *out*; return it.'''
    linesep = os.linesep
    banner = '%s %s [%s]%s' % (Program, Version, Date, linesep)
    out.append(banner)
    citation = textwrap.fill(
        '<NAME>, <NAME>, <NAME>, <NAME> and <NAME>. (2009) MFEprimer: multiple factor evaluation of the specificity of PCR primers, Bioinformatics, 25(2), 276-278',
        80)
    out.append('%s.%s' % (citation, linesep*2))
    return out
def primer_analysis(product, options, oligos, session_dir, fcdict, db):
    '''Analysis the candidate forward and reverse primer and check whether they can amplify an amplicon.

    Each entry of *product* is a dict describing a putative amplicon
    (primer ids, alignment strings, positions, Tm and DeltaG values).
    Candidates are filtered on the 3'-end DeltaG window and the PPC
    threshold from *options*; survivors are annotated with their middle
    sequence, hit metadata from *fcdict* and a graphical alignment, and
    returned as [ave_Tm, ppc, size, amp] lists for later sorting.
    '''
    mid_seq_id_list = []
    tmp_list = []
    amp_list = []
    filter_product = []
    # First pass: filter candidates; collect ids of the inter-primer
    # ("middle") sequences so they can be fetched in one batch afterwards.
    for i in xrange(len(product)):
        #print i
        amp = product[i]
        # Unpack the candidate-amplicon record for readability below.
        hid = amp['hid']
        pid = amp['pid']
        mid = amp['mid']
        f_len = amp['plen']
        r_len = amp['mlen']
        pseq = amp['pseq']
        mseq = amp['mseq']
        size = amp['size']
        f_3_pos = amp['f3_pos']
        r_3_pos = amp['r3_pos']
        p_qseq = amp['p_qseq']
        p_aseq = amp['p_aseq']
        p_sseq = amp['p_sseq']
        p_tail = amp['p_tail']
        m_qseq = amp['m_qseq']
        m_aseq = amp['m_aseq']
        m_sseq = amp['m_sseq']
        m_tail = amp['m_tail']
        p_Tm = amp['p_Tm']
        p_DeltaG = amp['p_DeltaG']
        m_Tm = amp['m_Tm']
        m_DeltaG = amp['m_DeltaG']
        #print 2*i, 2*i + 1
        # DeltaG of the last 5 residues at each primer's 3' end against the
        # complement of the template -- the stability that matters for
        # extension.
        p_3_DeltaG = TmDeltaG.calDeltaG(p_qseq[-5:], Seq.complement(p_sseq[-5:]), mono_conc=options.mono_conc, diva_conc=options.diva_conc, dntp_conc=options.dntp_conc)
        m_3_DeltaG = TmDeltaG.calDeltaG(m_qseq[:5], Seq.complement(m_sseq[:5]), mono_conc=options.mono_conc, diva_conc=options.diva_conc, dntp_conc=options.dntp_conc)
        # Filter on the user-supplied 3'-end DeltaG window.
        if p_3_DeltaG < float(options.dg_start) or p_3_DeltaG > float(options.dg_stop):
            continue
        if m_3_DeltaG < float(options.dg_start) or m_3_DeltaG > float(options.dg_stop):
            continue
        ppc = cal_PPC(len(p_qseq), f_len, len(m_qseq), r_len)
        # Filter by PPC threshold.
        if ppc < options.ppc:
            continue
        # Key identifying the template span between the two 3' ends.
        mid_seq_id = '%s:%s-%s' % (hid, f_3_pos, r_3_pos)
        mid_seq_id_list.append(mid_seq_id)
        ave_Tm = (p_Tm + m_Tm) / 2 # For sort
        to_be_added = (ave_Tm, ppc, p_3_DeltaG, m_3_DeltaG)
        tmp_list.append(to_be_added)
        filter_product.append(amp)
    # Batch-fetch the middle sequences for all surviving candidates.
    mid_seq_list = get_mid_seq(mid_seq_id_list, options, session_dir, db)
    # Second pass: annotate each survivor; tmp_list/filter_product and
    # mid_seq_list are index-aligned by construction.
    for i in xrange(len(mid_seq_list)):
        mid_seq = mid_seq_list[i]
        (ave_Tm, ppc, p_3_DeltaG, m_3_DeltaG) = tmp_list[i]
        amp = filter_product[i]
        pid = amp['pid']
        mid = amp['mid']
        hid = int(amp['hid'])
        # Map the internal hit id back to the database record id/description.
        real_hid = fcdict[str(hid)]['id']
        hdesc = fcdict[str(hid)]['desc']
        amp_graphic = draw_graphical_alignment_primer(amp, oligos, options, mid_seq)
        size = amp['size']
        amp['p_3_DeltaG'] = p_3_DeltaG
        amp['m_3_DeltaG'] = m_3_DeltaG
        amp['real_hid'] = real_hid
        amp['hdesc'] = hdesc
        amp['mid_seq'] = mid_seq
        amp['amp_graphic'] = amp_graphic
        amp_list.append([ave_Tm, ppc, size, amp])
    return amp_list
def cal_PPC(f_match, p_len, r_match, m_len):
    '''Calculate the PPC (primer pair coverage) score, range 0-100.

    f_match/r_match are the matched lengths of the forward/reverse primer,
    p_len/m_len their full lengths.  The matched fractions are multiplied
    and scaled by (1 - coefficient of variation) so that unbalanced matches
    are penalised.
    '''
    ave = (f_match + r_match) / 2
    if ave == 0:
        # Neither primer matched anything; previously this raised
        # ZeroDivisionError.  No match means no coverage.
        return 0.0
    stdev = math.sqrt((f_match - ave)**2 + (r_match - ave)**2)
    cv = 1 - stdev / ave
    ppc = f_match / p_len * r_match / m_len * cv * 100
    return ppc
def format_output_primer(amp_list, oligos, options, start_time, session_dir):
'''Format output in primer task'''
linesep = os.linesep
out = []
out = print_head(out, options)
ID_list = []
for i in xrange(len(oligos)):
ID_list.append(oligos[i]['id'])
query_line = textwrap.fill('Query = %s' % ('; '.join(ID_list)), 80)
out.append(query_line)
out.append(' %s primer sequences' % (len(oligos)))
out.append(linesep)
out.append('Database = %s' % textwrap.fill(', '.join([os.path.basename(db) for db in options.database]), 80))
#out.append(' %s sequences' % (len(fcdict)))
out.append(linesep)
out.append('Reports Beginning'.ljust(80, '.'))
out.append(linesep * 2)
amp_num = len(amp_list)
if amp_num > 1:
out.append('Distribution of %s potential PCR amplicons predicted by MFEprimer-2.0 on the query primers' % amp_num)
else:
out.append('Distribution of %s potential PCR amplicon predicted by MFEprimer-2.0 on the query primers' % amp_num)
out.append(linesep)
out.append('[Sorted by average Tm in descending order]')
out.append('FP '.rjust(69) + 'RP '.rjust(8) + 'FP '.rjust(8) + 'RP '.rjust(8) + 'FP '.rjust(8) + 'RP '.rjust(8))
# Δ takes two characters position
out.append('Size'.rjust(53) + 'PPC '.rjust(8) + 'Tm '.rjust(8) + 'Tm '.rjust(8) + ('%sG' % u'\u0394').rjust(7) + ('%sG' % u'\u0394').rjust(7) + ('3\'%sG' % u'\u0394').rjust(8) + ('3\'%sG' % u'\u0394').rjust(7))
out.append('Primers producing potential PCR products:'.ljust(42) + '(bp)'.rjust(11) + '(%) '.rjust(8) + u'\u2103'.rjust(6) + u'\u2103'.rjust(7) + '(kcal/mol)'.center(22) + '(kcal/mol)'.center(14))
out.append(linesep)
detail_line = []
fa_file = []
sn = 0
#amp_list.append([amp_len, ave_Tm, p, m, ppc, amp_graphic, mid_seq, real_hid, hdesc])
amp_list.sort(key=itemgetter(1, 2), reverse=True)
for ave_Tm, ppc, amp_len, amp in amp_list:
sn = sn + 1
hid = amp['real_hid']
desc = '%s: %s' % (sn, hid)
amp_len = amp['size']
p_qid = amp['pid']
f_len = amp['plen']
pseq = amp['pseq']
f_3_pos = amp['f3_pos']
p_3_DeltaG = amp['p_3_DeltaG']
p_qseq = amp['p_qseq']
p_aseq = amp['p_aseq']
p_sseq = amp['p_sseq']
p_tail = amp['p_tail']
p_Tm = amp['p_Tm']
p_DeltaG = amp['p_DeltaG']
p_sb = f_3_pos - len(p_aseq) + 1
m_qid = amp['mid']
r_len = amp['mlen']
mseq = amp['mseq']
r_3_pos = amp['r3_pos']
m_3_DeltaG = amp['m_3_DeltaG']
m_qseq = amp['m_qseq']
m_aseq = amp['m_aseq']
m_sseq = amp['m_sseq']
m_tail = amp['m_tail']
m_Tm = amp['m_Tm']
m_DeltaG = amp['m_DeltaG']
m_se = r_3_pos + len(m_aseq)
amp_graphic = amp['amp_graphic']
mid_seq = amp['mid_seq']
real_hid = amp['real_hid']
hdesc = amp['hdesc']
amp_seq = p_tail + p_qseq + mid_seq + m_qseq + m_tail
amp_GC = chilli.cal_GC_content(amp_seq, + amp_len)
if len(desc) > 42:
desc = desc[:42] + '...'
if p_qid == m_qid:
ppc = '-%.1f' % ppc
else:
ppc = '%.1f' % ppc
out.append(desc.ljust(42) + (str(amp_len)).rjust(11) + ppc.rjust(8) + ('%.1f' % p_Tm).rjust(8) + ('%.1f' % m_Tm).rjust(8) + ('%.1f' % | |
#!/usr/bin/env python
import os
import glob
import json
import traceback
from collections import defaultdict
from datetime import datetime
from github import Github
# Repository that hosts the dataset submissions and their tracking board.
REPO_NAME = "openforcefield/qca-dataset-submission"
# Filename patterns identifying dataset / compute submission files in a PR.
DATASET_GLOB = "dataset*.json*"
COMPUTE_GLOB = "compute*.json*"
# Priority label -> numeric priority level (higher is more urgent).
PRIORITIES = {'priority-low': 0, 'priority-normal': 1, 'priority-high': 2}
class Submission:
"""A submission, corresponding to a single PR, possibly multiple datasets.
A submission has a lifecycle with well-defined states.
This class represents the current state of a submission,
and provides the machinery for execution of lifecycle processes based on that state.
All lifecycle state is stored on Github in the original PR for the submission,
    mapped onto states in the "Dataset Tracking" project board.
"""
def __init__(self, pr, ghapi, repo=None, priority=1, computetag='openff'):
"""Create a new Submission instance that performs operations on PR
card state based on data in the PR itself.
Since a submission can have multiple DataSets tied together, this
allows for control of card state based on what's going on in the
collection of DataSets the PR is linked to.
Parameters
----------
pr : github.PullRequest
PullRequest corresponding to the dataset submission.
ghapi : github.Github
An authenticated Github Python API client object.
repo : str
Github repo where datasets are tracked.
priority : int
Priority to use for the dataset if set by method calls;
one of 0, 1, or 2, in increasing-priority order.
computetag : str
Compute tag to use for the dataset if set by method calls;
tasks with a given compute tag will only be computed by managers
configured to service that tag.
"""
self.pr = pr
self.ghapi = ghapi
self.priority = priority
self.computetag = computetag
if repo is None:
self.repo = ghapi.get_repo(REPO_NAME)
else:
self.repo = repo
self.datasets = self._gather_datasets()
self.computes = self._gather_computes()
def _gather_datasets(self):
files = self.pr.get_files()
datasets = list(filter(
lambda x: glob.fnmatch.fnmatch(os.path.basename(x), DATASET_GLOB),
map(lambda x: x.filename, files)))
# we only want files that actually exist
# it can rarely be the case that a PR features changes to a path that is a file deletion
datasets = [ds for ds in datasets if os.path.exists(ds)]
return datasets
def _gather_computes(self):
files = self.pr.get_files()
computes = list(filter(
lambda x: glob.fnmatch.fnmatch(os.path.basename(x), COMPUTE_GLOB),
map(lambda x: x.filename, files)))
# we only want files that actually exist
# it can rarely be the case that a PR features changes to a path that is a file deletion
computes = [cs for cs in computes if os.path.exists(cs)]
return computes
@staticmethod
def _get_board_card_state(board, pr):
pr_state = None
pr_card = None
for state, cards in board.items():
for card in cards:
if card.get_content().number == pr.number:
pr_state = state
pr_card = card
break
return pr_card, pr_state
@staticmethod
def _get_column(repo, column):
proj = [
proj for proj in repo.get_projects() if proj.name == "Dataset Tracking"
][0]
cols = list(proj.get_columns())
return [col for col in cols if col.name == column][0]
def set_backlog(self):
backlog = self._get_column(self.repo, "Backlog")
backlog.create_card(content_id=self.pr.id, content_type="PullRequest")
    def execute_state(self, board=None, states=None,
                      reset_errors=False, set_priority=False,
                      set_computetag=False):
        """Based on current state of the PR, perform appropriate actions.

        Parameters
        ----------
        board : dict, optional
            Mapping of column name -> cards; fetched fresh when omitted.
        states : iterable of str, optional
            When given, act only if the PR is currently in one of these.
        reset_errors, set_priority, set_computetag : bool
            Flags forwarded to the "Error Cycling" handler.
        """
        if board is None:
            board = _get_full_board(self.repo)
        pr_card, pr_state = self._get_board_card_state(board, self.pr)
        # if card not on board, then it starts in the Backlog
        if pr_state is None:
            # NOTE(review): set_backlog() returns None; pr_state is
            # immediately re-derived from the reloaded board below, so the
            # assignment is harmless but redundant.
            pr_state = self.set_backlog()
            # reload board, since we just added this card
            board = _get_full_board(self.repo)
            pr_card, pr_state = self._get_board_card_state(board, self.pr)
        # exit early if states specified, and this PR is not
        # in one of those
        if states is not None:
            if pr_state not in states:
                return
        # Dispatch to the handler for the PR's current lifecycle column.
        if pr_state == "Backlog":
            return self.execute_backlog(pr_card, pr_state)
        elif pr_state == "Queued for Submission":
            return self.execute_queued_submit(pr_card, pr_state)
        elif pr_state == "Error Cycling":
            return self.execute_errorcycle(pr_card, pr_state,
                    reset_errors=reset_errors, set_priority=set_priority,
                    set_computetag=set_computetag)
        elif pr_state == "Requires Scientific Review":
            return self.execute_requires_scientific_review(pr_card, pr_state)
        elif pr_state == "End of Life":
            return self.execute_end_of_life(pr_card, pr_state)
        elif pr_state == "Archived/Complete":
            return self.execute_archived_complete(pr_card, pr_state)
def resolve_new_state(self, dataset_results):
"""If new state agreed upon by dataset results, that state is returned.
Otherwise, returns `None`.
"""
# get unique states recommended by datasets for this PR
# may not always be the same, say, if e.g. submission fails for one
# of many datasets in this submission
new_card_state = set(res["new_state"] for res in dataset_results)
# if all datasets agree on the new card state, we change to that state
if len(new_card_state) == 1:
new_state = list(new_card_state)[0]
return new_state
else:
return None
def evolve_state(self, pr_card, pr_state, new_state):
# no need to move if we are already in the new state
if pr_state != new_state:
state_col = self._get_column(self.repo, new_state)
pr_card.move(position="top", column=state_col)
def execute_backlog(self, pr_card, pr_state):
    """If PR is in the backlog and is merged, it will get moved to the
    queued for submission state.

    Returns
    -------
    dict
        A ``{"new_state": <state>}`` recommendation, in the shape
        `resolve_new_state` consumes.
    """
    if self.pr.is_merged():
        comment = f"""
        ## Lifecycle - Backlog
        Merged dataset moved from "Backlog" to "Queued for Submission".
        """
        # postprocess due to raw spacing above
        comment = "\n".join([substr.strip() for substr in comment.split("\n")])
        # submit comment
        self.pr.create_issue_comment(comment)
        self.evolve_state(pr_card, pr_state, "Queued for Submission")
        return {"new_state": "Queued for Submission"}
    else:
        # BUG FIX: key was previously "new state" (with a space), so this
        # recommendation never matched the "new_state" key that
        # `resolve_new_state` reads.
        return {"new_state": "Backlog"}
def execute_queued_submit(self, pr_card, pr_state):
    """Submit every dataset and compute attached to this PR.

    Gathers per-item state recommendations and, when they all agree,
    moves the PR card to the agreed state.
    """
    results = []
    for name in self.datasets:
        print(f"Processing dataset '{name}'")
        results.append(DataSet(name, self, self.ghapi).execute_queued_submit())
    for name in self.computes:
        print(f"Processing compute '{name}'")
        results.append(Compute(name, self, self.ghapi).execute_queued_submit())
    consensus = self.resolve_new_state(results)
    if consensus is not None:
        self.evolve_state(pr_card, pr_state, consensus)
def execute_errorcycle(self, pr_card, pr_state,
                       reset_errors=False,
                       set_priority=False,
                       set_computetag=False):
    """Error cycle each dataset and compute attached to this PR.

    Optionally resets errored tasks, (re)applies priority, or (re)applies
    the compute tag. If every item agrees on a new state the PR card is
    moved there; a consensus of "Archived/Complete" additionally posts a
    completion comment for each dataset.
    """
    results = []
    for name in self.datasets:
        print(f"Processing dataset '{name}'")
        item = DataSet(name, self, self.ghapi,
                       priority=self.priority, computetag=self.computetag)
        results.append(item.execute_errorcycle(reset_errors=reset_errors,
                                               set_priority=set_priority,
                                               set_computetag=set_computetag))
    for name in self.computes:
        print(f"Processing compute '{name}'")
        item = Compute(name, self, self.ghapi,
                       priority=self.priority, computetag=self.computetag)
        results.append(item.execute_errorcycle(reset_errors=reset_errors,
                                               set_priority=set_priority,
                                               set_computetag=set_computetag))
    consensus = self.resolve_new_state(results)
    if consensus is not None:
        self.evolve_state(pr_card, pr_state, consensus)
    if consensus == "Archived/Complete":
        for name in self.datasets:
            DataSet(name, self, self.ghapi).comment_archived_complete()
class SubmittableBase:
def __init__(self, submittable, submission, ghapi, repo=None,
             priority=1, computetag='openff'):
    """Create new Submittable instance linking a submission dataset to its PR.

    Parameters
    ----------
    submittable : path-like
        Path to submission file.
    submission : Submission
        Submission instance corresponding to the dataset submission.
    ghapi : github.Github
        An authenticated Github Python API client object.
    repo : str
        Github repo where datasets are tracked; defaults to the
        canonical tracking repo when not given.
    priority : int
        Priority to use for the dataset if set by method calls;
        one of 0, 1, or 2, in increasing-priority order.
    computetag : str
        Compute tag to use for the dataset if set by method calls;
        tasks with a given compute tag will only be computed by managers
        configured to service that tag.
    """
    self.submittable = submittable
    self.submission = submission
    self.pr = submission.pr
    self.ghapi = ghapi
    self.priority = priority
    self.computetag = computetag
    # Fall back to the canonical tracking repo when none is supplied.
    self.repo = ghapi.get_repo(REPO_NAME) if repo is None else repo
def _parse_spec(self):
spec = self._load_submittable()
dataset_name = spec["dataset_name"]
if "type" in spec:
dataset_type = spec["type"]
elif "dataset_type" in spec:
dataset_type = spec["dataset_type"]
dataset_specs = spec.get("qc_specifications", None)
return dataset_name, dataset_type, dataset_specs
def _load_submittable(self):
    """Deserialize the submission file into its raw spec dictionary."""
    # Local import: openff-qcsubmit is heavy and only needed here.
    from openff.qcsubmit.serializers import deserialize
    return deserialize(self.submittable)
def _get_qca_client(self):
    """Return a FractalClient authenticated from QCA_USER/QCA_KEY env vars."""
    import qcportal as ptl
    return ptl.FractalClient(
        username=os.environ["QCA_USER"], password=os.environ["QCA_KEY"]
    )
def _get_meta(self):
import pandas as pd
datehr = datetime.utcnow().strftime("%Y-%m-%d %H:%M UTC")
dataset_name, dataset_type, dataset_specs = self._parse_spec()
meta = {
"**Dataset Name**": dataset_name,
"**Dataset Type**": dataset_type,
"**UTC Datetime**": datehr,
}
return pd.DataFrame(pd.Series(meta, name=""))
def _version_info_report(self):
    """Render QCSubmit version info as a collapsible Markdown section."""
    version_info = get_version_info()
    report = f"""
    <details>
    <summary><b>QCSubmit</b> version information(<i>click to expand</i>)</summary>
    <!-- have to be followed by an empty line! -->
    {version_info.to_markdown()}
    </details>
    """
    return report
def execute_queued_submit(self, max_retries=3):
    """Submit this dataset to QCArchive and report the outcome on the PR.

    Parameters
    ----------
    max_retries : int
        Currently unused; retained for interface compatibility.

    Returns
    -------
    dict
        ``{"new_state": "Error Cycling"}`` on successful submission,
        otherwise ``{"new_state": "Queued for Submission"}`` so the
        submission is attempted again on the next pass.
    """
    client = self._get_qca_client()

    # load dataset into QCSubmit class
    ds = self._load_submittable()
    dataset_qcs = create_dataset(ds)

    try:
        # submit to QCArchive
        output = self.submit(dataset_qcs, client)
        self._queued_submit_report(output, success=True)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; catch only genuine errors and report them.
        self._queued_submit_report(traceback.format_exc(), success=False)
        return {"new_state": "Queued for Submission"}
    else:
        return {"new_state": "Error Cycling"}
def _queued_submit_report(self, output, success):
    """Post a submission success/failure report as a PR comment."""
    if success:
        success_text = "**SUCCESS**"
    else:
        success_text = "**FAILED**"
    comment = f"""
    ## Lifecycle - QCSubmit Submission Report : {success_text}
    {self._get_meta().to_markdown()}
    Response from public QCArchive:
    ```
    {output}
    ```
    ----------
    {self._version_info_report()}
    """
    # collapse the raw indentation above before posting
    comment = "\n".join(line.strip() for line in comment.split("\n"))
    self.pr.create_issue_comment(comment)
def execute_errorcycle(self,
reset_errors=False,
set_priority=False,
set_computetag=False):
"""Obtain complete, incomplete, error stats | |
_set_lacp_mode)
system_id_mac = __builtin__.property(_get_system_id_mac, _set_system_id_mac)
system_priority = __builtin__.property(_get_system_priority, _set_system_priority)
_pyangbind_elements = OrderedDict([('name', name), ('interval', interval), ('lacp_mode', lacp_mode), ('system_id_mac', system_id_mac), ('system_priority', system_priority), ])
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-lacp - based on the path /lacp/interfaces/interface/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration data for each LACP aggregate interface
"""
__slots__ = ('_path_helper', '_extmethods', '__name','__interval','__lacp_mode','__system_id_mac','__system_priority',)
_yang_name = 'config'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
    """Initialise the generated /lacp/interfaces/interface/config container.

    Accepts at most one positional argument: a compatible object whose
    changed pyangbind elements are copied into this instance (the ``load``
    keyword, when given, is forwarded to each setter). NOTE: this class is
    auto-generated by pyangbind; do not hand-edit the YANGDynClass leaf
    definitions below.
    """
    # Resolve the XPath helper: explicit kwarg wins, then the parent's
    # helper, otherwise helpers are disabled.
    helper = kwargs.pop("path_helper", None)
    if helper is False:
        self._path_helper = False
    elif helper is not None and isinstance(helper, xpathhelper.YANGPathHelper):
        self._path_helper = helper
    elif hasattr(self, "_parent"):
        helper = getattr(self._parent, "_path_helper", False)
        self._path_helper = helper
    else:
        self._path_helper = False
    self._extmethods = False
    # Leaf instances, one per YANG leaf, with model-defined defaults.
    self.__name = YANGDynClass(base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['name'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=True)
    self.__interval = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FAST': {}, 'SLOW': {}},), default=six.text_type("SLOW"), is_leaf=True, yang_name="interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-period-type', is_config=True)
    self.__lacp_mode = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), default=six.text_type("ACTIVE"), is_leaf=True, yang_name="lacp-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=True)
    self.__system_id_mac = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}$'}), is_leaf=True, yang_name="system-id-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=True)
    self.__system_priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=True)
    load = kwargs.pop("load", None)
    if args:
        if len(args) > 1:
            raise TypeError("cannot create a YANG container with >1 argument")
        # The source object must expose every pyangbind element.
        all_attr = True
        for e in self._pyangbind_elements:
            if not hasattr(args[0], e):
                all_attr = False
                break
        if not all_attr:
            raise ValueError("Supplied object did not have the correct attributes")
        # Copy only the elements the source object has actually changed.
        for e in self._pyangbind_elements:
            nobj = getattr(args[0], e)
            if nobj._changed() is False:
                continue
            setmethod = getattr(self, "_set_%s" % e)
            if load is None:
                setmethod(getattr(args[0], e))
            else:
                setmethod(getattr(args[0], e), load=load)
def _path(self):
    # Absolute YANG path of this container: delegate to the parent when
    # attached to a tree, otherwise fall back to the static schema path.
    if hasattr(self, "_parent"):
        return self._parent._path()+[self._yang_name]
    else:
        return ['lacp', 'interfaces', 'interface', 'config']
def _get_name(self):
    """
    Getter method for name, mapped from YANG variable /lacp/interfaces/interface/config/name (oc-if:base-interface-ref)
    YANG Description: Reference to the interface on which LACP should be
    configured. The type of the target interface must be
    ieee8023adLag
    """
    # Plain accessor for the YANGDynClass-wrapped leaf.
    return self.__name
def _set_name(self, v, load=False):
    """
    Setter method for name, mapped from YANG variable /lacp/interfaces/interface/config/name (oc-if:base-interface-ref)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_name() directly.
    YANG Description: Reference to the interface on which LACP should be
    configured. The type of the target interface must be
    ieee8023adLag
    """
    # Unwrap values that arrive wrapped in another YANG type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce against the leafref type; raises on mismatch.
        t = YANGDynClass(v,base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['name'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """name must be of a type compatible with oc-if:base-interface-ref""",
            'defined-type': "oc-if:base-interface-ref",
            'generated-type': """YANGDynClass(base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['name'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=True)""",
        })
    self.__name = t
    # Fire change hooks registered by the path helper, if any.
    if hasattr(self, '_set'):
        self._set()
def _unset_name(self):
    # Reset the leaf to a fresh, default-valued instance.
    self.__name = YANGDynClass(base=ReferenceType(referenced_path='/oc-if:interfaces/oc-if:interface/oc-if:name', caller=self._path() + ['name'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-if:base-interface-ref', is_config=True)
def _get_interval(self):
    """
    Getter method for interval, mapped from YANG variable /lacp/interfaces/interface/config/interval (lacp-period-type)
    YANG Description: Set the period between LACP messages -- uses
    the lacp-period-type enumeration.
    """
    # Plain accessor for the YANGDynClass-wrapped leaf.
    return self.__interval
def _set_interval(self, v, load=False):
    """
    Setter method for interval, mapped from YANG variable /lacp/interfaces/interface/config/interval (lacp-period-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_interval is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_interval() directly.
    YANG Description: Set the period between LACP messages -- uses
    the lacp-period-type enumeration.
    """
    # Unwrap values that arrive wrapped in another YANG type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce against the FAST/SLOW enumeration; raises on mismatch.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FAST': {}, 'SLOW': {}},), default=six.text_type("SLOW"), is_leaf=True, yang_name="interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-period-type', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """interval must be of a type compatible with lacp-period-type""",
            'defined-type': "openconfig-lacp:lacp-period-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FAST': {}, 'SLOW': {}},), default=six.text_type("SLOW"), is_leaf=True, yang_name="interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-period-type', is_config=True)""",
        })
    self.__interval = t
    # Fire change hooks registered by the path helper, if any.
    if hasattr(self, '_set'):
        self._set()
def _unset_interval(self):
    # Reset the leaf to a fresh instance carrying the model default "SLOW".
    self.__interval = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FAST': {}, 'SLOW': {}},), default=six.text_type("SLOW"), is_leaf=True, yang_name="interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-period-type', is_config=True)
def _get_lacp_mode(self):
    """
    Getter method for lacp_mode, mapped from YANG variable /lacp/interfaces/interface/config/lacp_mode (lacp-activity-type)
    YANG Description: ACTIVE is to initiate the transmission of LACP packets.
    PASSIVE is to wait for peer to initiate the transmission of
    LACP packets.
    """
    # Plain accessor for the YANGDynClass-wrapped leaf.
    return self.__lacp_mode
def _set_lacp_mode(self, v, load=False):
    """
    Setter method for lacp_mode, mapped from YANG variable /lacp/interfaces/interface/config/lacp_mode (lacp-activity-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_lacp_mode is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_lacp_mode() directly.
    YANG Description: ACTIVE is to initiate the transmission of LACP packets.
    PASSIVE is to wait for peer to initiate the transmission of
    LACP packets.
    """
    # Unwrap values that arrive wrapped in another YANG type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce against the ACTIVE/PASSIVE enumeration; raises on mismatch.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), default=six.text_type("ACTIVE"), is_leaf=True, yang_name="lacp-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """lacp_mode must be of a type compatible with lacp-activity-type""",
            'defined-type': "openconfig-lacp:lacp-activity-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), default=six.text_type("ACTIVE"), is_leaf=True, yang_name="lacp-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=True)""",
        })
    self.__lacp_mode = t
    # Fire change hooks registered by the path helper, if any.
    if hasattr(self, '_set'):
        self._set()
def _unset_lacp_mode(self):
    # Reset the leaf to a fresh instance carrying the model default "ACTIVE".
    self.__lacp_mode = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACTIVE': {}, 'PASSIVE': {}},), default=six.text_type("ACTIVE"), is_leaf=True, yang_name="lacp-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='lacp-activity-type', is_config=True)
def _get_system_id_mac(self):
    """
    Getter method for system_id_mac, mapped from YANG variable /lacp/interfaces/interface/config/system_id_mac (oc-yang:mac-address)
    YANG Description: The MAC address portion of the node's System ID. This is
    combined with the system priority to construct the 8-octet
    system-id
    """
    # Plain accessor for the YANGDynClass-wrapped leaf.
    return self.__system_id_mac
def _set_system_id_mac(self, v, load=False):
    """
    Setter method for system_id_mac, mapped from YANG variable /lacp/interfaces/interface/config/system_id_mac (oc-yang:mac-address)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_system_id_mac is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_system_id_mac() directly.
    YANG Description: The MAC address portion of the node's System ID. This is
    combined with the system priority to construct the 8-octet
    system-id
    """
    # Unwrap values that arrive wrapped in another YANG type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate against the colon-separated MAC-address pattern; raises on mismatch.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}$'}), is_leaf=True, yang_name="system-id-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """system_id_mac must be of a type compatible with oc-yang:mac-address""",
            'defined-type': "oc-yang:mac-address",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}$'}), is_leaf=True, yang_name="system-id-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=True)""",
        })
    self.__system_id_mac = t
    # Fire change hooks registered by the path helper, if any.
    if hasattr(self, '_set'):
        self._set()
def _unset_system_id_mac(self):
    # Reset the leaf to a fresh, default-valued (empty) instance.
    self.__system_id_mac = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}$'}), is_leaf=True, yang_name="system-id-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='oc-yang:mac-address', is_config=True)
def _get_system_priority(self):
    """
    Getter method for system_priority, mapped from YANG variable /lacp/interfaces/interface/config/system_priority (uint16)
    YANG Description: Sytem priority used by the node on this LAG interface.
    Lower value is higher priority for determining which node
    is the controlling system.
    """
    # Plain accessor for the YANGDynClass-wrapped leaf.
    return self.__system_priority
def _set_system_priority(self, v, load=False):
    """
    Setter method for system_priority, mapped from YANG variable /lacp/interfaces/interface/config/system_priority (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_system_priority is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_system_priority() directly.
    YANG Description: Sytem priority used by the node on this LAG interface.
    Lower value is higher priority for determining which node
    is the controlling system.
    """
    # Unwrap values that arrive wrapped in another YANG type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce against the uint16 range 0..65535; raises on mismatch.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """system_priority must be of a type compatible with uint16""",
            'defined-type': "uint16",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=True)""",
        })
    self.__system_priority = t
    # Fire change hooks registered by the path helper, if any.
    if hasattr(self, '_set'):
        self._set()
def _unset_system_priority(self):
    # Reset the leaf to a fresh, default-valued instance.
    self.__system_priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="system-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/lacp', defining_module='openconfig-lacp', yang_type='uint16', is_config=True)
name = __builtin__.property(_get_name, _set_name)
interval = __builtin__.property(_get_interval, _set_interval)
lacp_mode = __builtin__.property(_get_lacp_mode, _set_lacp_mode)
system_id_mac = __builtin__.property(_get_system_id_mac, _set_system_id_mac)
system_priority = __builtin__.property(_get_system_priority, _set_system_priority)
_pyangbind_elements = OrderedDict([('name', name), ('interval', interval), ('lacp_mode', lacp_mode), ('system_id_mac', system_id_mac), ('system_priority', system_priority), | |
# FUNCTIONS
# These completions are here instead of functions.py to avoid a circular import.
a = ("a()", "a(${1})")
alpha = ("alpha()", "alpha(${1})")
annotation = ("annotation()", "annotation(${1})")
attr = ("attr()", "attr(${1:name})")
auto = ("auto()", "auto(\"${1}\")")
b = ("b()", "b(${1}${2:0}%)")
blackness = ("blackness()", "blackness(${1}${2:0}%)")
blend = ("blend()", "blend(${1:<color>} ${2:0}%${3})")
blenda = ("blenda()", "blenda(${1:<color>} ${2:0}%${3})")
blue = ("blue()", "blue(${1:0})")
blur = ("blur()", "blur(${1:<length>})")
brightness = ("brightness()", "brightness(${1})")
calc = ("calc()", "calc(${1})")
character_variant = ("character-variant()", "character-variant(${1})")
child = ("child()", "child(${1:0})")
cielab = ("cielab()", "cielab(${1:<lightness>}, ${2:a}, ${3:b})")
cielchab = ("cielchab()", "cielchab(${1:<lightness>}, ${2:<chroma>}, ${3:<hue>})")
circle = ("circle()", "circle(${1})")
clamp = ("clamp()", "clamp(${1})")
color_func = ("color()", "color(${1})")
color_adjust_func = ("color-adjust()", "color-adjust(${1})")
color_contrast_func = ("color-contrast()", "color-contrast(${1})")
color_mix_func = ("color-mix()", "color-mix(${1})")
conic_gradient = ("conic-gradient()", "conic-gradient(${1})")
content = ("content()", "content(${1})")
contrast = ("contrast()", "contrast(${1})")
counter = ("counter()", "counter(${1:<identifier>})")
counters = ("counters()", "counters(${1:<identifier>}, '${2:<string>}'${3})")
cross_fade = (
"cross-fade()",
"cross-fade(${1:<mixing-image>}${2:, ${3:<final-image>}})",
)
cubic_bezier = ("cubic-bezier()", "cubic-bezier(${1})")
device_cmyk = (
"device-cmyk()",
"device-cmyk(${1:0}, ${2:0}, ${3:0}, ${4:0}${5:, ${6:1.0}}${7:, ${8:<color>}})",
)
device_gray = ("device-gray()", "device-gray(${1:0})")
device_nchannel = ("device-nchannel()", "device-nchannel(${1:0})")
device_rgb = ("device-rgb()", "device-rgb(${1:0}, ${2:0}, ${3:0})")
drop_shadow = ("drop-shadow()", "drop-shadow(${1:<length>} ${2:<length>})")
element = ("element()", "element(#${1:id})")
ellipse = ("ellipse()", "ellipse(${1})")
filter_func = ("filter()", "filter(${1})")
fit_content = ("fit-content()", "fit-content(${1})")
format_func = ("format()", 'format("${1}")')
frames = ("frames()", "frames(${1})")
gray = ("gray()", "gray(${1}${2:, ${3:1.0}}})")
grayscale = ("grayscale()", "grayscale(${1})")
green = ("green()", "green(${1:0})")
h = ("h()", "h(${1}${2:<angle>})")
hsl = ("hsl()", "hsl(${1:<hue>}, ${2:0}%, ${3:0}%)")
hsla = ("hsla()", "hsla(${1:<hue>}, ${2:0}%, ${3:0}%, ${4:1.0})")
hue = ("hue()", "hue(${1}${2:<angle>})")
hue_rotate = ("hue-rotate()", "hue-rotate(${1:<angle>})")
hwb = ("hwb()", "hwb(${1:<hue>}, ${2:0}%, ${3:0}%${4:, ${5:1.0}})")
icc_color_func = ("icc-color()", "icc-color(${1:name}, ${2:0})")
icc_named_color = (
"icc-named-color()",
"icc-named-color(${1:name}, ${2:<named-colo>r})",
)
image_func = ("image()", "image(${1})")
image_set = ("image-set()", "image-set(${1})")
inset = ("inset()", "inset(${1})")
invert = ("invert()", "invert(${1})")
l = ("l()", "l(${1}${2:0}%)")
lab_func = ("lab()", "lab(${1})")
lch_func = ("lch()", "lch(${1})")
leader = ("leader()", "leader(${1})")
lightness = ("lightness()", "lightness(${1}${2:0}%)")
linear_gradient = ("linear-gradient()", "linear-gradient(${1})")
local = ("local()", "local(${1})")
matrix = ("matrix()", "matrix(${1:0}, ${2:0}, ${3:0}, ${4:0}, ${5:0}, ${6:0})")
matrix3d = (
"matrix3d()",
"matrix3d(${1:0}, ${2:0}, ${3:0}, ${4:0}, ${5:0}, ${6:0}, ${7:0}, ${8:0}, ${9:0}, ${10:0}, ${11:0}, ${12:0}, ${13:0}, ${14:0}, ${15:0}, ${16:0})",
)
max = ("max()", "max(${1})")
min = ("min()", "min(${1})")
minmax = ("minmax()", "minmax(${1:<min>}, ${2:<max>})")
opacity = ("opacity()", "opacity(${1})")
ornaments = ("ornaments()", "ornaments(${1})")
path = ("path()", "path('${1}')")
perspective = ("perspective()", "perspective(${1:length})")
polygon = ("polygon()", "polygon(${1})")
radial_gradient = ("radial-gradient()", "radial-gradient(${1})")
ray = ("ray()", "ray(${1})")
red = ("red()", "red(${1:0})")
repeat = ("repeat()", "repeat(${1})")
repeating_conic_gradient = (
"repeating-conic-gradient()",
"repeating-conic-gradient(${1})",
)
repeating_linear_gradient = (
"repeating-linear-gradient()",
"repeating-linear-gradient(${1})",
)
repeating_radial_gradient = (
"repeating-radial-gradient()",
"repeating-radial-gradient(${1})",
)
rgb = ("rgb()", "rgb(${1:0}, ${2:0}, ${3:0})")
rgba = ("rgba()", "rgba(${1:0}, ${2:0}, ${3:0}, ${4:1.0})")
rotate = ("rotate()", "rotate(${1:angle})")
rotate3d = ("rotate3d()", "rotate3d(${1:0}, ${2:0}, ${3:0}, ${4:angle})")
rotateX = ("rotateX()", "rotateX(${1:angle})")
rotateY = ("rotateY()", "rotateY(${1:angle})")
rotateZ = ("rotateZ()", "rotateZ(${1:angle})")
s = ("s()", "s(${1}${2:0}%)")
saturate = ("saturate()", "saturate(${1})")
saturation = ("saturation()", "saturation(${1}${2:0}%)")
scale = ("scale()", "scale(${1:0}${2:, ${3:0}})")
scale3d = ("scale3d()", "scale3d(${1:0}, ${2:0}, ${3:0})")
scaleX = ("scaleX()", "scaleX(${1:0})")
scaleY = ("scaleY()", "scaleY(${1:0})")
scaleZ = ("scaleZ()", "scaleZ(${1:0})")
select = ("select()", "select(${1})")
sepia = ("sepia()", "sepia(${1})")
shade = ("shade()", "shade(${1:0}%)")
skew = ("skew()", "skew(${1:angle}${2:, ${3:angle}})")
skewX = ("skewX()", "skewX(${1:angle})")
skewY = ("skewY()", "skewY(${1:angle})")
snap_block = ("snap-block()", "snap-block(${1:<length>})")
snap_inline = ("snap-inline()", "snap-inline(${1:<length>})")
steps = ("steps()", "steps(${1})")
styleset = ("styleset()", "styleset(${1})")
stylistic = ("stylistic()", "stylistic(${1})")
swash = ("swash()", "swash(${1})")
symbols = ("symbols()", "symbols(${1})")
target_counter = ("target-counter()", "target-counter(${1})")
target_counters = ("target-counters()", "target-counters(${1})")
target_text = ("target-text()", "target-text(${1})")
tint = ("tint()", "tint(${1:0}%)")
translate = ("translate()", "translate(${1:length}${2:, ${3:length}})")
translate3d = ("translate3d()", "translate3d(${1:length}, ${2:length}, ${3:length})")
translateX = ("translateX()", "translateX(${1:length})")
translateY = ("translateY()", "translateY(${1:length})")
translateZ = ("translateZ()", "translateZ(${1:length})")
url = ("url()", "url('${1}')")
var = ("var()", "var(--${1:name})")
w = ("w()", "w(${1}${2:0}%)")
whiteness = ("whiteness()", "whiteness(${1}${2:0}%)")
# Keywords valid for every CSS property, plus the var() function snippet.
all_values = [("inherit",), ("initial",), ("revert",), ("unset",), var]
# TYPES
# Each entry pairs a completion label ("<type>") with its snippet body.
counter_style_name = ("<counter-style-name>", "${1:<counter-style-name>}")
hex_color = ("<hex-color>", "#${1}")
family_name = ("<family-name>", "${1:<family-name>}")
font_face_name = ("<font-face-name>", "local(${1})")
identifier = ("<identifier>", "${1:<identifier>}")
line_names = ("<line-names>", "[${1:<identifier>}]")
string = ("<string>", "'${1}'")
urange = ("<urange>", "U+${1}")
# COMPOSITE TYPES
alignment_baseline = [
("alphabetic",),
("baseline",),
("bottom",),
("center",),
("central",),
("ideographic",),
("mathematical",),
("middle",),
("text-bottom",),
("text-top",),
("top",),
]
angle = [("<angle>", "${1:<angle>}"), calc, min, max, clamp]
animateable_feature = [("contents",), ("scroll-position",), identifier]
aspect_ratio = [("<aspect-ratio>", "${1:1}ar"), calc, min, max, clamp]
attachment = [("fixed",), ("local",), ("scroll",)]
auto_repeat = [repeat]
baseline_position = [
("baseline",),
("last-baseline",),
("first-baseline",),
]
baseline_source = [
("auto",),
("first",),
("last",),
]
basic_shape = [circle, ellipse, inset, polygon]
# CSS <blend-mode> keywords (mix-blend-mode / background-blend-mode values).
blend_mode = [
    ("color",),
    ("color-burn",),
    ("color-dodge",),
    ("darken",),
    ("difference",),
    ("exclusion",),
    ("hard-light",),
    ("hue",),
    ("lighten",),
    ("luminosity",),
    ("multiply",),
    ("normal",),
    ("overlay",),
    # BUG FIX: was ("saturation ",) with a stray trailing space, which would
    # offer an invalid keyword completion.
    ("saturation",),
    ("screen",),
    ("soft-light",),
]
block_ellipsis = [("auto",), ("none",), string]
border_style = [
("dashed",),
("dotted",),
("double",),
("groove",),
("hidden",),
("inset",),
("logical",),
("none",),
("outset",),
("ridge",),
("solid",),
]
box = [("border-box",), ("content-box",), ("padding-box",)]
caret_shape = [
("auto",),
("bar",),
("block",),
("underscore",),
]
color = [
("aliceblue",),
("antiquewhite",),
("aqua",),
("aquamarine",),
("azure",),
("beige",),
("bisque",),
("black",),
("blanchedalmond",),
("blue",),
("blueviolet",),
("brown",),
("burlywood",),
("cadetblue",),
("chartreuse",),
("chocolate",),
("coral",),
("cornflowerblue",),
("cornsilk",),
("crimson",),
("currentcolor",),
("cyan",),
("darkblue",),
("darkcyan",),
("darkgoldenrod",),
("darkgray",),
("darkgreen",),
("darkgrey",),
("darkkhaki",),
("darkmagenta",),
("darkolivegreen",),
("darkorange",),
("darkorchid",),
("darkred",),
("darksalmon",),
("darkseagreen",),
("darkslateblue",),
("darkslategray",),
("darkslategrey",),
("darkturquoise",),
("darkviolet",),
("deeppink",),
("deepskyblue",),
("dimgray",),
("dimgrey",),
("dodgerblue",),
("firebrick",),
("floralwhite",),
("forestgreen",),
("fuchsia",),
("gainsboro",),
("ghostwhite",),
("gold",),
("goldenrod",),
("gray",),
("green",),
("greenyellow",),
("grey",),
("honeydew",),
("hotpink",),
("indianred",),
("indigo",),
("ivory",),
("khaki",),
("lavender",),
("lavenderblush",),
("lawngreen",),
("lemonchiffon",),
("lightblue",),
("lightcoral",),
("lightcyan",),
("lightgoldenrodyellow",),
("lightgray",),
("lightgreen",),
("lightgrey",),
("lightpink",),
("lightsalmon",),
("lightseagreen",),
("lightskyblue",),
("lightslategray",),
("lightslategrey",),
("lightsteelblue",),
("lightyellow",),
("lime",),
("limegreen",),
("linen",),
("magenta",),
("maroon",),
("mediumaquamarine",),
("mediumblue",),
("mediumorchid",),
("mediumpurple",),
("mediumseagreen",),
("mediumslateblue",),
("mediumspringgreen",),
("mediumturquoise",),
("mediumvioletred",),
("midnightblue",),
("mintcream",),
("mistyrose",),
("moccasin",),
("navajowhite",),
("navy",),
("oldlace",),
("olive",),
("olivedrab",),
("orange",),
("orangered",),
("orchid",),
("palegoldenrod",),
("palegreen",),
("paleturquoise",),
("palevioletred",),
("papayawhip",),
("peachpuff",),
("peru",),
("pink",),
("plum",),
("powderblue",),
("purple",),
("rebeccapurple",),
("red",),
("rosybrown",),
("royalblue",),
("saddlebrown",),
("salmon",),
("sandybrown",),
("seagreen",),
("seashell",),
("sienna",),
("silver",),
("skyblue",),
("slateblue",),
("slategray",),
("slategrey",),
("snow",),
("springgreen",),
("steelblue",),
("tan",),
("teal",),
("thistle",),
("tomato",),
("transparent",),
("turquoise",),
("violet",),
("wheat",),
("white",),
("whitesmoke",),
("yellow",),
("yellowgreen",),
a,
alpha,
b,
blackness,
blend,
blenda,
blue,
cielab,
cielchab,
color_func,
color_adjust_func,
color_contrast_func,
color_mix_func,
contrast,
device_cmyk,
device_gray,
device_nchannel,
device_rgb,
gray,
green,
h,
hsl,
hsla,
hue,
hwb,
icc_color_func,
icc_named_color,
l,
lab_func,
lch_func,
lightness,
red,
rgb,
rgba,
s,
saturation,
shade,
tint,
w,
whiteness,
]
common_lig_values = [("common-ligatures",), ("no-common-ligatures",)]
compositing_operator = [("add",), ("exclude",), ("intersect",), ("subtract",)]
content_distribution = [
("space-around",),
("space-between",),
("space-evenly",),
("stretch",),
]
content_position = [
("center",),
("end",),
("flex-end",),
("flex-start",),
("start",),
]
contextual_alt_values = [("contextual",), ("no-contextual",)]
counter_style = [
counter_style_name,
symbols,
]
cubic_bezier_timing_function = [
("ease",),
("ease-in",),
("ease-in-out",),
("ease-out",),
cubic_bezier,
]
decibel = [("<decibel>", "${1:0}dB"), calc, min, max, clamp]
discretionary_lig_values = [
("discretionary-ligatures",),
("no-discretionary-ligatures",),
]
display_box = [("contents",), ("none",)]
display_inside = [
("flex",),
("flow",),
("flow-root",),
("grid",),
("ruby",),
("subgrid",),
("table",),
]
display_internal = [
("ruby-base",),
("ruby-base-container",),
("ruby-text",),
("ruby-text-container",),
("table-caption",),
("table-cell",),
("table-column",),
("table-column-group",),
("table-footer-group",),
("table-header-group",),
("table-row",),
("table-row-group",),
]
display_legacy = [
("inline-block",),
("inline-flex",),
("inline-grid",),
("inline-table",),
]
display_listitem = [("flow",), ("flow-root",), ("list-item",)]
display_outside = [("block",), ("inline",), ("run-in",)]
east_asian_variant_values = [
("jis04",),
("jis78",),
("jis83",),
("jis90",),
("simplified",),
("traditional",),
]
east_asian_width_values = [("full-width",), ("proportional-width",)]
extent_keyword = [
("closest-corner",),
("closest-side",),
("farthest-corner",),
("farthest-side",),
("sides",),
]
fill_rule = [("evenodd",), ("nonzero",)]
fixed_repeat = [repeat]
flex = [("<flex>", "${1:0}fr"), calc, min, max, clamp]
flex_direction = [("column",), ("column-reverse",), ("row",), ("row-reverse",)]
flex_wrap = [("nowrap",), ("wrap",), ("wrap-reverse",)]
font_family_generic = [
("cursive",),
("emoji",),
("fangsong",),
("fantasy",),
("math",),
("monospace",),
("sans-serif",),
("serif",),
("system-ui",),
]
font_family_name = [identifier, string]
frames_timing_function = [frames]
frequency = [("<frequency>", "${1:0}Hz"), calc, min, max, clamp]
gradient = [
conic_gradient,
repeating_conic_gradient,
linear_gradient,
repeating_linear_gradient,
radial_gradient,
repeating_radial_gradient,
]
historical_lig_values = [("no-historical-ligatures",), ("historical-ligatures",)]
icc_color = [
cielab,
cielchab,
device_gray,
device_rgb,
device_nchannel,
icc_color_func,
]
image = [cross_fade, element, image_func, image_set, url,] + gradient
integer = [("<integer>", "${1:0}"), calc, min, max, clamp]
isolation_mode = [("auto",), ("isolate",)]
length = [("<length>", "${1:<length>}"), calc, min, max, clamp]
line_style = [
("dashed",),
("dotted",),
("double",),
("groove",),
("hidden",),
("inset",),
("none",),
("outset",),
("ridge",),
("solid",),
]
line_width = [("medium",), ("thick",), ("thin",)] + length
overflow_position = [
("safe",),
("unsafe",),
]
marker_ref = [("child",), select, url]
mask_reference = [("none",), url] + image
masking_mode = [("alpha",), ("luminance",), ("match-source",)]
media_types = [
("all", "all "),
("and", "and "),
("not", "not "),
("only", "only "),
("print", "print "),
("screen", "screen "),
]
number = [("<number>", "${1:0}"), calc, min, max, clamp]
numeric_figure_values = [("lining-nums",), ("oldstyle-nums",)]
numeric_fraction_values = [("diagonal-fractions",), ("stacked-fractions",)]
numeric_spacing_values = [("proportional-nums",), ("tabular-nums",)]
page_size = [
("A3",),
("A4",),
("A5",),
("B4",),
("B5",),
("ledger",),
("legal",),
("letter",),
]
paint = [
("child",),
("context-fill",),
("context-stroke",),
("none",),
child,
url,
] + color
percentage = [("<percentage>", "${1:0}%"), calc, min, max, clamp]
position = (
[("bottom",), ("center",), ("left",), ("right",), ("top",),] + length + percentage
)
quote = [
("close-quote",),
("no-close-quote",),
("no-open-quote",),
("open-quote",),
]
ratio = ("<ratio>", "${1}/${2}")
repeat_style = [
("no-repeat",),
("repeat",),
("repeat-x",),
("repeat-y",),
("round",),
("space",),
]
resolution = [("<resolution>", "${1:<resolution>}"), calc, min, max, clamp]
rgb_component = number + percentage
self_position = [
("center",),
("end",),
("flex-end",),
("flex-start",),
("self-end",),
("self-start",),
("start",),
]
semitones = [("<semitones>", "${1:0}st"), calc, min, max, clamp]
shape_arg = length + percentage
shape_box = [("margin-box",)] + box
shape_radius = [("closest-side",), ("farthest-side",)] + length + percentage
side_or_corner = [("bottom",), ("left",), ("right",), ("top",)]
single_animation_composition = [("accumulate",), ("add",), ("replace",)]
single_animation_direction = [
("alternate",),
("alternate-reverse",),
("normal",),
("reverse",),
]
single_animation_fill_mode = [("backwards",), ("both",), ("forwards",), ("none",)]
single_animation_iteration_count = [("infinite",)] + number
single_animation_name = [("none",), identifier]
single_animation_play_state = [("paused",), ("running",)]
steps_timing_function = [("step-end",), ("step-start",), steps]
single_timing_function = (
[("linear",)]
+ cubic_bezier_timing_function
+ frames_timing_function
+ steps_timing_function
)
size = extent_keyword + length + percentage
supports_condition_operator = [("and",), ("not",), ("or",)]
symbol = [identifier, string] + image
target = [target_counter, target_counters, target_text]
time = [("<time>", "${1:0}s"), calc, min, max, clamp]
track_breadth = (
[("auto",), ("max-content",), ("min-content",),] + flex + length + percentage
)
track_repeat = [repeat]
track_size = [fit_content, minmax] + track_breadth
width = [
("fill",),
("fit-content",),
("max-content",),
("min-content",),
fit_content,
min,
max,
clamp,
]
baseline_shift = [("bottom",), ("center",), ("sub",), ("super",), ("top",)] + length + percentage
bg_image = [("none",)] + image
bg_size = [("auto",), ("contain",), ("cover",),] + length + percentage
background = bg_image + position + bg_size + repeat_style + attachment + box + color
border_width = [("logical",), ("medium",), ("thick",), ("thin",)] + length
color_stop = angle + color + percentage
content_list = [("contents",), ("document-url",), leader, | |
# gh_stars: 100-1000
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright (c) 2017, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official,
# policies either expressed or implied, of the FreeBSD Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization
# that has cooperated in the development of these materials, makes
# any warranty, express or implied, or assumes any legal liability
# or responsibility for the accuracy, completeness, or usefulness or
# any information, apparatus, product, software, or process disclosed,
# or represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does
# not necessarily constitute or imply its endorsement, recommendation,
# or favoring by the United States Government or any agency thereof,
# or Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
__docformat__ = 'reStructuredText'
import logging
import datetime
import pytz
import sys
import grequests
# requests should be imported after grequests as requests imports ssl and grequests patches ssl
import requests
import pkg_resources
from volttron.platform.agent import utils
from volttron.platform.vip.agent import RPC
from volttron.platform.agent.utils import format_timestamp
from volttron.platform.agent.base_weather import BaseWeatherAgent
from volttron.platform import jsonapi
_log = logging.getLogger(__name__)
utils.setup_logging()
__version__ = "0.1"
def ambient(config_path, **kwargs):
    """
    Parse the agent configuration and return an Ambient agent built from it.

    :param config_path: Path to a configuration file.
    :type config_path: str
    :returns: configured agent instance
    :rtype: Ambient
    :raises RuntimeError: if 'api_key' or 'application_key' is missing or not
        a non-empty string
    """
    try:
        config = utils.load_config(config_path)
    except Exception:
        # Best-effort load: fall back to an empty config and let the
        # validation below report exactly which required key is missing.
        config = {}
    if not config:
        # BUG FIX: the original format string had no '{}' placeholder, so the
        # loaded config was never interpolated into the log message.
        _log.error("Ambient agent configuration is empty: {}".format(config))
    # Both keys are required and must be non-empty strings.
    for key in ["api_key", "application_key"]:
        if not config.get(key) or not isinstance(config.get(key), str):
            raise RuntimeError("Ambient agent must be configured with '{}' key.".format(key))
    _log.debug("config_dict before init: {}".format(config))
    utils.update_kwargs_with_config(kwargs, config)
    return Ambient(**kwargs)
class Ambient(BaseWeatherAgent):
"""
The Ambient agent requires having an API key to interact with the remote API. The agent offers a performance_mode
configuration option which allows users to limit the amount of data returned by the API.
"""
    def __init__(self, application_key="", **kwargs):
        """
        Initialize the Ambient agent.

        :param application_key: Ambient 'applicationKey' identifying the
            developer/application; the user-level 'api_key' arrives via kwargs
            and is presumably consumed by the base class — TODO confirm.
        """
        super(Ambient, self).__init__(**kwargs)
        _log.debug("vip_identity: " + self.core.identity)
        # Default headers for remote API requests.
        self.headers = {"Accept": "application/json",
                        "Accept-Language": "en-US"
                        }
        # Ambient only supports current observations; drop the hourly
        # history/forecast services registered by the base class.
        self.remove_service("get_hourly_historical")
        self.remove_service("get_hourly_forecast")
        self.app_key = application_key
        # Timestamp of the last remote API call, used to rate-limit requests.
        self.last_service_call_timestamp = None
    @RPC.export
    def get_version(self):
        """
        Provides the current version of the agent.

        :return: current version number (module-level __version__) as a string.
        """
        return __version__
def validate_location(self, service_name, location):
"""
Indicates whether the location dictionary provided matches the format required by the remote weather API
:param service_name: name of the remote API service
:param location: location dictionary to provide in the remote API url
:return: True if the location matches the required format else False
"""
return isinstance(location.get("location", None), str)
def get_update_interval(self, service_name):
"""
Indicates the interval between remote API updates
:param service_name: requested service endpoint
:return: datetime timedelta representing the time interval
"""
if service_name == "get_current_weather":
return datetime.timedelta(minutes=5)
else:
return None
def get_api_description(self, service_name):
"""
Provides a human-readable description of the various endpoints provided by the agent
:param service_name: requested service endpoint
:return: Human-readable description string
"""
if service_name is "get_current_weather":
"Provides current weather observations for locations by their corresponding Ambient weather station name " \
"via RPC (Requires {'location': <station location string>})"
else:
raise RuntimeError(
"Service {} is not implemented by Ambient.".format(service_name))
    def get_point_name_defs_file(self):
        """
        Locates the CSV used to build the point-name mapping.

        :return: filesystem path to the packaged CSV mapping service point
            names to standard point names.
        """
        # Return a resource file path instead of a stream, as csv.DictReader
        # requires a file path or a file-like object opened in text mode.
        return pkg_resources.resource_filename(__name__, "data/name_mapping.csv")
def query_current_weather(self, location):
"""
Retrieve data from the Ambient API, return formatted current data and store forecast data in cache
:param location: location dictionary requested by the user
:return: Timestamp and data for current data from the Ambient API
"""
ambient_response = self.make_request()
location_response = None
current_time = None
for record in ambient_response:
record_location = None
record_info = record.pop("info")
if record_info:
record_location = record_info.get("location", "")
if record_location:
weather_data = record.get("lastData", {})
weather_data["macAddress"] = record.pop("macAddress", "")
weather_data["name"] = record_info.get("name", "")
# "date": "2019-04-25T17:09:00.000Z"
weather_tz_string = weather_data.get('tz', None)
if weather_tz_string:
weather_tz = pytz.timezone(weather_tz_string)
else:
weather_tz = pytz.utc
weather_date = datetime.datetime.strptime(
weather_data.pop("date"), "%Y-%m-%dT%H:%M:%S.%fZ").astimezone(weather_tz)
if location["location"] == record_location:
current_time = format_timestamp(weather_date)
location_response = weather_data
else:
weather_data = self.apply_mapping(weather_data)
self.store_weather_records("get_current_weather",
[jsonapi.dumps({"location": record_location}),
weather_date,
jsonapi.dumps(weather_data)])
else:
raise RuntimeError("API record contained improper 'info' format")
return current_time, location_response
def query_forecast_service(self, service, location, quantity, forecast_start):
"""
Unimplemented method stub
:param service: forecast service type of weather data to return
:param location: location dictionary requested during the RPC call
:param quantity: number of records to return, used to generate Time Machine requests after the forecast request
:param forecast_start: forecast results that are prior to this timestamp will be filtered by base weather agent
:return: Timestamp and data returned by the Ambient weather API response
"""
raise NotImplementedError
def make_request(self):
"""
Request data from the Ambient Weather API
An example of the return value is as follows
[
{
"macAddress": "18:93:D7:3B:89:0C",
"lastData": {
"dateutc": 1556212140000,
"tempinf": 71.9,
"humidityin": 31,
"battout": "1",
"temp1f": 68.7,
"humidity1": 36,
"batt1": "1",
"date": "2019-04-25T17:09:00.000Z"
},
"info": {
"name": "Home B WS",
"location": "Lab Home B"
}
},
{
"macAddress": "50:F1:4A:F7:3C:C4",
"lastData": {
"dateutc": 1556211960000,
"tempinf": 82.5,
"humidityin": 27,
"battout": "1",
"temp1f": 68.5,
"humidity1": 42,
"batt1": "1",
"date": "2019-04-25T17:06:00.000Z"
},
"info": {
"name": "Home A WS",
"location": "Lab Home A"
}
}
]
:return:
"""
# AuthenticationTwo API Keys are required for all REST API requests:applicationKey - identifies the
# developer / application. To request an application key please email <EMAIL>apiKey -
# grants access to past/present data for a given user's devices. A typical consumer-facing application will
# initially ask the user to create an apiKey on their Ambient.net account page
# (https://dashboard.ambientweather.net/account) and paste it into the app. Developers for personal or
# in-house apps will also need to create an apiKey on their own account page.
# Rate LimitingAPI requests are capped at 1 request/second for each user's apiKey and 3 requests/second
# per applicationKey. When this limit is exceeded, the API will return a 429 response code.
# Please be kind to our servers :)
# If the previous call to the API was at least 3 seconds ago - this is a constraint set by Ambient
if not self.last_service_call_timestamp or (
datetime.datetime.now() - self.last_service_call_timestamp).total_seconds() > 3:
url = 'https://api.ambientweather.net/v1/devices?applicationKey=' + self.app_key + '&apiKey=' | |
# VisualVictim.py
# -*- coding:utf-8 -*-
# ==========Import Modules==========#
# OpenCV
import cv2
# print("OpenCV ver : ") + (cv2.__version__)
# Subprocess
import subprocess
# Time
import time
#numpy
import numpy as np
# Serial Communication
import serial
# Struct
import struct
print("----------Import Modules Clear----------")
# ==========Define GlobalValue==========#
## Debug Switch
FLAG_DEBUG = 'ON'
# FLAG_DEBUG = 'OFF' #Competition Mode
# Debug Monitor
FLAG_DEBUG_MONITOR = 'ON'
# FLAG_DEBUG_MONITOR = 'OFF'
## SerialCom
# FLAG_SERIAL = 'ON'
FLAG_SERIAL = 'OFF'
## Detect Area Range
# 0: Min
# 1: Max
AREA_RANGE = [2000, 17000]
## Set Bilateral Filter Prameta
# 0: Operator Size
# 1: Standard Deviation on Color Space
# 2: Standard Deviation on Distance Space
Pram_BF = [7, 18, 31]
## Set Threshold Parameta
# 0: Min
# 1: Max
Pram_Threshold = [0, 54]
## Dsiplay Text
String_Victim = ' '
## Original Data (VisualVictim)
Victim_Num = 3
# After-warp convert parameter: destination corners of the perspective warp.
# The normalized result frame is int(240*0.76) x 240 pixels.
pts2 = np.float32([[0, 0], [240 * 0.75, 0], [0, 240], [240 * 0.75, 240]]) #Result Img

def LoadVictimTemplate(ImgPath):
    """Load one victim letter image and pre-warp it into the normalized frame.

    Returns a 5-element list (same layout the rest of the script indexes):
      0: grayscale source image
      1: inverted source image (letter becomes white on black)
      2: source corner points (Left-Up, Right-Up, Left-Down, Right-Down)
      3: perspective transform matrix (corner points -> pts2)
      4: warped, normalized template image
    """
    Template = [0 for i in range(0, 5)]
    Template[0] = cv2.imread(ImgPath, 0)        # import template image (grayscale)
    Template[1] = cv2.bitwise_not(Template[0])  # invert image
    # Source corner points
    Template[2] = np.float32([[0, 0],                                            # Left Up
                              [Template[1].shape[1], 0],                         # Right Up
                              [0, Template[1].shape[0]],                         # Left Down
                              [Template[1].shape[1], Template[1].shape[0]]])     # Right Down
    # Warp Perspective into the frame shared with camera captures
    Template[3] = cv2.getPerspectiveTransform(Template[2], pts2)
    Template[4] = cv2.warpPerspective(Template[1], Template[3], (int(240 * 0.76), 240))
    return Template

# REFACTOR: the S/H/U setups were three copy-pasted blocks (the H and U copies
# even kept the stale 'Import img "S"' comments); one helper removes the
# duplication while keeping the module-level names and list layout intact.
Original_victim_S = LoadVictimTemplate("Original/S.png")  # Victim "S"
Original_victim_H = LoadVictimTemplate("Original/H.png")  # Victim "H"
Original_victim_U = LoadVictimTemplate("Original/U.png")  # Victim "U"

## Capture Victim && Original Victim Result
# Each list holds: 0: Area Size, 1: Perimeter, 2: Roundness
Result_victim_S = [ 0 for i in range(0, 3) ]
Result_victim_H = [ 0 for i in range(0, 3) ]
Result_victim_U = [ 0 for i in range(0, 3) ]
print("----------Define GlobalValue Clear----------")
# ==========Define Function==========#
# TrackBar (debug builds only): expose tuning parameters as OpenCV trackbars.
if (FLAG_DEBUG == 'ON') & (FLAG_DEBUG_MONITOR == 'ON'):
    # 0: gray lower bound, 1: gray upper bound, 2: bilateral operator size,
    # 3: color-space deviation, 4: distance-space deviation (5-6 unused)
    TrackBarArray = [0 for i in range(0, 7)]
    def TrackbarEvent(Value):
        # Callback fired on any trackbar change; refreshes all cached values.
        global TrackBarArray
        TrackBarArray[0] = cv2.getTrackbarPos('G_Lower', 'img_ColorCircle')
        TrackBarArray[1] = cv2.getTrackbarPos('G_Upper', 'img_ColorCircle')
        # Operator size must be odd, hence 2 * n + 1.
        TrackBarArray[2] = 2 * cv2.getTrackbarPos('OpeSize', 'img_ColorCircle') + 1
        TrackBarArray[3] = cv2.getTrackbarPos('C_deviation', 'img_ColorCircle')
        TrackBarArray[4] = cv2.getTrackbarPos('D_deviation', 'img_ColorCircle')
        #print('[DebugTrackbar] 0=%d, 1=%d, 2=%d, 3=%d, 4=%d, 5=%d' %
        #      (TrackBarArray[0] ,TrackBarArray[1] ,TrackBarArray[2] ,TrackBarArray[3] ,TrackBarArray[4] ,TrackBarArray[5]))
    # Import the color-circle image used as the host window for the trackbars.
    img_ColorCircle = cv2.imread('ColorCircle.jpg', 1)
    cv2.namedWindow('img_ColorCircle', cv2.WINDOW_NORMAL)
    cv2.createTrackbar('G_Lower', 'img_ColorCircle', 0, 255, TrackbarEvent)
    cv2.createTrackbar('G_Upper', 'img_ColorCircle', 0, 255, TrackbarEvent)
    cv2.createTrackbar('OpeSize', 'img_ColorCircle', 0, 20, TrackbarEvent)
    cv2.createTrackbar('C_deviation', 'img_ColorCircle', 0, 255, TrackbarEvent)
    cv2.createTrackbar('D_deviation', 'img_ColorCircle', 0, 255, TrackbarEvent)
# LabelingProcess
def LabelingProcess(ImportImg, LabelNumUpper, AreaMax, AreaMin):
    """Run connected-component labeling and return data for one accepted object.

    :param ImportImg: binary input image
    :param LabelNumUpper: skip the frame entirely when more objects than this
        are found (keeps per-frame cost bounded)
    :param AreaMax: maximum accepted object area in pixels
    :param AreaMin: minimum accepted object area in pixels
    :return: 9-element list:
        0: label number (0 means nothing accepted), 1: area,
        2/3: centroid x/y, 4/5: bounding-box top-left x/y,
        6/7: bounding-box width/height, 8: unused
    """
    Data = [0 for i in range(0, 9)]
    # connectedComponents: returns only the labeled (binary) image.
    # connectedComponentsWithStats: also returns detailed data
    # (centroids, area, bounding box (x, y, width, height)).
    labelnum, labeling, contours, GoCs = cv2.connectedComponentsWithStats(ImportImg)
    # Only process when the object count stays small (saves time).
    # FIX: the original used bitwise '&' between comparisons; it only worked
    # because '&' binds tighter than '<='. Chained comparison states the intent.
    if 1 <= labelnum <= LabelNumUpper:
        for label in range(1, labelnum):
            center_x, center_y = GoCs[label]                  # centroid coordinates
            square_x, square_y, w, h, size = contours[label]  # bbox top-left, extent, area
            # Accept the object when its area falls inside the configured range.
            if AreaMin <= size <= AreaMax:
                Data[0] = label       # assigned label number
                Data[1] = size        # object area
                Data[2] = center_x    # centroid x
                Data[3] = center_y    # centroid y
                Data[4] = square_x    # bounding-box top-left x
                Data[5] = square_y    # bounding-box top-left y
                Data[6] = w           # width
                Data[7] = h           # height
    return Data
# Import Camera & Setting Device
# Open the capture device (index 1 — presumably an external USB camera; TODO confirm)
cap = cv2.VideoCapture(1)
# Setting Device
# Parameters
CAMERA_SETTING_WIDTH = 320
CAMERA_SETTING_HEIGHT = 240
CAMERA_SETTING_FPS = 30
# Property ids: 3 = CAP_PROP_FRAME_WIDTH, 4 = CAP_PROP_FRAME_HEIGHT, 5 = CAP_PROP_FPS
cap.set(3, CAMERA_SETTING_WIDTH)  # Width
cap.set(4, CAMERA_SETTING_HEIGHT)  # Height
cap.set(5, CAMERA_SETTING_FPS)  # FPS
# Echo what the driver actually accepted (may differ from the request).
print("[CamSetRst]Width=%d Height=%d FPS=%d" % (cap.get(3), cap.get(4), cap.get(5)))
print("Import Camera Clear")
# Logical AND Img
def LogicalANDRange(InportImg, UpperRange, LowerRange):
    """Binarize a grayscale image, keeping pixels with LowerRange < value <= UpperRange.

    Two opposite thresholds are combined with a bitwise AND:
      - THRESH_BINARY_INV at UpperRange keeps pixels <= UpperRange
      - THRESH_BINARY at LowerRange keeps pixels > LowerRange

    :param InportImg: grayscale input image
    :param UpperRange: upper bound (int, max 255)
    :param LowerRange: lower bound (int, min 0)
    :return: binary image, 255 inside the band and 0 elsewhere
    """
    _, band_upper = cv2.threshold(InportImg, UpperRange, 255, cv2.THRESH_BINARY_INV)
    _, band_lower = cv2.threshold(InportImg, LowerRange, 255, cv2.THRESH_BINARY)
    return cv2.bitwise_and(band_upper, band_lower)
## Object Extraction
def ObjectExtraction(ImportImg):
    """Measure area, perimeter, and roundness of the first external contour.

    :param ImportImg: binary single-channel image
    :return: (Area_Size, Perimeter, Roundness); roundness is 4*pi*A/P^2
        (1.0 for a perfect circle). Degenerate values are substituted when
        the measurements are non-positive.
    """
    # BUG FIX: cv2.findContours returns 3 values on OpenCV 3.x but only 2 on
    # OpenCV 2.x/4.x, so indexing [1] silently grabs the hierarchy on 4.x.
    # [-2] selects the contour list under every OpenCV version.
    contours = cv2.findContours(ImportImg,              # Import Img
                                cv2.RETR_EXTERNAL,      # extract only the outermost contours
                                cv2.CHAIN_APPROX_SIMPLE # compress contour points
                                )[-2]
    # ROBUSTNESS: an all-black frame has no contours at all; the original
    # crashed on contours[0]. Return the same degenerate values the guards
    # below produce so callers simply see "no detection".
    if not contours:
        return 1, 1, 4.0 * np.pi
    # Area size (guarded against zero for the roundness division)
    Area_Size = cv2.contourArea(contours[0])
    if Area_Size <= 0:
        Area_Size = 1
    # Perimeter (guarded against zero for the roundness division)
    Perimeter = cv2.arcLength(np.array(contours[0]), True)
    if Perimeter <= 0:
        Perimeter = 1
    # Roundness
    Roundness = 4.0 * np.pi * Area_Size / Perimeter / Perimeter
    return Area_Size, Perimeter, Roundness
print("----------Define Function Clear----------")
# ====================Program Start====================#
while True:
## Record Start Time
START_TIME = time.time()
## image capture
ret, img_src = cap.read()
if ret != True:
print("[ERROR]Faild Camera")
break
## Img Convert RGB to Gray
img_gray = cv2.cvtColor(img_src, cv2.COLOR_BGR2GRAY)
## Bilateral Filter
# NOTE(review): this span is the interior of the main capture loop; the loop
# header, imports, and the definitions of FLAG_DEBUG, FLAG_DEBUG_MONITOR,
# TrackBarArray, Pram_BF, Pram_Threshold, AREA_RANGE, img_gray, img_src,
# pts2, Original_victim_S/H/U, START_TIME and String_Victim live outside
# this chunk -- confirm against the full file.
## Noise reduction (bilateral filter: smooths while keeping edges)
if (FLAG_DEBUG == 'ON') & (FLAG_DEBUG_MONITOR == 'ON'):
    # Debug path: filter parameters come from the GUI trackbars and the
    # intermediate image is displayed.
    img_gray_BF = cv2.bilateralFilter(img_gray, TrackBarArray[2], TrackBarArray[3], TrackBarArray[4])
    cv2.namedWindow("[DEBUG]img_gray_BF", cv2.WINDOW_NORMAL)
    cv2.imshow("[DEBUG]img_gray_BF", img_gray_BF)
else:
    # Normal path: fixed, pre-tuned bilateral-filter parameters.
    img_gray_BF = cv2.bilateralFilter(img_gray, Pram_BF[0], Pram_BF[1], Pram_BF[2])
## Threshold Process (binarise the filtered image into a brightness band)
if (FLAG_DEBUG == 'ON') & (FLAG_DEBUG_MONITOR == 'ON'):
    img_result_gray = LogicalANDRange(img_gray_BF, TrackBarArray[1], TrackBarArray[0])
    cv2.namedWindow("[DEBUG]img_result_gray", cv2.WINDOW_NORMAL)
    cv2.imshow("[DEBUG]img_result_gray", img_result_gray)
else:
    img_result_gray = LogicalANDRange(img_gray_BF, Pram_Threshold[1], Pram_Threshold[0])
# Labeling (connected-component) process on the binary image
LabelingResult = [0 for i in range(0, 9)]  # placeholder; overwritten just below
LabelingResult = LabelingProcess(img_result_gray, # Img Import (Binary img)
    5, # number of objects to detect
    AREA_RANGE[1], # Area Max Range
    AREA_RANGE[0]) # Area Min Range
if FLAG_DEBUG == 'ON':
    # Draw the detected object's bounding box (translated: "enclose the
    # object with a rectangle").
    img_result_Labering = cv2.rectangle(img_src, #Write Image
        (LabelingResult[4], LabelingResult[5]),
        (LabelingResult[4] + LabelingResult[6], LabelingResult[5] + LabelingResult[7]),
        (0, 0, 255),
        2)
    # Mark the centroid with a red dot (translated comment).
    img_result_Labering = cv2.circle(img_result_Labering, (int(LabelingResult[2]), int(LabelingResult[3])), 3, (0, 0, 255), -1)
    # Display the last classification string above the box.
    # NOTE(review): String_Victim is only assigned further below inside the
    # detection branch -- on the first debug-enabled pass this can raise
    # NameError unless it is initialised before the loop; confirm.
    cv2.putText(
        img_result_Labering, # DrawImge
        String_Victim, # String
        (LabelingResult[4], LabelingResult[5] - 10), # Position
        cv2.FONT_HERSHEY_COMPLEX, # Font
        0.5, # StringSize
        (0, 0, 255)) # Color
    #Result
    cv2.namedWindow("[DEBUG]img_result_Labering", cv2.WINDOW_NORMAL)
    cv2.imshow("[DEBUG]img_result_Labering", img_result_Labering)
# Warp Perspective: source quad is the label result's bounding-box corners.
pts1 = np.float32([[LabelingResult[4], LabelingResult[5]], # Left Up
    [LabelingResult[4] + LabelingResult[6], LabelingResult[5]], # Right Up
    [LabelingResult[4], LabelingResult[5] + LabelingResult[7]], # Left Down
    [LabelingResult[4] + LabelingResult[6], LabelingResult[5] + LabelingResult[7]]]) # Right Down
if LabelingResult[0] != 0:
    # Rectify the candidate region to a fixed-size patch, then overlap it
    # with the three stored victim templates (S, H, U).
    M = cv2.getPerspectiveTransform(pts1, pts2)
    img_result_victim = cv2.warpPerspective(img_result_gray, M, (int(240 * 0.76), 240))
    # Overlap between the captured patch and each stored template image.
    img_result_S = cv2.bitwise_and(img_result_victim, Original_victim_S[4])
    img_result_H = cv2.bitwise_and(img_result_victim, Original_victim_H[4])
    img_result_U = cv2.bitwise_and(img_result_victim, Original_victim_U[4])
    # Extract object metrics from each overlap image.
    Result_victim_S = ObjectExtraction(img_result_S)
    Result_victim_H = ObjectExtraction(img_result_H)
    Result_victim_U = ObjectExtraction(img_result_U)
    ## Classify by overlap area; 13000 appears to be an empirically tuned
    ## threshold -- confirm its origin.
    if Result_victim_S[0] >= 13000:
        print("Detect S")
        if (FLAG_DEBUG == 'ON') & (FLAG_DEBUG_MONITOR == 'ON'):
            ## Set String
            String_Victim = 'Victim: S, Area: %d' % (Result_victim_S[0])
    elif Result_victim_H[0] >= 13000:
        print("Detect H")
        if (FLAG_DEBUG == 'ON') & (FLAG_DEBUG_MONITOR == 'ON'):
            ## Set String
            String_Victim = 'Victim: H, Area: %d' % (Result_victim_H[0])
    elif Result_victim_U[0] >= 13000:
        print("Detect U")
        if (FLAG_DEBUG == 'ON') & (FLAG_DEBUG_MONITOR == 'ON'):
            ## Set String
            String_Victim = 'Victim: U, Area: %d' % (Result_victim_U[0])
    else:
        print("No Detect")
        if (FLAG_DEBUG == 'ON') & (FLAG_DEBUG_MONITOR == 'ON'):
            ## Set String
            String_Victim = 'Victim: ?, Area: ?'
    if FLAG_DEBUG == 'ON':
        if FLAG_DEBUG_MONITOR == 'ON':
            # Show the three overlaps and the rectified patch side by side.
            img_result_add = cv2.hconcat([img_result_S, img_result_H, img_result_U, img_result_victim])
            # Result
            cv2.namedWindow("img_result_add[S, H, U, victim(Th)]", cv2.WINDOW_NORMAL)
            cv2.imshow("img_result_add[S, H, U, victim(Th)]", img_result_add)
    # print("[DEBUG_S]Area=%d, Perimeter=%d, Roundness=%0.2f" % (Result_victim_S[0], Result_victim_S[1], Result_victim_S[2]))
    # print("[DEBUG_H]Area=%d, Perimeter=%d, Roundness=%0.2f" % (Result_victim_H[0], Result_victim_H[1], Result_victim_H[2]))
    # print("[DEBUG_U]Area=%d, Perimeter=%d, Roundness=%0.2f" % (Result_victim_U[0], Result_victim_U[1], Result_victim_U[2]))
# Per-iteration timing: elapsed wall time and loop frequency in Hz.
END_TIME = time.time()
passegetime = END_TIME - START_TIME
frequancy = 1 / passegetime
#print("Time=%0.2f frequancy=%0.2f[Hz]" % (passegetime, frequancy))
# Poll the keyboard once per frame; 'q' (handled below) ends the program.
key = cv2.waitKey(1)
if key == ord('q'):
print("[UserSet BilateralFilter Prameta]\n"
"OperatorSize: %d \n"
"Standard Deviation on Color Space: %d \n"
"Standard Deviation on Distance Space: | |
        # NOTE(review): this is the tail of _GetLimits(); the signature and
        # the zero-argument branch are outside this chunk.
        elif len(args) == 6:
            # Seed each accumulator list with the caller-supplied limit.
            minx, maxx, miny, maxy, minz, maxz = tuple([[arg] for arg in args])
        else:
            raise ValueError("_Getlimits expects 0 or 6 arguments.")
        # Merge in the limits of every child wobject; children without
        # limits return None and are skipped.
        for ob in self.children:
            tmp = ob._GetLimits()
            if tmp is not None:
                limx, limy, limz = tmp
                minx.append(limx.min); maxx.append(limx.max)
                miny.append(limy.min); maxy.append(limy.max)
                minz.append(limz.min); maxz.append(limz.max)
        # No limits collected at all? Then there is nothing to report.
        if not (minx and maxx and miny and maxy and minz and maxz):
            return None
        # Collapse the accumulators to a single axis-aligned bounding box.
        x1, y1, z1 = tuple([min(val) for val in [minx, miny, minz]])
        x2, y2, z2 = tuple([max(val) for val in [maxx, maxy, maxz]])
        # Make pointset of the eight corner points of that box.
        pp = Pointset(3)
        for x in [x1, x2]:
            for y in [y1, y2]:
                for z in [z1, z2]:
                    pp.append(x,y,z)
        # Map each corner through this wobject's own transformations,
        # applied in reversed list order.
        for i in range(len(pp)):
            p = pp[i]
            for t in reversed(self._transformations):
                if isinstance(t, Transform_Translate):
                    p.x += t.dx
                    p.y += t.dy
                    p.z += t.dz
                elif isinstance(t, Transform_Scale):
                    p.x *= t.sx
                    p.y *= t.sy
                    p.z *= t.sz
                elif isinstance(t, Transform_Rotate):
                    # Rotation via quaternion; t.angle is in degrees.
                    angle = float(t.angle * np.pi / 180.0)
                    q = Quaternion.create_from_axis_angle(angle, t.ax, t.ay, t.az)
                    p = q.rotate_point(p)
            # Write the transformed corner back into the pointset.
            pp[i] = p
        # Axis-aligned limits of the transformed corners.
        xlim = misc.Range( pp[:,0].min(), pp[:,0].max() )
        ylim = misc.Range( pp[:,1].min(), pp[:,1].max() )
        zlim = misc.Range( pp[:,2].min(), pp[:,2].max() )
        return xlim, ylim, zlim
def _Transform(self):
""" _Transform()
Apply all listed transformations of this wobject.
"""
for t in self.transformations:
if not isinstance(t, Transform_Base):
continue
elif isinstance(t, Transform_Translate):
gl.glTranslate(t.dx, t.dy, t.dz)
elif isinstance(t, Transform_Scale):
gl.glScale(t.sx, t.sy, t.sz)
elif isinstance(t, Transform_Rotate):
gl.glRotate(t.angle, t.ax, t.ay, t.az)
class Position(object):
""" Position(x,y,w,h, wibject_instance)
The position class stores and manages the position of wibjects. Each
wibject has one Position instance associated with it, which can be
obtained (and updated) using its position property.
The position is represented using four values: x, y, w, h. The Position
object can also be indexed to get or set these four values.
Each element (x,y,w,h) can be either:
* The integer amount of pixels relative to the wibjects parent's position.
* The fractional amount (float value between 0.0 and 1.0) of the parent's width or height.
Each value can be negative. For x and y this simply means a negative
offset from the parent's left and top. For the width and height the
difference from the parent's full width/height is taken.
An example: a position (-10, 0.5, 150,-100), with a parent's size of
(500,500) is equal to (-10, 250, 150, 400) in pixels.
Remarks:
* fractional, integer and negative values may be mixed.
* x and y are considered fractional on <-1, 1>
* w and h are considered fractional on [-1, 1]
* the value 0 can always be considered to be in pixels
The position class also implements several "long-named" properties that
express the position in pixel coordinates. Internally a version in pixel
coordinates is buffered, which is kept up to date. These long-named
(read-only) properties are:
left, top, right, bottom, width, height,
Further, there are a set of properties which express the position in
absolute coordinates (not relative to the wibject's parent):
absLeft, absTop, absRight, absBottom
Finally, there are properties that return a two-element tuple:
topLeft, bottomRight, absTopLeft, absBottomRight, size
The method InPixels() returns a (copy) Position object which represents
the position in pixels.
"""
def __init__(self, x, y, w, h, owner):
# test owner
if not isinstance(owner , Wibject):
raise ValueError('A positions owner can only be a wibject.')
# set
self._x, self._y, self._w, self._h = x, y, w, h
# store owner using a weak reference
self._owner = weakref.ref(owner)
# init position in pixels and absolute (as a tuples)
self._inpixels = None
self._absolute = None
# do not _update() here, beacause the owner will not have assigned
# this object to its _position attribute yet.
# but we can calculate our own pixels
self._CalculateInPixels()
def Copy(self):
""" Copy()
Make a copy of this position instance.
"""
p = Position(self._x, self._y, self._w, self._h, self._owner())
p._inpixels = self._inpixels
p._absolute = self._absolute
return p
def InPixels(self):
""" InPixels()
Return a copy, but in pixel coordinates.
"""
p = Position(self.left,self.top,self.width,self.height, self._owner())
p._inpixels = self._inpixels
p._absolute = self._absolute
return p
def __repr__(self):
return "<Position %1.2f, %1.2f, %1.2f, %1.2f>" % (
self.x, self.y, self.w, self.h)
## For keeping _inpixels up-to-date
def _Update(self):
""" _Update()
Re-obtain the position in pixels. If the obtained position
differs from the current position-in-pixels, _Changed()
is called.
"""
# get old version, obtain and store new version
ip1 = self._inpixels + self._absolute
self._CalculateInPixels()
ip2 = self._inpixels + self._absolute
if ip2:
if ip1 != ip2: # also if ip1 is None
self._Changed()
def _Changed(self):
""" _Changed()
To be called when the position was changed.
Will fire the owners eventPosition and will call
_Update() on the position objects of all the owners
children.
"""
# only notify if this is THE position of the owner (not a copy)
owner = self._owner()
if owner and owner._position is self:
if hasattr(owner, 'eventPosition'):
owner.eventPosition.Fire()
#print 'firing position event for', owner
for child in owner._children:
if hasattr(child, '_position'):
child._position._Update()
def _GetFractionals(self):
""" Get a list which items are considered relative.
Also int()'s the items which are not.
"""
# init
fractionals = [0,0,0,0]
# test
for i in range(2):
if self[i] > -1 and self[i] < 1 and self[i]!=0:
fractionals[i] = 1
for i in range(2,4):
if self[i] >= -1 and self[i] <= 1 and self[i]!=0:
fractionals[i] = 1
# return
return fractionals
    def _CalculateInPixels(self):
        """ Compute and buffer this position in screen coordinates.
        Stores two 4-tuples: self._inpixels (relative to the parent) and
        self._absolute (relative to the figure). Nothing is returned.
        """
        # Which of (x, y, w, h) are fractional, and which are negative?
        fractionals = self._GetFractionals()
        negatives = [int(self[i]<0) for i in range(4)]
        # get owner (dereference the weak reference)
        owner = self._owner()
        # A figure (detected by its _SwapBuffers method) has no parent, so
        # its values are already absolute pixels.
        if hasattr(owner, '_SwapBuffers'):
            self._inpixels = (self._x, self._y, self._w, self._h)
            self._absolute = self._inpixels
            return
        # test if we can calculate
        if not isinstance(owner, Wibject):
            raise Exception("Can only calculate the position in pixels"+
                " if the position instance is owned by a wibject!")
        # else, the owner must have a parent...
        if owner.parent is None:
            print owner  # NOTE(review): Python 2 debug print left in -- consider removing
            raise Exception("Can only calculate the position in pixels"+
                " if the owner has a parent!")
        # Width/height of the parent, repeated so whwh[i] matches pos[i].
        ppos = owner.parent.position
        whwh = ppos.width, ppos.height
        whwh = (whwh[0], whwh[1], whwh[0], whwh[1])
        # calculate!
        pos = [self._x, self._y, self._w, self._h]
        if max(fractionals)==0 and max(negatives)==0:
            pass # all plain positive pixel values: no need to calculate
        else:
            for i in range(4):
                if fractionals[i]:
                    # Fraction of the parent's width/height.
                    pos[i] = pos[i]*whwh[i]
                if i>1 and negatives[i]:
                    # Negative w/h means "parent size minus this much".
                    pos[i] = whwh[i] + pos[i]
                # make sure it's int (even if user supplied floats > 1)
                pos[i] = int(pos[i])
        # abs pos is based on the inpixels version, but x,y corrected by the
        # parent's absolute offset (unless the parent is the top level).
        apos = [p for p in pos]
        if ppos._owner().parent:
            apos[0] += ppos.absLeft
            apos[1] += ppos.absTop
        # store both buffered versions
        self._inpixels = tuple(pos)
        self._absolute = tuple(apos)
## For getting and setting
@misc.DrawAfter
def Set(self, *args):
""" Set(*args)
Set(x, y, w, h) or Set(x, y).
"""
# if tuple or list was given
if len(args)==1 and hasattr(args[0],'__len__'):
args = args[0]
# apply
if len(args)==2:
self._x = args[0]
self._y = args[1]
elif len(args)==4:
self._x = args[0]
self._y = args[1]
self._w = args[2]
self._h = args[3]
else:
raise ValueError("Invalid number of arguments to position.Set().")
# we need an update now
self._Update()
@misc.DrawAfter
def Correct(self, dx=0, | |
self.key = "Yes"
# self.plotFunc()
# else:#only one parameter has been loaded in viewgraph
# #only one parameter is in history key and there are more than one point in the historyList for this parameters
# if (len(self.historyKeys)==1) and (len(self.historyList[self.historyKeys[-1][0]])>1):
# self.historyList[self.historyKeys[-1][0]].pop()
# tempHistoryList = self.historyList[self.historyKeys[-1][0]][-1]
# for i in range(len(self.parVals[self.historyKeys[-1][0]])):
# self.parVals[self.historyKeys[-1][0]][i]=round(tempHistoryList[i],self.numPrecisionY)
# if max(self.parVals[self.historyKeys[-1][0]])<=0:
# self.yScale = [-100,100]
# elif (max(self.parVals[self.historyKeys[-1][0]])-min(self.parVals[self.historyKeys[-1][0]]))<=100:
# self.yScale = [int(ceil(-2*max(self.parVals[self.historyKeys[-1][0]]))),int(ceil(2*max(self.parVals[self.historyKeys[-1][0]])))]
# else:
# self.yScale = [int(ceil(min(self.parVals[self.historyKeys[-1][0]])-0.1*(max(self.parVals[self.historyKeys[-1][0]])-min(self.parVals[self.historyKeys[-1][0]])))),int(ceil(max(self.parVals[self.historyKeys[-1][0]])+0.1*(max(self.parVals[self.historyKeys[-1][0]])-min(self.parVals[self.historyKeys[-1][0]]))))]
# self.key = "Yes"
# self.plotFunc()
# else:#number of elements in the history list for last parameter is 1
# #pop up a messageBox saying history list is exhausted
# #
# self.showInformation()
# elif str.lower(keyComb.encode('ascii','ignore')) == "ctrl+y":
# #copy the existing state
# if len(self.redo)>0:
# self.scaleChange = self.redo[-1][0]
# self.choice = self.redo[-1][1]
# self.par = self.redo[-1][2]
# self.unitMeas = self.redo[-1][3]
# self.before= self.redo[-1][4]
# self.numPrecisionX = self.redo[-1][5]
# self.numPrecisionY = self.redo[-1][6]
# self.NUR = self.redo[-1][7]
# self.parVals = deepcopy(self.redo[-1][8])
# self.historyList = deepcopy(self.redo[-1][9])
# self.historyKeys = self.redo[-1][10][:]
# self.historyKeysDup = deepcopy(self.redo[-1][11])
# self.xScale = self.redo[-1][12][:]
# self.yScale = self.redo[-1][13][:]
# self.mMotion = self.redo[-1][14][:]
# self.mPress = self.redo[-1][15][:]
# self.mRelease = self.redo[-1][16][:]
# self.redo.pop()
# self.key="Yes"
# self.plotFunc()
# else:
# self.showInformation()
def showInformation(self):
"""Show the information message
Keyword arguments:
self -- main window being displayed i.e. the current instance of the mainWindow class
Returns:
None
Displays a messagebox that informs user there's no previous action to be undone
"""
QtGui.QMessageBox.information(self, "Information", "History list is exhausted")
    def firstPlot(self):
        """Draw the single-parameter view graph from the newest history entry.

        Clears the axes, applies the stored x/y scales and labels, plots the
        latest historyList entry against parValRADI, redraws the canvas and
        resets self.key to "No".

        NOTE(review): historyList is indexed here like a list
        (historyList[len(historyList)-1]) while plotFunc() treats it as a
        dict keyed by parameter name -- confirm which shape this method
        actually receives.

        Returns:
            None
        """
        # ax = self.figure.add_subplot(111)
        self.ax.clear()
        self.ax.set_xlim(self.xScale[0],self.xScale[1])
        self.ax.set_ylim(self.yScale[0],self.yScale[1])
        self.ax.set_xlabel("RADI (arcsec)")
        self.ax.set_ylabel(self.par + "( "+self.unitMeas+ " )")
        # Plot the newest saved state as a dashed line with circle markers.
        self.ax.plot(self.parValRADI, self.historyList[len(self.historyList)-1],'--bo')
        self.ax.set_title('Plot')
        self.ax.set_xticks(self.parValRADI)
        # ax.set_yticks(np.arange(min(self.parVals[self.par]),max(self.parVals[self.par])+1,500))
        plt.tight_layout()
        self.canvas.draw()
        self.key = "No"
    def plotFunc(self):
        """Redraw the view graph(s).

        Three paths:
          * scaleChange == "Yes": rescale and replot every parameter in
            self.par from the newest historyList entry;
          * key == "Yes": delegate to firstPlot();
          * otherwise: the user is dragging a data point -- find the RADI
            sample within +/-3 of the mouse press, move that point to the
            current mouse y value, rescale according to self.choice and
            redraw.

        NOTE(review): the drag branch treats self.parVals as a flat sequence
        (self.parVals[j], max(self.parVals)) while the scaleChange branch
        treats it as a dict keyed by parameter name -- confirm callers swap
        the attribute's shape before each path; otherwise one branch raises.

        Returns:
            None
        """
        if self.scaleChange == "Yes":
            for i in range(len(self.par)):
                self.ax[i].clear()
                self.ax[i].set_xlim(self.xScale[0],self.xScale[1])
                # Small dynamic range (<=100): symmetric scale at +/-2*max;
                # otherwise pad the data range by 10% on both sides.
                if (max(self.parVals[self.par[i]])-min(self.parVals[self.par[i]]))<=100:
                    self.yScale[self.par[i]] = [int(ceil(-2*max(self.parVals[self.par[i]]))),int(ceil(2*max(self.parVals[self.par[i]])))]
                else:
                    self.yScale[self.par[i]] = [int(ceil(min(self.parVals[self.par[i]])-0.1*(max(self.parVals[self.par[i]])-min(self.parVals[self.par[i]])))),int(ceil(max(self.parVals[self.par[i]])+0.1*(max(self.parVals[self.par[i]])-min(self.parVals[self.par[i]]))))]
                self.ax[i].set_ylim(self.yScale[self.par[i]][0],self.yScale[self.par[i]][1])
                # for axes in self.figure.get_axes():
                #     axes.set_xlabel("RADI (arcsec)")
                #     axes.set_ylabel(self.par[i] + "( "+self.unitMeas[i]+ " )")
                self.ax[i].set_xlabel("RADI (arcsec)")
                self.ax[i].set_ylabel(self.par[i] + "( "+self.unitMeas[i]+ " )")
                self.ax[i].plot(self.parVals['RADI'], self.historyList[self.par[i]][len(self.historyList[self.par[i]])-1],'--bo')
                # self.ax[i].set_title('Plot')
                self.ax[i].set_xticks(self.parVals['RADI'])
            plt.tight_layout()
            self.canvas.draw()
            self.key = "No"
        if self.key=="Yes":
            self.firstPlot()
        #this re-plots the graph as long as the mouse is in motion and the right data point is clicked
        else:
            # counter = 0
            for j in range(len(self.parValRADI)):
                # A press within +/-3 arcsec of a RADI sample selects that point.
                if (self.mPress[0] < (self.parValRADI[j])+3) and (self.mPress[0] > (self.parValRADI[j])-3) and (self.mRelease[0]==None): #and (self.mPress[1] < (self.parVals[j])+3) and (self.mPress[1] > (self.parVals[j])-3):
                    # print self.parVals[self.par][j]
                    # print "before: ", self.parVals[self.plotArea][j]
                    # Move the selected data point to the current mouse y.
                    self.parVals[j] = self.mMotion[0]
                    # self.mPress[1] = self.mMotion[0]
                    # self.ax = self.figure.add_subplot(111)
                    self.ax.clear()
                    self.ax.set_xlim(self.xScale[0],self.xScale[1])
                    if self.choice == "Beyond Viewgraph":
                        # Only rescale when the dragged value nears the top
                        # (85% of ymax) or bottom (115% of ymin) of the view.
                        if self.mMotion[0] >= 0.85*self.yScale[1]:
                            # if np.subtract(max(self.parVals[i]),min(self.parVals[i]))==0:
                            #     self.yScale =
                            self.yScale = [int(ceil(min(self.parVals)-0.1*(max(self.parVals)-min(self.parVals)))),int(ceil(max(self.parVals)+0.1*(max(self.parVals)-min(self.parVals))))]
                        elif abs(self.mMotion[0]) <= abs(1.15*self.yScale[0]):
                            self.yScale = [int(ceil(min(self.parVals)-0.1*(max(self.parVals)-min(self.parVals)))),int(ceil(max(self.parVals)+0.1*(max(self.parVals)-min(self.parVals))))]
                            # self.yScale[0] -= (self.yScale[0]*0.5) if self.yScale[0]>0 else (self.yScale[0]*-0.5)
                            # self.yScale[1] += (self.yScale[1]*0.5) if self.yScale[1]>0 else (self.yScale[1]*-0.5)
                    elif self.choice == "Free":
                        # NOTE(review): both arms compute the same scale; the
                        # <=100 special case looks unfinished -- confirm.
                        if (max(self.parVals)-min(self.parVals))<=100:
                            self.yScale = [int(ceil(min(self.parVals)-0.1*(max(self.parVals)-min(self.parVals)))),int(ceil(max(self.parVals)+0.1*(max(self.parVals)-min(self.parVals))))]
                            # self.yScale = [int(ceil(-2*max(self.parVals[self.par]))),int(ceil(2*max(self.parVals[self.par])))]
                        else:
                            self.yScale = [int(ceil(min(self.parVals)-0.1*(max(self.parVals)-min(self.parVals)))),int(ceil(max(self.parVals)+0.1*(max(self.parVals)-min(self.parVals))))]
                    self.ax.set_ylim(self.yScale[0],self.yScale[1])
                    # for axes in self.figure.get_axes():
                    #     axes.set_xlabel("RADI (arcsec)")
                    #     axes.set_ylabel(self.par[i] + "( "+self.unitMeas[i]+ " )")
                    self.ax.set_xlabel("RADI (arcsec)")
                    self.ax.set_ylabel(self.par + "( "+self.unitMeas+ " )")
                    # self.ax.plot(self.parVals['RADI'], self.historyList[self.par][len(self.historyList[self.par])-1],'--bo')
                    self.ax.plot(self.parValRADI, self.parVals,'--bo')
                    # self.ax[i].set_title('Plot')
                    self.ax.set_xticks(self.parValRADI)
                    plt.tight_layout()
                    self.canvas.draw()
                    self.key = "No"
                    break
                # else:
                #     print "false",counter
                #     counter += 1
class SMWindow(QtGui.QWidget):
    """Scale Manager dialog.

    Lets the user edit, per parameter, the y-scale min/max and the scale
    behaviour ("Free" vs "Beyond Viewgraph"), plus the shared RADI x-range.
    Edits are committed into gwDict (keyed by parameter name) whenever the
    selected parameter changes; the caller wires btnUpdate/btnCancel.
    """
    def __init__(self,par,xVal,gwDict):
        super(SMWindow, self).__init__()
        # Current x (RADI) range, shown as placeholder text below.
        self.xMinVal = xVal[0]
        self.xMaxVal = xVal[1]
        # self.yMinVal = yVal[0]
        # self.yMaxVal = yVal[1]
        self.par = par          # list of parameter names
        self.gwDict = gwDict    # per-parameter [[ymin, ymax], behaviour]
        self.prevParVal = ""    # parameter whose values are currently shown
        self.counter = 0
        # Parameter selector combo box.
        self.parameter = QtGui.QComboBox()
        # self.parameter.setEditable(True)
        self.parameter.addItem("Select Parameter")
        for i in self.par:
            self.parameter.addItem(i)
        self.parameter.setAutoCompletion(True)
        self.parameter.setStyleSheet("QComboBox { combobox-popup: 0; }");
        self.parameter.setMaxVisibleItems(5)
        index = self.parameter.findText("Select Parameter",QtCore.Qt.MatchFixedString)
        self.parameter.setCurrentIndex(index)
        self.parameter.currentIndexChanged.connect(self.onChangeEvent)
        #run a for loop here to gather all they loaded parameters and populate as many text boxes
        # RADI (x-axis) min/max entries.
        self.xLabel = QtGui.QLabel("RADI")
        self.xMin = QtGui.QLineEdit()
        self.xMin.setPlaceholderText("RADI min ("+str(self.xMinVal)+")")
        self.xMax = QtGui.QLineEdit()
        self.xMax.setPlaceholderText("RADI max ("+str(self.xMaxVal)+")")
        self.xGrid = QtGui.QGridLayout()
        self.xGrid.setSpacing(10)
        self.xGrid.addWidget(self.xLabel,1,0)
        self.xGrid.addWidget(self.xMin,2,0)
        self.xGrid.addWidget(self.xMax,2,1)
        # Per-parameter y min/max entries.
        self.yMin = QtGui.QLineEdit()
        self.yMax = QtGui.QLineEdit()
        self.yGrid = QtGui.QGridLayout()
        self.yGrid.setSpacing(10)
        self.yGrid.addWidget(self.parameter,1,0)
        self.yGrid.addWidget(self.yMin,2,0)
        self.yGrid.addWidget(self.yMax,2,1)
        # Scale-behaviour radio buttons.
        self.hbox = QtGui.QHBoxLayout()
        self.hbox.addStretch(1)
        self.radioFree = QtGui.QRadioButton("Free")
        self.radioViewG = QtGui.QRadioButton("Beyond Viewgraph")
        self.hbox.addWidget(self.radioFree)
        self.hbox.addWidget(self.radioViewG)
        # Update/Cancel buttons; signals are connected by the caller.
        self.hboxBtns = QtGui.QHBoxLayout()
        self.hboxBtns.addStretch(1)
        self.btnUpdate = QtGui.QPushButton('Update', self)
        # self.btnUpdate.clicked.connect(self.updateScale)
        self.btnCancel = QtGui.QPushButton('Cancel', self)
        #self.btnCancel.clicked.connect(self.close)
        self.hboxBtns.addWidget(self.btnUpdate)
        self.hboxBtns.addWidget(self.btnCancel)
        # Stack everything in a form layout.
        self.fbox = QtGui.QFormLayout()
        self.fbox.addRow(self.xGrid)
        self.fbox.addRow(self.yGrid)
        # self.fbox.addRow(self.parameter)
        # self.fbox.addRow(self.yhbox)
        self.fbox.addRow(QtGui.QLabel("Scale Behaviour"),self.hbox)
        self.fbox.addRow(self.hboxBtns)
        self.setLayout(self.fbox)
        self.setFocus()
        self.setWindowTitle("Scale Manager")
        self.setGeometry(300, 300, 300, 150)
        center(self)
        self.setFocus()
    def onChangeEvent(self):
        """Handle a parameter-selection change: commit the edits for the
        previously shown parameter into gwDict, then load the newly
        selected parameter's stored values into the placeholder texts and
        radio buttons.

        NOTE(review): int(str(self.yMin.text())) raises ValueError on
        non-numeric input, and yMax is read whenever yMin is non-empty --
        confirm validation happens upstream.
        """
        if not(len(self.yMin.text()) == 0):
            self.gwDict[self.prevParVal][0][0] = int(str(self.yMin.text()))
            self.gwDict[self.prevParVal][0][1] = int(str(self.yMax.text()))
            self.gwDict[self.prevParVal][1] = "Free" if self.radioFree.isChecked() else "Beyond Viewgraph"
        for i in self.par:
            if str(self.parameter.currentText()) == i:
                self.yMin.clear()
                self.yMin.setPlaceholderText(i+" min ("+str(self.gwDict[i][0][0])+")")
                self.yMax.clear()
                self.yMax.setPlaceholderText(i+" max ("+str(self.gwDict[i][0][1])+")")
                if str(self.gwDict[i][1]) == "Free":
                    self.radioFree.setChecked(True)
                    self.radioViewG.setChecked(False)
                else:
                    self.radioFree.setChecked(False)
                    self.radioViewG.setChecked(True)
                # Remember which parameter the fields now describe.
                self.prevParVal = i
class ParamSpec(QtGui.QWidget):
    """Small "Add Parameter" dialog: a combo box of known parameter names
    plus a free-text unit-of-measurement field and OK/Cancel buttons.
    The caller connects btnOK/btnCancel and reads parameter/unitMeasurement.
    """
    def __init__(self,par):
        super(ParamSpec, self).__init__()
        self.par = par
        # Editable, auto-completing combo box listing every known parameter,
        # pre-selected on the "Select Parameter" prompt entry.
        self.parameterLabel = QtGui.QLabel("Parameter")
        self.parameter = QtGui.QComboBox()
        self.parameter.setEditable(True)
        self.parameter.addItem("Select Parameter")
        for name in self.par:
            self.parameter.addItem(name)
        self.parameter.setAutoCompletion(True)
        self.parameter.setStyleSheet("QComboBox { combobox-popup: 0; }")
        self.parameter.setMaxVisibleItems(6)
        prompt = self.parameter.findText("Select Parameter", QtCore.Qt.MatchFixedString)
        self.parameter.setCurrentIndex(prompt)
        # Unit-of-measurement entry.
        self.uMeasLabel = QtGui.QLabel("Unit Measurement")
        self.unitMeasurement = QtGui.QLineEdit()
        # self.unitMeasurement.setPlaceholderText("Unit Measurement")
        # Grid: label/widget pairs on rows 1-2, button row below.
        self.grid = QtGui.QGridLayout()
        self.grid.setSpacing(10)
        self.grid.addWidget(self.parameterLabel, 1, 0)
        self.grid.addWidget(self.parameter, 1, 1)
        self.grid.addWidget(self.uMeasLabel, 2, 0)
        self.grid.addWidget(self.unitMeasurement, 2, 1)
        self.btnOK = QtGui.QPushButton('OK', self)
        self.btnCancel = QtGui.QPushButton('Cancel', self)
        self.hbox = QtGui.QHBoxLayout()
        self.hbox.addStretch(1)
        self.hbox.addWidget(self.btnOK)
        self.hbox.addWidget(self.btnCancel)
        self.grid.addLayout(self.hbox, 3, 1)
        self.setLayout(self.grid)
        # Window chrome: title, size, centered on screen.
        self.setWindowTitle("Add Parameter")
        self.setGeometry(300, 300, 300, 150)
        center(self)
        self.setFocus()
class mainWindow(QtGui.QMainWindow):
key = "Yes"
ncols = 1; nrows = 4
currAx = "ax"; plotArea = "par"; ax = "someAxis"
scaleChange = "No" ; scaleChangeR = 0
choice = "Beyond Viewgraph"; choiceR = 0
INSET = 'None'
par = ['VROT','SBR','INCL','PA']; parR = 0
unitMeas = ['km/s','Jy km/s/sqarcs','degrees','degrees']; unitMeasR = 0
tmpDeffile = os.getcwd() + "/tmpDeffile.def"
gwObjects = []
t = 0
scrollWidth = 0; scrollHeight = 0
before = 0; beforeR = 0
numPrecisionY = 0; numPrecisionYR = 0
numPrecisionX = 0; numPrecisionXR = 0
NUR = 0;NURR = 0
data = []
parVals = {}; parValsR = 0
historyList = {}; historyListR = {}
historyKeys = [['VROT','km/s',1]]; historyKeysR = [[]]
historyKeysDup = {'VROT':1}; historyKeysDupR = {}
xScale=[0,0]; xScaleR=[0,0]
yScale={'VROT':[0,0]}; yScaleR=[0,0];
redo = []
mPress=[-5]; mPressR = 0
mRelease=['None']; mReleaseR = 0
mMotion=[-5]; mMotionR = 0
# yVal = 0; yValR =0
    def __init__(self):
        """Create the main window and delegate all widget setup to initUI()."""
        super(mainWindow, self).__init__()
        self.initUI()
    def initUI(self):
        """Build the central widget: an Open File button above a resizable
        scroll area that will hold the view-graph grid, then create the
        menu actions and menus.
        """
        self.showMaximized()
        self.setWindowTitle('TiRiFiG')
        self.cWidget = QtGui.QWidget(self)
        self.setCentralWidget(self.cWidget)
        self.vLayout = QtGui.QVBoxLayout(self.cWidget)
        btn1 = QtGui.QPushButton('&Open File', self.cWidget) #you can ignore the parent and it will still work
        btn1.setFixedSize(80,30)
        # btn1.setFlat(True)
        btn1.setToolTip('Open .def file')
        btn1.clicked.connect(self.openDef)
        self.vLayout.addWidget(btn1)
        # Scroll area hosting a grid layout; view graphs are added to
        # gridLayoutScroll elsewhere.
        self.scrollArea = QtGui.QScrollArea(self.cWidget) # you can ignore the parent and it will still work
        self.scrollArea.setWidgetResizable(True)
        self.scrollAreaContent = QtGui.QWidget(self.scrollArea)
        self.gridLayoutScroll = QtGui.QGridLayout(self.scrollAreaContent)
        self.scrollArea.setWidget(self.scrollAreaContent)
        self.vLayout.addWidget(self.scrollArea)
        # print self.width(), self.height()
        self.createActions()
        self.createMenus()
    def createActions(self):
        """Create every QAction used by the menu bar and connect its
        triggered signal to the matching handler method.
        """
        self.exitAction = QtGui.QAction("&Exit", self)
        self.exitAction.setShortcut("Ctrl+Q")
        self.exitAction.setStatusTip('Leave the app')
        self.exitAction.triggered.connect(self.quitApp)
        self.openFile = QtGui.QAction("&Open File", self)
        # self.openFile.setShortcut("Ctrl+O")
        self.openFile.setStatusTip('Load .def file to be plotted')
        self.openFile.triggered.connect(self.openDef)
        #
        self.saveChanges = QtGui.QAction("&Save", self)
        ## openEditor.setShortcut("Ctrl+S")
        self.saveChanges.setStatusTip('Save changes to .def file')
        self.saveChanges.triggered.connect(self.saveAll)
        #
        self.saveAsFile = QtGui.QAction("&Save as...",self)
        self.saveAsFile.setStatusTip('Create another .def file with current paramater values')
        self.saveAsFile.triggered.connect(self.saveAsAll)
        #
        self.openTextEditor = QtGui.QAction("&Open Text Editor...",self)
        self.openTextEditor.setStatusTip('View the current open .def file in preferred text editor')
        self.openTextEditor.triggered.connect(self.openEditor) # function yet to be written
        #
        self.startTF = QtGui.QAction("&Start TiriFiC",self)
        self.startTF.setStatusTip('Starts TiRiFiC from terminal')
        self.startTF.triggered.connect(self.startTiriFiC)
        #
        self.winSpec = QtGui.QAction("&Window Specification",self)
        self.winSpec.setStatusTip('Determines the number of rows and columns in a plot')
        self.winSpec.triggered.connect(self.setRowCol)
        #
        self.scaleMan = QtGui.QAction("&Scale Manager",self)
        self.scaleMan.setStatusTip('Manages behaviour of scale and min and max values')
        self.scaleMan.triggered.connect(self.SMobj)
        #
        self.paraDef = QtGui.QAction("&Parameter Definition",self)
        self.paraDef.setStatusTip('Determines which parameter is plotted')
        self.paraDef.triggered.connect(self.paraObj)
        # self.sm.radioFree.clicked.connect(self.getOptF)
        # self.sm.radioViewG.clicked.connect(self.getOptV)
        # self.sm.btnUpdate.clicked.connect(self.updateScale)
        # self.sm.btnCancel.clicked.connect(self.sm.close)
    def createMenus(self):
        """Assemble the File / Run / Preferences menus from the actions
        created in createActions().
        """
        mainMenu = self.menuBar()
        self.fileMenu = mainMenu.addMenu('&File')
        self.fileMenu.addAction(self.openFile)
        self.fileMenu.addAction(self.saveChanges)
        self.fileMenu.addAction(self.saveAsFile)
        self.fileMenu.addAction(self.exitAction)
        # editMenu = mainMenu.addMenu('&Edit')
        self.runMenu = mainMenu.addMenu('&Run')
        self.runMenu.addAction(self.openTextEditor)
        self.runMenu.addAction(self.startTF)
        self.prefMenu = mainMenu.addMenu('&Preferences')
        self.prefMenu.addAction(self.scaleMan)
        self.prefMenu.addAction(self.paraDef)
        self.prefMenu.addAction(self.winSpec)
# def reloadSM(self):
# self.sm = SMWindow(self.gw.xScale[0],self.gw.xScale[1],self.gw.yScale[0],self.gw.yScale[1],self.gw.par)
# self.sm.show()
# self.sm.radioFree.clicked.connect(self.getOptF)
# self.sm.radioViewG.clicked.connect(self.getOptV)
# self.sm.btnUpdate.clicked.connect(self.updateScale)
# self.sm.btnCancel.clicked.connect(self.sm.close)
# self.ps.btnOK.clicked.connect(self.paramDef)
# self.ps.btnCancel.clicked.connect(self.close)
#
# def getOptF(self):
# self.gw.choice = "Free"
#
# def getOptV(self):
# self.gw.choice = "Beyond Viewgraph"
#
def quitApp(self):
if self.t != 0:
self.t.cancel()
| |
# test/usgs_nwis_hdf5_test.py (from repo sblack-usu/ulmo)
from builtins import range
import os
import shutil
import datetime
import freezegun
import pandas
import pytest
from ulmo.usgs import nwis
import test_util
TEST_FILE_DIR = os.path.abspath('tmp')
@pytest.fixture
def test_file_path(request):
    """Per-test scratch path inside TEST_FILE_DIR, named after the
    requesting test function so tests never share HDF5 files.
    """
    return os.path.join(TEST_FILE_DIR, request.function.__name__)
def setup_module(module):
    """Start the module's tests with a fresh, empty scratch directory,
    removing leftovers from any earlier aborted run.
    """
    if os.path.exists(TEST_FILE_DIR):
        shutil.rmtree(TEST_FILE_DIR)
    os.makedirs(TEST_FILE_DIR)
def teardown_module(module):
    """Remove the scratch directory created by setup_module."""
    shutil.rmtree(TEST_FILE_DIR)
def test_update_site_list(test_file_path):
    """Importing the RI daily and instantaneous site files must yield 64
    sites, and a sample of parsed records must match field-for-field.
    """
    site_files = [
        os.path.join('usgs','nwis', 'RI_daily.xml'),
        os.path.join('usgs','nwis', 'RI_instantaneous.xml'),
    ]
    for site_file in site_files:
        test_site_file = test_util.get_test_file_path(site_file)
        nwis.hdf5.update_site_list(path=test_file_path,
                input_file=test_site_file, autorepack=False)
    sites = nwis.hdf5.get_sites(test_file_path)
    assert len(sites) == 64
    # Expected records: cover the uses_dst == False case plus sites that
    # appear in only one of the two input files.
    test_sites = {
        # uses_dst == False
        '01111410': {
            'agency': 'USGS',
            'code': '01111410',
            'county': '44007',
            'huc': '01090003',
            'location': {
                'latitude': '41.9409318',
                'longitude': '-71.6481214',
                'srs': 'EPSG:4326'
            },
            'name': 'CHEPACHET RIVER WEST OF GAZZA RD AT GAZZAVILLE, RI',
            'state_code': '44',
            'network': 'NWIS',
            'site_type': 'ST',
            'timezone_info': {
                'default_tz': {
                    'abbreviation': 'EST',
                    'offset': '-05:00'
                },
                'dst_tz': {
                    'abbreviation': 'EDT',
                    'offset': '-04:00',
                },
                'uses_dst': False,
            }
        },
        # only in RI_daily
        '01116300': {
            'agency': 'USGS',
            'code': '01116300',
            'county': '44007',
            'huc': '01090004',
            'location': {
                'latitude': '41.7564892',
                'longitude': '-71.4972824',
                'srs': 'EPSG:4326'
            },
            'name': '<NAME> CRANSTON, RI',
            'network': 'NWIS',
            'site_type': 'ST',
            'state_code': '44',
            'timezone_info': {
                'default_tz': {'abbreviation': 'EST', 'offset': '-05:00'},
                'dst_tz': {'abbreviation': 'EDT', 'offset': '-04:00'},
                'uses_dst': True
            },
        },
        # only in RI_instantaneous
        '01115170': {
            'agency': 'USGS',
            'code': '01115170',
            'county': '44007',
            'huc': '01090004',
            'location': {
                'latitude': '41.84093269',
                'longitude': '-71.584508',
                'srs': 'EPSG:4326',
            },
            'name': 'MOSWANSICUT STREAM NR NORTH SCITUATE, RI',
            'network': 'NWIS',
            'site_type': 'ST',
            'state_code': '44',
            'timezone_info': {
                'default_tz': {'abbreviation': 'EST', 'offset': '-05:00'},
                'dst_tz': {'abbreviation': 'EDT', 'offset': '-04:00'},
                'uses_dst': True
            },
        },
    }
    for test_code, test_value in test_sites.items():
        assert sites[test_code] == test_value
def test_update_site_list_with_changes(test_file_path):
    """A second import with updated metadata for the same site code must
    overwrite the stored record (here the site name changes between files).
    """
    # Pairs of (input file, expected stored record after importing it).
    site_files = [
        (os.path.join('usgs','nwis', 'RI_daily.xml'), {
            'agency': 'USGS',
            'code': '01106000',
            'county': '44005',
            'huc': '01090002',
            'location': {'latitude': '41.5584366',
                'longitude': '-71.12921047',
                'srs': 'EPSG:4326'},
            'name': '<NAME> ADAMSVILLE, RI',
            'network': 'NWIS',
            'site_type': 'ST',
            'state_code': '44',
            'timezone_info': {
                'default_tz': {'abbreviation': 'EST', 'offset': '-05:00'},
                'dst_tz': {'abbreviation': 'EDT', 'offset': '-04:00'},
                'uses_dst': True}}),
        (os.path.join('usgs','nwis', 'RI_daily_update.xml'), {
            'agency': 'USGS',
            'code': '01106000',
            'county': '44005',
            'huc': '01090002',
            'location': {'latitude': '41.5584366',
                'longitude': '-71.12921047',
                'srs': 'EPSG:4326'},
            'name': '<NAME>',
            'network': 'NWIS',
            'site_type': 'ST',
            'state_code': '44',
            'timezone_info': {
                'default_tz': {'abbreviation': 'EST', 'offset': '-05:00'},
                'dst_tz': {'abbreviation': 'EDT', 'offset': '-04:00'},
                'uses_dst': True}}),
    ]
    for test_file, test_site in site_files:
        test_site_file = test_util.get_test_file_path(test_file)
        nwis.hdf5.update_site_list(path=test_file_path,
                input_file=test_site_file, autorepack=False)
        sites = nwis.hdf5.get_sites(path=test_file_path)
        test_code = test_site['code']
        assert sites[test_code] == test_site
def test_sites_table_remains_unique(test_file_path):
    """Importing two site files (daily + instantaneous) must not create
    duplicate rows in the HDF5 'sites' table: the index stays unique.
    """
    test_file_path = os.path.join(test_file_path, "test.h5")
    site_files = [
        os.path.join('usgs','nwis', 'RI_daily.xml'),
        os.path.join('usgs','nwis', 'RI_instantaneous.xml'),
    ]
    for site_file in site_files:
        test_site_file = test_util.get_test_file_path(site_file)
        nwis.hdf5.update_site_list(path=test_file_path,
                input_file=test_site_file, autorepack=False)
    # pandas.io.pytables.get_store() was deprecated in pandas 0.21 and has
    # since been removed; HDFStore is the supported context-manager
    # equivalent and closes the file on exit.
    with pandas.HDFStore(test_file_path) as store:
        sites_df = store.select('sites')
    # A unique index means no site row was written twice.
    assert len(sites_df) == len(set(sites_df.index))
def test_get_site(test_file_path):
    """get_site() must return the fully parsed record for a site imported
    from a single-site daily file.
    """
    site_code = '08068500'
    site_data_file = os.path.join('usgs','nwis', 'site_%s_daily.xml' % site_code)
    input_file = test_util.get_test_file_path(site_data_file)
    nwis.hdf5.update_site_list(path=test_file_path,
            input_file=input_file, autorepack=False)
    site = nwis.hdf5.get_site(site_code, path=test_file_path)
    assert site == {
        'agency': 'USGS',
        'code': '08068500',
        'county': '48339',
        'huc': '12040102',
        'location': {
            'latitude': '30.11049517',
            'longitude': '-95.4363275',
            'srs': 'EPSG:4326'
        },
        'name': 'Spring Ck nr Spring, TX',
        'network': 'NWIS',
        'site_type': 'ST',
        'state_code': '48',
        'timezone_info': {
            'default_tz': {'abbreviation': 'CST', 'offset': '-06:00'},
            'dst_tz': {'abbreviation': 'CDT', 'offset': '-05:00'},
            'uses_dst': True
        },
    }
def test_get_sites_isnt_cached_between_calls(test_file_path):
    """get_sites must re-read the store on every call: after the HDF5
    file is deleted the result is empty, not stale cached data."""
    h5_path = os.path.join(test_file_path, "test.h5")
    input_file = test_util.get_test_file_path(
        os.path.join('usgs', 'nwis', 'RI_daily.xml'))
    nwis.hdf5.update_site_list(input_file=input_file, path=h5_path,
                               autorepack=False)
    assert len(nwis.hdf5.get_sites(path=h5_path)) > 0

    if os.path.exists(h5_path):
        os.remove(h5_path)

    assert len(nwis.hdf5.get_sites(path=h5_path)) == 0
def test_empty_update_list_doesnt_error(test_file_path):
    """Updating from a file that yields no site entries succeeds and
    leaves the site list empty."""
    code = '98068500'
    input_file = test_util.get_test_file_path(
        os.path.join('usgs','nwis', 'site_%s_daily.xml' % code))

    # nothing has been written yet
    assert nwis.hdf5.get_sites(path=test_file_path) == {}

    nwis.hdf5.update_site_list(path=test_file_path,
            input_file=input_file, autorepack=False)

    # the input carries no site entries, so the list stays empty
    assert nwis.hdf5.get_sites(path=test_file_path) == {}
def test_get_site_for_missing_raises_lookup(test_file_path):
    """get_site raises LookupError for a code absent from the site list."""
    site_code = '08068500'
    site_data_file = os.path.join('usgs','nwis', 'site_%s_daily.xml' % site_code)
    input_file = test_util.get_test_file_path(site_data_file)
    nwis.hdf5.update_site_list(path=test_file_path,
            input_file=input_file, autorepack=False)

    missing_code = '98068500'
    # keep only the call expected to raise inside the context manager so
    # nothing else can accidentally satisfy the expectation
    with pytest.raises(LookupError):
        nwis.hdf5.get_site(missing_code, path=test_file_path)
def test_non_usgs_site(test_file_path):
    """Instantaneous data for a non-USGS network site is stored and can
    be read back (the series should be substantial)."""
    code = '07335390'
    data_file = test_util.get_test_file_path(
        os.path.join('usgs','nwis', 'site_%s_instantaneous.xml' % code))
    nwis.hdf5.update_site_data(code, period='all',
            path=test_file_path, input_file=data_file, autorepack=False)

    stored = nwis.hdf5.get_site_data(code, path=test_file_path)
    assert len(stored['00062:00011']['values']) > 1000
def test_remove_values(test_file_path):
    """remove_values accepts timestamps as strings or datetimes; the
    matching points stay in the series but with value=None."""
    from datetime import datetime
    code = '07335390'
    param = '00062:00011'
    removals = {
        param: ['2012-10-25 06:00', '2012-10-25 23:00',
                '2012-10-30 15:00:00', datetime(2012, 11, 15, 13)]
    }
    data_file = test_util.get_test_file_path(
        os.path.join('usgs','nwis', 'site_%s_instantaneous.xml' % code))
    nwis.hdf5.update_site_data(code, period='all',
            path=test_file_path, input_file=data_file, autorepack=False)
    nwis.hdf5.remove_values(code, removals, path=test_file_path,
            autorepack=False)

    # the removed points are expected back as blanked records
    blanked = [
        dict(datetime=stamp, last_checked=None, last_modified=None,
             qualifiers="P", value=None)
        for stamp in ("2012-10-25T01:00:00-05:00",
                      "2012-10-25T18:00:00-05:00",
                      "2012-10-30T10:00:00-05:00",
                      "2012-11-15T07:00:00-06:00")
    ]
    stored_values = nwis.hdf5.get_site_data(
        code, path=test_file_path)[param]['values']
    for expected in blanked:
        assert expected in stored_values
def test_remove_values_with_missing_code(test_file_path):
    """An unknown parameter code in the removal dict is ignored without
    error, while removals for valid codes are still applied."""
    code = '08068500'
    removals = {
        '12345:0000': ['2010-01-01'],
        '00010:00002': ['2012-12-10']
    }
    data_file = test_util.get_test_file_path(
        os.path.join('usgs','nwis', 'site_%s_daily.xml' % code))
    nwis.hdf5.update_site_data(code, period='all', path=test_file_path,
            input_file=data_file, autorepack=False)
    nwis.hdf5.remove_values(code, removals, path=test_file_path,
            autorepack=False)

    # the valid code's point was blanked (value=None) despite the bogus code
    blanked = dict(datetime="2012-12-10T00:00:00", last_checked=None,
                   last_modified=None, qualifiers="P", value=None)
    stored = nwis.hdf5.get_site_data(code, path=test_file_path)
    assert blanked in stored['00010:00002']['values']
def test_site_data_is_sorted(test_file_path):
    """Stored values come back in strictly increasing datetime order."""
    code = '01117800'
    data_file = test_util.get_test_file_path(
        os.path.join('usgs','nwis', 'site_%s_daily.xml' % code))
    nwis.hdf5.update_site_data(code, path=test_file_path,
            input_file=data_file, autorepack=False)

    values = nwis.hdf5.get_site_data(
        code, path=test_file_path)['00060:00003']['values']
    # compare consecutive pairs
    assert all(earlier['datetime'] < later['datetime']
               for earlier, later in zip(values, values[1:]))
def test_update_site_data_basic_data_parsing(test_file_path):
    """Spot-check parsed daily values, including multi-token qualifier
    strings and the -999999 equipment-problem sentinel."""
    code = '01117800'
    data_file = test_util.get_test_file_path(
        os.path.join('usgs','nwis', 'site_%s_daily.xml' % code))
    nwis.hdf5.update_site_data(code, path=test_file_path,
            input_file=data_file, autorepack=False)
    stored = nwis.hdf5.get_site_data(code, path=test_file_path)

    expected = [
        dict(datetime="1964-01-23T00:00:00", last_checked=None, last_modified=None, qualifiers="A", value='240'),
        dict(datetime="1964-08-22T00:00:00", last_checked=None, last_modified=None, qualifiers="A", value='7.9'),
        dict(datetime="2011-12-15T00:00:00", last_checked=None, last_modified=None, qualifiers="P Eqp", value='-999999'),
        dict(datetime="2012-01-15T00:00:00", last_checked=None, last_modified=None, qualifiers="P e", value='97'),
        dict(datetime="2012-06-05T00:00:00", last_checked=None, last_modified=None, qualifiers="P", value='74'),
    ]
    stored_values = stored['00060:00003']['values']
    for record in expected:
        assert record in stored_values
def test_site_data_filter_by_one_parameter_code(test_file_path):
    """Filtering by a single parameter code yields the same series the
    unfiltered call returns for that code."""
    code = '08068500'
    param = '00065:00003'
    data_file = test_util.get_test_file_path(
        'usgs/nwis/site_%s_daily.xml' % code)
    nwis.hdf5.update_site_data(code, path=test_file_path,
            input_file=data_file, autorepack=False)

    everything = nwis.hdf5.get_site_data(code, path=test_file_path)
    filtered = nwis.hdf5.get_site_data(code, parameter_code=param,
                                       path=test_file_path)
    assert filtered[param] == everything[param]
def test_site_data_filter_by_multiple_parameter_codes(test_file_path):
    """Filtering by a list of parameter codes returns the matching
    series; codes with no data (e.g. 'nonexistent') are simply absent."""
    site_code = '08068500'
    parameter_code = ['00060:00003', '00065:00003', 'nonexistent']
    site_data_file = test_util.get_test_file_path(
        'usgs/nwis/site_%s_daily.xml' % site_code)
    nwis.hdf5.update_site_data(site_code, path=test_file_path,
            input_file=site_data_file, autorepack=False)
    all_site_data = nwis.hdf5.get_site_data(site_code, path=test_file_path)
    site_data = nwis.hdf5.get_site_data(site_code, parameter_code=parameter_code, path=test_file_path)
    for code in parameter_code:
        # membership tests work directly on the dict; no need to
        # materialize a keys list
        if code in site_data:
            assert site_data[code] == all_site_data[code]
def test_site_data_filter_by_date_all_param(test_file_path):
    """With a start date, every returned series begins at or after it."""
    site_code = '08068500'
    date_str = '2000-01-01'
    site_data_file = test_util.get_test_file_path(
        'usgs/nwis/site_%s_daily.xml' % site_code)
    nwis.hdf5.update_site_data(site_code, path=test_file_path,
            input_file=site_data_file, autorepack=False)
    site_data = nwis.hdf5.get_site_data(site_code, path=test_file_path, start=date_str)

    # parse the threshold once instead of on every loop iteration
    start = datetime.datetime.strptime(date_str, '%Y-%m-%d')
    # the parameter codes themselves are irrelevant here, so iterate
    # only the values (the unused 'parameter_code' local is gone too)
    for data in site_data.values():
        first_value = data['values'][0]
        assert datetime.datetime.strptime(
            first_value["datetime"], '%Y-%m-%dT%H:%M:%S') >= start
def test_site_data_filter_by_date_single_param(test_file_path):
    """A date-filtered fetch starts a single parameter's series at or
    after the requested start date."""
    code = '08068500'
    param = '00065:00003'
    start_str = '2000-01-01'
    data_file = test_util.get_test_file_path(
        'usgs/nwis/site_%s_daily.xml' % code)
    nwis.hdf5.update_site_data(code, path=test_file_path,
            input_file=data_file, autorepack=False)
    stored = nwis.hdf5.get_site_data(code, path=test_file_path,
                                     start=start_str)

    earliest = stored[param]['values'][0]["datetime"]
    assert (datetime.datetime.strptime(earliest, '%Y-%m-%dT%H:%M:%S')
            >= datetime.datetime.strptime(start_str, '%Y-%m-%d'))
def test_site_data_update_site_list_with_multiple_updates(test_file_path):
    """Two successive (clock-frozen) updates: last_checked always moves
    to the newest fetch, last_modified only moves for records whose
    content actually changed."""
    first_timestamp = '2013-01-01T01:01:01'
    second_timestamp = '2013-02-02T02:02:02'
    site_code = '01117800'
    site_data_file = test_util.get_test_file_path(
        'usgs/nwis/site_%s_daily.xml' % site_code)
    with test_util.mocked_urls(site_data_file):
        with freezegun.freeze_time(first_timestamp):
            nwis.hdf5.update_site_data(site_code, path=test_file_path,
                    autorepack=False)
    site_data = nwis.hdf5.get_site_data(site_code, path=test_file_path)
    last_value = site_data['00060:00003']['values'][-1]
    assert first_timestamp == last_value['last_checked'] == last_value['last_modified']

    update_data_file = test_util.get_test_file_path(os.path.join(
        'usgs', 'nwis', 'site_%s_daily_update.xml' % site_code))
    with test_util.mocked_urls(update_data_file):
        with freezegun.freeze_time(second_timestamp):
            nwis.hdf5.update_site_data(site_code, path=test_file_path,
                    autorepack=False)
    updated_site_data = nwis.hdf5.get_site_data(site_code, path=test_file_path)
    updated_values = updated_site_data['00060:00003']['values']
    last_value = updated_values[-1]
    assert last_value['last_checked'] != first_timestamp
    assert second_timestamp == last_value['last_checked'] == last_value['last_modified']

    original_timestamp = first_timestamp
    modified_timestamp = second_timestamp

    test_values = [
        dict(datetime="1963-01-23T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="A", value='7'),
        dict(datetime="1964-01-23T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="A", value='1017'),
        dict(datetime="1964-01-24T00:00:00", last_checked=original_timestamp, last_modified=original_timestamp, qualifiers="A", value='191'),
        dict(datetime="1964-08-22T00:00:00", last_checked=original_timestamp, last_modified=original_timestamp, qualifiers="A", value='7.9'),
        dict(datetime="1969-05-26T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="A", value='1080'),
        dict(datetime="2011-12-06T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="P", value='222'),
        dict(datetime="2011-12-15T00:00:00", last_checked=original_timestamp, last_modified=original_timestamp, qualifiers="P Eqp", value='-999999'),
        dict(datetime="2012-01-15T00:00:00", last_checked=original_timestamp, last_modified=original_timestamp, qualifiers="P e", value='97'),
        dict(datetime="2012-05-25T00:00:00", last_checked=modified_timestamp, last_modified=original_timestamp, qualifiers="P", value='56'),
        dict(datetime="2012-05-26T00:00:00", last_checked=modified_timestamp, last_modified=original_timestamp, qualifiers="P", value='55'),
        dict(datetime="2012-05-27T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="A", value='52'),
        dict(datetime="2012-05-28T00:00:00", last_checked=modified_timestamp, last_modified=original_timestamp, qualifiers="P", value='48'),
        dict(datetime="2012-05-29T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="P", value='1099'),
        dict(datetime="2012-05-30T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="P", value='1098'),
        dict(datetime="2012-05-31T00:00:00", last_checked=modified_timestamp, last_modified=original_timestamp, qualifiers="P", value='41'),
        dict(datetime="2012-06-01T00:00:00", last_checked=modified_timestamp, last_modified=original_timestamp, qualifiers="P", value='37'),
        dict(datetime="2012-06-02T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="P", value='1097'),
        dict(datetime="2012-06-03T00:00:00", last_checked=modified_timestamp, last_modified=original_timestamp, qualifiers="P", value='69'),
        dict(datetime="2012-06-04T00:00:00", last_checked=modified_timestamp, last_modified=original_timestamp, qualifiers="P", value='81'),
        dict(datetime="2012-06-05T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="P", value='1071'),
        dict(datetime="2012-06-06T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="P", value='2071'),
    ]

    for test_value in test_values:
        # membership gives a clean assertion failure on a miss;
        # list.index() raises ValueError instead, and its result is
        # always >= 0, so the old '.index(test_value) >= 0' could
        # never fail as an assertion
        assert test_value in updated_values
def test_last_refresh_gets_updated(test_file_path):
    """_get_last_refresh tracks when a site was fetched over the
    network: clock-frozen fetches stamp it, a file-based update clears
    it, and a later fetch stamps it again."""
    test_file_path = os.path.join(test_file_path, "test.h5")
    first_timestamp = '2013-01-01T01:01:01'
    second_timestamp = '2013-02-02T02:02:02'
    forth_timestamp = '2013-03-03T03:03:03'
    site_code = '01117800'
    site_data_file = test_util.get_test_file_path(
        'usgs/nwis/site_%s_daily.xml' % site_code)
    with test_util.mocked_urls(site_data_file):
        with freezegun.freeze_time(first_timestamp):
            nwis.hdf5.update_site_data(site_code, path=test_file_path,
                    autorepack=False)
        first_refresh = nwis.hdf5._get_last_refresh(site_code, test_file_path)
        assert first_refresh == first_timestamp

        with freezegun.freeze_time(second_timestamp):
            nwis.hdf5.update_site_data(site_code, path=test_file_path,
                    autorepack=False)
        second_refresh = nwis.hdf5._get_last_refresh(site_code, test_file_path)
        assert second_refresh == second_timestamp

        # an update from a local file is not a network refresh
        nwis.hdf5.update_site_data(site_code, path=test_file_path,
                input_file=site_data_file, autorepack=False)
        third_refresh = nwis.hdf5._get_last_refresh(site_code, test_file_path)
        # identity comparison is the idiomatic (and reliable) None check
        assert third_refresh is None

        with freezegun.freeze_time(forth_timestamp):
            nwis.hdf5.update_site_data(site_code, path=test_file_path,
                    autorepack=False)
        forth_refresh = nwis.hdf5._get_last_refresh(site_code, test_file_path)
        assert forth_refresh is not None
        assert forth_refresh == forth_timestamp
def test_update_site_data_updates_site_list(test_file_path):
    """update_site_data also records the site's metadata in the site
    list, retrievable via get_site."""
    code = '01117800'
    data_file = test_util.get_test_file_path(os.path.join(
        'usgs', 'nwis', 'site_%s_daily_update.xml' % code))
    nwis.hdf5.update_site_data(code, path=test_file_path,
            input_file=data_file, autorepack=False)

    expected = {
        'agency': 'USGS',
        'code': '01117800',
        'county': '44009',
        'huc': '01090005',
        'location': {
            'latitude': '41.5739884',
            'longitude': '-71.72062318',
            'srs': 'EPSG:4326'
        },
        'name': '<NAME>, RI',
        'network': 'NWIS',
        'site_type': 'ST',
        'state_code': '44',
        'timezone_info': {
            'default_tz': {'abbreviation': 'EST', 'offset': '-05:00'},
            'dst_tz': {'abbreviation': 'EDT', 'offset': '-04:00'},
            'uses_dst': True
        }
    }
    assert nwis.hdf5.get_site(code, path=test_file_path) == expected
def test_handles_empty_updates(test_file_path):
site_code = '01117800'
site_data_file = test_util.get_test_file_path(os.path.join(
'usgs', 'nwis', 'site_%s_daily.xml' % site_code))
empty_site_data_file = test_util.get_test_file_path(os.path.join(
'usgs', 'nwis', 'site_%s_daily_empty.xml' % site_code))
nwis.hdf5.update_site_data(site_code, path=test_file_path,
input_file=empty_site_data_file, autorepack=False)
| |
if len(rec) == 7 else 1
self.demag.append(
{k: floatnan(r) for r, k in zip(rec, keys[i])})
if demag and 'segment' in demag:
self.demag[-1].update(demag)
else:
for v in [["Limit Hc value", "lim_hc"],
["Max. Magnetization", "br_max"],
["Demagnetisation", "segment"],
["Min. Magnetization", "br_min"],
["Area demagnetized", "area"]]:
if l.find(v[0]) > -1:
if len(rec) > 0:
if v[1] == 'segment':
demag[v[1]] = int(rec[-1])
else:
demag[v[1]] = floatnan(rec[-1])
break
if demag:
if not 'segment' in demag:
self.demag.append(demag)
def __read_short_circuit(self, content):
"read short circuit section"
if content[2].startswith('Time'):
m = []
for l in content:
rec = l.split()
if len(rec) == 5 and not rec[0].startswith('Time'):
m.append([floatnan(x) for x in rec])
m = np.array(m).T
self.scData['time'] = m[0].tolist()
self.scData['ia'] = m[1].tolist()
self.scData['ib'] = m[2].tolist()
self.scData['ic'] = m[3].tolist()
self.scData['torque'] = m[4].tolist()
return
l = content[-1]
rec = l.split()
self.scData['speed'] = floatnan(rec[0])
self.scData['ikd'] = floatnan(rec[1])
self.scData['tkd'] = floatnan(rec[2])
self.scData['iks'] = floatnan(rec[3])
self.scData['tks'] = floatnan(rec[4])
def __read_peak_winding_currents(self, content):
self.scData['peakWindingCurrents'] = [float(x)
for x in re.findall(r'[-0-9.]+',
''.join(content))]
def __read_general_machine_data(self, content):
for l in content:
if l.find('Armature Length [mm]:') > -1:
self.armatureLength = floatnan(l.split()[-1])
elif l.find('Magn. Fluss Psim RMS') > 0:
self.machine['psim'] = floatnan(l.split()[-1])
elif l.find('Number of Pole pairs') > -1:
self.machine['p'] = int(l.split()[-1])
elif l.find('Number of Poles simulated') > -1:
self.machine['p_sim'] = int(l.split()[-1])
elif l.find('Total Number of Slots') > -1:
self.machine['Q'] = int(l.split()[-1])
elif l.find('Number of Slot-Sides sim.') > -1:
self.machine['qs_sim'] = int(l.split()[-1])
elif l.find('POC-File used in calculation') > -1:
self.machine['pocfile'] = l.split(
':')[-1].strip().replace('\\', '\\\\')
elif l.find('MC-File used in calculation') > -1:
self.machine['mcfile'] = l.split(
)[-1].strip().replace('\\', '\\\\')
elif l.find('Rotation Fraction') > -1:
self.machine['period_frac'] = int(l.split()[-1])
    def __read_characteristics(self, content):
        """Parse a characteristics section: scalar operating-limit values,
        an optional ld-lq table and an optional speed-torque table; the
        assembled dict is appended to self.characteristics.

        NOTE(review): the code relies on the loop variables ``i`` and
        ``k`` leaking out of their for-loops to locate the table blocks
        further down — do not refactor the loops without preserving that.
        """
        characteristics = {}
        # scalar values: scan until the first '[[***' section separator
        for i, l in enumerate(content):
            if l.startswith('[[***'):
                break
            # NOTE(review): both 'Inductance(I,Angle I-Up)' entries use
            # the same search text, and the first match breaks out of the
            # loop, so 'Lqnom' can never be assigned here — confirm
            # against the BCH file format.
            for v in [['Voltage (operat. limit)', 'u1nom'],
                      ['Current (operat. limit)', 'i1nom'],
                      ['Angle I VS Up (speed = 0)', 'beta0'],
                      ['Resistance stator winding', 'r1'],
                      ['Inductance(I,Angle I-Up)', 'Ldnom'],
                      ['Inductance(I,Angle I-Up)', 'Lqnom'],
                      ['Power (operating limit)', 'Pnom'],
                      ['Stator ewdg inductance', 'Le'],
                      ['Stator external inductance Lex', 'Lex'],
                      ['Magn. flux (RMS)', 'psimnom'],
                      ['Effect. armature length', 'lfe'],
                      ['Power cut-off speed NC', 'nc'],
                      ['Number of Pole pairs', 'p'],
                      ['Max. current (RMS)', 'i1max'],
                      ['Rel. number wdg turns(wdg.1)', 'relw'],
                      ['Number of Phases', 'm'],
                      ['Min. speed', 'nmin'],
                      ['Max. speed', 'nmax']]:
                if l.find(v[0]) > -1:
                    # last number on the line is the value
                    rec = self.__findNums(l)
                    if len(rec) > 0:
                        characteristics[v[1]] = floatnan(rec[-1])
                        break
        characteristics['ldq'] = {}
        m = []
        # optional ld-lq table: starts right after the separator when the
        # next line is a 'Wdg' header; rows are 6 tab-separated numbers
        if content[i+1].startswith('Wdg'):
            for k, l in enumerate(content[i+3:]):
                if l.startswith('[[***'):
                    break
                rec = l.split('\t')
                if len(rec) == 6:
                    m.append([floatnan(x) for x in rec])
        else:
            # no table: offset chosen so that k+i+6 below still points
            # just past the separator
            k = -3
        if m:
            m = np.array(m).T
            # beta values repeat per i1 row; distinct betas give the
            # column count of the reshaped tables
            ncols = len(set(m[1]))
            i1 = np.reshape(m[0], (-1, ncols)).T[0]
            nrows = len(i1)
            logger.info('characteristics ld-lq %d x %d', nrows, ncols)
            # inductances/flux/torque are scaled by effective length lfe
            characteristics['ldq'] = {
                'beta': m[1][:ncols][::-1].tolist(),
                'i1': i1.tolist(),
                'ld': (characteristics['lfe']*np.reshape(
                    m[2], (nrows, ncols)).T[::-1]).tolist(),
                'lq': (characteristics['lfe']*np.reshape(
                    m[3], (nrows, ncols)).T[::-1]).tolist(),
                'psim': (characteristics['lfe']*np.reshape(
                    m[4], (nrows, ncols)).T[::-1]).tolist(),
                'torque': (characteristics['lfe']*np.reshape(
                    m[5], (nrows, ncols)).T[::-1]).tolist()}
        m = []
        # speed-torque tables come in up to four consecutive sections,
        # each introduced by a 'Speed' header; column names per section:
        columns = [['n', 'id', 'iq', 'torque', 'p2'],
                   ['beta', 'cos_phi', 'u1', 'um'],
                   ['lang', 'ud', 'uq', 'i1'],
                   ['lang', 'ld', 'lq', 'psim']]
        nsec = 0
        characteristics['speed_torque'] = {}
        for l in content[k+i+6:]:
            if l.startswith('[[***'):
                break
            if not l:
                continue
            if l.startswith('Speed') and m:
                # flush the rows collected for the previous section;
                # sections after the first drop their duplicated first
                # column (the speed)
                if nsec == 0:
                    m = np.array(m).T
                else:
                    m = np.array(m).T[1:]
                for j, k in enumerate(columns[nsec]):
                    characteristics['speed_torque'][k] = m[j].tolist()
                m = []
                nsec += 1
            else:
                rec = self.__findNums(l)
                if len(rec) > 3:
                    m.append([floatnan(x) for x in rec])
        self.characteristics.append(characteristics)
def __read_flux(self, content):
"read and append flux section"
f = {'displ': [], 'flux_k': [], 'voltage_dpsi': [],
'voltage_four': [], 'current_k': [], 'voltage_ir': []}
for l in content:
rec = l.split()
if l.startswith('Flux-Area'):
areas = self.__findNums(l)
if not areas:
continue
self.wdg = areas[0] if len(areas) == 1 else '{}-{}'.format(
areas[0], areas[1])
if self.wdg not in self.flux:
self.flux[self.wdg] = []
elif len(rec) == 7:
f['displ'].append(floatnan(rec[1].strip()))
f['flux_k'].append(floatnan(rec[2].strip()))
f['voltage_dpsi'].append(floatnan(rec[3].strip()))
f['voltage_four'].append(floatnan(rec[4].strip()))
f['current_k'].append(floatnan(rec[5].strip()))
f['voltage_ir'].append(floatnan(rec[6].strip()))
elif rec and rec[0].startswith('['):
f['displunit'] = re.search(r"\[([^\]]*)\]", l).group(1).strip()
self.flux[self.wdg].append(f)
self._fft = Reader.__read_flux_fft
def __read_linear_force(self, content):
"read and append linear force section"
cosys = 'xy'
f = {'displ': [], 'magnet_1': [], 'force_x': [],
'force_y': [], 'f_idpsi': []}
for l in content:
rec = self.__findNums(l)
if len(rec) > 4:
f['displ'].append(floatnan(rec[1].strip()))
f['magnet_1'].append(floatnan(rec[2].strip()))
f['force_x'].append(floatnan(rec[3].strip()))
f['force_y'].append(floatnan(rec[4].strip()))
# TODO f['f_idpsi'].append(floatnan(rec[5].strip()))
elif l.split()[-1] == 'Force_Z':
cosys = 'rz'
if cosys == 'rz':
f['force_r'] = f.pop('force_x')
f['force_z'] = f.pop('force_y')
if len(f['displ']) > 0:
if cosys == 'xy':
ripple = [max(f['force_x']) - min(f['force_x']),
max(f['force_y']) - min(f['force_y'])]
f['ripple_x'] = ripple[0]
f['ripple_y'] = ripple[1]
else:
ripple = [max(f['force_r']) - min(f['force_r']),
max(f['force_z']) - min(f['force_z'])]
f['ripple_r'] = ripple[0]
f['ripple_z'] = ripple[1]
self.linearForce.append(f)
self._fft = Reader.__read_linearForce_fft
def __read_linearForce_fft(self, content):
"read and append linear force fft section"
if not self._fft:
return
linearForce_fft = dict(order=[], force=[], force_perc=[],
a=[], b=[])
for l in content:
rec = self.__findNums(l)
if len(rec) > 2:
linearForce_fft['order'].append(int(rec[0].strip()))
linearForce_fft['force'].append(floatnan(rec[1].strip()))
linearForce_fft['force_perc'].append(floatnan(rec[2].strip()))
if len(rec) > 4:
linearForce_fft['a'].append(floatnan(rec[3].strip()))
linearForce_fft['b'].append(floatnan(rec[4].strip()))
else:
linearForce_fft['a'].append(0.0)
linearForce_fft['b'].append(0.0)
if linearForce_fft['order']:
self.linearForce_fft.append(linearForce_fft)
def __read_fft(self, content):
if self._fft:
self._fft(self, content)
    def __read_flux_fft(self, content):
        """Read a flux FFT section and append it to self.flux_fft under
        the current winding id (self.wdg)."""
        flux_fft = dict(order=[], flux=[], flux_perc=[],
                        voltage=[], voltage_perc=[], a=[], b=[])
        for l in content:
            rec = self.__findNums(l)
            # data rows carry order, flux, flux%, voltage, voltage%
            if len(rec) > 4:
                flux_fft['order'].append(int(rec[0].strip()))
                flux_fft['flux'].append(floatnan(rec[1].strip()))
                flux_fft['flux_perc'].append(floatnan(rec[2].strip()))
                flux_fft['voltage'].append(floatnan(rec[3].strip()))
                flux_fft['voltage_perc'].append(floatnan(rec[4].strip()))
                # NOTE(review): this inner check is always true under the
                # outer 'len(rec) > 4', so the 0.0 fallback below is
                # unreachable; the sibling fft readers use a different
                # threshold here — confirm the intended condition.
                if len(rec) > 3:
                    # a/b scaled from percent
                    flux_fft['a'].append(floatnan(rec[3].strip())*1e-2)
                    flux_fft['b'].append(floatnan(rec[4].strip())*1e-2)
                else:
                    flux_fft['a'].append(0.0)
                    flux_fft['b'].append(0.0)
        if self.wdg not in self.flux_fft:
            self.flux_fft[self.wdg] = []
        self.flux_fft[self.wdg].append(flux_fft)
    def __read_airgapInduction(self, content):
        """Read the airgap induction section: per operating point the
        first four odd Fourier coefficient pairs (an, bn), plus a
        numerically integrated mean |B| (Ba) and the peak B (Bm).

        Results are reshaped into (nrows, ncols) tables keyed either by
        i1/beta or by id/iq, depending on the section format.
        """
        import scipy.integrate as si
        import math
        logger.debug('read airgapInduction')
        i1beta = False  # format is either i1/beta or id/iq
        # grid axes come from the previously parsed ldq/psidq tables
        # NOTE(review): 'id' shadows the builtin of the same name here
        if 'i1' in self.ldq and 'beta' in self.ldq:
            i1 = self.ldq['i1']
            beta = self.ldq['beta']
        elif 'id' in self.psidq and 'iq' in self.psidq:
            id = self.psidq['id']
            iq = self.psidq['iq']
        else:
            i1 = []
            beta = []
            id = []
            iq = []
        # one list per harmonic order (1st, 3rd, 5th, 7th)
        an = [[], [], [], []]
        bn = [[], [], [], []]
        Bm = []
        Ba = []
        for line in content[5:]:
            if line.startswith('[****'):
                break
            if line.startswith("Current"):
                # header containing 'Current' marks the i1/beta format
                i1beta = True
                continue
            if line.startswith("C_STEP"):
                return  # ignore this section
            try:
                rec = self.__findNums(line)
                # data row: 2 grid values + 4 (a, b) coefficient pairs
                if len(rec) == 10:
                    f = [float(s) for s in rec]
                    an[0].append(f[2])
                    bn[0].append(f[3])
                    an[1].append(f[4])
                    bn[1].append(f[5])
                    an[2].append(f[6])
                    bn[2].append(f[7])
                    an[3].append(f[8])
                    bn[3].append(f[9])
                    a = (an[0][-1], an[1][-1], an[2][-1], an[3][-1])
                    b = (bn[0][-1], bn[1][-1], bn[2][-1], bn[3][-1])
                    # B(x): truncated Fourier series over the odd
                    # harmonics, skipping NaN coefficient pairs
                    def B(x):
                        return sum(a[i] * np.cos((2 * i + 1) * x) +
                                   b[i] * np.sin((2 * i + 1) * x)
                                   for i in (0, 1, 2, 3) if not math.isnan(a[i]) and not math.isnan(b[i]))
                    def Bdc(x):
                        return abs(B(x))
                    # mean rectified induction over one period
                    Ba.append(si.quad(Bdc, 0, 2 * np.pi,
                                      limit=250)[0] / (2 * np.pi))
                    # peak induction, sampled on a 100-point grid
                    Bm.append(max([B(x) for x in np.linspace(
                        0, 2 * np.pi, 100)]))
            except Exception as e:
                logger.debug("Conversion error: {} :: {}".format(e, line))
        self.airgapInduction = dict()
        if i1beta:
            ncols = len(beta)
            if ncols:
                nrows = len(Ba)//ncols
            else:
                nrows = len(i1)
            self.airgapInduction['beta'] = beta
            self.airgapInduction['i1'] = i1[:nrows]
        else:
            ncols = len(iq)
            self.airgapInduction['iq'] = iq
            self.airgapInduction['id'] = id
            nrows = len(self.airgapInduction['id'])
        try:
            self.airgapInduction['an'] = [np.reshape(an[j][:nrows*ncols],
                                                     (nrows, ncols)).T.tolist()
                                          for j in (0, 1, 2, 3)]
            self.airgapInduction['bn'] = [np.reshape(bn[j][:nrows*ncols],
                                                     (nrows, ncols)).T.tolist()
                                          for j in (0, 1, 2, 3)]
            self.airgapInduction['Bm'] = np.reshape(Bm[:nrows*ncols],
                                                    (nrows, ncols)).T.tolist()
            self.airgapInduction['Ba'] = np.reshape(Ba[:nrows*ncols],
                                                    (nrows, ncols)).T.tolist()
            # check for nan:
            # NOTE(review): the repair loops below use range(3) although
            # 'an'/'bn' hold 4 harmonic tables — the 4th table is dropped
            # when either repair triggers; looks like it should be
            # range(4), confirm before changing.
            if len(self.airgapInduction['an'][0]) > 1 and \
               len(self.airgapInduction['an'][0][0]) != len(self.airgapInduction['an'][0][1]):
                self.airgapInduction['an'] = [self.airgapInduction['an'][i][1:]
                                              for i in range(3)]
                self.airgapInduction['bn'] = [self.airgapInduction['bn'][i][1:]
                                              for i in range(3)]
                self.airgapInduction['Ba'] = self.airgapInduction['Ba'][1:]
                self.airgapInduction['Bm'] = self.airgapInduction['Bm'][1:]
            if len(self.airgapInduction['an'][0]) > 1 and \
               len(list(filter(lambda x: np.isnan(x),
                               list(zip(*self.airgapInduction['an'][0]))[0]))) > 0:
                self.airgapInduction['an'] = [self.airgapInduction['an'][i][1:]
                                              for i in range(3)]
                self.airgapInduction['bn'] = [self.airgapInduction['bn'][i][1:]
                                              for i in range(3)]
                # NOTE(review): on Python 3 these zip(...) results are
                # lazy iterator objects, not lists like the other
                # branches produce — confirm downstream consumers.
                self.airgapInduction['Ba'] = zip(*zip(*self.airgapInduction['Ba'])
                                                 [1:])
                self.airgapInduction['Bm'] = zip(*zip(*self.airgapInduction['Bm'])
                                                 [1:])
        except ValueError:
            # presumably leftover debug output — consider logger instead
            print(self.airgapInduction['i1'])
def __read_torque_force(self, content):
"read and append force/torque section"
torque = {
'angle': [],
'current_1': [],
'force_x': [],
'force_y': [],
't_idpsi': [],
'torque': []}
for l in content:
rec = self.__findNums(l)
if len(rec) == 7:
torque['angle'].append(floatnan(rec[1].strip()))
torque['current_1'].append(floatnan(rec[2].strip()))
torque['force_x'].append(floatnan(rec[3].strip()))
torque['force_y'].append(floatnan(rec[4].strip()))
torque['t_idpsi'].append(floatnan(rec[5].strip()))
torque['torque'].append(floatnan(rec[6].strip()))
if len(torque['angle']) > 0:
ripple = max(torque['torque']) - min(torque['torque'])
torque['ripple'] = ripple
self.torque.append(torque)
self._fft = Reader.__read_torque_force_fft
def __read_torque_force_fft(self, content):
"read and append force/torque fft section"
columns = content[3].split()
if len(columns) > 1 and columns[1] == 'Torque':
torque_fft = dict(order=[],
torque=[],
torque_perc=[],
a=[],
b=[])
for l in content:
rec = self.__findNums(l)
if len(rec) > 2:
torque_fft['order'].append(int(rec[0].strip()))
torque_fft['torque'].append(floatnan(rec[1].strip()))
torque_fft['torque_perc'].append(floatnan(rec[2].strip()))
if len(rec) > 3:
torque_fft['a'].append(floatnan(rec[3].strip())*1e-2)
torque_fft['b'].append(floatnan(rec[4].strip())*1e-2)
else:
torque_fft['a'].append(0.0)
torque_fft['b'].append(0.0)
if torque_fft['order']:
self.torque_fft.append(torque_fft)
def __read_power_situation(self, content):
"read and append power situation section"
ps = dict()
beta = ''
| |
# can only "look back" in terms of UnitOn variable state (t-1) or this_lag time periods - whichever is smallest.
# the rest of the time period is captured in the unit T0 state, and is handled in the logic above.
return m.StartupCost[g, t] >= this_cost * m.UnitOn[g, t] - sum( (this_cost)*m.UnitOn[g, t - k] for k in range(1, min(t, startup_lags[0]+1))) \
- sum( sum( (this_cost - startup_costs[j])*m.UnitOn[g, t-k] for k in range(startup_lags[j]+1, startup_lags[j+1]+1) if k < t )\
for j in range(0, i-1) )
model.ComputeStartupCosts = Constraint(model.StartupCostsIndexSet, rule=compute_startup_costs_rule)
return
@add_model_attr(component_name, requires = {'data_loader': None,
                                            'status_vars': None,
                                            })
def CA_startup_costs(model, add_startup_cost_var=True):
    '''
    Equations (12) and (13) from
    <NAME>. and <NAME>. (2006) A Computationally Efficient Mixed-Integer
    Linear Formulation for the Thermal Unit Commitment Problem. IEEE Transactions
    on Power Systems, Vol. 21, No. 3, Aug 2006.

    Lower-bounds StartupCost[g, t]: starting at t costs at least the
    category-i price unless the unit was on within the last this_lag
    periods.
    '''
    if add_startup_cost_var:
        model.StartupCost = Var(model.ThermalGenerators, model.TimePeriods, within=NonNegativeReals)

    ############################################################
    # compute the per-generator, per-time period startup costs #
    ############################################################

    # one constraint per (generator, period, startup-cost category)
    def startup_costs_index_set_generator(m):
        return ((g,t,i) for t in m.TimePeriods for g in m.ThermalGenerators for i in m.StartupCostIndices[g])

    model.StartupCostsIndexSet = Set(initialize=startup_costs_index_set_generator, dimen=3)

    def compute_startup_costs_rule(m, g, t, i):
        # irios, Nov 18th: I had to change this because the last version didn't work with the last update in coopr.
        # lag (off-periods) and price for startup category i (1-based)
        this_lag = list(m.ScaledStartupLags[g])[i-1]
        this_cost = list(m.StartupCosts[g])[i-1]

        # T0 state (hours on if positive / off if negative) converted to
        # scaled time periods
        generator_t0_state = int(round(value(m.UnitOnT0State[g])/value(m.TimePeriodLengthHours)))

        # if the lag involves time periods preceding t=1, then we need to determine if the T0
        # state is consistent with the lag - if not, we can skip generation of the constraint.
        assert(m.InitialTime == 1)

        if this_lag >= t:
            # NOTE(review): '>= 0' also treats a T0 state of exactly 0 as
            # "has been on" — confirm UnitOnT0State is nonzero by convention.
            if generator_t0_state >= 0:
                # the unit has been on - we can't meet the target lag.
                return Constraint.Skip

            time_diff = this_lag - t + 1

            if (-generator_t0_state) < time_diff:
                # the generator has not been off for a sufficient number of time periods.
                return Constraint.Skip

        # can only "look back" in terms of UnitOn variable state (t-1) or this_lag time periods - whichever is smallest.
        # the rest of the time period is captured in the unit T0 state, and is handled in the logic above.
        return m.StartupCost[g, t] >= this_cost * (m.UnitOn[g, t] - sum(m.UnitOn[g, t - k] for k in range(1, min(t, this_lag+1))))

    model.ComputeStartupCosts = Constraint(model.StartupCostsIndexSet, rule=compute_startup_costs_rule)

    return
@add_model_attr(component_name, requires = {'data_loader': None,
                                            'status_vars': ['garver_3bin_vars', 'garver_3bin_relaxed_stop_vars', 'garver_2bin_vars', 'ALS_state_transition_vars'],
                                            })
def ALS_startup_costs(model, add_startup_cost_var=True):
    '''
    Equation (19) from
    <NAME>, <NAME>, and <NAME>. A state transition MIP
    formulation for the unit commitment problem. IEEE Transactions on Power
    Systems, 33(1):736-748, 2018.

    Models the startup cost as the hottest (cheapest) cost plus a
    nonnegative increment StartupCostOverHot when the unit has been off
    longer than the category lag.
    '''
    # increment above the hottest-start cost; total assembled below
    model.StartupCostOverHot = Var(model.ThermalGenerators, model.TimePeriods, within=NonNegativeReals)

    ############################################################
    # compute the per-generator, per-time period startup costs #
    ############################################################

    # one constraint per (generator, period, startup-cost category)
    def startup_costs_index_set_generator(m):
        return ((g,t,i) for t in m.TimePeriods for g in m.ThermalGenerators for i in m.StartupCostIndices[g])

    model.StartupCostsIndexSet = Set(initialize=startup_costs_index_set_generator, dimen=3)

    def compute_startup_costs_rule(m, g, t, i):
        ## we don't need these for the first startup cost
        if i == 1:
            return Constraint.Skip
        # irios, Nov 18th: I had to change this because the last version didn't work with the last update in coopr.
        # lag (off-periods) and price for startup category i (1-based);
        # (two locals that were assigned here but never read are removed)
        this_lag = list(m.ScaledStartupLags[g])[i-1]
        this_cost = list(m.StartupCosts[g])[i-1]

        # T0 state (hours on if positive / off if negative) converted to
        # scaled time periods
        generator_t0_state = int(round(value(m.UnitOnT0State[g])/value(m.TimePeriodLengthHours)))

        # if the lag involves time periods preceding t=1, then we need to determine if the T0
        # state is consistent with the lag - if not, we can skip generation of the constraint.
        assert(m.InitialTime == 1)

        if this_lag >= t:
            if generator_t0_state >= 0:
                # the unit has been on - we can't meet the target lag.
                return Constraint.Skip

            time_diff = this_lag - t + 1

            if (-generator_t0_state) < time_diff:
                # the generator has not been off for a sufficient number of time periods.
                return Constraint.Skip

        # can only "look back" in terms of UnitOn variable state (t-1) or this_lag time periods - whichever is smallest.
        # the rest of the time period is captured in the unit T0 state, and is handled in the logic above.
        if t-this_lag == 0:
            # lag window reaches exactly back to T0: use UnitOnT0
            return m.StartupCostOverHot[g, t] >= (this_cost - m.StartupCosts[g].first())*(
                                        m.UnitStart[g, t] \
                                        - sum( m.UnitStart[g,t-i] for i in range(m.ScaledStartupLags[g].first(), this_lag) ) \
                                        - m.UnitOnT0[g] )
        elif t-this_lag < 0:
            # window extends before the horizon: truncate at t
            return m.StartupCostOverHot[g, t] >= (this_cost - m.StartupCosts[g].first())*(
                                        m.UnitStart[g, t] \
                                        - sum( m.UnitStart[g,t-i] for i in range(m.ScaledStartupLags[g].first(), min(t,this_lag)) ) )
        else:
            # window lies fully inside the horizon
            return m.StartupCostOverHot[g, t] >= (this_cost - m.StartupCosts[g].first())*(
                                        m.UnitStart[g, t] \
                                        - sum( m.UnitStart[g,t-i] for i in range(m.ScaledStartupLags[g].first(), this_lag) ) \
                                        - m.UnitOn[g,t-this_lag] )

    model.ComputeStartupCostsOverHot = Constraint(model.StartupCostsIndexSet, rule=compute_startup_costs_rule)

    if add_startup_cost_var:
        model.StartupCost = Var( model.ThermalGenerators, model.TimePeriods, within=Reals)

    # total startup cost = hottest-start cost on every start + increment
    def compute_startup_costs_expr_rule(m, g, t):
        return m.StartupCost[g,t] == m.StartupCostOverHot[g,t] + m.StartupCosts[g].first()*m.UnitStart[g,t]

    model.ComputeStartupCosts = Constraint( model.ThermalGenerators, model.TimePeriods, rule=compute_startup_costs_expr_rule)
@add_model_attr(component_name, requires = {'data_loader': None,
                                            'status_vars': ['garver_3bin_vars', 'garver_3bin_relaxed_stop_vars', 'garver_2bin_vars', 'ALS_state_transition_vars'],
                                            })
def YZJMXD_startup_costs(model, add_startup_cost_var=True):
    '''
    Startup costs in excess of the hottest start, per generator and time period.

    Equations (34) from

    <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
    A novel projected two-binary-variable formulation for unit commitment in
    power systems. Applied energy, 187:732–745, 2017.

    :param model: Pyomo model with thermal generator / time period data attached.
    :param add_startup_cost_var: when True, also add the StartupCost variable and
        the constraint combining the hottest-start cost with the over-hot premium.
    '''

    model.StartupCostOverHot = Var(model.ThermalGenerators, model.TimePeriods, within=NonNegativeReals)

    ############################################################
    # compute the per-generator, per-time period startup costs #
    ############################################################

    def startup_costs_index_set_generator(m):
        return ((g,t,i) for t in m.TimePeriods for g in m.ThermalGenerators for i in m.StartupCostIndices[g])

    model.StartupCostsIndexSet = Set(initialize=startup_costs_index_set_generator, dimen=3)

    def compute_startup_costs_rule(m, g, t, i):
        ## the first (hottest) startup category is priced directly in the
        ## objective term below, so no over-hot constraint is needed for it
        if i == 1:
            return Constraint.Skip

        # index into the ordered startup lag/cost sets for category i
        this_lag = list(m.ScaledStartupLags[g])[i-1]
        this_cost = list(m.StartupCosts[g])[i-1]

        generator_t0_state = int(round(value(m.UnitOnT0State[g])/value(m.TimePeriodLengthHours)))

        # if the lag involves time periods preceding t=1, then we need to determine if the T0
        # state is consistent with the lag - if not, we can skip generation of the constraint.
        assert(m.InitialTime == 1)
        if this_lag >= t:

            if generator_t0_state >= 0:
                # the unit has been on - we can't meet the target lag.
                return Constraint.Skip

            time_diff = this_lag - t + 1
            if (-generator_t0_state) < time_diff:
                # the generator has not been off for a sufficient number of time periods.
                return Constraint.Skip

        # the over-hot premium is incurred on a start unless the unit was on at
        # some point within the last this_lag periods inside the horizon.
        # (loop variable renamed from i to s to avoid shadowing the rule index)
        return m.StartupCostOverHot[g, t] >= (this_cost - m.StartupCosts[g].first())*(
                    m.UnitStart[g, t] \
                    - sum( m.UnitOn[g,t-s] for s in range(1, min(this_lag+1,t)) ) )

    model.ComputeStartupCostsOverHot = Constraint(model.StartupCostsIndexSet, rule=compute_startup_costs_rule)

    if add_startup_cost_var:
        model.StartupCost = Var( model.ThermalGenerators, model.TimePeriods, within=Reals)

        def compute_startup_costs_expr_rule(m, g, t):
            # total startup cost = hottest-start cost on any start + over-hot premium
            return m.StartupCost[g,t] == m.StartupCostOverHot[g,t] + m.StartupCosts[g].first()*m.UnitStart[g,t]

        model.ComputeStartupCosts = Constraint( model.ThermalGenerators, model.TimePeriods, rule=compute_startup_costs_expr_rule)
@add_model_attr(component_name, requires = {'data_loader': None,
'status_vars': ['garver_3bin_vars', 'garver_3bin_relaxed_stop_vars', 'garver_2bin_vars', 'ALS_state_transition_vars'],
})
def KOW_3bin_startup_costs2(model, add_startup_cost_var=True):
'''
This is KOW_3bin_startup_costs but like Atakan et. al. and Yang et. al.,
we eliminate the constraints associated with the hottest start and
consider a hot start directly in the objective function
'''
model.StartupCostOverHot = Var(model.ThermalGenerators, model.TimePeriods, within=NonNegativeReals)
############################################################
# compute the per-generator, per-time period startup costs #
############################################################
def startup_costs_index_set_generator(m):
return ((g,t,i) for t in m.TimePeriods for g in m.ThermalGenerators for i in m.StartupCostIndices[g])
model.StartupCostsIndexSet = Set(initialize=startup_costs_index_set_generator, dimen=3)
def compute_startup_costs_rule(m, g, t, i):
## we don't need these for the first startup cost
if i == 1:
return Constraint.Skip
# irios, Nov 18th: I had to change this because the last version didn't work with the last update in coopr.
this_lag = list(m.ScaledStartupLags[g])[i-1]
this_cost = list(m.StartupCosts[g])[i-1]
startup_lags = list(m.ScaledStartupLags[g])
startup_costs = list(m.StartupCosts[g])
generator_t0_state = int(round(value(m.UnitOnT0State[g])/value(m.TimePeriodLengthHours)))
# if the lag involves time periods preceding t=1, then we need to determine if the T0
# state is consistent with the lag - if not, we can skip generation of the constraint.
assert(m.InitialTime == 1)
if this_lag >= t:
if generator_t0_state >= 0:
# the unit has been on - we can't meet the target lag.
return Constraint.Skip
time_diff = this_lag - t + 1
| |
# data/model/oci/test/test_oci_manifest.py
import json
from playhouse.test_utils import assert_query_count
from app import docker_v2_signing_key, storage
from digest.digest_tools import sha256_digest
from data.database import (Tag, ManifestBlob, ImageStorageLocation, ManifestChild,
ImageStorage, Image, RepositoryTag, get_epoch_timestamp_ms)
from data.model.oci.manifest import lookup_manifest, get_or_create_manifest
from data.model.oci.tag import filter_to_alive_tags, get_tag
from data.model.oci.shared import get_legacy_image_for_manifest
from data.model.oci.label import list_manifest_labels
from data.model.oci.retriever import RepositoryContentRetriever
from data.model.repository import get_repository, create_repository
from data.model.image import find_create_or_link_image
from data.model.blob import store_blob_record_and_temp_link
from data.model.storage import get_layer_path
from image.docker.schema1 import DockerSchema1ManifestBuilder, DockerSchema1Manifest
from image.docker.schema2.manifest import DockerSchema2ManifestBuilder
from image.docker.schema2.list import DockerSchema2ManifestListBuilder
from util.bytes import Bytes
from test.fixtures import *
def test_lookup_manifest(initialized_db):
  """Manifests of alive tags resolve in a single query; dead tags require allow_dead."""
  checked = 0
  for alive_tag in filter_to_alive_tags(Tag.select()):
    with assert_query_count(1):
      assert lookup_manifest(alive_tag.repository, alive_tag.manifest.digest) == alive_tag.manifest
    checked += 1

  # The fixture data must contain at least one alive tag for this test to be meaningful.
  assert checked

  for any_tag in Tag.select():
    with assert_query_count(1):
      assert lookup_manifest(any_tag.repository, any_tag.manifest.digest,
                             allow_dead=True) == any_tag.manifest
def test_lookup_manifest_dead_tag(initialized_db):
  """A manifest referenced only by an expired tag is hidden unless allow_dead is set."""
  expired = Tag.select().where(Tag.lifetime_end_ms <= get_epoch_timestamp_ms()).get()
  assert expired.lifetime_end_ms <= get_epoch_timestamp_ms()

  # Default lookup ignores dead tags entirely.
  assert lookup_manifest(expired.repository, expired.manifest.digest) is None

  # Opting into dead tags surfaces the manifest again.
  resolved = lookup_manifest(expired.repository, expired.manifest.digest, allow_dead=True)
  assert resolved == expired.manifest
def create_manifest_for_testing(repository, differentiation_field='1'):
  """Create and persist a minimal schema-2 manifest (one remote layer) in `repository`.

  Returns a tuple of (stored manifest record, parsed manifest object).
  `differentiation_field` is appended to the remote layer URL so repeated calls
  yield manifests with distinct digests.

  NOTE(review): this helper is redefined later in this module; at import time the
  later definition shadows this one.
  """
  # Populate a manifest.
  layer_json = json.dumps({
    'config': {},
    "rootfs": {
      "type": "layers",
      "diff_ids": []
    },
    "history": [],
  })

  # Add a blob containing the config.
  _, config_digest = _populate_blob(layer_json)

  remote_digest = sha256_digest('something')

  builder = DockerSchema2ManifestBuilder()
  builder.set_config_digest(config_digest, len(layer_json))
  builder.add_layer(remote_digest, 1234, urls=['http://hello/world' + differentiation_field])
  manifest = builder.build()

  created = get_or_create_manifest(repository, manifest, storage)
  assert created
  return created.manifest, manifest
def test_lookup_manifest_child_tag(initialized_db):
  """Manifests referenced only as children of a manifest list are not visible to
  lookup_manifest unless allow_dead=True, since no alive tag points at them.
  """
  repository = create_repository('devtable', 'newrepo', None)
  manifest, manifest_impl = create_manifest_for_testing(repository)

  # Mark the hidden tag as dead.
  hidden_tag = Tag.get(manifest=manifest, hidden=True)
  hidden_tag.lifetime_end_ms = hidden_tag.lifetime_start_ms
  hidden_tag.save()

  # Ensure the manifest cannot currently be looked up, as it is not pointed to by an alive tag.
  assert lookup_manifest(repository, manifest.digest) is None
  assert lookup_manifest(repository, manifest.digest, allow_dead=True) is not None

  # Populate a manifest list.
  list_builder = DockerSchema2ManifestListBuilder()
  list_builder.add_manifest(manifest_impl, 'amd64', 'linux')
  manifest_list = list_builder.build()

  # Write the manifest list, which should also write the manifests themselves.
  created_tuple = get_or_create_manifest(repository, manifest_list, storage)
  assert created_tuple is not None

  # Since the manifests are not yet referenced by a tag, they cannot be found.
  assert lookup_manifest(repository, manifest.digest) is None
  assert lookup_manifest(repository, manifest_list.digest) is None

  # Unless we ask for "dead" manifests.
  assert lookup_manifest(repository, manifest.digest, allow_dead=True) is not None
  assert lookup_manifest(repository, manifest_list.digest, allow_dead=True) is not None
def _populate_blob(content, namespace='devtable', repo_name='newrepo'):
  """Store `content` as a blob and return (blob record, digest string).

  The blob record is created with a temporary (120s) link so it survives for the
  duration of the test, and the bytes are written to the 'local_us' storage
  location. `namespace`/`repo_name` default to the values previously hard-coded,
  so existing callers are unaffected.
  """
  digest = str(sha256_digest(content))
  location = ImageStorageLocation.get(name='local_us')
  blob = store_blob_record_and_temp_link(namespace, repo_name, digest, location,
                                         len(content), 120)
  storage.put_content(['local_us'], get_layer_path(blob), content)
  return blob, digest
@pytest.mark.parametrize('schema_version', [
  1,
  2,
])
def test_get_or_create_manifest(schema_version, initialized_db):
  """End-to-end check of get_or_create_manifest for schema 1 and schema 2:
  creation, temporary tag, legacy image link, linked blobs, idempotent
  re-creation, and label extraction from the config.
  """
  repository = create_repository('devtable', 'newrepo', None)

  expected_labels = {
    'Foo': 'Bar',
    'Baz': 'Meh',
  }

  layer_json = json.dumps({
    'id': 'somelegacyid',
    'config': {
      'Labels': expected_labels,
    },
    "rootfs": {
      "type": "layers",
      "diff_ids": []
    },
    "history": [
      {
        "created": "2018-04-03T18:37:09.284840891Z",
        "created_by": "do something",
      },
    ],
  })

  # Create a legacy image.
  find_create_or_link_image('somelegacyid', repository, 'devtable', {}, 'local_us')

  # Add a blob containing the config.
  _, config_digest = _populate_blob(layer_json)

  # Add a blob of random data.
  random_data = 'hello world'
  _, random_digest = _populate_blob(random_data)

  # Build the manifest.
  if schema_version == 1:
    builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'anothertag')
    builder.add_layer(random_digest, layer_json)
    sample_manifest_instance = builder.build(docker_v2_signing_key)
  elif schema_version == 2:
    builder = DockerSchema2ManifestBuilder()
    builder.set_config_digest(config_digest, len(layer_json))
    builder.add_layer(random_digest, len(random_data))
    sample_manifest_instance = builder.build()

  # Create a new manifest.
  created_manifest = get_or_create_manifest(repository, sample_manifest_instance, storage)
  created = created_manifest.manifest
  newly_created = created_manifest.newly_created

  assert newly_created
  assert created is not None
  assert created.media_type.name == sample_manifest_instance.media_type
  assert created.digest == sample_manifest_instance.digest
  assert created.manifest_bytes == sample_manifest_instance.bytes.as_encoded_str()
  assert created_manifest.labels_to_apply == expected_labels

  # Verify it has a temporary tag pointing to it.
  assert Tag.get(manifest=created, hidden=True).lifetime_end_ms

  # Verify the legacy image.
  legacy_image = get_legacy_image_for_manifest(created)
  assert legacy_image is not None
  assert legacy_image.storage.content_checksum == random_digest

  # Verify the linked blobs.
  blob_digests = [mb.blob.content_checksum for mb
                  in ManifestBlob.select().where(ManifestBlob.manifest == created)]

  assert random_digest in blob_digests
  # The config is a distinct blob only under schema 2.
  if schema_version == 2:
    assert config_digest in blob_digests

  # Retrieve it again and ensure it is the same manifest.
  created_manifest2 = get_or_create_manifest(repository, sample_manifest_instance, storage)
  created2 = created_manifest2.manifest
  newly_created2 = created_manifest2.newly_created

  assert not newly_created2
  assert created2 == created

  # Ensure it again has a temporary tag.
  assert Tag.get(manifest=created2, hidden=True).lifetime_end_ms

  # Ensure the labels were added.
  labels = list(list_manifest_labels(created))
  assert len(labels) == 2

  labels_dict = {label.key: label.value for label in labels}
  assert labels_dict == expected_labels
def test_get_or_create_manifest_invalid_image(initialized_db):
  """get_or_create_manifest rejects a schema-1 manifest whose v1 metadata references
  an unknown parent image, returning None."""
  repository = get_repository('devtable', 'simple')

  existing_tag = get_tag(repository, 'latest')
  existing_parsed = DockerSchema1Manifest(
      Bytes.for_string_or_unicode(existing_tag.manifest.manifest_bytes), validate=False)

  # Reuse a known-good blob digest but point the v1 history at a bogus parent image.
  bad_builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'anothertag')
  bad_builder.add_layer(existing_parsed.blob_digests[0],
                        '{"id": "foo", "parent": "someinvalidimageid"}')
  bad_manifest = bad_builder.build(docker_v2_signing_key)

  assert get_or_create_manifest(repository, bad_manifest, storage) is None
def test_get_or_create_manifest_list(initialized_db):
  """Writing a schema-2 manifest list links both child manifests (a v1 and a v2)
  via ManifestChild rows with the correct media types.
  """
  repository = create_repository('devtable', 'newrepo', None)

  expected_labels = {
    'Foo': 'Bar',
    'Baz': 'Meh',
  }

  layer_json = json.dumps({
    'id': 'somelegacyid',
    'config': {
      'Labels': expected_labels,
    },
    "rootfs": {
      "type": "layers",
      "diff_ids": []
    },
    "history": [
      {
        "created": "2018-04-03T18:37:09.284840891Z",
        "created_by": "do something",
      },
    ],
  })

  # Create a legacy image.
  find_create_or_link_image('somelegacyid', repository, 'devtable', {}, 'local_us')

  # Add a blob containing the config.
  _, config_digest = _populate_blob(layer_json)

  # Add a blob of random data.
  random_data = 'hello world'
  _, random_digest = _populate_blob(random_data)

  # Build the manifests.
  v1_builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'anothertag')
  v1_builder.add_layer(random_digest, layer_json)
  v1_manifest = v1_builder.build(docker_v2_signing_key).unsigned()

  v2_builder = DockerSchema2ManifestBuilder()
  v2_builder.set_config_digest(config_digest, len(layer_json))
  v2_builder.add_layer(random_digest, len(random_data))
  v2_manifest = v2_builder.build()

  # Write the manifests.
  v1_created = get_or_create_manifest(repository, v1_manifest, storage)
  assert v1_created
  assert v1_created.manifest.digest == v1_manifest.digest

  v2_created = get_or_create_manifest(repository, v2_manifest, storage)
  assert v2_created
  assert v2_created.manifest.digest == v2_manifest.digest

  # Build the manifest list.
  list_builder = DockerSchema2ManifestListBuilder()
  list_builder.add_manifest(v1_manifest, 'amd64', 'linux')
  list_builder.add_manifest(v2_manifest, 'amd32', 'linux')
  manifest_list = list_builder.build()

  # Write the manifest list, which should also write the manifests themselves.
  created_tuple = get_or_create_manifest(repository, manifest_list, storage)
  assert created_tuple is not None

  created_list = created_tuple.manifest
  assert created_list
  assert created_list.media_type.name == manifest_list.media_type
  assert created_list.digest == manifest_list.digest

  # Ensure the child manifest links exist.
  child_manifests = {cm.child_manifest.digest: cm.child_manifest
                     for cm in ManifestChild.select().where(ManifestChild.manifest == created_list)}
  assert len(child_manifests) == 2
  assert v1_manifest.digest in child_manifests
  assert v2_manifest.digest in child_manifests

  assert child_manifests[v1_manifest.digest].media_type.name == v1_manifest.media_type
  assert child_manifests[v2_manifest.digest].media_type.name == v2_manifest.media_type
def test_get_or_create_manifest_list_duplicate_child_manifest(initialized_db):
  """A manifest list that repeats the same child manifest produces a single
  ManifestChild link, and re-creating the list is idempotent.
  """
  repository = create_repository('devtable', 'newrepo', None)

  expected_labels = {
    'Foo': 'Bar',
    'Baz': 'Meh',
  }

  layer_json = json.dumps({
    'id': 'somelegacyid',
    'config': {
      'Labels': expected_labels,
    },
    "rootfs": {
      "type": "layers",
      "diff_ids": []
    },
    "history": [
      {
        "created": "2018-04-03T18:37:09.284840891Z",
        "created_by": "do something",
      },
    ],
  })

  # Create a legacy image.
  find_create_or_link_image('somelegacyid', repository, 'devtable', {}, 'local_us')

  # Add a blob containing the config.
  _, config_digest = _populate_blob(layer_json)

  # Add a blob of random data.
  random_data = 'hello world'
  _, random_digest = _populate_blob(random_data)

  # Build the manifest.
  v2_builder = DockerSchema2ManifestBuilder()
  v2_builder.set_config_digest(config_digest, len(layer_json))
  v2_builder.add_layer(random_digest, len(random_data))
  v2_manifest = v2_builder.build()

  # Write the manifest.
  v2_created = get_or_create_manifest(repository, v2_manifest, storage)
  assert v2_created
  assert v2_created.manifest.digest == v2_manifest.digest

  # Build the manifest list, with the child manifest repeated.
  list_builder = DockerSchema2ManifestListBuilder()
  list_builder.add_manifest(v2_manifest, 'amd64', 'linux')
  list_builder.add_manifest(v2_manifest, 'amd32', 'linux')
  manifest_list = list_builder.build()

  # Write the manifest list, which should also write the manifests themselves.
  created_tuple = get_or_create_manifest(repository, manifest_list, storage)
  assert created_tuple is not None

  created_list = created_tuple.manifest
  assert created_list
  assert created_list.media_type.name == manifest_list.media_type
  assert created_list.digest == manifest_list.digest

  # Ensure the child manifest links exist.
  child_manifests = {cm.child_manifest.digest: cm.child_manifest
                     for cm in ManifestChild.select().where(ManifestChild.manifest == created_list)}
  assert len(child_manifests) == 1
  assert v2_manifest.digest in child_manifests
  assert child_manifests[v2_manifest.digest].media_type.name == v2_manifest.media_type

  # Try to create again and ensure we get back the same manifest list.
  created2_tuple = get_or_create_manifest(repository, manifest_list, storage)
  assert created2_tuple is not None
  assert created2_tuple.manifest == created_list
def test_get_or_create_manifest_with_remote_layers(initialized_db):
  """A schema-2 manifest with a remote (URL-backed) layer is stored, but the
  remote layer is not linked as a local blob and no legacy image is created.
  """
  repository = create_repository('devtable', 'newrepo', None)

  layer_json = json.dumps({
    'config': {},
    "rootfs": {
      "type": "layers",
      "diff_ids": []
    },
    "history": [
      {
        "created": "2018-04-03T18:37:09.284840891Z",
        "created_by": "do something",
      },
      {
        "created": "2018-04-03T18:37:09.284840891Z",
        "created_by": "do something",
      },
    ],
  })

  # Add a blob containing the config.
  _, config_digest = _populate_blob(layer_json)

  # Add a blob of random data.
  random_data = 'hello world'
  _, random_digest = _populate_blob(random_data)

  # The remote layer's digest references content we never store locally.
  remote_digest = sha256_digest('something')

  builder = DockerSchema2ManifestBuilder()
  builder.set_config_digest(config_digest, len(layer_json))
  builder.add_layer(remote_digest, 1234, urls=['http://hello/world'])
  builder.add_layer(random_digest, len(random_data))
  manifest = builder.build()

  assert remote_digest in manifest.blob_digests
  assert remote_digest not in manifest.local_blob_digests

  assert manifest.has_remote_layer
  assert not manifest.has_legacy_image
  assert manifest.get_schema1_manifest('foo', 'bar', 'baz', None) is None

  # Write the manifest.
  created_tuple = get_or_create_manifest(repository, manifest, storage)
  assert created_tuple is not None

  created_manifest = created_tuple.manifest
  assert created_manifest
  assert created_manifest.media_type.name == manifest.media_type
  assert created_manifest.digest == manifest.digest

  # Verify the legacy image.
  legacy_image = get_legacy_image_for_manifest(created_manifest)
  assert legacy_image is None

  # Verify the linked blobs.
  blob_digests = {mb.blob.content_checksum for mb
                  in ManifestBlob.select().where(ManifestBlob.manifest == created_manifest)}
  assert random_digest in blob_digests
  assert config_digest in blob_digests
  assert remote_digest not in blob_digests
def create_manifest_for_testing(repository, differentiation_field='1', include_shared_blob=False):
# Populate a manifest.
layer_json = json.dumps({
'config': {},
"rootfs": {
"type": "layers",
"diff_ids": []
| |
containing the facet images added over vis_graph. We can now
# gather those images into one image
return delayed(gather_invert_results, nout=2, pure=True)(results, template_model_graph, facets=facets, **kwargs)
def create_invert_facet_wstack_graph(vis_graph_list, template_model_graph: delayed, dopsf=False,
                                     normalize=True, facets=1, **kwargs) -> delayed:
    """Faceted invert summed over vis_graph_list, using the w-stack invert per scatter.

    :param vis_graph_list: List of visibility graphs
    :param template_model_graph: Model used to determine image parameters
    :param dopsf: Passed through to the underlying invert graph
    :param normalize: Passed through to the underlying invert graph
    :param facets: Number of facets per x, y axis
    :param kwargs: Parameters for functions in graphs
    :return: delayed for invert
    """
    return create_invert_facet_vis_scatter_graph(
        vis_graph_list, template_model_graph,
        c_invert_vis_scatter_graph=create_invert_wstack_graph,
        dopsf=dopsf, normalize=normalize, facets=facets, **kwargs)
def create_invert_facet_timeslice_graph(vis_graph_list, template_model_graph: delayed, dopsf=False,
                                        normalize=True, facets=1, **kwargs) -> delayed:
    """Faceted invert summed over vis_graph_list, using the timeslice invert per scatter.

    :param vis_graph_list: List of visibility graphs
    :param template_model_graph: Model used to determine image parameters
    :param dopsf: Passed through to the underlying invert graph
    :param normalize: Passed through to the underlying invert graph
    :param facets: Number of facets per x, y axis
    :param kwargs: Parameters for functions in graphs
    :return: delayed for invert
    """
    return create_invert_facet_vis_scatter_graph(
        vis_graph_list, template_model_graph,
        c_invert_vis_scatter_graph=create_invert_timeslice_graph,
        dopsf=dopsf, normalize=normalize, facets=facets, **kwargs)
def create_predict_graph(vis_graph_list, model_graph: delayed, predict=predict_2d, **kwargs):
    """Predict from model_graph, iterating over the vis_graph_list

    :param vis_graph_list: List of visibility graphs
    :param model_graph: Model used to determine image parameters
    :param predict: Predict function to be used (default predict_2d)
    :param kwargs: Parameters for functions in graphs
    :return: List of vis_graphs
    """
    def predict_single(vis, model, **kwargs):
        # None visibilities pass through unchanged; otherwise predict into a copy.
        if vis is None:
            return None
        working = copy_visibility(vis)
        return predict(working, model, **kwargs)

    return [delayed(predict_single, pure=True, nout=1)(vis_graph, model_graph, **kwargs)
            for vis_graph in vis_graph_list]
def create_predict_facet_graph(vis_graph_list, model_graph: delayed, predict=predict_2d, facets=2, **kwargs):
    """ Predict visibility from a model using facets

    :param vis_graph_list: List of visibility graphs
    :param model_graph: Model used to determine image parameters
    :param predict: Predict function to be used (predict_2d)
    :param facets: Number of facets per x, y axis
    :param kwargs: Parameters for functions in graphs
    :return: List of vis_graphs
    """
    def predict_facets_and_accumulate(vis, model, **kwargs):
        # Accumulate this facet's predicted visibility onto vis in place.
        if vis is not None:
            predicted = copy_visibility(vis)
            predicted = predict(predicted, model, **kwargs)
            vis.data['vis'] += predicted.data['vis']
            return vis
        else:
            return None

    # Note that we need to know the number of facets in order to define the size of facet_model_graphs
    facet_model_graphs = delayed(image_scatter_facets, nout=facets ** 2, pure=True)(model_graph,
                                                                                    facets=facets)
    accumulate_vis_graphs = list()
    for vis_graph in vis_graph_list:
        # Chain the per-facet predicts so each step depends on (and accumulates onto)
        # the result of the previous one. BUGFIX: the accumulator was previously reset
        # to None inside the facet loop, so the else-branch chained from None and the
        # contribution of every facet after the first was lost.
        accumulate_vis_graph = vis_graph
        for facet_model_graph in facet_model_graphs:
            accumulate_vis_graph = delayed(predict_facets_and_accumulate, pure=True, nout=1)(
                accumulate_vis_graph, facet_model_graph, **kwargs)
        accumulate_vis_graphs.append(accumulate_vis_graph)

    return accumulate_vis_graphs
def create_predict_vis_scatter_graph(vis_graph_list, model_graph: delayed, vis_slices,
                                     predict, scatter, gather, **kwargs):
    """Predict over scattered visibility chunks, then gather back one graph per input.

    :param vis_graph_list: List of visibility graphs
    :param model_graph: Model used to determine image parameters
    :param vis_slices: Number of vis slices (w stack or timeslice)
    :param predict: Predict function
    :param scatter: Scatter function e.g. visibility_scatter_w
    :param gather: Gatherer function e.g. visibility_gather_w
    :param kwargs: Parameters for functions in graphs
    :return: List of vis_graphs
    """
    def predict_chunk(vis, model, **kwargs):
        # None chunks pass through unchanged; otherwise predict into a copy.
        if vis is None:
            return None
        working = copy_visibility(vis)
        return predict(working, model, **kwargs)

    gathered = []
    for vis_graph in vis_graph_list:
        chunks = delayed(scatter, nout=vis_slices)(vis_graph, vis_slices=vis_slices, **kwargs)
        per_chunk = [delayed(predict_chunk, pure=True, nout=1)(chunk, model_graph, **kwargs)
                     for chunk in chunks]
        gathered.append(delayed(gather, nout=1)(per_chunk, vis_graph, vis_slices=vis_slices,
                                                **kwargs))
    return gathered
def create_predict_wstack_graph(vis_graph_list, model_graph: delayed, vis_slices, **kwargs):
    """Predict by w-stacking: scatter each visibility in w, predict, then gather.

    :param vis_graph_list: List of visibility graphs
    :param model_graph: Model used to determine image parameters
    :param vis_slices: Number of w slices
    :param kwargs: Parameters for functions in graphs
    :return: List of vis_graphs
    """
    return create_predict_vis_scatter_graph(
        vis_graph_list, model_graph, vis_slices,
        predict=predict_wstack_single,
        scatter=visibility_scatter_w,
        gather=visibility_gather_w, **kwargs)
def create_predict_timeslice_graph(vis_graph_list, model_graph: delayed, vis_slices,
                                   **kwargs):
    """Predict by timeslicing: scatter each visibility in time, predict, then gather.

    wprojection is available with kernel='wprojection', wstep=some_number. This
    corresponds to the default SKA approach wsnapshots.

    :param vis_graph_list: List of visibility graphs
    :param model_graph: Model used to determine image parameters
    :param vis_slices: Number of time slices
    :param kwargs: Parameters for functions in graphs
    :return: List of vis_graphs
    """
    return create_predict_vis_scatter_graph(
        vis_graph_list, model_graph, vis_slices,
        predict=predict_timeslice_single,
        scatter=visibility_scatter_time,
        gather=visibility_gather_time, **kwargs)
def create_predict_facet_vis_scatter_graph(vis_graph_list, model_graph: delayed, vis_slices, facets,
                                           predict, vis_scatter, vis_gather, **kwargs):
    """Predict, iterating over both the scattered vis_graph_list and the image facets

    :param vis_graph_list: List of visibility graphs
    :param model_graph: Model used to determine image parameters
    :param vis_slices: Number of vis slices (w stack or timeslice)
    :param facets: Number of facets (in both x and y axes)
    :param predict: Predict function
    :param vis_scatter: Scatter function e.g. visibility_scatter_w
    :param vis_gather: Gatherer function e.g. visibility_gather_w
    :param kwargs: Parameters for functions in graphs
    :return: List of vis_graphs
    """
    def predict_facets_and_accumulate(vis, model, **kwargs):
        # Predict into a copy so the scattered input visibility is left untouched.
        if vis is not None:
            predicted = copy_visibility(vis)
            predicted = predict(predicted, model, **kwargs)
            return predicted
        else:
            return None

    # We need to know the number of facets in advance to size facet_model_graphs.
    facet_model_graphs = delayed(image_scatter_facets, nout=facets ** 2, pure=True)(model_graph, facets=facets)
    predicted_vis_list = list()
    for vis_graph in vis_graph_list:
        scatter_vis_graphs = delayed(vis_scatter, nout=vis_slices)(vis_graph, vis_slices=vis_slices, **kwargs)

        accumulate_vis_graphs = list()
        for scatter_vis_graph in scatter_vis_graphs:
            # One predict task per (vis chunk, facet); the gather combines them all.
            for facet_model_graph in facet_model_graphs:
                accumulate_vis_graph = delayed(predict_facets_and_accumulate,
                                               pure=True, nout=1)(scatter_vis_graph, facet_model_graph,
                                                                  **kwargs)
                accumulate_vis_graphs.append(accumulate_vis_graph)
        predicted_vis_list.append(delayed(vis_gather, nout=1)(accumulate_vis_graphs, vis_graph,
                                                              vis_slices=vis_slices, **kwargs))

    return predicted_vis_list
def create_predict_facet_wstack_graph(vis_graph_list, model_graph: delayed, vis_slices, facets,
                                      **kwargs):
    """Faceted w-stack predict: scatter in w, predict per facet, gather per visibility.

    :param vis_graph_list: List of visibility graphs
    :param model_graph: Model used to determine image parameters
    :param vis_slices: Number of w slices
    :param facets: Number of facets (in both x and y axes)
    :param kwargs: Parameters for functions in graphs
    :return: List of vis_graphs
    """
    return create_predict_facet_vis_scatter_graph(
        vis_graph_list, model_graph, vis_slices=vis_slices, facets=facets,
        predict=predict_wstack_single,
        vis_scatter=visibility_scatter_w,
        vis_gather=visibility_gather_w, **kwargs)
def create_predict_facet_timeslice_graph(vis_graph_list, model_graph: delayed, vis_slices, facets,
                                         **kwargs):
    """Predict using timeslicing, iterating over the vis_graph_list and time

    (Docstring previously said "wstacking"; this wrapper uses the timeslice
    predict/scatter/gather functions.)

    :param vis_graph_list: List of visibility graphs
    :param model_graph: Model used to determine image parameters
    :param vis_slices: Number of vis slices in timeslice
    :param facets: Number of facets (in both x and y axes)
    :param kwargs: Parameters for functions in graphs
    :return: List of vis_graphs
    """
    return create_predict_facet_vis_scatter_graph(vis_graph_list, model_graph, vis_slices=vis_slices,
                                                  facets=facets, predict=predict_timeslice_single,
                                                  vis_scatter=visibility_scatter_time,
                                                  vis_gather=visibility_gather_time, **kwargs)
def create_residual_graph(vis_graph_list, model_graph: delayed, **kwargs) -> delayed:
    """Graph for the residual image: invert(vis - predict(model)).

    :param vis_graph_list: List of visibility graphs
    :param model_graph: Model used to determine image parameters
    :param kwargs: Parameters for functions in graphs
    :return: delayed for the residual image
    """
    zeroed_vis = create_zero_vis_graph_list(vis_graph_list)
    predicted_vis = create_predict_graph(zeroed_vis, model_graph, **kwargs)
    residual_vis = create_subtract_vis_graph_list(vis_graph_list, predicted_vis)
    return create_invert_graph(residual_vis, model_graph, dopsf=False, normalize=True, **kwargs)
def create_residual_facet_graph(vis_graph_list, model_graph: delayed, **kwargs) -> delayed:
    """Graph for the residual image using faceted predict and invert.

    :param vis_graph_list: List of visibility graphs
    :param model_graph: Model used to determine image parameters
    :param kwargs: Parameters for functions in graphs (e.g. facets)
    :return: delayed for the residual image
    """
    zeroed_vis = create_zero_vis_graph_list(vis_graph_list)
    predicted_vis = create_predict_facet_graph(zeroed_vis, model_graph, **kwargs)
    residual_vis = create_subtract_vis_graph_list(vis_graph_list, predicted_vis)
    return create_invert_facet_graph(residual_vis, model_graph, dopsf=False, normalize=True,
                                     **kwargs)
def create_residual_wstack_graph(vis_graph_list, model_graph: delayed, **kwargs) -> delayed:
    """Graph for the residual image using w-stacked predict and invert.

    :param vis_graph_list: List of visibility graphs
    :param model_graph: Model used to determine image parameters
    :param kwargs: Parameters for functions in graphs (e.g. vis_slices)
    :return: delayed for the residual image
    """
    zeroed_vis = create_zero_vis_graph_list(vis_graph_list)
    predicted_vis = create_predict_wstack_graph(zeroed_vis, model_graph, **kwargs)
    residual_vis = create_subtract_vis_graph_list(vis_graph_list, predicted_vis)
    return create_invert_wstack_graph(residual_vis, model_graph, dopsf=False, normalize=True,
                                      **kwargs)
def create_residual_timeslice_graph(vis_graph_list, model_graph: delayed, **kwargs) -> delayed:
    """Graph for the residual image using timesliced predict and invert.

    :param vis_graph_list: List of visibility graphs
    :param model_graph: Model used to determine image parameters
    :param kwargs: Parameters for functions in graphs (e.g. vis_slices)
    :return: delayed for the residual image
    """
    zeroed_vis = create_zero_vis_graph_list(vis_graph_list)
    predicted_vis = create_predict_timeslice_graph(zeroed_vis, model_graph, **kwargs)
    residual_vis = create_subtract_vis_graph_list(vis_graph_list, predicted_vis)
    return create_invert_timeslice_graph(residual_vis, model_graph, dopsf=False, normalize=True,
                                         **kwargs)
def create_residual_facet_wstack_graph(vis_graph_list, model_graph: delayed, **kwargs) -> delayed:
    """ Create a graph to calculate residual image using w stacking and faceting

    :param vis_graph_list: List of visibility graphs
    :param model_graph: Model used to determine image parameters
    :param vis_slices: Number of vis slices (w stack or timeslice)
    :param facets: Number of facets (in both x and y axes)
    :param kwargs: Parameters for functions in graphs
    :return: delayed graph producing the residual image
    """
    # Predict the model into a zeroed copy of the visibilities, subtract the
    # prediction from the observed data, then invert the difference.
    zeroed_vis = create_zero_vis_graph_list(vis_graph_list)
    predicted_vis = create_predict_facet_wstack_graph(zeroed_vis, model_graph, **kwargs)
    residual_vis = create_subtract_vis_graph_list(vis_graph_list, predicted_vis)
    return create_invert_facet_wstack_graph(residual_vis, model_graph, dopsf=False,
                                            normalize=True, **kwargs)
def create_deconvolve_graph(dirty_graph: delayed, psf_graph: delayed, | |
None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetGroupActiveInfoRequest(TeaModel):
    """Request parameters for the group-activity statistics query."""

    # (wire key, attribute name) pairs shared by to_map/from_map.
    _FIELDS = (
        ('statDate', 'stat_date'),
        ('dingGroupId', 'ding_group_id'),
        ('pageNumber', 'page_number'),
        ('pageSize', 'page_size'),
    )

    def __init__(
        self,
        stat_date: str = None,
        ding_group_id: str = None,
        page_number: int = None,
        page_size: int = None,
    ):
        # Statistics date.
        self.stat_date = stat_date
        # DingTalk group id.
        self.ding_group_id = ding_group_id
        # First page number of the pagination.
        self.page_number = page_number
        # Page size.
        self.page_size = page_size

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for wire_key, attr in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[wire_key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire_key, attr in self._FIELDS:
            if m.get(wire_key) is not None:
                setattr(self, attr, m.get(wire_key))
        return self
class GetGroupActiveInfoResponseBodyData(TeaModel):
    """One row of group-activity statistics for a single group on one day."""

    # (wire key, attribute name) pairs shared by to_map/from_map.
    _FIELDS = (
        ('statDate', 'stat_date'),
        ('dingGroupId', 'ding_group_id'),
        ('groupCreateTime', 'group_create_time'),
        ('groupCreateUserId', 'group_create_user_id'),
        ('groupCreateUserName', 'group_create_user_name'),
        ('groupName', 'group_name'),
        ('groupType', 'group_type'),
        ('groupUserCnt1d', 'group_user_cnt_1d'),
        ('sendMessageUserCnt1d', 'send_message_user_cnt_1d'),
        ('sendMessageCnt1d', 'send_message_cnt_1d'),
        ('openConvUv1d', 'open_conv_uv_1d'),
    )

    def __init__(
        self,
        stat_date: str = None,
        ding_group_id: str = None,
        group_create_time: str = None,
        group_create_user_id: str = None,
        group_create_user_name: str = None,
        group_name: str = None,
        group_type: int = None,
        group_user_cnt_1d: int = None,
        send_message_user_cnt_1d: int = None,
        send_message_cnt_1d: int = None,
        open_conv_uv_1d: int = None,
    ):
        # Statistics date.
        self.stat_date = stat_date
        # Group id.
        self.ding_group_id = ding_group_id
        # Group creation time.
        self.group_create_time = group_create_time
        # User id of the group creator.
        self.group_create_user_id = group_create_user_id
        # Name of the group creator.
        self.group_create_user_name = group_create_user_name
        # Group name.
        self.group_name = group_name
        # Group type: 1 = all-staff, 2 = department, 3 = (other) internal, 4 = scene group.
        self.group_type = group_type
        # Member count over the last day.
        self.group_user_cnt_1d = group_user_cnt_1d
        # Number of users who sent messages over the last day.
        self.send_message_user_cnt_1d = send_message_user_cnt_1d
        # Number of messages sent over the last day.
        self.send_message_cnt_1d = send_message_cnt_1d
        # Number of users who opened the group over the last day.
        self.open_conv_uv_1d = open_conv_uv_1d

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for wire_key, attr in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[wire_key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire_key, attr in self._FIELDS:
            if m.get(wire_key) is not None:
                setattr(self, attr, m.get(wire_key))
        return self
class GetGroupActiveInfoResponseBody(TeaModel):
    """Paged response body: a list of per-group statistics rows plus the total count."""

    def __init__(
        self,
        data: List[GetGroupActiveInfoResponseBodyData] = None,
        total_count: int = None,
    ):
        self.data = data
        self.total_count = total_count

    def validate(self):
        # Validate each non-empty row.
        for item in self.data or []:
            if item:
                item.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        result['data'] = []
        if self.data is not None:
            result['data'] = [item.to_map() if item else None for item in self.data]
        if self.total_count is not None:
            result['totalCount'] = self.total_count
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        self.data = []
        if m.get('data') is not None:
            self.data = [
                GetGroupActiveInfoResponseBodyData().from_map(item)
                for item in m.get('data')
            ]
        if m.get('totalCount') is not None:
            self.total_count = m.get('totalCount')
        return self
class GetGroupActiveInfoResponse(TeaModel):
    """Full API response wrapper: HTTP headers plus the deserialized body."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        body: GetGroupActiveInfoResponseBody = None,
    ):
        self.headers = headers
        self.body = body

    def validate(self):
        # Both headers and body are required on a well-formed response.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        headers, body = self.headers, self.body
        if headers is not None:
            result['headers'] = headers
        if body is not None:
            result['body'] = body.to_map()
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('body') is not None:
            self.body = GetGroupActiveInfoResponseBody().from_map(m['body'])
        return self
class GetCommentListHeaders(TeaModel):
    """Request headers, including the DingTalk access token."""

    # (wire key, attribute name) pairs shared by to_map/from_map.
    _FIELDS = (
        ('commonHeaders', 'common_headers'),
        ('x-acs-dingtalk-access-token', 'x_acs_dingtalk_access_token'),
    )

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for wire_key, attr in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[wire_key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire_key, attr in self._FIELDS:
            if m.get(wire_key) is not None:
                setattr(self, attr, m.get(wire_key))
        return self
class GetCommentListRequest(TeaModel):
    """Pagination parameters for the comment-list query."""

    # (wire key, attribute name) pairs shared by to_map/from_map.
    _FIELDS = (
        ('pageNumber', 'page_number'),
        ('pageSize', 'page_size'),
    )

    def __init__(
        self,
        page_number: int = None,
        page_size: int = None,
    ):
        # First page number of the pagination.
        self.page_number = page_number
        # Page size.
        self.page_size = page_size

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for wire_key, attr in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[wire_key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire_key, attr in self._FIELDS:
            if m.get(wire_key) is not None:
                setattr(self, attr, m.get(wire_key))
        return self
class GetCommentListResponseBodyData(TeaModel):
    """A single comment entry."""

    # (wire key, attribute name) pairs shared by to_map/from_map.
    _FIELDS = (
        ('commentUserName', 'comment_user_name'),
        ('content', 'content'),
        ('commentTime', 'comment_time'),
        ('commentId', 'comment_id'),
    )

    def __init__(
        self,
        comment_user_name: str = None,
        content: str = None,
        comment_time: float = None,
        comment_id: str = None,
    ):
        # Name of the commenter.
        self.comment_user_name = comment_user_name
        # Comment text.
        self.content = content
        # Time the comment was made.
        self.comment_time = comment_time
        # Comment ID.
        self.comment_id = comment_id

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for wire_key, attr in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[wire_key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire_key, attr in self._FIELDS:
            if m.get(wire_key) is not None:
                setattr(self, attr, m.get(wire_key))
        return self
class GetCommentListResponseBody(TeaModel):
    """Paged response body: a list of comments plus the total count."""

    def __init__(
        self,
        data: List[GetCommentListResponseBodyData] = None,
        total_count: int = None,
    ):
        self.data = data
        self.total_count = total_count

    def validate(self):
        # Validate each non-empty comment entry.
        for item in self.data or []:
            if item:
                item.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        result['data'] = []
        if self.data is not None:
            result['data'] = [item.to_map() if item else None for item in self.data]
        if self.total_count is not None:
            result['totalCount'] = self.total_count
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        self.data = []
        if m.get('data') is not None:
            self.data = [
                GetCommentListResponseBodyData().from_map(item)
                for item in m.get('data')
            ]
        if m.get('totalCount') is not None:
            self.total_count = m.get('totalCount')
        return self
class GetCommentListResponse(TeaModel):
    """Full API response wrapper: HTTP headers plus the deserialized body."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        body: GetCommentListResponseBody = None,
    ):
        self.headers = headers
        self.body = body

    def validate(self):
        # Both headers and body are required on a well-formed response.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        headers, body = self.headers, self.body
        if headers is not None:
            result['headers'] = headers
        if body is not None:
            result['body'] = body.to_map()
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('body') is not None:
            self.body = GetCommentListResponseBody().from_map(m['body'])
        return self
class GetPartnerTypeByParentIdHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') | |
<reponame>jhkennedy/itslive
import argparse
from datetime import datetime
import geojson
import h5py
import json
import numpy as np
import os
import psutil
import pyproj
import s3fs
import sys
import time
from tqdm import tqdm
# Date format as it appears in granule filenames of optical format:
# LC08_L1TP_011002_20150821_20170405_01_T1_X_LC08_L1TP_011002_20150720_20170406_01_T1_G0240V01_P038.nc
DATE_FORMAT = "%Y%m%d"
# Date and time format as it appears in granule filenames of radar format:
# S1A_IW_SLC__1SSH_20170221T204710_20170221T204737_015387_0193F6_AB07_X_S1B_IW_SLC__1SSH_20170227T204628_20170227T204655_004491_007D11_6654_G0240V02_P094.nc
DATE_TIME_FORMAT = "%Y%m%dT%H%M%S"
class memtracker:
    """Tracks this process's memory growth (RSS/VMS deltas via psutil) and,
    optionally, wall-clock time since construction; meminfo() prints both
    prefixed with a caller-supplied message."""

    def __init__(self, include_time=True):
        # When True, meminfo() also reports elapsed seconds since construction.
        self.output_time = include_time
        if include_time:
            self.start_time = time.time()
        self.process = psutil.Process()
        # Memory baseline captured at construction time.
        self.startrss = self.process.memory_info().rss
        self.startvms = self.process.memory_info().vms

    def meminfo(self, message):
        """Print memory growth (and elapsed time, if enabled) since construction."""
        if not self.output_time:
            print(f'{message:<30}: delrss={self.process.memory_info().rss - self.startrss:16,} mem_percent {self.process.memory_percent()} ' +
                  f'delvms={self.process.memory_info().vms - self.startvms:16,}',
                  flush=True)
            return
        elapsed = time.time() - self.start_time
        print(f'{message:<30}: time: {elapsed:8.2f} seconds mem_percent {self.process.memory_percent()} ' +
              f'delrss={self.process.memory_info().rss - self.startrss:16,} ' +
              f'delvms={self.process.memory_info().vms - self.startvms:16,}',
              flush=True)
# Process-wide memory/time tracker used for progress reporting below.
mt = memtracker()
# Anonymous (public-read) S3 filesystem for reading input granules.
s3 = s3fs.S3FileSystem(anon=True)
# Credentialed S3 filesystem for writing output; relies on AWS credentials from the environment.
s3_out = s3fs.S3FileSystem()
# returns a string (N78W124) for directory name based on granule centerpoint lat,lon
# !!!! Not implemented in geojson code yet !!! <- remove this line when it is.
def finddirstring(lat, lon):
    """Return the 10-degree lat/lon bin directory name for a granule centerpoint.

    The point is binned to the 10-degree cell toward the equator/prime meridian,
    e.g. (78.5, -123.4) -> 'N70W120'. A pole collapses into the 80-degree bin and
    the dateline into the 170-degree bin.
    """
    ns = 'N' if lat >= 0.0 else 'S'
    ew = 'E' if lon >= 0.0 else 'W'
    binlat = int(10 * np.trunc(np.abs(lat / 10.0)))
    if binlat == 90:  # exactly at a pole -> use the lat = 80 bin
        binlat = 80
    binlon = int(10 * np.trunc(np.abs(lon / 10.0)))
    if binlon >= 180:  # at the dateline -> back off to the 170 bin
        binlon = 170
    return f'{ns}{binlat:02d}{ew}{binlon:03d}'
def image_pair_feature_from_path(infilewithpath, five_points_per_side = False):
    """Build a geojson Feature (footprint polygon + metadata) for one image-pair granule.

    Opens the granule over S3 (read-only), reads the x/y coordinate vectors, the
    CF projection variable and the img_pair_info attributes, reprojects the image
    bounding box to EPSG:4326, and returns a geojson.Feature. When
    five_points_per_side is True, each polygon edge gets three extra interior
    points (at 1/4, 1/2, 3/4) so the reprojected outline follows edge curvature.
    """
    # from s3.ls:
    # infilewithpath = 'https://s3/its-live-data.jpl.nasa.gov/velocity_image_pair/landsat/v00.0/32609/LC08_L1TP_050024_20180713_20180730_01_T1_X_LE07_L1TP_050024_20180315_20180316_01_RT_G0240V01_P072.nc'
    # base URL from S3 directory listing has file path for s3fs access, not what you need for http directly,
    # so that is hard coded here. (or not used - don't need it in every feature)
    # base_URL = 'http://its-live-data.jpl.nasa.gov.s3.amazonaws.com/velocity_image_pair/landsat/v00.0'
    filename_tokens = infilewithpath.split('/')
    directory = '/'.join(filename_tokens[1:-1])
    filename = filename_tokens[-1]
    with s3.open(f"s3://{infilewithpath}", "rb") as ins3:
        inh5 = h5py.File(ins3, mode = 'r')
        # inh5 = h5py.File(s3.open(f"s3://{infilewithpath}", "rb"), mode = 'r')
        # inh5 = h5py.File(ingeoimg.in_dir_path + '/' + ingeoimg.filename,mode='r')
        # netCDF4/HDF5 cf 1.6 has x and y vectors of array pixel CENTERS
        xvals = np.array(inh5.get('x'))
        yvals = np.array(inh5.get('y'))
        # Extract projection variable (name differs by granule vintage/projection)
        projection_cf = None
        if 'mapping' in inh5:
            projection_cf = inh5['mapping']
        elif 'UTM_Projection' in inh5:
            projection_cf = inh5['UTM_Projection']
        elif 'Polar_Stereographic' in inh5:
            projection_cf = inh5['Polar_Stereographic']
        # NOTE(review): projection_cf stays None if no known mapping variable is
        # present, and the GeoTransform access below would then raise — presumably
        # every granule has one of the three; confirm upstream.
        imginfo_attrs = inh5['img_pair_info'].attrs
        # turn hdf5 img_pair_info attrs into a python dict to save below
        img_pair_info_dict = {}
        for k in imginfo_attrs.keys():
            if isinstance(imginfo_attrs[k], str):
                img_pair_info_dict[k] = imginfo_attrs[k]
            elif imginfo_attrs[k].shape == ():
                img_pair_info_dict[k] = imginfo_attrs[k].decode('utf-8')  # h5py returns byte values, turn into byte characters
            else:
                img_pair_info_dict[k] = imginfo_attrs[k][0] # h5py returns lists of numbers - all 1 element lists here, so dereference to number
        num_pix_x = len(xvals)
        num_pix_y = len(yvals)
        minval_x, pix_size_x, rot_x_ignored, maxval_y, rot_y_ignored, pix_size_y = [float(x) for x in projection_cf.attrs['GeoTransform'].split()]
        epsgcode = int(projection_cf.attrs['spatial_epsg'][0])
        inh5.close()
    # NOTE: these are pixel center values, need to modify by half the grid size to get bounding box/geotransform values
    projection_cf_minx = xvals[0] - pix_size_x/2.0
    projection_cf_maxx = xvals[-1] + pix_size_x/2.0
    projection_cf_miny = yvals[-1] + pix_size_y/2.0 # pix_size_y is negative!
    projection_cf_maxy = yvals[0] - pix_size_y/2.0 # pix_size_y is negative!
    transformer = pyproj.Transformer.from_crs(f"EPSG:{epsgcode}", "EPSG:4326", always_xy=True) # ensure lonlat output order
    # Corner points of the bounding box, reprojected to lon/lat.
    ll_lonlat = np.round(transformer.transform(projection_cf_minx,projection_cf_miny),decimals = 7).tolist()
    lr_lonlat = np.round(transformer.transform(projection_cf_maxx,projection_cf_miny),decimals = 7).tolist()
    ur_lonlat = np.round(transformer.transform(projection_cf_maxx,projection_cf_maxy),decimals = 7).tolist()
    ul_lonlat = np.round(transformer.transform(projection_cf_minx,projection_cf_maxy),decimals = 7).tolist()
    # find center lon lat for inclusion in feature (to determine lon lat grid cell directory)
    # projection_cf_centerx = (xvals[0] + xvals[-1])/2.0
    # projection_cf_centery = (yvals[0] + yvals[-1])/2.0
    center_lonlat = np.round(transformer.transform((xvals[0] + xvals[-1])/2.0,(yvals[0] + yvals[-1])/2.0 ),decimals = 7).tolist()
    if five_points_per_side:
        # Walk the four edges of the box counterclockwise (ll -> lr -> ur -> ul -> ll),
        # adding interior points at the given fractions along each edge. On each edge
        # one of dx/dy is deliberately zero (difference of a value with itself).
        fracs = [0.25, 0.5, 0.75]
        polylist = [] # ring in counterclockwise order
        polylist.append(ll_lonlat)
        dx = projection_cf_maxx - projection_cf_minx
        dy = projection_cf_miny - projection_cf_miny
        for frac in fracs:
            polylist.append(np.round(transformer.transform(projection_cf_minx + (frac * dx), projection_cf_miny + (frac * dy)),decimals = 7).tolist())
        polylist.append(lr_lonlat)
        dx = projection_cf_maxx - projection_cf_maxx
        dy = projection_cf_maxy - projection_cf_miny
        for frac in fracs:
            polylist.append(np.round(transformer.transform(projection_cf_maxx + (frac * dx), projection_cf_miny + (frac * dy)),decimals = 7).tolist())
        polylist.append(ur_lonlat)
        dx = projection_cf_minx - projection_cf_maxx
        dy = projection_cf_maxy - projection_cf_maxy
        for frac in fracs:
            polylist.append(np.round(transformer.transform(projection_cf_maxx + (frac * dx), projection_cf_maxy + (frac * dy)),decimals = 7).tolist())
        polylist.append(ul_lonlat)
        dx = projection_cf_minx - projection_cf_minx
        dy = projection_cf_miny - projection_cf_maxy
        for frac in fracs:
            polylist.append(np.round(transformer.transform(projection_cf_minx + (frac * dx), projection_cf_maxy + (frac * dy)),decimals = 7).tolist())
        polylist.append(ll_lonlat)
    else:
        # only the corner points
        polylist = [ ll_lonlat, lr_lonlat, ur_lonlat, ul_lonlat, ll_lonlat ]
    poly = geojson.Polygon([polylist])
    middate = img_pair_info_dict['date_center']
    deldays = img_pair_info_dict['date_dt']
    percent_valid_pix = img_pair_info_dict['roi_valid_percentage']
    feat = geojson.Feature( geometry=poly,
                            properties={
                                'filename': filename,
                                'directory': directory,
                                'middate':middate,
                                'deldays':deldays,
                                'percent_valid_pix': percent_valid_pix,
                                'center_lonlat':center_lonlat,
                                'data_epsg':epsgcode,
                                # date_deldays_strrep is a string version of center date and time interval that will sort by date and then by interval length (shorter intervals first) - relies on "string" comparisons by byte
                                'date_deldays_strrep': img_pair_info_dict['date_center'] + f"{img_pair_info_dict['date_dt']:07.1f}".replace('.',''),
                                'img_pair_info_dict': img_pair_info_dict,
                            }
                            )
    return(feat)
def get_tokens_from_filename(filename):
    """
    Extract acquisition/processing dates and path/row for two images from the
    optical granule filename, or start/end date/time and product unique ID for
    radar granule filename.
    """
    # The two image names are joined by '_X_' in the granule filename; the second
    # part also carries two extra ITS_LIVE-specific trailing tokens.
    url_files = os.path.basename(filename).split('_X_')
    tokens_1 = url_files[0].split('_')
    tokens_2 = url_files[1].split('_')
    # Optical and radar granules follow different naming conventions; the token
    # count of the first image name tells them apart.
    is_optical = len(tokens_1) < 9
    if is_optical:
        # Optical granule: acquisition date, processing date and path&row per image.
        first_date_1 = datetime.strptime(tokens_1[3], DATE_FORMAT)
        second_date_1 = datetime.strptime(tokens_1[4], DATE_FORMAT)
        key_1 = tokens_1[2]
        first_date_2 = datetime.strptime(tokens_2[3], DATE_FORMAT)
        second_date_2 = datetime.strptime(tokens_2[4], DATE_FORMAT)
        key_2 = tokens_2[2]
    else:
        # Radar granule: start/stop date-time and product unique ID per image,
        # counted from the end of each name.
        first_date_1 = datetime.strptime(tokens_1[-5], DATE_TIME_FORMAT)
        second_date_1 = datetime.strptime(tokens_1[-4], DATE_TIME_FORMAT)
        key_1 = tokens_1[-1]
        first_date_2 = datetime.strptime(tokens_2[-7], DATE_TIME_FORMAT)
        second_date_2 = datetime.strptime(tokens_2[-6], DATE_TIME_FORMAT)
        key_2 = tokens_2[-3]
    return is_optical, first_date_1, second_date_1, key_1, first_date_2, second_date_2, key_2
def skip_duplicate_granules(found_urls: list, skipped_granules_filename: str):
"""
Skip duplicate granules (the ones that have earlier processing date(s)).
"""
# Need to remove duplicate granules for the middle date: some granules
# have newer processing date, keep those.
keep_urls = {}
skipped_double_granules = []
for each_url in tqdm(found_urls, ascii=True, desc='Skipping duplicate granules...'):
# Extract acquisition and processing dates for optical granule,
# start/end date/time and product unique ID for radar granule
is_optical, url_acq_1, url_proc_1, key_1, url_acq_2, url_proc_2, key_2 = \
get_tokens_from_filename(each_url)
if is_optical:
# Acquisition time and path/row of images should be identical for
# duplicate granules
granule_id = '_'.join([
url_acq_1.strftime(DATE_FORMAT),
key_1,
url_acq_2.strftime(DATE_FORMAT),
key_2
])
else:
# Start/stop date/time of both images
granule_id = '_'.join([
url_acq_1.strftime(DATE_TIME_FORMAT),
url_proc_1.strftime(DATE_TIME_FORMAT),
url_acq_2.strftime(DATE_TIME_FORMAT),
url_proc_2.strftime(DATE_TIME_FORMAT),
])
# There is a granule for the mid_date already:
# * For radar granule: issue a warning reporting product unique ID for duplicate granules
# * For optical granule: check which processing time is newer,
# keep the one with newer processing date
if granule_id in keep_urls:
if not is_optical:
# Radar format granule, just issue a warning
all_urls = ' '.join(keep_urls[granule_id])
print(f"WARNING: multiple granules are detected for {each_url}: {all_urls}")
keep_urls[granule_id].append(each_url)
continue
# Process optical granule
# Flag if newly found URL should be kept
keep_found_url = False
for found_url in keep_urls[granule_id]:
# Check already found URLs for processing time
_, _, found_proc_1, _, _, found_proc_2, _ = \
get_tokens_from_filename(found_url)
# If both granules have identical processing time,
# keep them both - granules might be in different projections,
# any other than target projection will be handled later
if url_proc_1 == found_proc_1 and \
url_proc_2 == found_proc_2:
keep_urls[granule_id].append(each_url)
keep_found_url = True
break
# There are no "identical" (same acquision and processing times)
# granules to "each_url", check if new granule has newer processing dates
if not keep_found_url:
# Check if any of the found URLs have older processing time
| |
<reponame>spacycn/tutorial-knowledge-base
import logging
from typing import List, Dict, Any, Optional, Text
from grakn.client import GraknClient
logger = logging.getLogger(__name__)
class KnowledgeBase(object):
    """Abstract interface for a domain knowledge base.

    Concrete implementations (GraphDatabase, InMemoryGraph) provide entity
    lookup, attribute retrieval, entity validation, and key/value mapping.
    """
    def get_entities(
        self,
        entity_type: Text,
        attributes: Optional[List[Dict[Text, Text]]] = None,
        limit: int = 5,
    ) -> List[Dict[Text, Any]]:
        """Return up to `limit` entities of `entity_type` matching `attributes`."""
        raise NotImplementedError("Method is not implemented.")
    def get_attribute_of(
        self, entity_type: Text, key_attribute: Text, entity: Text, attribute: Text
    ) -> List[Any]:
        """Return the value(s) of `attribute` for the entity whose `key_attribute` equals `entity`."""
        raise NotImplementedError("Method is not implemented.")
    def validate_entity(
        self, entity_type, entity, key_attribute, attributes
    ) -> Optional[Dict[Text, Any]]:
        """Return the entity if it exists and has all given attribute values, else None."""
        raise NotImplementedError("Method is not implemented.")
    def map(self, mapping_type: Text, mapping_key: Text) -> Text:
        """Look up `mapping_key` in the mapping table `mapping_type` and return the mapped value."""
        raise NotImplementedError("Method is not implemented.")
class GraphDatabase(KnowledgeBase):
    """
    GraphDatabase uses a grakn graph database to encode your domain knowledge. Make
    sure to have the graph database set up and the grakn server running.
    """
    def __init__(self, uri: Text = "localhost:48555", keyspace: Text = "banking"):
        # Address of the grakn server and the keyspace holding the banking data.
        self.uri = uri
        self.keyspace = keyspace
        # Email identifying the current user; _get_me_clause uses it to restrict
        # queries to entities related to "me".
        self.me = "<EMAIL>"
    def _thing_to_dict(self, thing):
        """
        Converts a thing (a grakn object) to a dict for easy retrieval of the thing's
        attributes.
        """
        entity = {"id": thing.id, "type": thing.type().label()}
        for each in thing.attributes():
            entity[each.type().label()] = each.value()
        return entity
    def _execute_entity_query(self, query: Text) -> List[Dict[Text, Any]]:
        """
        Executes a query that returns a list of entities with all their attributes.
        """
        # A fresh client/session/read-transaction per query; all three are closed
        # by the context managers on exit.
        with GraknClient(uri=self.uri) as client:
            with client.session(keyspace=self.keyspace) as session:
                with session.transaction().read() as tx:
                    logger.debug("Executing Graql Query: " + query)
                    result_iter = tx.query(query)
                    concepts = result_iter.collect_concepts()
                    entities = []
                    for c in concepts:
                        entities.append(self._thing_to_dict(c))
                    return entities
    def _execute_attribute_query(self, query: Text) -> List[Any]:
        """
        Executes a query that returns the value(s) an entity has for a specific
        attribute.
        """
        with GraknClient(uri=self.uri) as client:
            with client.session(keyspace=self.keyspace) as session:
                with session.transaction().read() as tx:
                    # NOTE(review): uses print() while _execute_entity_query uses
                    # logger.debug — should probably be unified on the logger.
                    print("Executing Graql Query: " + query)
                    result_iter = tx.query(query)
                    concepts = result_iter.collect_concepts()
                    return [c.value() for c in concepts]
    def _execute_relation_query(
        self, query: Text, relation_name: Text
    ) -> List[Dict[Text, Any]]:
        """
        Execute a query that queries for a relation. All attributes of the relation and
        all entities participating in the relation are part of the result.
        """
        with GraknClient(uri=self.uri) as client:
            with client.session(keyspace=self.keyspace) as session:
                with session.transaction().read() as tx:
                    # NOTE(review): print() vs logger.debug inconsistency (see above).
                    print("Executing Graql Query: " + query)
                    result_iter = tx.query(query)
                    relations = []
                    for concept in result_iter:
                        # The answer map binds the relation variable name to the
                        # relation instance itself.
                        relation_entity = concept.map().get(relation_name)
                        relation = self._thing_to_dict(relation_entity)
                        # Attach each role player as a nested dict keyed by its
                        # role label.
                        for (
                            role_entity,
                            entity_set,
                        ) in relation_entity.role_players_map().items():
                            role_label = role_entity.label()
                            # pop() assumes exactly one player per role — TODO confirm
                            thing = entity_set.pop()
                            relation[role_label] = self._thing_to_dict(thing)
                        relations.append(relation)
                    return relations
    def _get_me_clause(self, entity_type: Text) -> Text:
        """
        Construct the me clause. Needed to only list, for example, accounts that are
        related to me.
        :param entity_type: entity type
        :return: me clause as string
        """
        clause = ""
        # do not add the me clause to a query asking for banks or people as they are
        # independent of the accounts related to me
        if entity_type not in ["person", "bank"]:
            clause = (
                f"$person isa person, has email '{self.me}';"
                f"$contract(customer: $person, offer: $account, provider: $bank) isa contract;"
            )
        return clause
    def _get_attribute_clause(
        self, attributes: Optional[List[Dict[Text, Text]]] = None
    ) -> Text:
        """
        Construct the attribute clause.
        :param attributes: attributes
        :return: attribute clause as string
        """
        # NOTE(review): attribute values are interpolated directly into Graql;
        # fine for trusted values, injection-prone if they ever come from users.
        clause = ""
        if attributes:
            clause = ",".join([f"has {a['key']} '{a['value']}'" for a in attributes])
            clause = ", " + clause
        return clause
    def get_attribute_of(
        self, entity_type: Text, key_attribute: Text, entity: Text, attribute: Text
    ) -> List[Any]:
        """
        Get the value of the given attribute for the provided entity.
        :param entity_type: entity type
        :param key_attribute: key attribute of entity
        :param entity: name of the entity
        :param attribute: attribute of interest
        :return: the value of the attribute
        """
        me_clause = self._get_me_clause(entity_type)
        return self._execute_attribute_query(
            f"""
            match
                {me_clause}
                ${entity_type} isa {entity_type},
                has {key_attribute} '{entity}',
                has {attribute} $a;
            get $a;
            """
        )
    def _get_transaction_entities(
        self, attributes: Optional[List[Dict[Text, Text]]] = None
    ) -> List[Dict[Text, Any]]:
        """
        Query the graph database for transactions. Restrict the transactions
        by the provided attributes, if any attributes are given.
        As transaction is a relation, query also the related account entities.
        :param attributes: list of attributes
        :return: list of transactions
        """
        attribute_clause = self._get_attribute_clause(attributes)
        me_clause = self._get_me_clause("transaction")
        return self._execute_relation_query(
            f"match "
            f"{me_clause} "
            f"$transaction(account-of-receiver: $x, account-of-creator: $account) "
            f"isa transaction{attribute_clause}; "
            f"get $transaction;",
            "transaction",
        )
    def _get_card_entities(
        self, attributes: Optional[List[Dict[Text, Text]]] = None, limit: int = 5
    ) -> List[Dict[Text, Any]]:
        """
        Query the graph database for cards. Restrict the cards
        by the provided attributes, if any attributes are given.
        :param attributes: list of attributes
        :param limit: maximum number of cards to return
        :return: list of cards
        """
        attribute_clause = self._get_attribute_clause(attributes)
        me_clause = self._get_me_clause("card")
        return self._execute_entity_query(
            f"match "
            f"{me_clause} "
            f"$represented-by(bank-account: $account, bank-card: $card) "
            f"isa represented-by;"
            f"$card isa card{attribute_clause}; "
            f"get $card;"
        )[:limit]
    def _get_account_entities(
        self, attributes: Optional[List[Dict[Text, Text]]] = None, limit: int = 5
    ) -> List[Dict[Text, Any]]:
        """
        Query the graph database for accounts. Restrict the accounts
        by the provided attributes, if any attributes are given.
        Query the related relation contract, to obtain additional information
        about the bank and the person who owns the account.
        :param attributes: list of attributes
        :param limit: maximum number of accounts to return
        :return: list of accounts
        """
        attribute_clause = self._get_attribute_clause(attributes)
        me_clause = self._get_me_clause("account")
        entities = self._execute_relation_query(
            f"""
            match
            $account isa account{attribute_clause};
            {me_clause}
            get $contract;
            """,
            "contract",
        )[:limit]
        # Flatten the account ("offer" role player) attributes into the top-level
        # contract dict so callers see a single flat entity.
        for entity in entities:
            for k, v in entity["offer"].items():
                entity[k] = v
            entity.pop("offer")
        return entities
    def get_entities(
        self,
        entity_type: Text,
        attributes: Optional[List[Dict[Text, Text]]] = None,
        limit: int = 5,
    ) -> List[Dict[Text, Any]]:
        """
        Query the graph database for entities of the given type. Restrict the entities
        by the provided attributes, if any attributes are given.
        :param entity_type: the entity type
        :param attributes: list of attributes
        :param limit: maximum number of entities to return
        :return: list of entities
        """
        # Transactions, accounts and cards need relation-aware queries;
        # everything else is a plain entity lookup.
        if entity_type == "transaction":
            return self._get_transaction_entities(attributes)
        if entity_type == "account":
            return self._get_account_entities(attributes, limit)
        if entity_type == "card":
            return self._get_card_entities(attributes, limit)
        me_clause = self._get_me_clause(entity_type)
        attribute_clause = self._get_attribute_clause(attributes)
        return self._execute_entity_query(
            f"match "
            f"{me_clause} "
            f"${entity_type} isa {entity_type}{attribute_clause}; "
            f"get ${entity_type};"
        )[:limit]
    def map(self, mapping_type: Text, mapping_key: Text) -> Optional[Text]:
        """
        Query the given mapping table for the provided key.
        :param mapping_type: the name of the mapping table
        :param mapping_key: the mapping key
        :return: the mapping value, or None if no unique mapping was found
        """
        value = self._execute_attribute_query(
            f"match "
            f"$mapping isa {mapping_type}, "
            f"has mapping-key '{mapping_key}', "
            f"has mapping-value $v;"
            f"get $v;"
        )
        # Implicitly returns None unless exactly one mapping value matched.
        if value and len(value) == 1:
            return value[0]
    def validate_entity(
        self, entity_type: Text, entity: Text, key_attribute: Text, attributes: Optional[List[Dict[Text, Text]]]
    ) -> Optional[Dict[Text, Any]]:
        """
        Validates if the given entity has all provided attribute values.
        :param entity_type: entity type
        :param entity: name of the entity
        :param key_attribute: key attribute of entity
        :param attributes: attributes
        :return: the found entity, or None if not exactly one entity matched
        """
        attribute_clause = self._get_attribute_clause(attributes)
        value = self._execute_entity_query(
            f"match "
            f"${entity_type} isa {entity_type}{attribute_clause}, "
            f"has {key_attribute} '{entity}'; "
            f"get ${entity_type};"
        )
        # Implicitly returns None unless exactly one entity matched.
        if value and len(value) == 1:
            return value[0]
class InMemoryGraph(KnowledgeBase):
"""
If you don't want to use a graph database and you just have a few data points, you
can also store your domain knowledge, for example, in a dictionary.
This class is an example class that uses a python dictionary to encode some domain
knowledge about banks.
"""
    def __init__(self):
        """Populate the in-memory knowledge graph with example bank data."""
        # Hard-coded knowledge graph: maps an entity type to the list of
        # entities (plain attribute dictionaries) of that type.
        self.graph = {
            "bank": [
                {
                    "name": "N26",
                    "headquarters": "Berlin",
                    "country": "Germany",
                    "free-accounts": "true",
                },
                {
                    "name": "bunq",
                    "headquarters": "Amsterdam",
                    "country": "Netherlands",
                    "free-accounts": "false",
                },
                {
                    "name": "Deutsche Bank",
                    "headquarters": "Frankfurt am Main",
                    "country": "Germany",
                    "free-accounts": "false",
                },
                {
                    "name": "Commerzbank",
                    "headquarters": "Frankfurt am Main",
                    "country": "Germany",
                    "free-accounts": "true",
                },
                {
                    "name": "Targobank",
                    "headquarters": "Düsseldorf",
                    "country": "Germany",
                    "free-accounts": "true",
                },
                {
                    "name": "DKB",
                    "headquarters": "Berlin",
                    "country": "Germany",
                    "free-accounts": "true",
                },
                {
                    "name": "Comdirect",
                    "headquarters": "Quickborn",
                    "country": "Germany",
                    "free-accounts": "true",
                },
            ]
        }
        # Maps synonyms a user may utter to the canonical attribute name
        # used as a key in the graph above.
        self.attribute_mapping = {
            "headquarters": "headquarters",
            "HQ": "headquarters",
            "main office": "headquarters",
            "city": "headquarters",
            "name": "name",
            "country": "country",
            "free-accounts": "free-accounts",
            "free accounts": "free-accounts",
        }
        # Maps (possibly plural) entity-type mentions to the canonical type.
        self.entity_type_mapping = {"banks": "bank", "bank": "bank"}
def get_entities(
self,
entity_type: Text,
attributes: Optional[List[Dict[Text, Text]]] = None,
limit: int = 5,
) -> List[Dict[Text, Any]]:
"""
Query the graph database for entities of the given type. Restrict the entities
by the provided | |
and overwritten from the
# bogus value 'J'. Caching stops this from happening.
node = qml.QNode(circuit, operable_mock_device_2_wires, cache=True)
node.evaluate([0.0])
keys = node.grad_method_for_par.keys()
if keys:
k0 = [k for k in keys][0]
node.grad_method_for_par[k0] = "J"
with pytest.raises(ValueError, match="Unknown gradient method"):
node.jacobian(0.5)
def test_indices_not_unique(self, operable_mock_device_2_wires):
"""Tests that QNode.jacobian properly raises an error if the
jacobian is requested for non-unique indices."""
def circuit(x):
qml.Rot(0.3, x, -0.2, wires=[0])
return qml.expval(qml.PauliZ(0))
node = qml.QNode(circuit, operable_mock_device_2_wires)
with pytest.raises(ValueError, match="Parameter indices must be unique."):
node.jacobian(0.5, which=[0, 0])
def test_indices_nonexistant(self, operable_mock_device_2_wires):
"""Tests that QNode.jacobian properly raises an error if the
jacobian is requested for non-existant parameters."""
def circuit(x):
qml.Rot(0.3, x, -0.2, wires=[0])
return qml.expval(qml.PauliZ(0))
node = qml.QNode(circuit, operable_mock_device_2_wires)
with pytest.raises(ValueError, match="Tried to compute the gradient wrt"):
node.jacobian(0.5, which=[0, 6])
with pytest.raises(ValueError, match="Tried to compute the gradient wrt"):
node.jacobian(0.5, which=[1, -1])
def test_unknown_method(self, operable_mock_device_2_wires):
"""Tests that QNode.jacobian properly raises an error if the
gradient method is unknown."""
def circuit(x):
qml.Rot(0.3, x, -0.2, wires=[0])
return qml.expval(qml.PauliZ(0))
node = qml.QNode(circuit, operable_mock_device_2_wires)
with pytest.raises(ValueError, match="Unknown gradient method"):
node.jacobian(0.5, method="unknown")
def test_wrong_order_in_finite_difference(self, operable_mock_device_2_wires):
"""Tests that QNode.jacobian properly raises an error if finite
differences are attempted with wrong order."""
def circuit(x):
qml.Rot(0.3, x, -0.2, wires=[0])
return qml.expval(qml.PauliZ(0))
node = qml.QNode(circuit, operable_mock_device_2_wires)
with pytest.raises(ValueError, match="Order must be 1 or 2"):
node.jacobian(0.5, method="F", order=3)
class TestQNodeParameters:
    """Tests the handling of parameters in the QNode"""
    # Sweep x over two full periods; pair each x with a quadratic ramp for y.
    @pytest.mark.parametrize(
        "x,y",
        zip(np.linspace(-2 * np.pi, 2 * np.pi, 7), np.linspace(-2 * np.pi, 2 * np.pi, 7) ** 2 / 11),
    )
    def test_fanout(self, qubit_device_1_wire, tol, x, y):
        """Tests that qnodes can compute the correct function when the
        same parameter is used in multiple gates."""
        def circuit(x, y):
            qml.RX(x, wires=[0])
            qml.RZ(y, wires=[0])
            qml.RX(x, wires=[0])
            return qml.expval(qml.PauliZ(0))
        def analytic_expval(x, y):
            # Closed-form <Z> of RX(x) RZ(y) RX(x) applied to |0>.
            return math.cos(x) ** 2 - math.cos(y) * math.sin(x) ** 2
        node = qml.QNode(circuit, qubit_device_1_wire)
        assert np.isclose(node(x, y), analytic_expval(x, y), atol=tol, rtol=0)
    def test_array_parameters_scalar_return(self, qubit_device_1_wire, tol):
        """Test that QNode can take arrays as input arguments, and that they interact properly with Autograd.
        Test case for a circuit that returns a scalar."""
        # dummy1/dummy2 only participate in the classical cost function;
        # the circuit itself reads two entries of the array argument.
        def circuit(dummy1, array, dummy2):
            qml.RY(0.5 * array[0, 1], wires=0)
            qml.RY(-0.5 * array[1, 1], wires=0)
            return qml.expval(qml.PauliX(0))
        node = qml.QNode(circuit, qubit_device_1_wire)
        args = (0.46, np.array([[2.0, 3.0, 0.3], [7.0, 4.0, 2.1]]), -0.13)
        # Precomputed gradients of cost() wrt (x, array, y).
        grad_target = (
            np.array(1.0),
            np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]),
            np.array(-0.4),
        )
        cost_target = 1.03257
        def cost(x, array, y):
            # Mix the quantum result with classical processing of all args.
            c = node(0.111, array, 4.5)
            return c + 0.5 * array[0, 0] + x - 0.4 * y
        cost_grad = qml.grad(cost, argnum=[0, 1, 2])
        computed_grad = cost_grad(*args)
        assert np.isclose(cost(*args), cost_target, atol=tol, rtol=0)
        assert np.allclose(computed_grad[0], grad_target[0], atol=tol, rtol=0)
        assert np.allclose(computed_grad[1], grad_target[1], atol=tol, rtol=0)
        assert np.allclose(computed_grad[2], grad_target[2], atol=tol, rtol=0)
    def test_qnode_array_parameters_1_vector_return(self, qubit_device_1_wire, tol):
        """Test that QNode can take arrays as input arguments, and that they interact properly with Autograd.
        Test case for a circuit that returns a 1-vector."""
        # Same circuit as the scalar case, but returning a length-1 tuple.
        def circuit(dummy1, array, dummy2):
            qml.RY(0.5 * array[0, 1], wires=0)
            qml.RY(-0.5 * array[1, 1], wires=0)
            return (qml.expval(qml.PauliX(0)),)
        node = qml.QNode(circuit, qubit_device_1_wire)
        args = (0.46, np.array([[2.0, 3.0, 0.3], [7.0, 4.0, 2.1]]), -0.13)
        # Precomputed gradients of cost() wrt (x, array, y).
        grad_target = (
            np.array(1.0),
            np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]),
            np.array(-0.4),
        )
        cost_target = 1.03257
        def cost(x, array, y):
            # Index [0] unpacks the 1-vector returned by the qnode.
            c = node(0.111, array, 4.5)[0]
            return c + 0.5 * array[0, 0] + x - 0.4 * y
        cost_grad = qml.grad(cost, argnum=[0, 1, 2])
        computed_grad = cost_grad(*args)
        assert np.isclose(cost(*args), cost_target, atol=tol, rtol=0)
        assert np.allclose(computed_grad[0], grad_target[0], atol=tol, rtol=0)
        assert np.allclose(computed_grad[1], grad_target[1], atol=tol, rtol=0)
        assert np.allclose(computed_grad[2], grad_target[2], atol=tol, rtol=0)
    def test_qnode_array_parameters_2_vector_return(self, qubit_device_2_wires, tol):
        """Test that QNode can take arrays as input arguments, and that they interact properly with Autograd.
        Test case for a circuit that returns a 2-vector."""
        # Second wire is rotated too, but the cost only uses component [0],
        # so the expected value and gradients match the scalar case.
        def circuit(dummy1, array, dummy2):
            qml.RY(0.5 * array[0, 1], wires=0)
            qml.RY(-0.5 * array[1, 1], wires=0)
            qml.RY(array[1, 0], wires=1)
            return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1))
        node = qml.QNode(circuit, qubit_device_2_wires)
        args = (0.46, np.array([[2.0, 3.0, 0.3], [7.0, 4.0, 2.1]]), -0.13)
        # Precomputed gradients of cost() wrt (x, array, y).
        grad_target = (
            np.array(1.0),
            np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]),
            np.array(-0.4),
        )
        cost_target = 1.03257
        def cost(x, array, y):
            c = node(0.111, array, 4.5)[0]
            return c + 0.5 * array[0, 0] + x - 0.4 * y
        cost_grad = qml.grad(cost, argnum=[0, 1, 2])
        computed_grad = cost_grad(*args)
        assert np.isclose(cost(*args), cost_target, atol=tol, rtol=0)
        assert np.allclose(computed_grad[0], grad_target[0], atol=tol, rtol=0)
        assert np.allclose(computed_grad[1], grad_target[1], atol=tol, rtol=0)
        assert np.allclose(computed_grad[2], grad_target[2], atol=tol, rtol=0)
    def test_array_parameters_evaluate(self, qubit_device_2_wires, tol):
        """Tests that array parameters gives same result as positional arguments."""
        a, b, c = 0.5, 0.54, 0.3
        def ansatz(x, y, z):
            qml.QubitStateVector(np.array([1, 0, 1, 1]) / np.sqrt(3), wires=[0, 1])
            qml.Rot(x, y, z, wires=0)
            qml.CNOT(wires=[0, 1])
            return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))
        # Same ansatz exposed through three different argument layouts.
        @qml.qnode(qubit_device_2_wires)
        def circuit1(x, y, z):
            return ansatz(x, y, z)
        @qml.qnode(qubit_device_2_wires)
        def circuit2(x, array):
            return ansatz(x, array[0], array[1])
        @qml.qnode(qubit_device_2_wires)
        def circuit3(array):
            return ansatz(*array)
        positional_res = circuit1(a, b, c)
        positional_grad = circuit1.jacobian([a, b, c])
        array_res = circuit2(a, np.array([b, c]))
        array_grad = circuit2.jacobian([a, np.array([b, c])])
        assert np.allclose(positional_res, array_res, atol=tol, rtol=0)
        assert np.allclose(positional_grad, array_grad, atol=tol, rtol=0)
        # Plain Python lists must behave like arrays.
        list_res = circuit2(a, [b, c])
        list_grad = circuit2.jacobian([a, [b, c]])
        assert np.allclose(positional_res, list_res, atol=tol, rtol=0)
        assert np.allclose(positional_grad, list_grad, atol=tol, rtol=0)
        array_res = circuit3(np.array([a, b, c]))
        array_grad = circuit3.jacobian([np.array([a, b, c])])
        list_res = circuit3([a, b, c])
        list_grad = circuit3.jacobian([[a, b, c]])
        assert np.allclose(positional_res, array_res, atol=tol, rtol=0)
        assert np.allclose(positional_grad, array_grad, atol=tol, rtol=0)
    def test_multiple_expectation_different_wires(self, qubit_device_2_wires, tol):
        """Tests that qnodes return multiple expectation values."""
        a, b, c = 0.5, 0.54, 0.3
        @qml.qnode(qubit_device_2_wires)
        def circuit(x, y, z):
            qml.RX(x, wires=[0])
            qml.RZ(y, wires=[0])
            qml.CNOT(wires=[0, 1])
            qml.RY(y, wires=[0])
            qml.RX(z, wires=[0])
            return qml.expval(qml.PauliY(0)), qml.expval(qml.PauliZ(1))
        def analytic_expval(a, b, c):
            # Closed-form [<Y_0>, <Z_1>] for the circuit above.
            return [-1 * math.cos(a) * math.cos(b) * math.sin(c), math.cos(a)]
        res = circuit(a, b, c)
        analytic_res = analytic_expval(a, b, c)
        assert np.allclose(res, analytic_res, atol=tol, rtol=0)
class TestQNodeKeywordArguments:
    """Tests covering how QNodes consume keyword arguments."""

    def test_multiple_keywordargs_used(self, qubit_device_2_wires, tol):
        """A qnode must feed several keyword arguments into its circuit."""

        def circuit(w, x=None, y=None):
            qml.RX(x, wires=[0])
            qml.RX(y, wires=[1])
            return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))

        qnode = qml.QNode(circuit, qubit_device_2_wires)
        result = qnode(1.0, x=np.pi, y=np.pi)
        assert np.allclose(result, [-1.0, -1.0], atol=tol, rtol=0)

    def test_multidimensional_keywordargs_used(self, qubit_device_2_wires, tol):
        """A qnode must accept sequence-valued keyword arguments."""

        def circuit(w, x=None):
            qml.RX(x[0], wires=[0])
            qml.RX(x[1], wires=[1])
            return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))

        qnode = qml.QNode(circuit, qubit_device_2_wires)
        result = qnode(1.0, x=[np.pi, np.pi])
        assert np.allclose(result, [-1.0, -1.0], atol=tol, rtol=0)

    def test_keywordargs_for_wires(self, qubit_device_2_wires, tol):
        """Wire indices supplied as keyword arguments must be honoured."""
        default_wire = 0

        def circuit(x, q=default_wire):
            qml.RX(x, wires=[q])
            return qml.expval(qml.PauliZ(q))

        qnode = qml.QNode(circuit, qubit_device_2_wires)

        # An explicit keyword overrides the default wire.
        result = qnode(np.pi, q=1)
        assert qnode.queue[0].wires == [1]
        assert np.isclose(result, -1.0, atol=tol, rtol=0)

        # Omitting the keyword falls back to the default wire.
        result = qnode(np.pi)
        assert qnode.queue[0].wires == [default_wire]
        assert np.isclose(result, -1.0, atol=tol, rtol=0)

    def test_keywordargs_used(self, qubit_device_1_wire, tol):
        """A qnode must feed a single keyword argument into its circuit."""

        def circuit(w, x=None):
            qml.RX(x, wires=[0])
            return qml.expval(qml.PauliZ(0))

        qnode = qml.QNode(circuit, qubit_device_1_wire)
        result = qnode(1.0, x=np.pi)
        assert np.isclose(result, -1.0, atol=tol, rtol=0)

    def test_keywordarg_updated_in_multiple_calls(self, qubit_device_2_wires, tol):
        """Consecutive calls must not reuse a stale keyword argument."""

        def circuit(w, x=None):
            qml.RX(w, wires=[0])
            qml.RX(x, wires=[1])
            return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))

        qnode = qml.QNode(circuit, qubit_device_2_wires)
        first = qnode(0.1, x=0.0)
        second = qnode(0.1, x=np.pi)
        assert first[1] != second[1]

    def test_keywordarg_passes_through_classicalnode(self, qubit_device_2_wires, tol):
        """Keyword arguments must survive being routed through classical code."""

        def circuit(w, x=None):
            qml.RX(w, wires=[0])
            qml.RX(x, wires=[1])
            return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))

        qnode = qml.QNode(circuit, qubit_device_2_wires)

        def classical_node(w, x=None):
            return qnode(w, x=x)

        result = classical_node(0.0, x=np.pi)
        assert np.allclose(result, [1.0, -1.0], atol=tol, rtol=0)
class TestQNodeGradients:
"""Qnode gradient tests."""
@pytest.mark.parametrize("shape", [(8,), (8, 1), (4, 2), (2, 2, 2), (2, 1, 2, 1, 2)])
def test_multidim_array(self, shape, tol):
"""Tests that arguments which are multidimensional arrays are
properly evaluated and differentiated in QNodes."""
base_array = np.linspace(-1.0, 1.0, 8)
multidim_array = np.reshape(base_array, shape)
def circuit(w):
qml.RX(w[np.unravel_index(0, shape)], wires=0) # base_array[0]
qml.RX(w[np.unravel_index(1, shape)], wires=1) # base_array[1]
qml.RX(w[np.unravel_index(2, shape)], wires=2) # ...
qml.RX(w[np.unravel_index(3, shape)], wires=3)
qml.RX(w[np.unravel_index(4, shape)], wires=4)
qml.RX(w[np.unravel_index(5, shape)], wires=5)
qml.RX(w[np.unravel_index(6, shape)], wires=6)
qml.RX(w[np.unravel_index(7, shape)], wires=7)
return tuple(qml.expval(qml.PauliZ(idx)) for idx in range(len(base_array)))
dev = qml.device("default.qubit", wires=8)
circuit = qml.QNode(circuit, dev)
# circuit evaluations
circuit_output = circuit(multidim_array)
expected_output = np.cos(base_array)
assert np.allclose(circuit_output, expected_output, atol=tol, rtol=0)
# circuit jacobians
circuit_jacobian = circuit.jacobian([multidim_array])
expected_jacobian = -np.diag(np.sin(base_array))
assert np.allclose(circuit_jacobian, expected_jacobian, atol=tol, rtol=0)
def test_qnode_cv_gradient_methods(self, operable_mock_CV_device_2_wires):
"""Tests the gradient computation methods on CV circuits."""
# we can only use the 'A' method on parameters which only affect gaussian operations
# that are not succeeded by | |
Asian ideograph
0x215F31: (0x968D, 0), # East Asian ideograph
0x223B28: (0x67A4, 0), # East Asian ideograph
0x213B29: (0x5BD0, 0), # East Asian ideograph
0x213B2A: (0x5BD3, 0), # East Asian ideograph
0x213B2B: (0x5BE1, 0), # East Asian ideograph
0x213B2C: (0x5BE5, 0), # East Asian ideograph
0x215F32: (0x9698, 0), # East Asian ideograph
0x223B2E: (0x678F, 0), # East Asian ideograph
0x233B2F: (0x8ECF, 0), # East Asian ideograph
0x223B30: (0x6772, 0), # East Asian ideograph
0x223B31: (
0x6798,
0,
), # East Asian ideograph (variant of 4C3B31 which maps to 6798)
0x223B32: (0x676A, 0), # East Asian ideograph
0x233B33: (0x8ED5, 0), # East Asian ideograph
0x213B34: (0x5BEE, 0), # East Asian ideograph
0x273B35: (0x5BBD, 0), # East Asian ideograph
0x273B36: (0x5BA1, 0), # East Asian ideograph
0x273B37: (0x5199, 0), # East Asian ideograph
0x273B38: (0x5BA0, 0), # East Asian ideograph
0x223B39: (0x67AC, 0), # East Asian ideograph
0x213B3A: (0x5BF8, 0), # East Asian ideograph
0x223B3B: (0x67A0, 0), # East Asian ideograph
0x213B3C: (0x5C01, 0), # East Asian ideograph
0x213B3D: (0x5C04, 0), # East Asian ideograph
0x213B3E: (0x5C09, 0), # East Asian ideograph
0x233B3F: (0x8EFA, 0), # East Asian ideograph
0x273B40: (0x5C06, 0), # East Asian ideograph
0x213B41: (0x5C0A, 0), # East Asian ideograph
0x233B42: (0x8EF9, 0), # East Asian ideograph
0x273B43: (0x5BF9, 0), # East Asian ideograph
0x223B44: (0x67F9, 0), # East Asian ideograph
0x213B45: (0x5C0F, 0), # East Asian ideograph
0x213B46: (0x5C11, 0), # East Asian ideograph
0x213B47: (0x5C16, 0), # East Asian ideograph
0x223B48: (0x678D, 0), # East Asian ideograph
0x223B49: (0x678C, 0), # East Asian ideograph
0x213B4A: (0x5C2C, 0), # East Asian ideograph
0x233B4B: (0x8EE8, 0), # East Asian ideograph
0x223B4C: (0x67FC, 0), # East Asian ideograph
0x213B4D: (0x5C38, 0), # East Asian ideograph
0x223B4E: (0x6810, 0), # East Asian ideograph
0x233B4F: (0x8EEB, 0), # East Asian ideograph
0x213B50: (0x5C40, 0), # East Asian ideograph
0x223B51: (0x67C8, 0), # East Asian ideograph
0x23455F: (0x935A, 0), # East Asian ideograph
0x213B53: (0x5C3E, 0), # East Asian ideograph
0x223B54: (0x67CC, 0), # East Asian ideograph
0x213B55: (0x5C45, 0), # East Asian ideograph
0x233B56: (0x8F00, 0), # East Asian ideograph
0x213B57: (0x5C4E, 0), # East Asian ideograph
0x223B58: (0x67C5, 0), # East Asian ideograph
0x233B59: (0x8F05, 0), # East Asian ideograph
0x233B5A: (0x8F08, 0), # East Asian ideograph
0x233B5B: (0x8F07, 0), # East Asian ideograph
0x223B5C: (0x67BB, 0), # East Asian ideograph
0x213B5D: (0x5C5B, 0), # East Asian ideograph (not in Unicode)
0x213B5E: (0x5C60, 0), # East Asian ideograph
0x223B5F: (0x67B0, 0), # East Asian ideograph
0x223B60: (0x6803, 0), # East Asian ideograph
0x223B61: (0x67F8, 0), # East Asian ideograph
0x213B62: (0x5C65, 0), # East Asian ideograph
0x273B63: (0x5C5E, 0), # East Asian ideograph
0x233B64: (0x8F2C, 0), # East Asian ideograph
0x213B65: (0x5C71, 0), # East Asian ideograph
0x225A2A: (0x741B, 0), # East Asian ideograph
0x213B67: (0x5C90, 0), # East Asian ideograph
0x213B68: (0x5C8C, 0), # East Asian ideograph
0x213B69: (0x5C91, 0), # East Asian ideograph
0x213B6A: (0x5C94, 0), # East Asian ideograph
0x233B6B: (0x8F1E, 0), # East Asian ideograph
0x213B6C: (0x5CB8, 0), # East Asian ideograph
0x233B6D: (0x8F25, 0), # East Asian ideograph
0x233B6E: (0x8F20, 0), # East Asian ideograph
0x223B6F: (0x67E4, 0), # East Asian ideograph
0x223B70: (0x67D9, 0), # East Asian ideograph
0x223B71: (0x67DB, 0), # East Asian ideograph
0x223B72: (0x67B5, 0), # East Asian ideograph
0x213B73: (0x5D01, 0), # East Asian ideograph
0x273B74: (0x5CE1, 0), # East Asian ideograph
0x223B75: (0x67F7, 0), # East Asian ideograph
0x213B76: (0x5CFB, 0), # East Asian ideograph
0x223B77: (0x67B3, 0), # East Asian ideograph
0x233B78: (0x8F36, 0), # East Asian ideograph
0x233B79: (0x8F2E, 0), # East Asian ideograph
0x233B7A: (0x8F33, 0), # East Asian ideograph
0x215F3F: (0x96C0, 0), # East Asian ideograph
0x223B7C: (0x67EE, 0), # East Asian ideograph
0x223B7D: (0x6AAF, 0), # East Asian ideograph
0x223B7E: (0x67B2, 0), # East Asian ideograph
0x225F40: (0x761B, 0), # East Asian ideograph
0x6F577E: (0xC970, 0), # Korean hangul
0x6F533B: (0xC158, 0), # Korean hangul
0x213D63: (0x5F8A, 0), # East Asian ideograph
0x6F4B7E: (0xB125, 0), # Korean hangul
0x2D5D2F: (0x9196, 0), # East Asian ideograph
0x2D5F43: (0x9CEB, 0), # East Asian ideograph
0x6F2459: (0x3137, 0), # Korean hangul
0x293B42: (0x8F75, 0), # East Asian ideograph
0x235F45: (0x9F22, 0), # East Asian ideograph
0x2D5F46: (0x96BD, 0), # East Asian ideograph
0x6F533C: (0xC167, 0), # Korean hangul
0x213D64: (0x5F87, 0), # East Asian ideograph
0x225F47: (0x7619, 0), # East Asian ideograph
0x234562: (0x935F, 0), # East Asian ideograph
0x235F48: (0x9F2B, 0), # East Asian ideograph
0x2E604A: (0x7690, 0), # East Asian ideograph
0x235F49: (0x9F26, 0), # East Asian ideograph
0x225270: (0x7144, 0), # East Asian ideograph
0x6F5D65: (0xD72D, 0), # Korean hangul
0x224E2D: (0x6FC9, 0), # East Asian ideograph
0x2D4B3F: (0x73CE, 0), # East Asian ideograph
0x275F4B: (0x6742, 0), # East Asian ideograph
0x276234: (0x9E29, 0), # East Asian ideograph
0x6F533D: (0xC168, 0), # Korean hangul
0x225F4C: (0x761D, 0), # East Asian ideograph
0x275F4D: (0x96CF, 0), # East Asian ideograph
0x6F5773: (0xC90F, 0), # Korean hangul
0x6F245B: (0x3141, 0), # Korean hangul
0x275F4E: (0x53CC, 0), # East Asian ideograph
0x216C48: (0x52D6, 0), # East Asian ideograph
0x275F4F: (0x79BB, 0), # East Asian ideograph
0x215F50: (
0x96E3,
0,
), # East Asian ideograph (variant of 4B5F50 which maps to 96E3)
0x6F533E: (0xC170, 0), # Korean hangul
0x213D66: (0x5F92, 0), # East Asian ideograph
0x215F51: (0x96E8, 0), # East Asian ideograph
0x225F3B: (0x7610, 0), # East Asian ideograph
0x284345: (0x680C, 0), # East Asian ideograph
0x6F5848: (0xCA08, 0), # Korean hangul
0x6F245C: (0x3142, 0), # Korean hangul
0x235F53: (0x9F2F, 0), # East Asian ideograph
0x225F54: (0x762D, 0), # East Asian ideograph
0x234571: (0x936B, 0), # East Asian ideograph
0x6F505B: (0xBBC0, 0), # Korean hangul
0x275F55: (0x7535, 0), # East Asian ideograph
0x6F533F: (0xC18C, 0), # Korean hangul
0x213D67: (0x5F91, 0), # East Asian ideograph
0x6F4D64: (0xB4E0, 0), # Korean hangul
0x213924: (0x5922, 0), # East Asian ideograph
0x6F245D: (0x3145, 0), # Korean hangul
0x275128: (0x7ECB, 0), # East Asian ideograph
0x4B5F58: (0xF9B2, 0), # East Asian ideograph
0x227049: (0x7D0F, 0), # East Asian ideograph
0x224E30: (0x6FA0, 0), # East Asian ideograph
0x293338: (0x8BFD, 0), # East Asian ideograph
0x2E715A: (0x7E27, 0), # East Asian ideograph
0x215F5A: (0x9707, 0), # East Asian ideograph
0x6F5340: (0xC18D, 0), # Korean hangul
0x223C21: (0x67B9, 0), # East Asian ideograph
0x213C22: (0x5D11, 0), # East Asian ideograph
0x215B3E: (0x8EFE, 0), # East Asian ideograph
0x223C24: (0x67E3, 0), # East Asian ideograph
0x213C25: (0x5D14, 0), # East Asian ideograph
0x233C26: (0x8F39, 0), # East Asian ideograph
0x233C27: (0x8F34, 0), # East Asian ideograph
0x273C28: (0x5C9A, 0), # East Asian ideograph
0x223C29: (0x67E2, 0), # East Asian ideograph
0x273C2A: (0x5D2D, 0), # East Asian ideograph
0x273C2B: (0x5C96, 0), # East Asian ideograph
0x213C2C: (0x5D9D, 0), # East Asian ideograph
0x273C2D: (0x5C7F, 0), # East Asian ideograph
0x273C2E: (0x5CB3, 0), # East Asian ideograph
0x223C2F: (0x67E7, 0), # East Asian ideograph
0x223C30: (0x6849, 0), # East Asian ideograph
0x223C31: (0x683E, 0), # East Asian ideograph
0x273C32: (0x5DC5, 0), # East Asian ideograph
0x214A32: (0x71EC, 0), # East Asian ideograph
0x213C34: (0x5DDD, 0), # East Asian ideograph
0x215F5E: (0x9711, 0), # East Asian ideograph
0x223C36: (0x6814, 0), # East Asian ideograph
0x223C37: (0x684B, 0), # East Asian ideograph
0x223C38: (0x681E, 0), # East Asian ideograph
0x213C39: (0x5DE7, 0), # East Asian ideograph
0x213C3A: (0x5DE6, 0), # East Asian ideograph
0x223C3B: (0x6833, 0), # East Asian ideograph
0x213C3C: (0x5DEE, 0), # East Asian ideograph
0x233C3D: (0x8F52, 0), # East Asian ideograph
0x213C3E: (0x5DF2, 0), # East Asian ideograph
0x213C3F: (0x5DF3, 0), # East Asian ideograph
0x223C40: (0x6831, 0), # East Asian ideograph
0x215F60: (0x9716, 0), # East Asian ideograph
0x223C42: (0x6835, 0), # East Asian ideograph
0x223C43: (0x683B, 0), # East Asian ideograph
0x223C44: (0x684E, 0), # East Asian ideograph
| |
<reponame>Unipisa/biaffine-parser<gh_stars>1-10
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
from parser.modules import MLP, BertEmbedding, Biaffine, BiLSTM, CharLSTM
from parser.modules.dropout import IndependentDropout, SharedDropout
from parser.utils.alg import eisner
from parser.utils.fn import istree
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from typing import Dict, Optional, Tuple, Any, List
import torch.nn.functional as F
class Model(nn.Module):
r"""
The implementation of Biaffine Dependency Parser.
References:
- <NAME> and <NAME>. 2017.
`Deep Biaffine Attention for Neural Dependency Parsing`_.
Args:
n_words (int):
The size of the word vocabulary.
n_feats (int):
The size of the feat vocabulary.
n_rels (int):
The number of labels in the treebank.
feat (str):
Specifies which type of additional feature to use: ``'char'`` | ``'bert'`` | ``'tag'``.
``'char'``: Character-level representations extracted by CharLSTM.
            ``'bert'``: BERT representations, other pretrained language models like XLNet are also feasible.
``'tag'``: POS tag embeddings.
Default: ``'char'``.
n_word_embed (int):
The size of word embeddings. Default: 100.
n_feat_embed (int):
The size of feature representations. Default: 100.
n_char_embed (int):
The size of character embeddings serving as inputs of CharLSTM, required if ``feat='char'``. Default: 50.
bert (str):
Specifies which kind of language model to use, e.g., ``'bert-base-cased'`` and ``'xlnet-base-cased'``.
This is required if ``feat='bert'``. The full list can be found in `transformers`_.
Default: ``None``.
n_bert_layers (int):
Specifies how many last layers to use. Required if ``feat='bert'``.
The final outputs would be the weight sum of the hidden states of these layers.
Default: 4.
bert_fine_tune (bool):
            Whether to fine-tune the BERT model.
            Default: False.
mix_dropout (float):
The dropout ratio of BERT layers. Required if ``feat='bert'``. Default: .0.
token_dropout (float):
The dropout ratio of tokens. Default: .0.
embed_dropout (float):
The dropout ratio of input embeddings. Default: .33.
n_lstm_hidden (int):
The size of LSTM hidden states. Default: 400.
n_lstm_layers (int):
The number of LSTM layers. Default: 3.
lstm_dropout (float):
The dropout ratio of LSTM. Default: .33.
n_mlp_arc (int):
Arc MLP size. Default: 500.
n_mlp_rel (int):
Label MLP size. Default: 100.
mlp_dropout (float):
The dropout ratio of MLP layers. Default: .33.
use_hidden_states (bool):
            Whether to use hidden states rather than outputs from BERT.
Default: True.
use_attentions (bool):
            Whether to use attention heads from BERT.
Default: False.
attention_head (int):
Which attention head from BERT to use. Default: 0.
attention_layer (int):
Which attention layer from BERT to use; use all if 0. Default: 6.
feat_pad_index (int):
The index of the padding token in the feat vocabulary. Default: 0.
pad_index (int):
The index of the padding token in the word vocabulary. Default: 0.
unk_index (int):
The index of the unknown token in the word vocabulary. Default: 1.
.. _Deep Biaffine Attention for Neural Dependency Parsing:
https://openreview.net/forum?id=Hk95PK9le
.. _transformers:
https://github.com/huggingface/transformers
"""
    def __init__(self, args, mask_token_id=0):
        """
        Build the parser network according to ``args``.

        :param args: namespace of hyper-parameters; see the class docstring
            for the individual fields.
        :param mask_token_id: id of the mask token, forwarded to
            :class:`BertEmbedding` when ``feat='bert'``.
        """
        super().__init__()
        self.args = args
        if args.n_embed:
            # the embedding layer
            self.word_embed = nn.Embedding(num_embeddings=args.n_words,
                                           embedding_dim=args.n_embed)
            self.unk_index = args.unk_index
        else:
            # NOTE: with no word embedding, only feat_embed feeds the encoder.
            self.word_embed = None
        # Feature embedding: character LSTM, BERT, or plain tag embedding.
        if args.feat == 'char':
            self.feat_embed = CharLSTM(n_chars=args.n_feats,
                                       n_embed=args.n_char_embed,
                                       n_out=args.n_feat_embed,
                                       pad_index=args.feat_pad_index)
            self.pad_index = args.pad_index
        elif args.feat == 'bert':
            self.feat_embed = BertEmbedding(model=args.bert_model,
                                            n_layers=args.n_bert_layers,
                                            n_out=args.n_feat_embed,
                                            requires_grad=args.bert_fine_tune,
                                            mask_token_id=mask_token_id,
                                            token_dropout=args.token_dropout,
                                            mix_dropout=args.mix_dropout,
                                            use_hidden_states=args.use_hidden_states,
                                            use_attentions=args.use_attentions,
                                            attention_layer=args.attention_layer)
            #self.args.n_mlp_arc = self.feat_embed.bert.config.max_position_embeddings
            # Sync args with the sizes the pretrained model actually provides.
            self.args.n_feat_embed = self.feat_embed.n_out # taken from the model
            self.args.n_bert_layers = self.feat_embed.n_layers # taken from the model
            self.pad_index = self.feat_embed.pad_index # taken from the model
            self.args.pad_index = self.pad_index # update
        else:
            self.feat_embed = nn.Embedding(num_embeddings=args.n_feats,
                                           embedding_dim=args.n_feat_embed)
            self.pad_index = args.pad_index
        self.embed_dropout = IndependentDropout(p=args.embed_dropout)
        if args.n_lstm_layers:
            # the lstm layer
            self.lstm = BiLSTM(input_size=args.n_embed+args.n_feat_embed,
                               hidden_size=args.n_lstm_hidden,
                               num_layers=args.n_lstm_layers,
                               dropout=args.lstm_dropout)
            self.lstm_dropout = SharedDropout(p=args.lstm_dropout)
            # Bidirectional: MLPs see forward + backward hidden states.
            mlp_input_size = args.n_lstm_hidden*2
        else:
            # Without an LSTM the MLPs read the embeddings directly.
            self.lstm = None
            mlp_input_size = args.n_embed + args.n_feat_embed
        # the MLP layers: separate dependent/head projections for arcs/labels
        self.mlp_arc_d = MLP(n_in=mlp_input_size,
                             n_out=args.n_mlp_arc,
                             dropout=args.mlp_dropout)
        self.mlp_arc_h = MLP(n_in=mlp_input_size,
                             n_out=args.n_mlp_arc,
                             dropout=args.mlp_dropout)
        self.mlp_rel_d = MLP(n_in=mlp_input_size,
                             n_out=args.n_mlp_rel,
                             dropout=args.mlp_dropout)
        self.mlp_rel_h = MLP(n_in=mlp_input_size,
                             n_out=args.n_mlp_rel,
                             dropout=args.mlp_dropout)
        # the Biaffine layers
        self.arc_attn = Biaffine(n_in=args.n_mlp_arc,
                                 bias_x=True,
                                 bias_y=False)
        self.rel_attn = Biaffine(n_in=args.n_mlp_rel,
                                 n_out=args.n_rels,
                                 bias_x=True,
                                 bias_y=True)
        # transformer attention
        # Learned scalar weight used in forward() to mix BERT attention
        # into the arc scores.
        if args.use_attentions:
            self.attn_mix = nn.Parameter(torch.randn(1)) #2)) # 1))
        # # distance
        # self.args.distance = False # DEBUG
        # if self.args.distance:
        #     self.distance = DeepBiaffine(mlp_input_size, mlp_input_size, self.args.deep_biaff_hidden_dim, 1, dropout=args.mlp_dropout)
        self.criterion = nn.CrossEntropyLoss()
def extra_repr(self):
total_params = sum(p.numel() for p in self.parameters())
trainable_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
return f"Total parameters: {total_params}\n" \
f"Trainable parameters: {trainable_params}"
def load_pretrained(self, embed=None):
if embed is not None:
self.pretrained = nn.Embedding.from_pretrained(embed)
nn.init.zeros_(self.word_embed.weight)
return self
    def forward(self, words: torch.Tensor,
                feats: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        r"""
        Args:
            words (~torch.LongTensor): ``[batch_size, seq_len]``.
                Word indices.
            feats (~torch.LongTensor):
                Feat indices.
                If feat is ``'char'`` or ``'bert'``, the size of feats should be ``[batch_size, seq_len, fix_len]``.
                if ``'tag'``, the size is ``[batch_size, seq_len]``.
        Returns:
            ~torch.Tensor, ~torch.Tensor:
                The first tensor of shape ``[batch_size, seq_len, seq_len]`` holds scores of all possible arcs.
                The second of shape ``[batch_size, seq_len, seq_len, n_labels]`` holds
                scores of all possible labels on each arc.
        """
        # words, feats are the first two items in the batch from TextDataLoader.__iter__()
        word_feats = feats[:,:,0] # drop subpiece dimension
        batch_size, seq_len = word_feats.shape
        # get the mask and lengths of given batch
        mask = word_feats.ne(self.pad_index)
        lens = mask.sum(dim=1)
        # feat_embed: [batch_size, seq_len, n_feat_embed]
        # attn: [batch_size, seq_len, seq_len]
        # (attn is None unless the feature embedder exposes attentions)
        feat_embed, attn = self.feat_embed(feats)
        if self.word_embed:
            ext_words = words
            # set the indices larger than num_embeddings to unk_index
            if hasattr(self, 'pretrained'):
                ext_mask = words.ge(self.word_embed.num_embeddings)
                ext_words = words.masked_fill(ext_mask, self.unk_index)
            # get outputs from embedding layers
            word_embed = self.word_embed(ext_words)
            if hasattr(self, 'pretrained'):
                # sum the frozen pretrained vectors with the trainable ones
                word_embed += self.pretrained(words)
            word_embed, feat_embed = self.embed_dropout(word_embed, feat_embed)
            # concatenate the word and feat representations
            embed = torch.cat((word_embed, feat_embed), dim=-1)
        else:
            # no word embedding configured: features alone feed the encoder
            embed = self.embed_dropout(feat_embed)[0]
        if self.lstm:
            # pack by true lengths so padding does not影响 the recurrence
            # print('PAD:', self.pad_index, words, feats, embed, lens) # DEBUG
            x = pack_padded_sequence(embed, lens, True, False)
            x, _ = self.lstm(x)
            x, _ = pad_packed_sequence(x, True, total_length=seq_len)
            x = self.lstm_dropout(x)
        else:
            x = embed
        # apply MLPs to the BiLSTM output states
        arc_d = self.mlp_arc_d(x)
        arc_h = self.mlp_arc_h(x)
        rel_d = self.mlp_rel_d(x)
        rel_h = self.mlp_rel_h(x)
        # [batch_size, seq_len, seq_len]
        s_arc = self.arc_attn(arc_d, arc_h)
        # [batch_size, seq_len, seq_len, n_rels]
        s_rel = self.rel_attn(rel_d, rel_h).permute(0, 2, 3, 1)
        # mix bert attentions
        # attn_mix is the learned scalar declared in __init__ when
        # args.use_attentions is set
        if attn is not None:
            s_arc += self.attn_mix * attn
            # s_rel += self.attn_mix[1] * attn.unsqueeze(-1)
        # # head-dependent distance
        # if self.args.distance:
        #     # @see https://arxiv.org/pdf/1901.10457.pdf
        #     arange = torch.arange(words.size(1), device=words.device)
        #     head_offset = arange.view(1, 1, -1).expand(words.size(0), -1, -1) \
        #                   - arange.view(1, -1, 1).expand(words.size(0), -1, -1)
        #     dist_scores = self.distance(x, x).squeeze(3)
        #     dist_pred = 1 + F.softplus(dist_scores)
        #     dist_target = torch.abs(head_offset)
        #     dist_cauchy = -torch.log(1 + (dist_target.float() - dist_pred)**2/2)
        #     s_arc += dist_cauchy.detach()
        # else:
        #     dist_cauchy = None
        # set the scores that exceed the length of each sentence to -inf
        s_arc.masked_fill_(~mask.unsqueeze(1), float('-inf'))
        # Lower the diagonal, because the head of a word can't be itself.
        s_arc += torch.diag(s_arc.new(seq_len).fill_(float('-inf')))
        return s_arc, s_rel #, dist_cauchy
def loss(self, s_arc: torch.Tensor, s_rel: torch.Tensor,
         arcs: torch.Tensor, rels: torch.Tensor,
         mask: torch.Tensor) -> torch.Tensor:
    r"""
    Compute the summed cross-entropy loss over arcs and relation labels.

    Args:
        s_arc : ``torch.Tensor``, required.
            Arc scores of shape (batch_size, seq_len, seq_len); row ``i``
            holds the head scores for token ``i``.
        s_rel : ``torch.Tensor``, required.
            Relation scores of shape (batch_size, seq_len, seq_len, n_rels).
        arcs : ``torch.Tensor``, required.
            Gold head indices, shape (batch_size, seq_len).
        rels : ``torch.Tensor``, required.
            Gold dependency labels, shape (batch_size, seq_len).
        mask : ``torch.Tensor``, required.
            Boolean mask of shape (batch_size, seq_len) marking unpadded
            tokens.

    Returns:
        loss : ``torch.Tensor``.
            Sum of the arc and relation cross-entropy losses.

    NOTE(review): an optional distance-based Cauchy regularizer
    (``dist_cauchy``) existed here previously and is currently disabled.
    """
    # Keep only unpadded positions; tensors become token-flattened.
    arc_scores, gold_heads = s_arc[mask], arcs[mask]
    rel_scores, gold_rels = s_rel[mask], rels[mask]
    # For each token, select the label scores of its gold head.
    head_rel_scores = rel_scores[torch.arange(len(gold_heads)), gold_heads]
    arc_loss = self.criterion(arc_scores, gold_heads)
    rel_loss = self.criterion(head_rel_scores, gold_rels)
    return arc_loss + rel_loss
def | |
match type {}...
>>> check_type([], {}) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeMismatchException: [] cannot match type {}...
>>> from collections import defaultdict
>>> check_type({}, dict_({}, defaultdict, lambda: defaultdict(int)))\
# doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeMismatchException: {} cannot match type {}: \
allowed types are: <... 'collections.defaultdict'>
>>> check_type(defaultdict(str), dict_({}, defaultdict,
... lambda: defaultdict(int))) # doctest: +ELLIPSIS
defaultdict(<... 'int'>, {})
>>> from collections import OrderedDict
>>> check_type(OrderedDict((("b",1),("a",2),("def","abc"))),
... dict_({"a": int, "b": int, "def": str}, dict, OrderedDict))
OrderedDict([('b', 1), ('a', 2), ('def', 'abc')])
>>> check_type({"a":1}, {})
{'a': 1}
>>> check_type({"a":1}, {"b": int}) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeMismatchException: {'a': 1} cannot match type \
{'b': <... 'int'>}: key 'b' is required
>>> check_type({"abc": 1, "abd": 2, "abe": "abc"}, {"~a.*": int}) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeMismatchException: At 'abe': 'abc' cannot match type <... 'int'>
>>> check_type({"abc": 1, "abd": 2, "abe": "abc"}, \
{"~a.*": int, "abe": str}) == {'abc': 1, 'abd': 2, 'abe': 'abc'}
True
>>> check_type({"abc": 1, "abd": 2, "abe": "abc"}, \
{"~a.*": int, "?abe": str}) == {'abc': 1, 'abd': 2, 'abe': 'abc'}
True
>>> check_type({"abc": 1, "def": "abc"}, {"abc": int}) == \
{'abc': 1, 'def': 'abc'}
True
>>> check_type({"abc": 1, "abc": 2, "bcd": "abc", "bce": "abd"},
... {"~^a.*": int, "~^b.*": str}) == \\
... {"abc": 1, "abc": 2, "bcd": "abc", "bce": "abd"}
True
>>> my_type = (str, [])
>>> my_type[1].append(my_type)
>>> check_type(1, my_type) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeMismatchException: 1 cannot match type (<... 'str'>, [(...)])...
>>> my_obj = []
>>> my_obj.append(my_obj)
>>> my_obj.append(1)
>>> check_type(my_obj, my_type) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeMismatchException: [[...], 1] cannot match type \
(<... 'str'>, [(...)])...
>>> my_obj = []
>>> my_obj.append(my_obj)
>>> my_obj.append("abc")
>>> check_type(my_obj, my_type)
[[...], 'abc']
>>> my_type = []
>>> my_type2 = {"a": my_type, "b": my_type}
>>> my_type.append(my_type2)
>>> my_obj = {}
>>> my_obj['a'] = my_obj
>>> my_obj['b'] = my_obj
>>> r = check_type(my_obj, my_type)
>>> r[0]['a'][0] is r[0]['b'][0]
True
>>> r[0]['a'][0] is r[0]
True
>>> r = check_type(my_obj, my_type2)
>>> r['a'][0] is r['b'][0]
True
>>> r['a'][0] is r
True
>>> my_obj2 = []
>>> my_obj2.append(my_obj2)
>>> my_obj2.append(1)
>>> my_obj = [my_obj2, my_obj2]
>>> my_type = []
>>> my_type.append((int, my_type))
>>> check_type(my_obj, my_type)
[[[...], 1], [[...], 1]]
>>> r = _
>>> r[0] is r[1]
True
>>> my_type = []
>>> my_type.append(([int], my_type))
>>> check_type(my_obj, my_type)
[[[...], [1]], [[...], [1]]]
>>> r = _
>>> r[0] is r[1]
True
>>> check_type({"abc": {"def": "123"}}, {"abc": {"def": int}}) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeMismatchException: At 'abc.def': '123' cannot match type <... 'int'>
>>> check_type({"abc": [{"def": 123}, {"def": "123"}]}, {"abc": [{"def": int}]}) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeMismatchException: At 'abc.1.def': '123' cannot match type <... 'int'>
>>> check_type({"abc": [{"def": 123}, {"def": "123"}]}, ({"abc": [{"def": int}]}, {"abc": [{"def": str}]}))\
# doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeMismatchException: {'abc': [{'def': 123}, {'def': '123'}]} cannot match type \
({'abc': [{'def': <... 'int'>}]}, {'abc': [{'def': <... 'str'>}]}): Not matched by any of the sub types:
At 'abc.1.def': '123' cannot match type <... 'int'>
At 'abc.0.def': 123 cannot match type <... 'str'>
>>> check_type({"abc": 123, "def": "abc"}, map_(str, str)) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeMismatchException: At 'abc': 123 cannot match type <... 'str'>
>>> check_type({"abc": {"abc": 123, 123: "abc"}}, {"abc": map_(int, str)}) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeMismatchException: At 'abc.<Key>': 'abc' cannot match type <... 'int'>
"""
return _check_type_inner(value, type)
# Python 2/3 compatibility aliases: on Python 2 `long` and `unicode` are
# builtins; on Python 3 they no longer exist, so fall back to `int`/`str`.
# A broad `except Exception` (rather than NameError) is used deliberately
# to be maximally tolerant of exotic interpreters.
try:
    _long = long
except Exception:
    _long = int
try:
    _unicode = unicode
except Exception:
    _unicode = str
class ListChecker(CustomizedChecker):
    """
    Default `[]` type implementation
    Examples::
    >>> list_([])
    []
    >>> list_({})
    Traceback (most recent call last):
    ...
    InvalidTypeException: {} is not a valid type: must be a list
    """
    def bind(self, type_, strict = False,
             allowed_type = (list, tuple)):
        """
        `type_` must be a list type [] / [sub_type]
        :param strict: if True, auto-convert from a single value
                       to a list is disabled
        :param allowed_type: a tuple of allowed class of input
        """
        # Validate the declared type before storing any state.
        if not isinstance(type_, list):
            raise InvalidTypeException(type_, "must be a list")
        if len(type_) > 1:
            raise InvalidTypeException(type_,
                    "list must contain 0 or 1 valid inner type")
        self.type_ = type_
        self.strict = strict
        self.allowed_type = allowed_type

    def __repr__(self):
        return repr(self.type_)

    def pre_check_type(self, value):
        # A real list/tuple starts from an empty accumulator.
        if isinstance(value, self.allowed_type):
            return []
        # Non-list input: strict mode refuses, lenient mode signals
        # "wrap later" by returning None.
        if self.strict:
            raise TypeMismatchException(value, self.type_,
                "strict mode disables auto-convert-to-list for single value")
        return None

    def final_check_type(self, value, current_result, recursive_check_type):
        if not self.type_:
            # `[]` matches any list or tuple unconditionally.
            if current_result is None:
                return [value]
            current_result.extend(value)
            return current_result
        inner_type = self.type_[0]
        if current_result is None:
            # Single bare value: check it and wrap into a one-element list.
            return [recursive_check_type(value, inner_type)]
        # Check every element against the declared inner type.
        current_result.extend(
            recursive_check_type(element, inner_type, position)
            for position, element in enumerate(value))
        return current_result

list_ = ListChecker
class DictChecker(CustomizedChecker):
    """
    Default `{}` type implementation
    Examples::
    >>> dict_({})
    {}
    >>> dict_([])
    Traceback (most recent call last):
    ...
    InvalidTypeException: [] is not a valid type: must be a dict
    """
    def bind(self, type_, allowed_type = dict, created_type = dict):
        """
        :param type_: a dict describing the input format
        :param allowed_type: limit input type to a sub type,
                             or a tuple of sub types
        :param created_type: create a subtype of dict instead
                             (e.g. OrderedDict)
        """
        if not isinstance(type_, dict):
            raise InvalidTypeException(type_, "must be a dict")
        self.type_ = type_
        self.allowed_type = allowed_type
        self.created_type = created_type
        # Key classification by prefix:
        #   '!'-prefixed or unprefixed string -> required (the '!' is stripped)
        #   '?'-prefixed                      -> optional
        #   '~'-prefixed                      -> regex-matched key pattern
        # Non-string keys are always treated as required.
        self.required_keys = dict((k[1:]
                                   if isinstance(k, str)
                                   and k.startswith('!')
                                   else k, v)
                                  for k,v in self.type_.items()
                                  if not isinstance(k, str)
                                  or (not k.startswith('?')
                                      and not k.startswith('~')))
        # NOTE(review): this comprehension calls k.startswith on every key,
        # so a non-string key would raise AttributeError here — presumably
        # keys are always strings when '?'/'~' forms are used; confirm.
        self.optional_keys = dict((k[1:], v) for k, v in self.type_.items()
                                  if k.startswith('?'))
        # Required keys are also valid exact-match lookups while checking.
        self.optional_keys.update(self.required_keys)
        self.regexp_keys = [(k[1:], v) for k, v in self.type_.items()
                            if k.startswith('~')]
    def pre_check_type(self, value):
        # Reject inputs that are not (an instance of) the allowed dict type.
        if not isinstance(value, self.allowed_type):
            raise TypeMismatchException(value, self.type_,
                "allowed types are: " + repr(self.allowed_type))
        # Fresh result container of the configured output type.
        return self.created_type()
    def final_check_type(self, value, current_result, recursive_check_type):
        if not self.type_:
            # `{}` matches any dict: copy the input through unchanged.
            current_result.update(value)
        else:
            # check required keys
            for k in self.required_keys:
                if k not in value:
                    raise TypeMismatchException(value, self.type_, 'key '
                        + repr(k) + ' is required')
            optional_keys = self.optional_keys
            for k, v in value.items():
                if k in optional_keys:
                    # Exact (required/optional) key: recurse into its subtype.
                    current_result[k] = recursive_check_type(v,
                                                             optional_keys[k],
                                                             k)
                else:
                    # Fall back to the first matching regex pattern, if any.
                    for rk, rv in self.regexp_keys:
                        if re.search(rk, k):
                            current_result[k] = recursive_check_type(
                                v,
                                rv,
                                k)
                            break
                    else:
                        # Undeclared key: pass the value through unchecked.
                        current_result[k] = v
        return current_result
    def __repr__(self):
        return repr(self.type_)
dict_ = DictChecker
class TupleChecker(CustomizedChecker):
"""
Check a tuple type: a fix-sized tuple/list, each element may have
a different type
Examples::
>>> tuple_((str, int)) # doctest: +ELLIPSIS
tuple_((<... 'str'>, <... 'int'>))
>>> tuple_({})
Traceback (most recent call last):
...
InvalidTypeException: tuple_({}) is not a valid type: \
must use a tuple/list of types
>>> check_type((1,2), tuple_((1,2), allowed_type=int)) \
# doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeMismatchException: (1, 2) cannot match type tuple_((1, 2)): \
allowed types are: <... 'int'>
>>> check_type(("abc", 123), tuple_(()))
Traceback (most recent call last):
...
TypeMismatchException: ('abc', 123) cannot match type tuple_(()): \
length mismatch
>>> check_type(("abc", 123), tuple_((str, int)))
('abc', 123)
>>> check_type(["abc", 123], tuple_((str, int)))
('abc', 123)
>>> t = []
>>> tuple_type = tuple_()
>>> t.append(tuple_type)
>>> tuple_type.bind(t)
>>> l = []
>>> l.append(l)
>>> check_type(l, tuple_type) \\
... # By default, a direct recursive is not allowed # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeMismatchException: At '0': [[...]] cannot match type \
tuple_([...])
>>> t = []
>>> tuple_type = tuple_()
>>> t.append([tuple_type])
>>> tuple_type.bind(t)
>>> check_type(l, tuple_type) # An indirect recursive is allowed
([([...],)],)
>>> t = []
>>> tuple_type = tuple_()
>>> t.append(tuple_type)
>>> t.append(int)
>>> tuple_type.bind(t, allow_recursive = True)
>>> l = []
>>> l.append(l)
>>> l.append(123)
>>> check_type(l, tuple_type) \\
... # allow_recursive allows a direct recursive \
and return list instead of tuple
[[...], 123]
"""
def bind(self, tuple_of_types, allowed_type = (list, tuple),
allow_recursive = False):
"""
:param tuple_of_types: a tuple or list, each of its element is
a valid type
:param allowed_type: allowed | |
{'key': 'id', 'type': 'str'},
}
def __init__(
self,
*,
id: str,
**kwargs
):
super(ContainerGroupNetworkProfile, self).__init__(**kwargs)
self.id = id
class ContainerGroupPropertiesInstanceView(msrest.serialization.Model):
    """The instance view of the container group. Only valid in response.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar events: The events of this container group.
    :vartype events: list[~azure.mgmt.containerinstance.models.Event]
    :ivar state: The state of the container group. Only valid in response.
    :vartype state: str
    """

    # Both fields are server-populated; client-supplied values are rejected.
    _validation = {
        'events': {'readonly': True},
        'state': {'readonly': True},
    }

    _attribute_map = {
        'events': {'key': 'events', 'type': '[Event]'},
        'state': {'key': 'state', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Filled in by deserialization of the service response.
        self.events = None
        self.state = None
class ContainerHttpGet(msrest.serialization.Model):
    """The container Http Get settings, for liveness or readiness probe.

    All required parameters must be populated in order to send to Azure.

    :param path: The path to probe.
    :type path: str
    :param port: Required. The port number to probe.
    :type port: int
    :param scheme: The scheme. Possible values include: "http", "https".
    :type scheme: str or ~azure.mgmt.containerinstance.models.Scheme
    """

    _validation = {
        'port': {'required': True},
    }

    _attribute_map = {
        'path': {'key': 'path', 'type': 'str'},
        'port': {'key': 'port', 'type': 'int'},
        'scheme': {'key': 'scheme', 'type': 'str'},
    }

    def __init__(self, *, port: int, path: Optional[str] = None,
                 scheme: Optional[Union[str, "Scheme"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.path = path        # optional URL path to probe
        self.port = port        # required port number
        self.scheme = scheme    # "http" or "https"
class ContainerPort(msrest.serialization.Model):
    """The port exposed on the container instance.

    All required parameters must be populated in order to send to Azure.

    :param protocol: The protocol associated with the port. Possible values include: "TCP", "UDP".
    :type protocol: str or ~azure.mgmt.containerinstance.models.ContainerNetworkProtocol
    :param port: Required. The port number exposed within the container group.
    :type port: int
    """

    _validation = {
        'port': {'required': True},
    }

    _attribute_map = {
        'protocol': {'key': 'protocol', 'type': 'str'},
        'port': {'key': 'port', 'type': 'int'},
    }

    def __init__(self, *, port: int,
                 protocol: Optional[Union[str, "ContainerNetworkProtocol"]] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.protocol = protocol    # "TCP" / "UDP", optional
        self.port = port            # required exposed port
class ContainerProbe(msrest.serialization.Model):
    """The container probe, for liveness or readiness.

    :param exec_property: The execution command to probe.
    :type exec_property: ~azure.mgmt.containerinstance.models.ContainerExec
    :param http_get: The Http Get settings to probe.
    :type http_get: ~azure.mgmt.containerinstance.models.ContainerHttpGet
    :param initial_delay_seconds: The initial delay seconds.
    :type initial_delay_seconds: int
    :param period_seconds: The period seconds.
    :type period_seconds: int
    :param failure_threshold: The failure threshold.
    :type failure_threshold: int
    :param success_threshold: The success threshold.
    :type success_threshold: int
    :param timeout_seconds: The timeout seconds.
    :type timeout_seconds: int
    """

    # 'exec' is a Python keyword, hence the exec_property rename client-side.
    _attribute_map = {
        'exec_property': {'key': 'exec', 'type': 'ContainerExec'},
        'http_get': {'key': 'httpGet', 'type': 'ContainerHttpGet'},
        'initial_delay_seconds': {'key': 'initialDelaySeconds', 'type': 'int'},
        'period_seconds': {'key': 'periodSeconds', 'type': 'int'},
        'failure_threshold': {'key': 'failureThreshold', 'type': 'int'},
        'success_threshold': {'key': 'successThreshold', 'type': 'int'},
        'timeout_seconds': {'key': 'timeoutSeconds', 'type': 'int'},
    }

    def __init__(self, *,
                 exec_property: Optional["ContainerExec"] = None,
                 http_get: Optional["ContainerHttpGet"] = None,
                 initial_delay_seconds: Optional[int] = None,
                 period_seconds: Optional[int] = None,
                 failure_threshold: Optional[int] = None,
                 success_threshold: Optional[int] = None,
                 timeout_seconds: Optional[int] = None,
                 **kwargs):
        super().__init__(**kwargs)
        # All settings are optional; unset values are omitted on the wire.
        self.exec_property = exec_property
        self.http_get = http_get
        self.initial_delay_seconds = initial_delay_seconds
        self.period_seconds = period_seconds
        self.failure_threshold = failure_threshold
        self.success_threshold = success_threshold
        self.timeout_seconds = timeout_seconds
class ContainerPropertiesInstanceView(msrest.serialization.Model):
    """The instance view of the container instance. Only valid in response.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar restart_count: The number of times that the container instance has been restarted.
    :vartype restart_count: int
    :ivar current_state: Current container instance state.
    :vartype current_state: ~azure.mgmt.containerinstance.models.ContainerState
    :ivar previous_state: Previous container instance state.
    :vartype previous_state: ~azure.mgmt.containerinstance.models.ContainerState
    :ivar events: The events of the container instance.
    :vartype events: list[~azure.mgmt.containerinstance.models.Event]
    """

    # Every field is read-only: populated from service responses only.
    _validation = {
        'restart_count': {'readonly': True},
        'current_state': {'readonly': True},
        'previous_state': {'readonly': True},
        'events': {'readonly': True},
    }

    _attribute_map = {
        'restart_count': {'key': 'restartCount', 'type': 'int'},
        'current_state': {'key': 'currentState', 'type': 'ContainerState'},
        'previous_state': {'key': 'previousState', 'type': 'ContainerState'},
        'events': {'key': 'events', 'type': '[Event]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Filled in by deserialization of the service response.
        self.restart_count = None
        self.current_state = None
        self.previous_state = None
        self.events = None
class ContainerState(msrest.serialization.Model):
    """The container instance state.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar state: The state of the container instance.
    :vartype state: str
    :ivar start_time: The date-time when the container instance state started.
    :vartype start_time: ~datetime.datetime
    :ivar exit_code: The container instance exit codes correspond to those from the ``docker run``
     command.
    :vartype exit_code: int
    :ivar finish_time: The date-time when the container instance state finished.
    :vartype finish_time: ~datetime.datetime
    :ivar detail_status: The human-readable status of the container instance state.
    :vartype detail_status: str
    """

    # All fields are server-populated and read-only.
    _validation = {
        'state': {'readonly': True},
        'start_time': {'readonly': True},
        'exit_code': {'readonly': True},
        'finish_time': {'readonly': True},
        'detail_status': {'readonly': True},
    }

    _attribute_map = {
        'state': {'key': 'state', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'exit_code': {'key': 'exitCode', 'type': 'int'},
        'finish_time': {'key': 'finishTime', 'type': 'iso-8601'},
        'detail_status': {'key': 'detailStatus', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Filled in by deserialization of the service response.
        self.state = None
        self.start_time = None
        self.exit_code = None
        self.finish_time = None
        self.detail_status = None
class DnsConfiguration(msrest.serialization.Model):
    """DNS configuration for the container group.

    All required parameters must be populated in order to send to Azure.

    :param name_servers: Required. The DNS servers for the container group.
    :type name_servers: list[str]
    :param search_domains: The DNS search domains for hostname lookup in the container group.
    :type search_domains: str
    :param options: The DNS options for the container group.
    :type options: str
    """

    _validation = {
        'name_servers': {'required': True},
    }

    _attribute_map = {
        'name_servers': {'key': 'nameServers', 'type': '[str]'},
        'search_domains': {'key': 'searchDomains', 'type': 'str'},
        'options': {'key': 'options', 'type': 'str'},
    }

    def __init__(self, *, name_servers: List[str],
                 search_domains: Optional[str] = None,
                 options: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.name_servers = name_servers        # required DNS server list
        self.search_domains = search_domains    # optional search domains
        self.options = options                  # optional resolver options
class EncryptionProperties(msrest.serialization.Model):
    """The container group encryption properties.

    All required parameters must be populated in order to send to Azure.

    :param vault_base_url: Required. The keyvault base url.
    :type vault_base_url: str
    :param key_name: Required. The encryption key name.
    :type key_name: str
    :param key_version: Required. The encryption key version.
    :type key_version: str
    """

    # All three key-vault coordinates are mandatory.
    _validation = {
        'vault_base_url': {'required': True},
        'key_name': {'required': True},
        'key_version': {'required': True},
    }

    _attribute_map = {
        'vault_base_url': {'key': 'vaultBaseUrl', 'type': 'str'},
        'key_name': {'key': 'keyName', 'type': 'str'},
        'key_version': {'key': 'keyVersion', 'type': 'str'},
    }

    def __init__(self, *, vault_base_url: str, key_name: str,
                 key_version: str, **kwargs):
        super().__init__(**kwargs)
        self.vault_base_url = vault_base_url
        self.key_name = key_name
        self.key_version = key_version
class EnvironmentVariable(msrest.serialization.Model):
    """The environment variable to set within the container instance.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the environment variable.
    :type name: str
    :param value: The value of the environment variable.
    :type value: str
    :param secure_value: The value of the secure environment variable.
    :type secure_value: str
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
        'secure_value': {'key': 'secureValue', 'type': 'str'},
    }

    def __init__(self, *, name: str, value: Optional[str] = None,
                 secure_value: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.value = value                  # plain-text value
        self.secure_value = secure_value    # secret value, not echoed back
class Event(msrest.serialization.Model):
    """A container group or container instance event.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar count: The count of the event.
    :vartype count: int
    :ivar first_timestamp: The date-time of the earliest logged event.
    :vartype first_timestamp: ~datetime.datetime
    :ivar last_timestamp: The date-time of the latest logged event.
    :vartype last_timestamp: ~datetime.datetime
    :ivar name: The event name.
    :vartype name: str
    :ivar message: The event message.
    :vartype message: str
    :ivar type: The event type.
    :vartype type: str
    """

    # Every field is read-only: populated from service responses only.
    _validation = {
        'count': {'readonly': True},
        'first_timestamp': {'readonly': True},
        'last_timestamp': {'readonly': True},
        'name': {'readonly': True},
        'message': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'count': {'key': 'count', 'type': 'int'},
        'first_timestamp': {'key': 'firstTimestamp', 'type': 'iso-8601'},
        'last_timestamp': {'key': 'lastTimestamp', 'type': 'iso-8601'},
        'name': {'key': 'name', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Filled in by deserialization of the service response.
        self.count = None
        self.first_timestamp = None
        self.last_timestamp = None
        self.name = None
        self.message = None
        self.type = None
class GitRepoVolume(msrest.serialization.Model):
"""Represents a volume that is populated with the contents of a git repository.
| |
# s4sarath/tf-transformers — src/tf_transformers/layers/attention/block_attention.py
# from __future__ import google_type_annotations
from __future__ import absolute_import, division, print_function
import math
import numpy as np
import tensorflow as tf
from tf_transformers.core import LegacyLayer
from tf_transformers.layers import dense_einsum
from tf_transformers.layers.mask import masked_softmax
from tf_transformers.utils import tf_utils
from tf_transformers.layers.mask import SelfAttentionMask
# NOTE: module-level constants, hard-coded for now; they define the
# block-sparse attention layout used throughout this file.
max_allowed_sequence_length = 512  # total key positions each query block may attend to
from_seq_length = 4096             # query-side sequence length
from_block_size = 64               # query-side block size
to_seq_length = 4096               # key/value-side sequence length
to_block_size = 64                 # key/value-side block size (was assigned twice; deduplicated)
def get_qk_index_pos(n_rows, n_columns, ix_size=None):
    """Generate a random block-attendance pattern.

    For each of the ``n_rows`` query blocks, randomly choose ``ix_size``
    key blocks (without replacement) that the query block attends to.

    Args:
        n_rows: number of query blocks.
        n_columns: number of key blocks.
        ix_size: number of key blocks selected per query block. Defaults to
            ``max_allowed_sequence_length // to_block_size`` (module
            constants), preserving the original behavior.

    Returns:
        A list of ``n_rows`` numpy arrays of length ``n_columns``; each array
        has exactly ``ix_size`` entries set to 1.0 (attend) and 0.0 elsewhere.
    """
    # Loop-invariant: was recomputed on every iteration in the original.
    if ix_size is None:
        ix_size = max_allowed_sequence_length // to_block_size
    qk_index_pos = []
    for _ in range(n_rows):
        row = np.zeros(n_columns)
        chosen = np.random.choice(n_columns, size=ix_size, replace=False)
        row[chosen] = 1.0
        qk_index_pos.append(row)
    return qk_index_pos
class BlockMultiHeadAttention(LegacyLayer):
"""BlockMultiHeadAttention layer.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
"""
def __init__(
    self,
    num_heads,
    head_size,
    dropout_rate=0.0,
    kernel_initializer="glorot_uniform",
    bias_initializer="zeros",
    kernel_regularizer=None,
    bias_regularizer=None,
    activity_regularizer=None,
    kernel_constraint=None,
    bias_constraint=None,
    name="attention",
    **kwargs,
):
    """
    Args:
        num_heads: Number of attention heads.
        head_size: Size of each attention head.
        dropout_rate: Dropout probability applied to attention outputs.
        kernel_initializer: Initializer for dense layer kernels.
        bias_initializer: Initializer for dense layer biases.
        kernel_regularizer: Regularizer for dense layer kernels.
        bias_regularizer: Regularizer for dense layer biases.
        activity_regularizer: Regularizer for dense layer activity.
        kernel_constraint: Constraint for dense layer kernels.
        bias_constraint: Constraint for dense layer biases.
        name: Layer name.
    """
    kwargs["name"] = name
    super(BlockMultiHeadAttention, self).__init__(**kwargs)
    self._num_heads = num_heads
    self._head_size = head_size
    self._dropout_rate = dropout_rate
    self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
    self._bias_initializer = tf.keras.initializers.get(bias_initializer)
    self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
    self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
    # BUG FIX: `activity_regularizer` was accepted but never stored, so the
    # q/k/v projections below always received None (the base Layer default).
    # Store the deserialized regularizer so it is actually applied.
    self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
    self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
    self._bias_constraint = tf.keras.constraints.get(bias_constraint)

    def _projection(layer_name):
        # The query/key/value projections are identical except for the name.
        return dense_einsum.DenseEinsum(
            output_shape=(self._num_heads, self._head_size),
            kernel_initializer=self._kernel_initializer,
            bias_initializer=self._bias_initializer,
            kernel_regularizer=self._kernel_regularizer,
            bias_regularizer=self._bias_regularizer,
            activity_regularizer=self._activity_regularizer,
            kernel_constraint=self._kernel_constraint,
            bias_constraint=self._bias_constraint,
            name=layer_name,
        )

    self._query_dense = _projection("query")
    self._key_dense = _projection("key")
    self._value_dense = _projection("value")
    self._masked_softmax = masked_softmax.MaskedSoftmax(mask_expansion_axes=[1])
    self._dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
def get_config(self):
    """Return the layer's serializable configuration (Keras convention)."""
    config = {
        "num_heads": self._num_heads,
        "head_size": self._head_size,
        "dropout_rate": self._dropout_rate,
        "kernel_initializer": tf.keras.initializers.serialize(self._kernel_initializer),
        "bias_initializer": tf.keras.initializers.serialize(self._bias_initializer),
        "kernel_regularizer": tf.keras.regularizers.serialize(self._kernel_regularizer),
        "bias_regularizer": tf.keras.regularizers.serialize(self._bias_regularizer),
        "activity_regularizer": tf.keras.regularizers.serialize(self._activity_regularizer),
        "kernel_constraint": tf.keras.constraints.serialize(self._kernel_constraint),
        "bias_constraint": tf.keras.constraints.serialize(self._bias_constraint),
    }
    # BUG FIX: previously `super(MultiHeadAttention, self).get_config()` —
    # `MultiHeadAttention` is not defined/imported in this module, so calling
    # get_config() raised NameError. Use this class's own name.
    base_config = super(BlockMultiHeadAttention, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
def block_wise_attention_scores(self, query_tensor, key_tensor, attention_mask):
    """Compute block-sparse attention probabilities.

    Splits query/key tensors into fixed-size blocks along the sequence axis
    (axis=2; assumes [batch, heads, seq, head_dim] — TODO confirm) and only
    materialises query-block x key-block score products where the random
    pattern from ``get_qk_index_pos`` has a 1; every other score block stays
    an all-zero placeholder.

    Returns:
        (qk_index_pos, attention_probs): the sampled pattern and the
        probabilities with softmax taken only over the selected key blocks
        of each query block.
    """
    batch_size = tf.shape(query_tensor)[0]
    query_blocks = tf.split(query_tensor, axis=2, num_or_size_splits=from_seq_length // from_block_size)
    key_blocks = tf.split(key_tensor, axis=2, num_or_size_splits=to_seq_length // to_block_size)
    qk_index_pos = get_qk_index_pos(len(query_blocks), len(key_blocks))
    all_blocks = []
    attention_mask_q_split = tf.split(attention_mask, axis=1, num_or_size_splits=len(query_blocks))
    for q_index, q_block in enumerate(query_blocks):
        block_output = []
        non_zero_block_output = []
        attention_mask_block = []
        attention_mask_k_split = tf.split(
            attention_mask_q_split[q_index], axis=2, num_or_size_splits=len(key_blocks)
        )
        # Placeholder reused for every skipped (non-attended) key block.
        # NOTE(review): third dim is from_seq_length // from_block_size (=64),
        # presumably meant from_block_size — the values coincide under the
        # current constants; confirm before changing block sizes.
        zero_tensor = tf.zeros(
            (batch_size, self._num_heads, from_seq_length // from_block_size, to_seq_length // to_block_size)
        )
        for k_index, k_block in enumerate(key_blocks):
            if qk_index_pos[q_index][k_index] == 1:
                qk_block = tf.matmul(q_block, k_block, transpose_b=True)
                non_zero_block_output.append(qk_block)
                attention_mask_block.append(attention_mask_k_split[k_index])
            block_output.append(zero_tensor)
        # Softmax over the concatenation of only the selected score blocks.
        non_zero_block_output_softmax_masked = self._masked_softmax(
            [tf.concat(non_zero_block_output, axis=-1), tf.concat(attention_mask_block, axis=2)]
        )
        non_zero_block_output_softmax_blocks = tf.split(
            non_zero_block_output_softmax_masked,
            axis=-1,
            num_or_size_splits=max_allowed_sequence_length // to_block_size,
        )
        # Scatter the softmaxed blocks back to their original key positions.
        non_zero_counter = 0
        for _index, _value in enumerate(qk_index_pos[q_index]):
            if _value == 1:
                block_output[_index] = non_zero_block_output_softmax_blocks[non_zero_counter]
                non_zero_counter += 1
        all_blocks.append(tf.concat(block_output, axis=-1))
    attention_probs = tf.concat(all_blocks, axis=2)
    return qk_index_pos, attention_probs
def block_wise_full_calculations(self, query_tensor, key_tensor, value_tensor, input_mask):
    """Entire end-to-end attention and context calculation happens here.

    For each query block: compute scores only against the randomly selected
    key blocks, apply a masked softmax with a locally-built attention mask,
    and matmul against the matching value blocks to get the local context.

    Removed four unused locals from the original (``block_output``,
    ``attention_mask_block``, ``zero_block_output`` and a per-iteration
    ``zero_tensor`` allocation) — none affected the result.
    """
    batch_size = tf.shape(query_tensor)[0]
    query_blocks = tf.split(query_tensor, axis=2, num_or_size_splits=from_seq_length // from_block_size)
    key_blocks = tf.split(key_tensor, axis=2, num_or_size_splits=to_seq_length // to_block_size)
    value_blocks = tf.split(value_tensor, axis=2, num_or_size_splits=to_seq_length // to_block_size)
    qk_index_pos = get_qk_index_pos(len(query_blocks), len(key_blocks))
    all_blocks = []
    input_mask_split = tf.split(input_mask, axis=1, num_or_size_splits=len(query_blocks))
    for q_index, q_block in enumerate(query_blocks):
        non_zero_block_output = []
        input_mask_block = []
        value_blocks_local = []
        for k_index, k_block in enumerate(key_blocks):
            if qk_index_pos[q_index][k_index] == 1:
                qk_block = tf.matmul(q_block, k_block, transpose_b=True)
                non_zero_block_output.append(qk_block)
                # NOTE(review): the mask was split into len(query_blocks)
                # pieces but is indexed by k_index here — only consistent
                # while from/to lengths and block sizes match; confirm.
                input_mask_block.append(input_mask_split[k_index])
                value_blocks_local.append(value_blocks[k_index])
        input_mask_block = tf.concat(input_mask_block, axis=1)
        # The first argument only provides the [batch, to_block_size] shape
        # to SelfAttentionMask; its values are irrelevant.
        local_attention_mask = SelfAttentionMask()(
            [tf.random.uniform(shape=(batch_size, to_block_size, 1)), input_mask_block]
        )
        attention_probs_local = self._masked_softmax(
            [tf.concat(non_zero_block_output, axis=-1), local_attention_mask]
        )
        value_blocks_local = tf.concat(value_blocks_local, axis=2)
        context_layer_local = tf.matmul(attention_probs_local, value_blocks_local)
        all_blocks.append(context_layer_local)
    return tf.concat(all_blocks, axis=2)
def block_wise_context_caculation(self, qk_index_pos, attention_probs, value_tensor):
    """Multiply block-sparse attention probabilities with the value blocks.

    For each query-block slice of ``attention_probs`` only the value blocks
    whose entry in ``qk_index_pos`` is 1 contribute; skipped pairs add an
    explicit zero tensor so every slice accumulates the same shape.

    Args:
        qk_index_pos: 0/1 block sparsity pattern, indexed [q_block][k_block].
        attention_probs: attention weights, blocked along axes 2 (query) and
            3 (key).
        value_tensor: value projections, split into blocks along axis 2.

    Returns:
        The context tensor: per-block partial sums concatenated back along
        axis 2.
    """
    batch_size = tf.shape(value_tensor)[0]
    # NOTE(review): this split count uses from_seq_length // to_block_size,
    # while every other axis-2 split of a query-sized tensor in this class
    # uses from_seq_length // from_block_size -- confirm this is intentional
    # (it is only equivalent when the two block sizes are equal).
    attention_probs_q_split = tf.split(attention_probs, axis=2, num_or_size_splits=from_seq_length // to_block_size)
    value_blocks = tf.split(value_tensor, axis=2, num_or_size_splits=to_seq_length // to_block_size)
    all_blocks2 = []
    for a_index, a_block in enumerate(attention_probs_q_split):
        block_output = 0
        # Split this query block's weights along the key axis (axis 3).
        attention_probs_k_split = tf.split(
            attention_probs_q_split[a_index], axis=3, num_or_size_splits=len(attention_probs_q_split)
        )
        zero_tensor = tf.zeros((batch_size, self._num_heads, from_seq_length // from_block_size, self._head_size))
        for v_index, v_block in enumerate(value_blocks):
            if qk_index_pos[a_index][v_index] == 1:
                av_block = tf.matmul(attention_probs_k_split[v_index], v_block)
                block_output += av_block
            else:
                # Skipped pair: accumulate zeros so the running sum keeps a
                # fixed tensor shape.
                block_output += zero_tensor
        all_blocks2.append(block_output)
    context_layer = tf.concat(all_blocks2, axis=2)
    return context_layer
@staticmethod
def merge_attention_heads(x):
    """Fold the heads axis back into the feature axis.

    Transposes ``x`` so the sequence axis precedes the heads axis, then
    flattens heads and per-head features into one trailing dimension.
    """
    batch, num_heads, seq_len, head_width = tf_utils.get_shape_list(x)
    heads_last = tf.transpose(x, perm=[0, 2, 1, 3])
    return tf.reshape(heads_last, [batch, seq_len, num_heads * head_width])
def _update_cache(self, key_tensor, value_tensor, cache_key, cache_value, decode_loop_step):
    """Combine cached keys/values with the current step's keys/values.

    Args:
        key_tensor: key projections for the current step, indexed along
            axis 2 by sequence position.
        value_tensor: value projections for the current step, same layout.
        cache_key: previously accumulated keys.
        cache_value: previously accumulated values.
        decode_loop_step: decoding step index for the TPU path, or ``None``
            for the concatenating path.

    Returns:
        ``(key_tensor, value_tensor)``: the full-length tensors. The caller
        must store them back into its cache itself -- this method does not
        mutate its arguments. (The original rebound the local names
        ``cache_key``/``cache_value`` before returning, which had no effect
        outside this function; those dead assignments were removed.)
    """
    if decode_loop_step is not None:
        # TPU special case: the cache is already full length, so write the
        # new step into its slot with a one-hot mask instead of concatenating.
        key_seq_dim = cache_key.shape.as_list()[2]
        indices = tf.reshape(
            tf.one_hot(decode_loop_step, key_seq_dim, dtype=key_tensor.dtype),
            [1, 1, key_seq_dim, 1],
        )
        key_tensor = cache_key + key_tensor * indices
        value_seq_dim = cache_value.shape.as_list()[2]
        indices = tf.reshape(
            tf.one_hot(decode_loop_step, value_seq_dim, dtype=value_tensor.dtype),
            [1, 1, value_seq_dim, 1],
        )
        value_tensor = cache_value + value_tensor * indices
    else:
        # Default path: grow the cache by appending along the sequence axis.
        key_tensor = tf.concat([tf.cast(cache_key, key_tensor.dtype), key_tensor], axis=2)
        value_tensor = tf.concat([tf.cast(cache_value, value_tensor.dtype), value_tensor], axis=2)
    return key_tensor, value_tensor
def call_predict(self, inputs, cache_key=None, cache_value=None):
from_tensor = inputs[0]
to_tensor = inputs[1]
attention_mask = inputs[2] if len(inputs) == 3 else None
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
# `query_tensor` = [B, F, N ,H]
def left():
query_tensor = self._query_dense(from_tensor)
# `key_tensor` = [B, T, N, H]
key_tensor = self._key_dense(to_tensor)
# `value_tensor` = [B, T, N, H]
value_tensor = self._value_dense(to_tensor)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `query_tensor` = [B, N, F, H]
# 'key_tensor' = [B, N, T, H]
# `value_tensor` = [B, N, T, H]
# Transpose to [B, N, T, H]
query_tensor = tf.transpose(query_tensor, [0, 2, 1, 3])
key_tensor = tf.transpose(key_tensor, [0, 2, 1, 3])
value_tensor = tf.transpose(value_tensor, [0, 2, 1, 3])
return query_tensor, key_tensor, value_tensor
def right():
query_tensor = self._query_dense(from_tensor)
# `key_tensor` = [B, T, N, H]
key_tensor = self._key_dense(to_tensor)
# `value_tensor` = [B, T, N, H]
value_tensor = self._value_dense(to_tensor)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `query_tensor` = [B, N, F, H]
# 'key_tensor' = [B, N, T, H]
# `value_tensor` = [B, N, T, H]
# Transpose to [B, N, T, H]
query_tensor = tf.transpose(query_tensor, [0, 2, 1, 3])
key_tensor = tf.transpose(key_tensor, [0, 2, 1, 3])
value_tensor = tf.transpose(value_tensor, [0, 2, 1, 3])
key_tensor, value_tensor = self._update_cache(
key_tensor, value_tensor, cache_key, cache_value, decode_loop_step=None
)
return query_tensor, key_tensor, value_tensor
query_tensor, key_tensor, value_tensor = tf.cond(
tf.equal(tf.reduce_sum(cache_key), 0.0), lambda: left(), lambda: right()
)
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
# E = `embedding_dimension`
attention_scores = tf.einsum("BNFH,BNTH->BNFT", query_tensor, key_tensor)
attention_scores = tf.multiply(attention_scores, 1.0 / math.sqrt(float(self._head_size)))
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_scores_mask = tf.cast(tf.equal(attention_scores, 0.0), tf.float32) * -10000
attention_scores += attention_scores_mask
attention_probs = self._masked_softmax([attention_scores, attention_mask])
# Why multiply with this mask? When we have past key , in the case of variable batch
# we need not to consider padding values for softmax. So this is the hack
attention_probs = attention_probs * tf.expand_dims(attention_mask, 1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
| |
no ``Book`` found by the given ``Id``
raise: NullArgument - ``book_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_comment_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_comment_admin()`` and
``supports_visible_federation()`` are ``true``*
"""
if not self.supports_comment_admin():
raise errors.Unimplemented()
##
# Also include check to see if the catalog Id is found otherwise raise errors.NotFound
##
# pylint: disable=no-member
return sessions.CommentAdminSession(book_id, runtime=self._runtime)
@utilities.remove_null_proxy_kwarg
def get_comment_book_session(self):
    """Gets the session for retrieving comment to book mappings.

    return: (osid.commenting.CommentBookSession) - a
            ``CommentBookSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_comment_book()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_comment_book()`` is ``true``.*

    """
    if self.supports_comment_book():
        # pylint: disable=no-member
        return sessions.CommentBookSession(runtime=self._runtime)
    raise errors.Unimplemented()

comment_book_session = property(fget=get_comment_book_session)
@utilities.remove_null_proxy_kwarg
def get_comment_book_assignment_session(self):
    """Gets the session for assigning comment to book mappings.

    return: (osid.commenting.CommentBookAssignmentSession) - a
            ``CommentBookAssignmentSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_comment_book_assignment()``
            is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_comment_book_assignment()`` is ``true``.*

    """
    if self.supports_comment_book_assignment():
        # pylint: disable=no-member
        return sessions.CommentBookAssignmentSession(runtime=self._runtime)
    raise errors.Unimplemented()

comment_book_assignment_session = property(fget=get_comment_book_assignment_session)
@utilities.remove_null_proxy_kwarg
def get_book_lookup_session(self):
    """Gets the ``OsidSession`` associated with the book lookup service.

    return: (osid.commenting.BookLookupSession) - a
            ``BookLookupSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_book_lookup()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_book_lookup()`` is ``true``.*

    """
    if self.supports_book_lookup():
        # pylint: disable=no-member
        return sessions.BookLookupSession(runtime=self._runtime)
    raise errors.Unimplemented()

book_lookup_session = property(fget=get_book_lookup_session)
@utilities.remove_null_proxy_kwarg
def get_book_admin_session(self):
    """Gets the ``OsidSession`` associated with the book administrative service.

    return: (osid.commenting.BookAdminSession) - a
            ``BookAdminSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_book_admin()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_book_admin()`` is ``true``.*

    """
    if self.supports_book_admin():
        # pylint: disable=no-member
        return sessions.BookAdminSession(runtime=self._runtime)
    raise errors.Unimplemented()

book_admin_session = property(fget=get_book_admin_session)
@utilities.remove_null_proxy_kwarg
def get_book_hierarchy_session(self):
    """Gets the ``OsidSession`` associated with the book hierarchy service.

    return: (osid.commenting.BookHierarchySession) - a
            ``BookHierarchySession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_book_hierarchy()`` is
            ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_book_hierarchy()`` is ``true``.*

    """
    if self.supports_book_hierarchy():
        # pylint: disable=no-member
        return sessions.BookHierarchySession(runtime=self._runtime)
    raise errors.Unimplemented()

book_hierarchy_session = property(fget=get_book_hierarchy_session)
@utilities.remove_null_proxy_kwarg
def get_book_hierarchy_design_session(self):
    """Gets the ``OsidSession`` associated with the book hierarchy design service.

    return: (osid.commenting.BookHierarchyDesignSession) - a
            ``BookHierarchyDesignSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_book_hierarchy_design()`` is
            ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_book_hierarchy_design()`` is ``true``.*

    """
    if self.supports_book_hierarchy_design():
        # pylint: disable=no-member
        return sessions.BookHierarchyDesignSession(runtime=self._runtime)
    raise errors.Unimplemented()

book_hierarchy_design_session = property(fget=get_book_hierarchy_design_session)
def get_commenting_batch_manager(self):
    """Gets a ``CommentingBatchManager``.

    return: (osid.commenting.batch.CommentingBatchManager) - a
            ``CommentingBatchManager``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_commenting_batch()`` is
            ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_commenting_batch()`` is ``true``.*

    """
    # The batch service is not provided by this implementation, so this
    # raises unconditionally (no supports_* check is performed first).
    raise errors.Unimplemented()
commenting_batch_manager = property(fget=get_commenting_batch_manager)
class CommentingProxyManager(osid_managers.OsidProxyManager, CommentingProfile, commenting_managers.CommentingProxyManager):
"""The commenting manager provides access to commenting sessions and provides interoperability tests for various aspects of this service.
Methods in this manager accept a ``Proxy`` for passing information
from a server environment. The sessions included in this manager
are:
* ``CommentLookupSession:`` a session to lookup comments
* ``RatingLookupSession:`` a session to lookup comments
* ``CommentQuerySession:`` a session to query comments
* ``CommentSearchSession:`` a session to search comments
* ``CommentAdminSession:`` a session to manage comments
* ``CommentNotificationSession:`` a session to subscribe to
notifications of comment changes
* ``CommentBookSession:`` a session for looking up comment and
book mappings
* ``CommentBookAssignmentSession:`` a session for managing comment
and book mappings
* ``CommentSmartBookSession:`` a session to manage dynamic comment
books
* ``BookLookupSession:`` a session to retrieve books
* ``BookQuerySession:`` a session to query books
* ``BookSearchSession:`` a session to search for books
* ``BookAdminSession:`` a session to create, update and delete
books
* ``BookNotificationSession:`` a session to receive notifications
for changes in books
* ``BookHierarchyTraversalSession:`` a session to traverse
hierarchies of books
* ``BookHierarchyDesignSession:`` a session to manage hierarchies
of books
The commenting manager also provides a profile for determining the
supported search types supported by this service.
"""
def __init__(self):
    # Initialize only the OsidProxyManager base explicitly (no super()
    # chain); the other bases in the class's MRO are not initialized here.
    osid_managers.OsidProxyManager.__init__(self)
@utilities.arguments_not_none
def get_comment_lookup_session(self, proxy):
    """Gets the ``OsidSession`` associated with the comment lookup service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.commenting.CommentLookupSession) - a
            ``CommentLookupSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_comment_lookup()`` is
            ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_comment_lookup()`` is ``true``.*

    """
    if self.supports_comment_lookup():
        # pylint: disable=no-member
        return sessions.CommentLookupSession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
@utilities.arguments_not_none
def get_comment_lookup_session_for_book(self, book_id, proxy):
    """Gets the ``OsidSession`` associated with the comment lookup service for the given book.

    arg:    book_id (osid.id.Id): the ``Id`` of the ``Book``
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.commenting.CommentLookupSession) - a
            ``CommentLookupSession``
    raise:  NotFound - no ``Book`` found by the given ``Id``
    raise:  NullArgument - ``book_id`` or ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_comment_lookup()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_comment_lookup()`` and
    ``supports_visible_federation()`` are ``true``*

    """
    if self.supports_comment_lookup():
        # TODO: also check that the catalog Id exists, else raise errors.NotFound
        # pylint: disable=no-member
        return sessions.CommentLookupSession(book_id, proxy, self._runtime)
    raise errors.Unimplemented()
@utilities.arguments_not_none
def get_comment_query_session(self, proxy):
    """Gets the ``OsidSession`` associated with the comment query service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.commenting.CommentQuerySession) - a
            ``CommentQuerySession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_comment_query()`` is
            ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_comment_query()`` is ``true``.*

    """
    if self.supports_comment_query():
        # pylint: disable=no-member
        return sessions.CommentQuerySession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
@utilities.arguments_not_none
def get_comment_query_session_for_book(self, book_id, proxy):
    """Gets the ``OsidSession`` associated with the comment query service for the given book.

    arg:    book_id (osid.id.Id): the ``Id`` of the ``Book``
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.commenting.CommentQuerySession) - a
            ``CommentQuerySession``
    raise:  NotFound - no ``Comment`` found by the given ``Id``
    raise:  NullArgument - ``book_id`` or ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_comment_query()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_comment_query()`` and
    ``supports_visible_federation()`` are ``true``*

    """
    if self.supports_comment_query():
        # TODO: also check that the catalog Id exists, else raise errors.NotFound
        # pylint: disable=no-member
        return sessions.CommentQuerySession(book_id, proxy, self._runtime)
    raise errors.Unimplemented()
@utilities.arguments_not_none
def get_comment_admin_session(self, proxy):
    """Gets the ``OsidSession`` associated with the comment administration service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.commenting.CommentAdminSession) - a
            ``CommentAdminSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_comment_admin()`` is
            ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_comment_admin()`` is ``true``.*

    """
    if self.supports_comment_admin():
        # pylint: disable=no-member
        return sessions.CommentAdminSession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
@utilities.arguments_not_none
def get_comment_admin_session_for_book(self, book_id, proxy):
    """Gets the ``OsidSession`` associated with the comment administration service for the given book.

    arg:    book_id (osid.id.Id): the ``Id`` of the ``Book``
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.commenting.CommentAdminSession) - a
            ``CommentAdminSession``
    raise:  NotFound - no ``Comment`` found by the given ``Id``
    raise:  NullArgument - ``book_id`` or ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_comment_admin()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_comment_admin()`` and
    ``supports_visible_federation()`` are ``true``*

    """
    if self.supports_comment_admin():
        # TODO: also check that the catalog Id exists, else raise errors.NotFound
        # pylint: disable=no-member
        return sessions.CommentAdminSession(book_id, proxy, self._runtime)
    raise errors.Unimplemented()
@utilities.arguments_not_none
def get_comment_book_session(self, proxy):
    """Gets the session for retrieving comment to book mappings.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.commenting.CommentBookSession) - a
            ``CommentBookSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_comment_book()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_comment_book()`` is ``true``.*

    """
    if self.supports_comment_book():
        # pylint: disable=no-member
        return sessions.CommentBookSession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
@utilities.arguments_not_none
def get_comment_book_assignment_session(self, proxy):
"""Gets the session for assigning comment to book mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.commenting.CommentBookAssignmentSession) - a
``CommentBookAssignmentSession``
raise: NullArgument - ``proxy`` is |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.