content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
import unittest
from robot.parsing import TestCaseFile
from robot.parsing.model import TestCaseTable
from robot.utils import ET, ETSource, StringIO
from robot.utils.asserts import assert_equal
def create_test_case_file():
    """Build a minimal in-memory TestCaseFile: one 3-column header, one test
    ('A test') with a single step ['A kw', 'an arg']."""
    data = TestCaseFile(source='foo.txt')
    table = TestCaseTable(data)
    data.testcase_table = table
    table.set_header(['test case', 'some', 'and other'])
    test = table.add('A test')
    test.add_step(['A kw', 'an arg'])
    return data
class _WriterTestCase(unittest.TestCase):
    """Shared helpers for the writer tests below (not run directly)."""

    def _test_rows_are_not_split_if_there_are_headers(self, format='txt'):
        # A step with more cells than the 3-column header must not be wrapped:
        # the saved data stays exactly 3 lines.
        output = self._add_long_step_and_save(format)
        assert_equal(len(output.splitlines()), 3)

    def _add_long_step_and_save(self, format):
        # Append a wide step (8 cells) to the fixture test and save it to an
        # in-memory buffer in the requested format.
        data = create_test_case_file()
        data.testcase_table.tests[0].add_step(['A kw', '1', '2', '3', '4', '6', '7', '8'])
        output = StringIO()
        data.save(format=format, output=output)
        return output.getvalue().strip()
class TestSpaceSeparatedWriter(_WriterTestCase):
    """Tests for the plain-text (space separated) format writer."""

    def test_end_of_line_whitespace_is_removed(self):
        output = StringIO()
        create_test_case_file().save(output=output)
        # NOTE(review): the literal below depends on the writer's exact column
        # separator (multiple spaces); the separating whitespace may have been
        # lost in transit — confirm against the writer's default before relying
        # on this expected value.
        expected = '''\
*** test case *** some and other
A test A kw an arg
'''
        # repr() comparison makes trailing-whitespace differences visible.
        assert_equal(repr(expected), repr(output.getvalue()))

    def test_rows_are_not_split_if_there_are_headers(self):
        self._test_rows_are_not_split_if_there_are_headers()

    def test_configuring_number_of_separating_spaces(self):
        output = StringIO()
        create_test_case_file().save(output=output, txt_separating_spaces=8)
        # NOTE(review): same caveat as above — expected alignment should show
        # 8-space separation.
        expected = '''\
*** test case *** some and other
A test A kw an arg
'''
        assert_equal(repr(expected), repr(output.getvalue()))
class TestTsvWriter(_WriterTestCase):
    """Tests for the TSV format writer."""

    def test_rows_are_not_split_if_there_are_headers(self):
        try:
            import csv
        except ImportError:
            pass  # csv not available on IronPython 2.7
        else:
            self._test_rows_are_not_split_if_there_are_headers('tsv')
class TestHtmlWriter(_WriterTestCase):
    """Tests for the HTML format writer."""

    def test_rows_are_not_split_if_there_are_headers(self):
        output = self._add_long_step_and_save('html')
        # Drop the first line (doctype) so ElementTree can parse the document.
        with ETSource('\n'.join(output.splitlines()[1:])) as source:
            tree = ET.parse(source)
        lines = tree.findall('body/table/tr')
        assert_equal(len(lines), 3)
        for l in lines:
            # Header rows use <th>, data rows use <td>; both must keep all
            # 9 columns on a single row.
            cols = l.findall('td') or l.findall('th')
            assert_equal(len(cols), 9)
# Run all writer tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| utest/writer/test_filewriters.py | 2,631 | csv not available on IronPython 2.7 | 35 | en | 0.515641 |
import cv2.cv2 as cv2
import skimage.io as io
from skimage.transform import downscale_local_mean
import numpy as np
from model import *
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from images_to_arr import *
import pickle
import csv
def removeBackground(img_in):
    """Suppress low background intensities via a linear stretch.

    Maps every pixel x to 1.11*x - 0.11 (so ~0.1 becomes ~0 and 1 stays 1)
    and clamps negative results to zero.  Returns a new array; the input is
    left unmodified.
    """
    stretched = np.asarray(img_in) * 1.11 - 0.11
    # Clamp everything that fell below zero back to zero.
    return np.maximum(stretched, 0)
def newBBcoords(img_pred_Log,test_image):
    # returns coordinates of the bounding box for the region with the largest area
    """Return an integer bounding box around the largest connected region.

    The binary prediction is morphologically closed, connected components are
    labelled, and the largest-area region is kept.  Its bounding box is then
    widened when it is narrow (target ~30% of image width / ~50% of height)
    and clamped to the image borders.

    Returns [min_row, min_col, max_row, max_col] as rounded ints.
    Raises if the prediction contains no foreground region.
    """
    kernel_ones = np.ones([3,3],np.uint8)
    # Close small gaps so the blob gets a single connected label.
    closing_Log = cv2.morphologyEx(img_pred_Log, cv2.MORPH_CLOSE, kernel_ones)
    # label/regionprops come from a star import — presumably skimage.measure.
    labelsLog, numLog = label(closing_Log, neighbors=8, background = 0, return_num = True)
    regionsLog = regionprops(labelsLog)
    areasLog = [region['area'] for region in regionsLog]
    areasLogArr = np.array(areasLog)
    maxIndex = np.argmax(areasLogArr)
    # Label value of the largest region, read at one of its own pixels;
    # the label image is turned into a 0/1 mask of that region.
    value = labelsLog[regionsLog[maxIndex]['coords'][0][0],regionsLog[maxIndex]['coords'][0][1]]
    labelsLog[labelsLog != value] = 0
    labelsLog[labelsLog == value] = 1
    labelsImg = np.multiply(np.array(labelsLog, np.uint8),255)  # debug image, unused
    #myShowImage(labelsImg)
    # bbox is (min_row, min_col, max_row, max_col).
    sizeBoxX = regionsLog[maxIndex]['bbox'][3]-regionsLog[maxIndex]['bbox'][1]
    sizeBoxY = regionsLog[maxIndex]['bbox'][2]-regionsLog[maxIndex]['bbox'][0]
    coordsBbox = list(regionsLog[maxIndex]['bbox'])
    # Symmetrically widen boxes that cover less than half the image extent.
    if sizeBoxX <= 0.5 * img_pred_Log.shape[1]:
        newSizeBoxX = 0.3 / (sizeBoxX / img_pred_Log.shape[1])
        coordsBbox[1] = coordsBbox[1] - sizeBoxX*(0.5*(newSizeBoxX-1))
        coordsBbox[3] = coordsBbox[3] + sizeBoxX*(0.5*(newSizeBoxX-1))
    if sizeBoxY <= 0.5 * img_pred_Log.shape[0]:
        newSizeBoxY = 0.5 / (sizeBoxY / img_pred_Log.shape[0])
        coordsBbox[0] = coordsBbox[0] - sizeBoxY*(0.5*(newSizeBoxY-1))
        coordsBbox[2] = coordsBbox[2] + sizeBoxY*(0.5*(newSizeBoxY-1))
    # Clamp the widened box to the image bounds.
    if coordsBbox[0] < 0:
        coordsBbox[0] = 0
    if coordsBbox[1] < 0:
        coordsBbox[1] = 0
    if coordsBbox[2] > test_image.shape[0]:
        coordsBbox[2] = test_image.shape[0] - 1
    if coordsBbox[3] > test_image.shape[1]:
        coordsBbox[3] = test_image.shape[1] - 1
    coordsBboxInt = [round(x) for x in coordsBbox]
    return coordsBboxInt
def getLargestAreaEcentroid(img_pred_Log):
    # returns mask with the regions with the largest area, coords of centroid and radius
    """Keep only the largest connected region of a binary prediction.

    Returns
    -------
    labelsArr : 0/1 array marking the largest region
    centreCoords : region centroid (row, col) rounded to unsigned ints
    radius : (major_axis + minor_axis) / 4, i.e. the mean half-axis length
    colsCoord : [min_col, max_col] of the region's bounding box

    Raises if the prediction contains no foreground region.
    """
    kernel_ones = np.ones([3,3],np.uint8)
    closing_Log = cv2.morphologyEx(img_pred_Log, cv2.MORPH_CLOSE, kernel_ones)
    # label/regionprops come from a star import — presumably skimage.measure.
    labelsLog, numLog = label(closing_Log, neighbors=8, background = 0, return_num = True)
    regionsLog = regionprops(labelsLog)
    areasLog = [region['area'] for region in regionsLog]
    areasLogArr = np.array(areasLog)
    maxIndex = np.argmax(areasLogArr)
    # Label value of the largest region, read at one of its own pixels.
    value = labelsLog[regionsLog[maxIndex]['coords'][0][0],regionsLog[maxIndex]['coords'][0][1]]
    labelsLog[labelsLog != value] = 0
    labelsLog[labelsLog == value] = 1
    centreCoords = np.round(regionsLog[maxIndex]['centroid'])
    centreCoords = centreCoords.astype(np.uint)
    radius = (regionsLog[maxIndex]['major_axis_length'] + regionsLog[maxIndex]['minor_axis_length']) / 4
    colsCoord = [regionsLog[maxIndex]['bbox'][1],regionsLog[maxIndex]['bbox'][3]]
    labelsArr = np.array(labelsLog)
    return labelsArr, centreCoords, radius, colsCoord
# --- Load pre-computed per-image feature arrays (40 images; see loops below) ---
image_arr = np.load('image_arr.npy')
mask_arr = np.load('mask_arr.npy')
image_arr_red_channels = np.load('image_arr_red_channels.npy')
image_arr_green_channels = np.load('image_arr_green_channels.npy')
image_arr_blue_channels = np.load('image_arr_blue_channels.npy')
entropy = np.load('entropy_arr.npy')
elips = np.load('elips_arr.npy')
vessels = np.load('vessels_arr.npy')
# Working buffers; overwritten inside the processing stages below.
test_image = np.zeros(image_arr[0].shape)
test_image_mask = np.zeros(mask_arr[0].shape)
test_img_RC = np.zeros(image_arr[0].shape)
test_img_GC = np.zeros(image_arr[0].shape)
test_img_BC = np.zeros(image_arr[0].shape)
entropy_arr = np.zeros(image_arr[0].shape)
elips_arr = np.zeros(image_arr[0].shape)
ODROILog = []  # per-image OD bounding boxes from the logistic-regression model
ODROIBay = []  # per-image OD bounding boxes from the naive-Bayes model
# Stage switch: set True to (re)train the leave-one-out OD localisation models.
getClassifiers = False
if getClassifiers:
    # Leave-one-out optic-disc localisation: for every held-out image j, train
    # Bayes + logistic-regression pixel classifiers on the other 39 images,
    # predict image j, and store the resulting OD bounding boxes.
    X_train = np.zeros([image_arr[0].shape[0]*image_arr[0].shape[1]*40,4])
    Y_train = np.zeros([image_arr[0].shape[0]*image_arr[0].shape[1]*40,1])
    for j in range(0,40):
        for i in range(0,40): # Get train data
            if i == j:
                continue
            test_image = image_arr[i]
            test_image_mask = mask_arr[i]
            labels, num = label(test_image_mask, neighbors=8, background = 0, return_num = True)
            regions = regionprops(labels)
            centreCoords = np.round(regions[0]['centroid'])
            centreCoords = centreCoords.astype(np.uint)
            centreMask = np.zeros(test_image_mask.shape)
            centreMask[centreCoords[0],centreCoords[1]] = 1
            #Change here!
            #test_image_mask = centreMask
            test_image_RC = image_arr_red_channels[i]
            test_image_GC = image_arr_green_channels[i]
            test_image_BC = image_arr_blue_channels[i]
            entropy_arr = entropy[i]
            elips_arr = elips[i]
            #test_image_RC = removeBackground(test_image_RC)
            #test_image = removeBackground(test_image)
            imageIndxs = np.where(test_image != 0)
            # Flatten each feature image into a column and z-score normalise.
            intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
            intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
            redChannel_Arr = np.squeeze(test_image_RC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
            redChannel_Arr = (redChannel_Arr-np.average(redChannel_Arr)) / np.std(redChannel_Arr)
            entropy_arr = np.squeeze(entropy_arr.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
            #entropy_arr = (entropy_arr-np.average(entropy_arr)) / np.std(entropy_arr)
            # Distance Array: normalised distance of each pixel from the image centre.
            indices_Arr = np.indices((test_image.shape[0],test_image.shape[1])).transpose((1,2,0))
            centreCoords = np.array([test_image.shape[0]/2,test_image.shape[1]/2])
            distance_Arr = np.sqrt(np.add(np.power(indices_Arr[...,0]-centreCoords[0],2),np.power(indices_Arr[...,1]-centreCoords[1],2)))
            normDistance_Arr = distance_Arr / np.max(distance_Arr)
            normDistanceColumn_Arr = np.squeeze(normDistance_Arr.reshape([1,normDistance_Arr.shape[0]*normDistance_Arr.shape[1]])).T
            # NOTE(review): row block j of X_train/Y_train stays all-zero (the
            # held-out image is skipped, its slot is never filled).
            X_train[i*image_arr[0].shape[0]*image_arr[0].shape[1]:(i+1)*image_arr[0].shape[0]*image_arr[0].shape[1],...] = np.column_stack((redChannel_Arr,entropy_arr,normDistanceColumn_Arr, intensityColumn_Arr))#,
            Y_train[i*image_arr[0].shape[0]*image_arr[0].shape[1]:(i+1)*image_arr[0].shape[0]*image_arr[0].shape[1],0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
        X_train_2 = X_train
        y_train_2 = Y_train
        clf_bayes = GaussianNB()
        clf_bayes.fit(X_train_2,y_train_2)
        paramsBayes = clf_bayes.get_params
        # Logistic regression
        clf_log = LogisticRegression()
        clf_log.fit(X_train_2,y_train_2)
        # Persist both classifiers, keyed by the held-out image index.
        log = open('Classifiers/Log/LogClf_excluding_' + str(j) + '.pickle', 'wb')
        pickle.dump(clf_log, log)
        log.close()
        bay = open('Classifiers/Bay/BayClf_excluding_' + str(j) + '.pickle', 'wb')
        pickle.dump(clf_bayes, bay)
        bay.close()
        '''
        f = open('my_classifier.pickle', 'rb')
        classifier = pickle.load(f)
        f.close()
        '''
        # --- Predict the held-out image j ---
        test_image2 = np.zeros(image_arr[0].shape)
        test_image_mask2 = np.zeros(mask_arr[0].shape)
        test_img_RC2 = np.zeros(image_arr[0].shape)
        # test_img_GC2 = np.zeros(image_arr[0].shape)
        test_image2 = image_arr[j]
        test_image_mask2 = mask_arr[j]
        test_image_RC2 = image_arr_red_channels[j]
        test_image_GC2 = image_arr_green_channels[j]
        test_image_BC2 = image_arr_blue_channels[j]
        entropy_arr2 = entropy[j]
        intensityColumn_Arr2 = np.squeeze(test_image2.reshape([1,test_image2.shape[0]*test_image2.shape[1]])).T
        intensityColumn_Arr2 = (intensityColumn_Arr2-np.average(intensityColumn_Arr2)) / np.std(intensityColumn_Arr2)
        redChannel_Arr2 = np.squeeze(test_image_RC2.reshape([1,test_image2.shape[0]*test_image2.shape[1]])).T
        redChannel_Arr2 = ( redChannel_Arr2 - np.average(redChannel_Arr2) ) / np.std(redChannel_Arr2)
        # NOTE(review): reuses test_image and normDistanceColumn_Arr from the
        # last inner-loop iteration — valid only if all images share one shape.
        entropy_arr = np.squeeze(entropy_arr2.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
        X_val = np.column_stack((redChannel_Arr2,entropy_arr,normDistanceColumn_Arr,intensityColumn_Arr2))#,,greenChannel_Arr2))
        Y_val = np.squeeze(test_image_mask2.reshape([1,test_image_mask2.shape[0]*test_image_mask2.shape[1]])).T
        # predicts
        predictsBayes = clf_bayes.predict(X_val)
        predictsLog = clf_log.predict(X_val)
        img_pred_Log = predictsLog.reshape([test_image.shape[0],test_image.shape[1]])
        img_pred_Bayes = predictsBayes.reshape([test_image.shape[0],test_image.shape[1]])
        # Y_train_reshaped = Y_train.reshape([test_image.shape[0],test_image.shape[1]])
        #myShowImage(img_pred_Log,"img_pred_Log_" + str(j))
        #myShowImage(img_pred_Bayes,"img_pred_Bayes_" + str(j))
        # An empty prediction makes newBBcoords raise; record an empty box then.
        try:
            coordsBBLog = newBBcoords(img_pred_Log,test_image)
        except:
            coordsBBLog = []
        try:
            coordsBBBay = newBBcoords(img_pred_Bayes,test_image)
        except:
            coordsBBBay = []
        ODROILog.append(coordsBBLog)
        ODROIBay.append(coordsBBBay)
    ODROILog_Arr = np.array(ODROILog)
    ODROIBay_Arr = np.array(ODROIBay)
    np.save('ODROILog_Arr.npy',ODROILog_Arr)
    np.save('ODROIBay_Arr.npy',ODROIBay_Arr)
# Stage switch: crop the detected OD region from every image and suppress the
# vessels inside it (vessel pixels are replaced by a dimmed max-filter value).
prepareSegments = False
if prepareSegments:
    ODROILog_Arr = np.load('ODROILog_Arr.npy')
    ODROIBay_Arr = np.load('ODROIBay_Arr.npy')
    OD_section = []
    OD_mask = []
    OD_section_RC = []
    lenX_Arr = 0  # running total of pixels over all crops
    for i in range(0,40):
        try:
            # Prefer the logistic-regression ROI; an empty box raises on
            # indexing and falls through to the Bayes ROI below.
            coords = ODROILog_Arr[i]
            #myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"LOG" +str(i))
            segMask = np.array(mask_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
            segRC = np.array(image_arr_red_channels[i][coords[0]:coords[2],coords[1]:coords[3]])
            imgSegment = np.array(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
            vesslesSeg = np.array(vessels[i][coords[0]:coords[2],coords[1]:coords[3]])
            kernel_ones = np.ones([3,3],np.uint8)
            # Dilate the vessel map so vessel borders are covered as well.
            vesslesSeg = cv2.morphologyEx(vesslesSeg, cv2.MORPH_DILATE, kernel_ones)
            indxsVesl = np.where(vesslesSeg != 0)
            medianFiltered = median(imgSegment,disk(25))  # computed but unused here
            maxFiltered = maximum_filter(imgSegment, size=15)
            # In-paint vessel pixels with 97% of the local maximum.
            smoothVessels = np.copy(imgSegment)
            smoothVessels[indxsVesl[0],indxsVesl[1]] = np.multiply(maxFiltered[indxsVesl[0],indxsVesl[1]],0.97)
            #smoothDisk = mean(smoothVessels, disk(5))
            OD_section.append(smoothVessels)
            OD_mask.append(segMask)
            OD_section_RC.append(segRC)
            lenX_Arr = lenX_Arr + (imgSegment.shape[0]*imgSegment.shape[1])
            #coords = ODROIBay_Arr[i]
            #myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"BAY" + str(i))
        except:
            # Fallback: same processing using the naive-Bayes ROI.
            coords = ODROIBay_Arr[i]
            segMask = np.array(mask_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
            segRC = np.array(image_arr_red_channels[i][coords[0]:coords[2],coords[1]:coords[3]])
            imgSegment = np.array(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
            vesslesSeg = np.array(vessels[i][coords[0]:coords[2],coords[1]:coords[3]])
            kernel_ones = np.ones([3,3],np.uint8)
            vesslesSeg = cv2.morphologyEx(vesslesSeg, cv2.MORPH_DILATE, kernel_ones)
            indxsVesl = np.where(vesslesSeg != 0)
            #medianFiltered = median(imgSegment,disk(25))
            maxFiltered = maximum_filter(imgSegment, size=15)
            smoothVessels = np.copy(imgSegment)
            smoothVessels[indxsVesl[0],indxsVesl[1]] = np.multiply(maxFiltered[indxsVesl[0],indxsVesl[1]],0.97)
            #myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"EXCEPT" + str(i))
            OD_section.append(smoothVessels)
            OD_mask.append(segMask)
            OD_section_RC.append(segRC)
            #print('except')
            lenX_Arr = lenX_Arr + (imgSegment.shape[0]*imgSegment.shape[1])
            #myShowImage(smoothVessels)
    OD_section_Arr = np.array(OD_section)
    OD_mask_Arr = np.array(OD_mask)
    OD_section_RC = np.array(OD_section_RC)
    np.save('OD_section_Arr.npy',OD_section_Arr)
    np.save('OD_mask_Arr.npy',OD_mask_Arr)
    np.save('OD_section_RC.npy',OD_section_RC)
    print(lenX_Arr) # len = 4577126
# Stage switch: leave-one-out pixel-level OD segmentation inside the crops.
finalSegmentation = False
finalMaskPredicts = []
if finalSegmentation:
    OD_section_Arr = np.load('OD_section_Arr.npy')
    OD_mask_Arr = np.load('OD_mask_Arr.npy')
    OD_section_RC = np.load('OD_section_RC.npy')
    clahe = cv2.createCLAHE(clipLimit=1, tileGridSize=(8, 8))
    for j in range(0,40):
        removeLen = OD_section_Arr[j].shape[0] * OD_section_Arr[j].shape[1]
        # 4577126 = total pixel count over all 40 crops (printed by the
        # prepareSegments stage above).
        X_train = np.zeros([4577126-removeLen,2])
        Y_train = np.zeros([4577126-removeLen,1])
        for i in range(0,40):
            if i == j:
                continue
            test_image = OD_section_Arr[i]
            test_image_mask = OD_mask_Arr[i]
            segRC = OD_section_RC[i]
            # CLAHE needs uint8 input; images are assumed in [0, 1] here —
            # TODO(review) confirm the scaling.
            clahePrep = np.multiply(np.copy(test_image),255)
            clahePrep = clahePrep.astype(np.uint8)
            highContrast = clahe.apply(clahePrep)
            intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
            intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
            segRC = np.squeeze(segRC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
            #segRC = (segRC-np.average(segRC)) / np.std(segRC)
            # NOTE(review): the row indexing below assumes all crops share one
            # pixel count; verify, since crops are clamped to image borders.
            if (i-1)*test_image.shape[0]*test_image.shape[1] < 0 and (i)*test_image.shape[0]*test_image.shape[1] == 0:
                X_train[(i-1)*test_image.shape[0]*test_image.shape[1]::,...] = np.column_stack((intensityColumn_Arr,segRC))#,
                Y_train[(i-1)*test_image.shape[0]*test_image.shape[1]::,0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
                continue
            X_train[(i-1)*test_image.shape[0]*test_image.shape[1]:(i)*test_image.shape[0]*test_image.shape[1],...] = np.column_stack((intensityColumn_Arr,segRC))#,
            Y_train[(i-1)*test_image.shape[0]*test_image.shape[1]:(i)*test_image.shape[0]*test_image.shape[1],0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
        X_train_2 = X_train
        y_train_2 = Y_train
        clf_bayes = GaussianNB()
        clf_bayes.fit(X_train_2,y_train_2)
        paramsBayes = clf_bayes.get_params
        # Logistic regression
        clf_log = LogisticRegression()
        clf_log.fit(X_train_2,y_train_2)
        log = open('Classifiers/Segments/Log/LogClf_excluding_' + str(j) + '.pickle', 'wb')
        pickle.dump(clf_log, log)
        log.close()
        bay = open('Classifiers/Segments/Bay/BayClf_excluding_' + str(j) + '.pickle', 'wb')
        pickle.dump(clf_bayes, bay)
        bay.close()
        # --- Predict the held-out crop j ---
        test_image = OD_section_Arr[j]
        test_image_mask = OD_mask_Arr[j]
        segRC = OD_section_RC[j]
        clahePrep = np.multiply(np.copy(test_image),255)
        clahePrep = clahePrep.astype(np.uint8)
        highContrast = clahe.apply(clahePrep)
        intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
        intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
        segRC = np.squeeze(segRC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
        #segRC = (segRC-np.average(segRC)) / np.std(segRC)
        X_val = np.column_stack((intensityColumn_Arr,segRC))
        predictsBayes = clf_bayes.predict(X_val)
        predictsLog = clf_log.predict(X_val)
        img_pred_Log = predictsLog.reshape([test_image.shape[0],test_image.shape[1]])
        img_pred_Bayes = predictsBayes.reshape([test_image.shape[0],test_image.shape[1]])
        #myShowImage(img_pred_Log,"Log")
        #myShowImage(img_pred_Bayes,"Bayes")
        #myShowImage(test_image,"Actual")
        # Only the Bayes prediction is kept for the later stages.
        finalMaskPredicts.append(predictsBayes)
        #print('ok')
    finalMaskPredicts_Arr = np.array(finalMaskPredicts)
    np.save("finalMaskPredicts_Bayes.npy",finalMaskPredicts_Arr)
# Stage switch: map the OD masks back into full-image coordinates and derive a
# fovea search box on the side of the OD that faces the image centre.
loadFinalSegs = False
if loadFinalSegs:
    foveaBBoxCoords = []
    centroidCoord = []
    ODmaskPredicts = []
    elips = np.load('elips_arr.npy')
    originalDimsBase = np.zeros(image_arr[0].shape)
    OD_section_Arr = np.load('OD_section_Arr.npy')
    finalMaskPredicts_Arr = np.load("finalMaskPredicts_Bayes.npy")
    ODROILog_Arr = np.load('ODROILog_Arr.npy')
    ODROIBay_Arr = np.load('ODROIBay_Arr.npy')
    for i in range(0,40):
        originalDims = np.copy(originalDimsBase)
        test_image = OD_section_Arr[i]
        maskPred = finalMaskPredicts_Arr[i].reshape([test_image.shape[0],test_image.shape[1]])
        finalMask, centroidCoords, radius, colsCoord = getLargestAreaEcentroid(maskPred)
        finalMaskImg = np.multiply(finalMask,255)
        finalMaskImg[centroidCoords[0],centroidCoords[1]] = 255  # mark the centroid
        try:
            # Use the Log ROI when present; indexing an empty box raises and
            # the Bayes ROI is used instead.
            coords = ODROILog_Arr[i]
            failTest = (coords[2])
        except:
            coords = ODROIBay_Arr[i]
            failTest = (coords[2])
        # Translate crop-local coordinates into full-image coordinates.
        coordsReal =[centroidCoords[0] + coords[0],centroidCoords[1] + coords[1]]
        colsCoordReal = [colsCoord[0] + coords[1],colsCoord[1] + coords[1]]
        originalDims[coords[0]:coords[2],coords[1]:coords[3]] = finalMaskImg
        #originalDims = originalDims or elips[i]
        # Resize the ellipse overlay to image size, binarise and thin it to a
        # one-pixel curve.
        elipsResized = cv2.resize(elips[i], dsize=(originalDims.shape[1],originalDims.shape[0]), interpolation=cv2.INTER_CUBIC)
        elipsResized = np.average(elipsResized,axis = 2) # 3 channels -> 1 channel
        elipsResized[elipsResized>0.5] = 1
        elipsResized[elipsResized<1] = 0
        elipsResized = thin(elipsResized)
        elipsIndexs = np.where(elipsResized != 0)
        originalDims = originalDims.astype(np.uint8)
        #originalDims[elipsIndexs] = 255
        indexsOD_ELi = np.where(originalDims != 0)
        #myShowImage(originalDims,str(i))
        checkResults = np.copy(image_arr[i])
        checkResults[indexsOD_ELi] = originalDims[indexsOD_ELi]
        #checkResults[0::,np.min(elipsIndexs[1])] = 255 # left
        #checkResults[0::,np.max(elipsIndexs[1])] = 255 # right
        # Decide which side of the image the OD sits on and search for the
        # fovea 3-6 OD radii towards the opposite side.
        if abs(coordsReal[1]-np.min(elipsIndexs[1])) < abs(coordsReal[1]-np.max(elipsIndexs[1])):
            #isleft -> walk right
            #relevantColumn = coordsReal[1] + 30 # based on centroid
            relevantColumn = colsCoordReal[1] - 10 # based on
            columnROI_f = [coordsReal[1] + round(3*radius),coordsReal[1] + round(6*radius)]
        else:
            #isright -> walk left
            #relevantColumn = coordsReal[1] - 30
            relevantColumn = colsCoordReal[0] + 10
            columnROI_f = [coordsReal[1] - round(6*radius),coordsReal[1] - round(3*radius)]
        # Row extent of the fovea box = where the ellipse crosses the column.
        relevantRows = np.where(elipsResized[...,relevantColumn]!=0)
        checkResults[relevantRows[0][0]:relevantRows[0][-1],columnROI_f[0]] = 0 # 1 - columnROI_f[0]
        checkResults[relevantRows[0][0]:relevantRows[0][-1],columnROI_f[1]] = 0 # 3 - columnROI_f[1]
        checkResults[relevantRows[0][0],columnROI_f[0]:columnROI_f[1]] = 0 # 0 - relevantRows[0][0]
        checkResults[relevantRows[0][-1],columnROI_f[0]:columnROI_f[1]] = 0 # 2 - relevantRows[0][-1]
        foveaBBoxCoords.append((relevantRows[0][0],columnROI_f[0],relevantRows[0][-1],columnROI_f[1]))
        centroidCoord.append(coordsReal)
        originalDims = np.divide(originalDims,255)  # back to a 0/1 mask
        ODmaskPredicts.append(originalDims)
        #myShowImage(originalDims,str(i))
        #myShowImage(checkResults,str(i))
    foveaBBoxCoords_Arr = np.array(foveaBBoxCoords)
    centroidCoord_Arr = np.array(centroidCoord)
    ODmaskPredicts_Arr = np.array(ODmaskPredicts)
    np.save("bbox_fovea.npy",foveaBBoxCoords_Arr)
    np.save("centroidCoord_Arr.npy",centroidCoord_Arr)
    np.save("ODmaskPredicts_Arr.npy",ODmaskPredicts_Arr)
# Stage switch: read ground-truth fovea coordinates for the first 40 images.
getFoveaGTCoords = True
if getFoveaGTCoords:
    foveCoordsGT = []  # one [x, y] pair per image, as floats
    tempCoords =[]
    imgNo = 0
    with open('Datasets/fovea_location.csv') as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row
        for row in reader:
            #print(row)
            # Columns 1 and 2 hold the coordinate pair; column 0 is the image id.
            tempCoords.append(float(row[1]))
            tempCoords.append(float(row[2]))
            foveCoordsGT.append(tempCoords)
            tempCoords =[]
            imgNo += 1
            if imgNo == 40:
                break
# Stage switch: leave-one-out fovea localisation inside the fovea search boxes.
getFoveaCoordsPred = False
'''for i in range(0,40):
myShowImage(image_arr[i])
myShowImage(image_arr_red_channels[i])
myShowImage(image_arr_green_channels[i])
myShowImage(vessels[i])
myShowImage(entropy_arr[i])'''
if getFoveaCoordsPred:
    foveaBBoxCoords_Arr = np.load("bbox_fovea.npy")
    foveaBBoxCoords_Arr = np.absolute(foveaBBoxCoords_Arr)
    removeLen = 0
    realCentroidCoords_Arr = []
    clahe = cv2.createCLAHE(clipLimit=1, tileGridSize=(8, 8))
    for i in range(0,40): # not the best way...
        # Normalise each bbox so (row0, col0) <= (row1, col1).
        if foveaBBoxCoords_Arr[i][3] < foveaBBoxCoords_Arr[i][1]:
            temp = foveaBBoxCoords_Arr[i][1]
            foveaBBoxCoords_Arr[i][1] = foveaBBoxCoords_Arr[i][3]
            foveaBBoxCoords_Arr[i][3] = temp
        if foveaBBoxCoords_Arr[i][2] < foveaBBoxCoords_Arr[i][0]:
            temp = foveaBBoxCoords_Arr[i][0]
            foveaBBoxCoords_Arr[i][0] = foveaBBoxCoords_Arr[i][2]
            foveaBBoxCoords_Arr[i][2] = temp
        test_image = image_arr[i]
        fovea_region = test_image[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
        bboxShape = fovea_region.shape
        removeLen += bboxShape[0]*bboxShape[1]
        #print(removeLen)
    for j in range(0,40):
        removeLen = (foveaBBoxCoords_Arr[j][2]-foveaBBoxCoords_Arr[j][0]) * (foveaBBoxCoords_Arr[j][3]-foveaBBoxCoords_Arr[j][1])
        X_train = np.zeros([3187816-removeLen,3]) # 3187816 = number of points in all fovea bboxs
        Y_train = np.zeros([3187816-removeLen,1])
        first = 0  # running row offset into X_train/Y_train
        for i in range(0,40):
            if i == j:
                continue
            '''if foveaBBoxCoords_Arr[i][3] < foveaBBoxCoords_Arr[i][1]:
            temp = foveaBBoxCoords_Arr[i][1]
            foveaBBoxCoords_Arr[i][1] = foveaBBoxCoords_Arr[i][3]
            foveaBBoxCoords_Arr[i][3] = temp
            if foveaBBoxCoords_Arr[i][2] < foveaBBoxCoords_Arr[i][0]:
            temp = foveaBBoxCoords_Arr[i][0]
            foveaBBoxCoords_Arr[i][0] = foveaBBoxCoords_Arr[i][2]
            foveaBBoxCoords_Arr[i][2] = temp'''
            test_image = image_arr[i]
            fovea_region = test_image[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
            bboxShape = fovea_region.shape
            last = bboxShape[0]*bboxShape[1] + first
            foveaRegionGC = image_arr_green_channels[i][foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
            clahePrep = np.multiply(np.copy(foveaRegionGC),255)
            clahePrep = clahePrep.astype(np.uint8)
            highContrast = clahe.apply(clahePrep)
            #mask
            # Positive labels: a 20x20 window around the GT fovea centre.
            # GT coords are divided by 4 — presumably the working images are
            # 4x downscaled; TODO(review) confirm.
            maskBig = np.zeros(test_image.shape)
            coordsFoveaCenter = [round(foveCoordsGT[i][1]/4),round(foveCoordsGT[i][0]/4)]
            maskBig[coordsFoveaCenter[0]-10:coordsFoveaCenter[0]+10,coordsFoveaCenter[1]-10:coordsFoveaCenter[1]+10] = 1
            mask = maskBig[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
            # Flatten and z-score the three feature channels.
            fovea_region = np.squeeze(fovea_region.reshape([1,bboxShape[0]*bboxShape[1]])).T
            fovea_region = (fovea_region-np.average(fovea_region)) / np.std(fovea_region)
            foveaRegionGC = np.squeeze(foveaRegionGC.reshape([1,bboxShape[0]*bboxShape[1]])).T
            foveaRegionGC = (foveaRegionGC-np.average(foveaRegionGC)) / np.std(foveaRegionGC)
            highContrast = np.squeeze(highContrast.reshape([1,bboxShape[0]*bboxShape[1]])).T
            highContrast = (highContrast-np.average(highContrast)) / np.std(highContrast)
            '''if (i-1)*bboxShape[0]*bboxShape[1] < 0 and (i)*bboxShape[0]*bboxShape[1] == 0:
            X_train[(i-1)*bboxShape[0]*bboxShape[1]::,...] = np.column_stack((fovea_region,foveaRegionGC,highContrast))#,
            Y_train[(i-1)*bboxShape[0]*bboxShape[1]::,0] = np.squeeze(mask.reshape([1,bboxShape[0]*bboxShape[1]])).T
            continue'''
            X_train[first:last,...] = np.column_stack((fovea_region,foveaRegionGC,highContrast))#,
            Y_train[first:last,0] = np.squeeze(mask.reshape([1,bboxShape[0]*bboxShape[1]])).T
            first = last
        X_train_2 = X_train
        y_train_2 = Y_train
        clf_bayes = GaussianNB()
        clf_bayes.fit(X_train_2,y_train_2)
        paramsBayes = clf_bayes.get_params
        # Logistic regression
        clf_log = LogisticRegression()
        clf_log.fit(X_train_2,y_train_2)
        '''log = open('Classifiers/Segments/Log/LogClf_excluding_' + str(j) + '.pickle', 'wb')
        pickle.dump(clf_log, log)
        log.close()
        bay = open('Classifiers/Segments/Bay/BayClf_excluding_' + str(j) + '.pickle', 'wb')
        pickle.dump(clf_bayes, bay)
        bay.close()'''
        # --- Predict the held-out image j ---
        test_image = image_arr[j]
        fovea_region = test_image[foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]]
        bboxShape = fovea_region.shape
        foveaRegionGC = image_arr_green_channels[j][foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]]
        clahePrep = np.multiply(np.copy(foveaRegionGC),255)
        clahePrep = clahePrep.astype(np.uint8)
        highContrast = clahe.apply(clahePrep)
        fovea_region = np.squeeze(fovea_region.reshape([1,bboxShape[0]*bboxShape[1]])).T
        fovea_region = (fovea_region-np.average(fovea_region)) / np.std(fovea_region)
        foveaRegionGC = np.squeeze(foveaRegionGC.reshape([1,bboxShape[0]*bboxShape[1]])).T
        foveaRegionGC = (foveaRegionGC-np.average(foveaRegionGC)) / np.std(foveaRegionGC)
        highContrast = np.squeeze(highContrast.reshape([1,bboxShape[0]*bboxShape[1]])).T
        highContrast = (highContrast-np.average(highContrast)) / np.std(highContrast)
        X_val = np.column_stack((fovea_region,foveaRegionGC,highContrast))
        predictsBayes = clf_bayes.predict(X_val)
        predictsLog = clf_log.predict(X_val)
        img_pred_Log = predictsLog.reshape(bboxShape)
        img_pred_Bayes = predictsBayes.reshape(bboxShape)
        # Fall back to the bbox centre when no fovea region was detected.
        try:
            finalMask, centroidCoords, radius, colsCoord = getLargestAreaEcentroid(img_pred_Bayes)
            if centroidCoords.size == 0:
                finalMask = np.zeros(img_pred_Bayes.shape)
                finalMask[round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)] = 1
                centroidCoords = np.array([round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)])
        except:
            finalMask = np.zeros(img_pred_Bayes.shape)
            finalMask[round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)] = 1
            centroidCoords = np.array([round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)])
        maskEyes = np.copy(finalMask)
        maskEyes = np.multiply(maskEyes,255)
        maskEyes = maskEyes.astype(np.uint8)
        #myShowImage(test_image[foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]],"fovea")
        #myShowImage(maskEyes,"Mask")
        #myShowImage(img_pred_Bayes,"Bay")
        # Back to full-image coordinates.
        realCentroidCoords = [centroidCoords[0] + foveaBBoxCoords_Arr[j][0],centroidCoords[1] + foveaBBoxCoords_Arr[j][1]]
        realCentroidCoords_Arr.append(realCentroidCoords)
    realCentroidCoords_Arr = np.array(realCentroidCoords_Arr)
    np.save('fovea_centre_coords.npy',realCentroidCoords_Arr)
#centroidCoord_Arr = np.load("centroidCoord_Arr.npy")
#ODmaskPredicts_Arr = np.load("ODmaskPredicts_Arr.npy")
#for i in range(0,40):
# Stage switch: visualise the decision boundaries of the last-trained
# classifiers.  Requires one of the training stages above to have run in the
# same session (uses X_train_2 / y_train_2 / clf_bayes / clf_log).
showGraphsClass= False
if showGraphsClass:
    import matplotlib.pyplot as plt
    from sklearn import svm, datasets

    def make_meshgrid(x, y, h=.02):
        """Create a mesh of points to plot in

        Parameters
        ----------
        x: data to base x-axis meshgrid on
        y: data to base y-axis meshgrid on
        h: stepsize for meshgrid, optional

        Returns
        -------
        xx, yy : ndarray
        """
        x_min, x_max = x.min() - 1, x.max() + 1
        y_min, y_max = y.min() - 1, y.max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))
        return xx, yy

    def plot_contours(ax, clf, xx, yy, proba=False, **params):
        """Plot the decision boundaries for a classifier.

        Parameters
        ----------
        ax: matplotlib axes object
        clf: a classifier
        xx: meshgrid ndarray
        yy: meshgrid ndarray
        proba: plot class probabilities instead of hard predictions
        params: dictionary of params to pass to contourf, optional
        """
        if proba:
            # Last column = probability of the positive class (smooth contours).
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:,-1]
        else:
            Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)
        out = ax.contourf(xx, yy, Z,20, **params)
        return out

    ## import some data to play with
    #iris = datasets.load_iris()
    ## Take the first two features. We could avoid this by using a two-dim dataset
    #X = iris.data[:, :2]
    #y = iris.target
    X = X_train_2
    y = y_train_2
    # we create an instance of SVM and fit out data. We do not scale our
    # data since we want to plot the support vectors
    models = (clf_bayes, clf_log) #, clf_svm, clf_svm_rbf)
    # title for the plots
    titles = ('Bayes',
              'Logistic regression')
    ''' ,
    'SVC with linear kernel',
    'SVM with RBF kernel')'''
    # Set-up 2x2 grid for plotting.
    #fig, sub =
    #plt.subplots_adjust(wspace=0.4, hspace=0.4)
    # Subsample every 500th training point to keep the scatter plot light.
    X0, X1 = X[0::500, 0], X[0::500, 1]
    xx, yy = make_meshgrid(X0, X1,h=0.005)
    '''_,ax_all = plt.subplots(1,2)
    ax = ax_all[1]
    plot_contours(ax, clf_bayes, xx, yy,
    cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(X0, X1, c=y[0::500], cmap=plt.cm.coolwarm, s=20)
    ax.set_xlim(X0.min(), X0.max())
    ax.set_ylim(X1.min(), X1.max())
    ax.set_xlabel('Distance')
    ax.set_ylabel('Intensity')
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title("Bayes")
    plt.show()'''
    showPlots = False
    if showPlots:
        # Two panels per classifier: probabilities (left), hard labels (right).
        for clf, title in zip(models, titles):
            _,ax_all = plt.subplots(1,2)
            ax = ax_all[0]
            plot_contours(ax, clf, xx, yy, proba=True, # changed proba to probability
                          cmap=plt.cm.coolwarm, alpha=0.8)
            ax.scatter(X0, X1, c=y[0::500], cmap=plt.cm.coolwarm, s=20)
            ax.set_xlim(X0.min(), X0.max())
            ax.set_ylim(X1.min(), X1.max())
            ax.set_xlabel('Distance')
            ax.set_ylabel('Intensity')
            ax.set_xticks(())
            ax.set_yticks(())
            ax.set_title(title)
            ax = ax_all[1]
            plot_contours(ax, clf, xx, yy,
                          cmap=plt.cm.coolwarm, alpha=0.8)
            ax.scatter(X0, X1, c=y[0::500], cmap=plt.cm.coolwarm, s=20)
            ax.set_xlim(X0.min(), X0.max())
            ax.set_ylim(X1.min(), X1.max())
            ax.set_xlabel('Distance')
            ax.set_ylabel('Intensity')
            ax.set_xticks(())
            ax.set_yticks(())
            ax.set_title(title)
            plt.show()
print("Done")
Parameters
----------
x: data to base x-axis meshgrid on
y: data to base y-axis meshgrid on
h: stepsize for meshgrid, optional
Returns
-------
xx, yy : ndarray
Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
returns coordinates of the bounding box for the region with the largest areamyShowImage(labelsImg) returns mask with the regions with the largest area, coords of centroid and radius Get train dataChange here!test_image_mask = centreMasktest_image_RC = removeBackground(test_image_RC)test_image = removeBackground(test_image)entropy_arr = (entropy_arr-np.average(entropy_arr)) / np.std(entropy_arr) Distance Array, Logistic regression test_img_GC2 = np.zeros(image_arr[0].shape),,greenChannel_Arr2)) predicts Y_train_reshaped = Y_train.reshape([test_image.shape[0],test_image.shape[1]])myShowImage(img_pred_Log,"img_pred_Log_" + str(j))myShowImage(img_pred_Bayes,"img_pred_Bayes_" + str(j))myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"LOG" +str(i))smoothDisk = mean(smoothVessels, disk(5)) coords = ODROIBay_Arr[i]myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"BAY" + str(i))medianFiltered = median(imgSegment,disk(25))myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"EXCEPT" + str(i))print('except')myShowImage(smoothVessels) len = 4577126segRC = (segRC-np.average(segRC)) / np.std(segRC),, Logistic regressionsegRC = (segRC-np.average(segRC)) / np.std(segRC)myShowImage(img_pred_Log,"Log")myShowImage(img_pred_Bayes,"Bayes")myShowImage(test_image,"Actual")print('ok')originalDims = originalDims or elips[i] 3 channels -> 1 channeloriginalDims[elipsIndexs] = 255myShowImage(originalDims,str(i))checkResults[0::,np.min(elipsIndexs[1])] = 255 leftcheckResults[0::,np.max(elipsIndexs[1])] = 255 rightisleft -> walk rightrelevantColumn = coordsReal[1] + 30 based on centroid based on isright -> walk leftrelevantColumn = coordsReal[1] - 30 1 - columnROI_f[0] 3 - columnROI_f[1] 0 - relevantRows[0][0] 2 - relevantRows[0][-1]myShowImage(originalDims,str(i))myShowImage(checkResults,str(i))print(row) not the best way...print(removeLen) 3187816 = number of points in all fovea bboxsmask, Logistic 
regressionmyShowImage(test_image[foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]],"fovea")myShowImage(maskEyes,"Mask")myShowImage(img_pred_Bayes,"Bay") centroidCoord_Arr = np.load("centroidCoord_Arr.npy")ODmaskPredicts_Arr = np.load("ODmaskPredicts_Arr.npy")for i in range(0,40): import some data to play withiris = datasets.load_iris() Take the first two features. We could avoid this by using a two-dim datasetX = iris.data[:, :2]y = iris.target we create an instance of SVM and fit out data. We do not scale our data since we want to plot the support vectors, clf_svm, clf_svm_rbf) title for the plots Set-up 2x2 grid for plotting.fig, sub = plt.subplots_adjust(wspace=0.4, hspace=0.4) changed proba to probability | 3,146 | en | 0.443463 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import pickle
import pandas as pd
from cgp import *
from cgp_config import *
from cnn_train import CNN_train
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Evolving CAE structures')
    parser.add_argument('--gpu_num', '-g', type=int, default=1, help='Num. of GPUs')
    parser.add_argument('--lam', '-l', type=int, default=2, help='Num. of offsprings')
    parser.add_argument('--net_info_file', default='network_info.pickle', help='Network information file name')
    parser.add_argument('--log_file', default='./log_cgp.txt', help='Log file name')
    parser.add_argument('--mode', '-m', default='evolution', help='Mode (evolution / retrain / reevolution)')
    parser.add_argument('--init', '-i', action='store_true')
    args = parser.parse_args()

    # --- Optimization of the CNN architecture ---
    if args.mode == 'evolution':
        # Create CGP configuration and save network information
        # (rows x cols is the CGP grid; level_back bounds how far back a node may connect
        #  -- presumably, per usual CGP conventions; confirm in CgpInfoConvSet)
        network_info = CgpInfoConvSet(rows=5, cols=30, level_back=10, min_active_num=1, max_active_num=30)
        with open(args.net_info_file, mode='wb') as f:
            pickle.dump(network_info, f)
        # Evaluation function for CGP (training CNN and return validation accuracy)
        imgSize = 32
        eval_f = CNNEvaluation(gpu_num=args.gpu_num, dataset='cifar10', verbose=True, epoch_num=50, batchsize=128,
                               imgSize=imgSize)

        # Execute evolution
        cgp = CGP(network_info, eval_f, lam=args.lam, imgSize=imgSize, init=args.init)
        cgp.modified_evolution(max_eval=250, mutation_rate=0.1, log_file=args.log_file)

    # --- Retraining evolved architecture ---
    elif args.mode == 'retrain':
        print('Retrain')
        # In the case of existing log_cgp.txt
        # Load CGP configuration
        with open(args.net_info_file, mode='rb') as f:
            network_info = pickle.load(f)
        # Load network architecture
        cgp = CGP(network_info, None)
        data = pd.read_csv(args.log_file, header=None)  # Load log file
        cgp.load_log(list(data.tail(1).values.flatten().astype(int)))  # Read the log at final generation
        print(cgp._log_data(net_info_type='active_only', start_time=0))
        # Retraining the network
        temp = CNN_train('cifar10', validation=False, verbose=True, batchsize=128)
        acc = temp(cgp.pop[0].active_net_list(), 0, epoch_num=500, out_model='retrained_net.model')
        print(acc)

        # # otherwise (in the case where we do not have a log file.)
        # temp = CNN_train('haze1', validation=False, verbose=True, imgSize=128, batchsize=16)
        # cgp = [['input', 0], ['S_SumConvBlock_64_3', 0], ['S_ConvBlock_64_5', 1], ['S_SumConvBlock_128_1', 2], ['S_SumConvBlock_64_1', 3], ['S_SumConvBlock_64_5', 4], ['S_DeConvBlock_3_3', 5]]
        # acc = temp(cgp, 0, epoch_num=500, out_model='retrained_net.model')

    elif args.mode == 'reevolution':
        # restart evolution from the last logged generation
        print('Restart Evolution')
        # NOTE(review): imgSize is 64 here but 32 in 'evolution' mode -- confirm this is intentional.
        imgSize = 64
        with open('network_info.pickle', mode='rb') as f:
            network_info = pickle.load(f)
        eval_f = CNNEvaluation(gpu_num=args.gpu_num, dataset='cifar10', verbose=True, epoch_num=50, batchsize=128,
                               imgSize=imgSize)

        cgp = CGP(network_info, eval_f, lam=args.lam, imgSize=imgSize)
        data = pd.read_csv('./log_cgp.txt', header=None)
        cgp.load_log(list(data.tail(1).values.flatten().astype(int)))
        cgp.modified_evolution(max_eval=250, mutation_rate=0.1, log_file='./log_restat.txt')

    else:
        print('Undefined mode. Please check the "-m evolution or retrain or reevolution" ')
| exp_main.py | 3,730 | !/usr/bin/env python -*- coding: utf-8 -*- --- Optimization of the CNN architecture --- Create CGP configuration and save network information Evaluation function for CGP (training CNN and return validation accuracy) Execute evolution --- Retraining evolved architecture --- In the case of existing log_cgp.txt Load CGP configuration Load network architecture Load log file Read the log at final generation Retraining the network otherwise (in the case where we do not have a log file.) temp = CNN_train('haze1', validation=False, verbose=True, imgSize=128, batchsize=16) cgp = [['input', 0], ['S_SumConvBlock_64_3', 0], ['S_ConvBlock_64_5', 1], ['S_SumConvBlock_128_1', 2], ['S_SumConvBlock_64_1', 3], ['S_SumConvBlock_64_5', 4], ['S_DeConvBlock_3_3', 5]] acc = temp(cgp, 0, epoch_num=500, out_model='retrained_net.model') restart evolution | 841 | en | 0.618722 |
#!/usr/bin/env python3
import os
import shutil
import threading
from selfdrive.swaglog import cloudlog
from selfdrive.loggerd.config import ROOT, get_available_bytes, get_available_percent
from selfdrive.loggerd.uploader import listdir_by_creation
from selfdrive.dragonpilot.dashcam import DASHCAM_FREESPACE_LIMIT
# Free-space floor in bytes (5 GiB) below which old logs are purged.
MIN_BYTES = 5 * 1024 * 1024 * 1024
# Free-space floor in percent, raised by the dashcam's reserved share
# (assumes DASHCAM_FREESPACE_LIMIT is a 0..1 fraction -- TODO confirm).
MIN_PERCENT = 10 + (DASHCAM_FREESPACE_LIMIT * 100)
def deleter_thread(exit_event):
  """Poll free disk space and purge the oldest unlocked log directory when low.

  Runs until `exit_event` is set.
  """
  while not exit_event.is_set():
    low_on_bytes = get_available_bytes(default=MIN_BYTES + 1) < MIN_BYTES
    low_on_percent = get_available_percent(default=MIN_PERCENT + 1) < MIN_PERCENT

    if not (low_on_bytes or low_on_percent):
      # Plenty of space: re-check only every 30 seconds.
      exit_event.wait(30)
      continue

    # Walk directories oldest-first and delete the first one with no .lock file.
    for candidate in listdir_by_creation(ROOT):
      candidate_path = os.path.join(ROOT, candidate)
      if any(name.endswith(".lock") for name in os.listdir(candidate_path)):
        continue
      try:
        cloudlog.info("deleting %s" % candidate_path)
        shutil.rmtree(candidate_path)
        break
      except OSError:
        cloudlog.exception("issue deleting %s" % candidate_path)
    # Short pause before the next pass while space is still tight.
    exit_event.wait(.1)
def main():
  """Run the deleter loop forever (the event is never set, so this blocks)."""
  exit_event = threading.Event()
  deleter_thread(exit_event)


if __name__ == "__main__":
  main()
| selfdrive/loggerd/deleter.py | 1,285 | !/usr/bin/env python3 remove the earliest directory we can | 58 | en | 0.405588 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from future.utils import iteritems
from collections import defaultdict
from copy import deepcopy
from itertools import product
import re
from sqlalchemy.sql import select
from .models import Candidate, TemporarySpan, Sentence
from .udf import UDF, UDFRunner
# Seconds to wait when collecting results from worker queues
# (not referenced in this chunk -- TODO confirm usage elsewhere in the package).
QUEUE_COLLECT_TIMEOUT = 5
class CandidateExtractor(UDFRunner):
    """
    Extracts :class:`Candidate` objects of type `candidate_class` from Contexts.

    :param candidate_class: the relation type to extract, created via
        :func:`snorkel.models.candidate_subclass <snorkel.models.candidate.candidate_subclass>`
    :param cspaces: one :class:`CandidateSpace` (or a list, one per relation argument)
        defining the space of Contexts to consider
    :param matchers: one :class:`snorkel.matchers.Matcher` (or a list, one per relation
        argument); only Context tuples accepted element-wise by every Matcher are returned
    :param self_relations: if True, also extract Candidates relating a Context to itself.
        Binary relations only. Default False.
    :param nested_relations: if True, also extract Candidates relating a Context to one
        that contains it. Binary relations only. Default False.
    :param symmetric_relations: if True, extract both rel(A, B) and rel(B, A).
        Binary relations only. Default False.
    """
    def __init__(self, candidate_class, cspaces, matchers, self_relations=False,
                 nested_relations=False, symmetric_relations=False):
        # Collect the UDF construction arguments once, then hand off to UDFRunner,
        # which instantiates CandidateExtractorUDF workers with them.
        udf_init_kwargs = dict(
            candidate_class=candidate_class,
            cspaces=cspaces,
            matchers=matchers,
            self_relations=self_relations,
            nested_relations=nested_relations,
            symmetric_relations=symmetric_relations,
        )
        super(CandidateExtractor, self).__init__(CandidateExtractorUDF, **udf_init_kwargs)

    def apply(self, xs, split=0, **kwargs):
        # Thin wrapper that pins the default split to 0.
        super(CandidateExtractor, self).apply(xs, split=split, **kwargs)

    def clear(self, session, split, **kwargs):
        # Remove every Candidate previously extracted into this split.
        session.query(Candidate).filter(Candidate.split == split).delete()
class CandidateExtractorUDF(UDF):
    """Worker UDF that generates Candidate objects for one Context at a time.

    Constructed by :class:`CandidateExtractor`; see that class for parameter docs.
    """
    def __init__(self, candidate_class, cspaces, matchers, self_relations, nested_relations, symmetric_relations, **kwargs):
        self.candidate_class = candidate_class
        # Note: isinstance is the way to check types -- not type(x) in [...]!
        # Single space/matcher arguments are normalized to one-element lists.
        self.candidate_spaces = cspaces if isinstance(cspaces, (list, tuple)) else [cspaces]
        self.matchers = matchers if isinstance(matchers, (list, tuple)) else [matchers]
        self.nested_relations = nested_relations
        self.self_relations = self_relations
        self.symmetric_relations = symmetric_relations

        # Check that arity is same
        if len(self.candidate_spaces) != len(self.matchers):
            raise ValueError("Mismatched arity of candidate space and matcher.")
        else:
            self.arity = len(self.candidate_spaces)

        # Make sure the candidate spaces are different so generators aren't expended!
        self.candidate_spaces = list(map(deepcopy, self.candidate_spaces))

        # Preallocates internal data structures: one set of child contexts per argument slot.
        self.child_context_sets = [None] * self.arity
        for i in range(self.arity):
            self.child_context_sets[i] = set()

        super(CandidateExtractorUDF, self).__init__(**kwargs)

    def apply(self, context, clear, split, **kwargs):
        """Yield Candidates extracted from one `context`.

        :param context: the Context (e.g. a Sentence) to extract from
        :param clear: when True, skip the per-candidate existence query (table was cleared)
        :param split: split id stored on every generated Candidate
        """
        # Generate TemporaryContexts that are children of the context using the candidate_space and filtered
        # by the Matcher
        for i in range(self.arity):
            self.child_context_sets[i].clear()
            for tc in self.matchers[i].apply(self.candidate_spaces[i].apply(context)):
                tc.load_id_or_insert(self.session)
                self.child_context_sets[i].add(tc)

        # Generates and persists candidates
        extracted = set()
        candidate_args = {'split': split}
        # Cartesian product over the argument slots; enumerate keeps each slot's index.
        for args in product(*[enumerate(child_contexts) for child_contexts in self.child_context_sets]):

            # TODO: Make this work for higher-order relations
            if self.arity == 2:
                ai, a = args[0]
                bi, b = args[1]

                # Check for self-joins, "nested" joins (joins from span to its subspan), and flipped duplicate
                # "symmetric" relations. For symmetric relations, if mentions are of the same type, maintain
                # their order in the sentence.
                if not self.self_relations and a == b:
                    continue
                elif not self.nested_relations and (a in b or b in a):
                    continue
                elif not self.symmetric_relations and ((b, a) in extracted or
                    (self.matchers[0] == self.matchers[1] and a.char_start > b.char_start)):
                    continue

                # Keep track of extracted
                extracted.add((a,b))

            # Assemble candidate arguments
            for i, arg_name in enumerate(self.candidate_class.__argnames__):
                candidate_args[arg_name + '_id'] = args[i][1].id

            # Checking for existence: skip candidates already persisted with identical args.
            if not clear:
                q = select([self.candidate_class.id])
                for key, value in iteritems(candidate_args):
                    q = q.where(getattr(self.candidate_class, key) == value)
                candidate_id = self.session.execute(q).first()
                if candidate_id is not None:
                    continue

            # Add Candidate to session
            yield self.candidate_class(**candidate_args)
class CandidateSpace(object):
    """
    Defines the **space** of candidate objects

    Calling _apply(x)_ given an object _x_ returns a generator over candidates in _x_.
    """
    def __init__(self):
        pass

    def apply(self, x):
        # Abstract hook: subclasses (e.g. Ngrams) yield candidate spans from x.
        raise NotImplementedError()
class Ngrams(CandidateSpace):
    """
    Defines the space of candidates as all n-grams (n <= n_max) in a Sentence _x_,
    indexing by **character offset**.

    :param n_max: maximum n-gram length, in tokens
    :param split_tokens: characters on which single tokens are additionally split into
        two sub-spans (e.g. 'foo-bar' also yields 'foo' and 'bar'); a falsy/empty
        value disables splitting
    """
    def __init__(self, n_max=5, split_tokens=('-', '/')):
        CandidateSpace.__init__(self)
        self.n_max = n_max
        # Regex alternation matching any split token, or None when splitting is disabled.
        self.split_rgx = r'('+r'|'.join(split_tokens)+r')' if split_tokens and len(split_tokens) > 0 else None

    def apply(self, context):
        """Yield every unique TemporarySpan of up to n_max tokens in `context`."""
        # These are the character offset--**relative to the sentence start**--for each _token_
        offsets = context.char_offsets

        # Loop over all n-grams in **reverse** order (to facilitate longest-match semantics)
        L = len(offsets)
        seen = set()
        for l in range(1, self.n_max+1)[::-1]:
            for i in range(L-l+1):
                w = context.words[i+l-1]
                start = offsets[i]
                end = offsets[i+l-1] + len(w) - 1
                ts = TemporarySpan(char_start=start, char_end=end, sentence=context)
                if ts not in seen:
                    seen.add(ts)
                    yield ts

                # Check for split
                # NOTE: For simplicity, we only split single tokens right now!
                if l == 1 and self.split_rgx is not None and end - start > 0:
                    m = re.search(self.split_rgx, context.text[start-offsets[0]:end-offsets[0]+1])
                    # (Dropped the original `l < self.n_max + 1` guard: it is always
                    # true here, since this branch requires l == 1 and hence n_max >= 1.)
                    if m is not None:
                        ts1 = TemporarySpan(char_start=start, char_end=start + m.start(1) - 1, sentence=context)
                        if ts1 not in seen:
                            seen.add(ts1)
                            # BUG FIX: was `yield ts`, which re-yielded the full token
                            # and silently dropped the first split fragment (the ts2
                            # branch below shows the intended pattern).
                            yield ts1
                        ts2 = TemporarySpan(char_start=start + m.end(1), char_end=end, sentence=context)
                        if ts2 not in seen:
                            seen.add(ts2)
                            yield ts2
class PretaggedCandidateExtractor(UDFRunner):
    """UDFRunner for PretaggedCandidateExtractorUDF"""
    def __init__(self, candidate_class, entity_types, self_relations=False,
                 nested_relations=False, symmetric_relations=True, entity_sep='~@~'):
        # Gather the worker-UDF construction arguments, then delegate to UDFRunner.
        udf_init_kwargs = dict(
            candidate_class=candidate_class,
            entity_types=entity_types,
            self_relations=self_relations,
            nested_relations=nested_relations,
            entity_sep=entity_sep,
            symmetric_relations=symmetric_relations,
        )
        super(PretaggedCandidateExtractor, self).__init__(
            PretaggedCandidateExtractorUDF, **udf_init_kwargs)

    def apply(self, xs, split=0, **kwargs):
        # Thin wrapper that pins the default split to 0.
        super(PretaggedCandidateExtractor, self).apply(xs, split=split, **kwargs)

    def clear(self, session, split, **kwargs):
        # Remove every Candidate previously extracted into this split.
        session.query(Candidate).filter(Candidate.split == split).delete()
class PretaggedCandidateExtractorUDF(UDF):
    """
    An extractor for Sentences with entities pre-tagged, and stored in the entity_types and entity_cids
    fields.
    """
    def __init__(self, candidate_class, entity_types, self_relations=False, nested_relations=False, symmetric_relations=False, entity_sep='~@~', **kwargs):
        self.candidate_class = candidate_class
        self.entity_types = entity_types
        # One relation argument per entity type.
        self.arity = len(entity_types)
        self.self_relations = self_relations
        self.nested_relations = nested_relations
        self.symmetric_relations = symmetric_relations
        # Separator used when one token carries several stacked entity tags.
        self.entity_sep = entity_sep

        super(PretaggedCandidateExtractorUDF, self).__init__(**kwargs)

    def apply(self, context, clear, split, check_for_existing=True, **kwargs):
        """Extract Candidates from a Context"""
        # For now, just handle Sentences
        if not isinstance(context, Sentence):
            # NOTE(review): `self.__name__` is unusual on an instance -- confirm UDF
            # defines it, otherwise this error path itself raises AttributeError.
            raise NotImplementedError("%s is currently only implemented for Sentence contexts." % self.__name__)

        # Do a first pass to collect all mentions by entity type / cid
        # entity_idxs maps entity type -> cid -> list of token indices.
        entity_idxs = dict((et, defaultdict(list)) for et in set(self.entity_types))
        L = len(context.words)
        for i in range(L):
            if context.entity_types[i] is not None:
                ets = context.entity_types[i].split(self.entity_sep)
                cids = context.entity_cids[i].split(self.entity_sep)
                for et, cid in zip(ets, cids):
                    if et in entity_idxs:
                        entity_idxs[et][cid].append(i)

        # Form entity Spans by merging runs of consecutive token indices with the same cid.
        entity_spans = defaultdict(list)
        entity_cids  = {}
        for et, cid_idxs in iteritems(entity_idxs):
            for cid, idxs in iteritems(entity_idxs[et]):
                while len(idxs) > 0:
                    i          = idxs.pop(0)
                    char_start = context.char_offsets[i]
                    char_end   = char_start + len(context.words[i]) - 1
                    while len(idxs) > 0 and idxs[0] == i + 1:
                        i        = idxs.pop(0)
                        char_end = context.char_offsets[i] + len(context.words[i]) - 1

                    # Insert / load temporary span, also store map to entity CID
                    tc = TemporarySpan(char_start=char_start, char_end=char_end, sentence=context)
                    tc.load_id_or_insert(self.session)
                    entity_cids[tc.id] = cid
                    entity_spans[et].append(tc)

        # Generates and persists candidates
        candidate_args = {'split' : split}
        for args in product(*[enumerate(entity_spans[et]) for et in self.entity_types]):

            # TODO: Make this work for higher-order relations
            if self.arity == 2:
                ai, a = args[0]
                bi, b = args[1]

                # Check for self-joins, "nested" joins (joins from span to its subspan), and flipped duplicate
                # "symmetric" relations
                if not self.self_relations and a == b:
                    continue
                elif not self.nested_relations and (a in b or b in a):
                    continue
                elif not self.symmetric_relations and ai > bi:
                    continue

            # Assemble candidate arguments: both the span id and its entity cid.
            for i, arg_name in enumerate(self.candidate_class.__argnames__):
                candidate_args[arg_name + '_id'] = args[i][1].id
                candidate_args[arg_name + '_cid'] = entity_cids[args[i][1].id]

            # Checking for existence: skip candidates already persisted with identical args.
            if check_for_existing:
                q = select([self.candidate_class.id])
                for key, value in iteritems(candidate_args):
                    q = q.where(getattr(self.candidate_class, key) == value)
                candidate_id = self.session.execute(q).first()
                if candidate_id is not None:
                    continue

            # Add Candidate to session
            yield self.candidate_class(**candidate_args)
| snorkel/candidates.py | 13,432 | An operator to extract Candidate objects from a Context.
:param candidate_class: The type of relation to extract, defined using
:func:`snorkel.models.candidate_subclass <snorkel.models.candidate.candidate_subclass>`
:param cspaces: one or list of :class:`CandidateSpace` objects, one for each relation argument. Defines space of
Contexts to consider
:param matchers: one or list of :class:`snorkel.matchers.Matcher` objects, one for each relation argument. Only tuples of
Contexts for which each element is accepted by the corresponding Matcher will be returned as Candidates
:param self_relations: Boolean indicating whether to extract Candidates that relate the same context.
Only applies to binary relations. Default is False.
:param nested_relations: Boolean indicating whether to extract Candidates that relate one Context with another
that contains it. Only applies to binary relations. Default is False.
:param symmetric_relations: Boolean indicating whether to extract symmetric Candidates, i.e., rel(A,B) and rel(B,A),
where A and B are Contexts. Only applies to binary relations. Default is False.
Defines the **space** of candidate objects
Calling _apply(x)_ given an object _x_ returns a generator over candidates in _x_.
Defines the space of candidates as all n-grams (n <= n_max) in a Sentence _x_,
indexing by **character offset**.
UDFRunner for PretaggedCandidateExtractorUDF
An extractor for Sentences with entities pre-tagged, and stored in the entity_types and entity_cids
fields.
Extract Candidates from a Context
Note: isinstance is the way to check types -- not type(x) in [...]! Check that arity is same Make sure the candidate spaces are different so generators aren't expended! Preallocates internal data structures Generate TemporaryContexts that are children of the context using the candidate_space and filtered by the Matcher Generates and persists candidates TODO: Make this work for higher-order relations Check for self-joins, "nested" joins (joins from span to its subspan), and flipped duplicate "symmetric" relations. For symmetric relations, if mentions are of the same type, maintain their order in the sentence. Keep track of extracted Assemble candidate arguments Checking for existence Add Candidate to session These are the character offset--**relative to the sentence start**--for each _token_ Loop over all n-grams in **reverse** order (to facilitate longest-match semantics) Check for split NOTE: For simplicity, we only split single tokens right now! For now, just handle Sentences Do a first pass to collect all mentions by entity type / cid Form entity Spans Insert / load temporary span, also store map to entity CID Generates and persists candidates TODO: Make this work for higher-order relations Check for self-joins, "nested" joins (joins from span to its subspan), and flipped duplicate "symmetric" relations Assemble candidate arguments Checking for existence Add Candidate to session | 3,074 | en | 0.821501 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 15 10:38:14 2021
@author: kunal001
"""
import logging
# Module-level logger, per stdlib convention.
logger = logging.getLogger(__name__)
class CreateDatabase:
    """Flattens a hierarchical circuit graph into a dict of per-module entries.

    Each entry holds the (sub)graph, its external ports, per-port edge weights,
    and user constraints parsed via `const_parse`.
    """
    def __init__(self,hier_graph,const_parse):
        # name -> {"graph", "ports", "ports_weight", "const"} accumulated below.
        self.hier_graph_dict = {}
        self.const_parse = const_parse
        self.G = hier_graph

    def read_inputs(self,name:str):
        """
        read circuit graphs

        :param name: top-level module name used as the dict key and for constraint lookup
        :return: the accumulated hier_graph_dict (top module plus all sub-hierarchies)
        """
        top_ports = []
        ports_weight = {}
        for node, attr in self.G.nodes(data=True):
            if 'source' in attr['inst_type']:
                # Source instances expose their connected nets as top-level ports.
                for source_nets in self.G.neighbors(node):
                    top_ports.append(source_nets)
            elif 'net_type' in attr:
                if attr['net_type'] == "external":
                    top_ports.append(node)
                    ports_weight[node]=[]
                    # Record the edge weight toward every neighbor of the port net.
                    for nbr in list(self.G.neighbors(node)):
                        ports_weight[node].append(self.G.get_edge_data(node, nbr)['weight'])

        logger.debug("Merging nested graph hierarchies to dictionary: ")
        const = self.const_parse.read_user_const(name)
        self.hier_graph_dict[name] = {
            "graph": self.G,
            "ports": top_ports,
            "ports_weight": ports_weight,
            "const": const
        }
        self._traverse_hier_in_graph(self.G)
        logger.debug(f"read graph {self.hier_graph_dict}")
        return self.hier_graph_dict

    def _traverse_hier_in_graph(self,G):
        """
        Recusively reads all hierachies in the graph and convert them to dictionary
        """
        for node, attr in G.nodes(data=True):
            if "sub_graph" in attr and attr["sub_graph"]:
                logger.debug(f'Traversing sub graph: {node} {attr["inst_type"]} {attr["ports"]}')
                sub_ports = []
                ports_weight = {}
                for sub_node, sub_attr in attr["sub_graph"].nodes(data=True):
                    if 'net_type' in sub_attr:
                        if sub_attr['net_type'] == "external":
                            sub_ports.append(sub_node)
                            ports_weight[sub_node] = []
                            for nbr in list(attr["sub_graph"].neighbors(sub_node)):
                                ports_weight[sub_node].append(attr["sub_graph"].get_edge_data(sub_node, nbr)['weight'])

                logger.debug(f'external ports: {sub_ports}, {attr["connection"]}, {ports_weight}')
                const = self.const_parse.read_user_const(attr["inst_type"])
                # Keyed by inst_type: identical sub-module instances share one entry.
                self.hier_graph_dict[attr["inst_type"]] = {
                    "graph": attr["sub_graph"],
                    "ports": sub_ports,
                    "const": const,
                    "ports_weight": ports_weight
                }
                self._traverse_hier_in_graph(attr["sub_graph"])
| align/compiler/create_database.py | 2,918 | Recusively reads all hierachies in the graph and convert them to dictionary
read circuit graphs
Created on Fri Jan 15 10:38:14 2021
@author: kunal001
!/usr/bin/env python3 -*- coding: utf-8 -*- | 195 | en | 0.749925 |
def main():
    """Read textfile.txt line by line and print each line.

    BUG FIX: the original opened the file and never closed it; a `with` block
    now guarantees the handle is released even if an exception occurs.
    (Write/append examples that were commented out have been removed.)
    """
    with open("textfile.txt", "r") as myfile:
        if myfile.mode == 'r':
            filelines = myfile.readlines()
            for fileline in filelines:
                # Each line keeps its trailing newline, so print() double-spaces output
                # exactly as the original did.
                print(fileline)


if __name__ == "__main__":
    main()
| Chapter03/file_start.py | 772 | Open a file for writing and create it if it doesn't exist myfile = open("textfile.txt", "w+") Open the file for appending text to the end myfile = open("textfile.txt", "a+") write some lines of data to the file for i in range(10): myfile.write("This is some new text\n") close the file when done myfile.close() Open the file back up and read the contents contents = myfile.read() print(contents) | 402 | en | 0.800717 |
from robotMap import XboxMap
from components.Actuators.LowLevel.shooterMotors import ShooterMotors
from components.Actuators.LowLevel.intakeMotor import IntakeMotor
from components.Actuators.HighLevel.hopperMotor import HopperMotor
from utils.DirectionEnums import Direction
from enum import Enum, auto
from magicbot import tunable
import logging as log
class Type(Enum):
    """The two feeder elements that can be driven: the intake and the hopper."""
    # Explicit values match what auto() assigned (1, 2).
    kIntake = 1
    kHopper = 2
class FeederMap:
    """Simple map that holds the logic for running elements of the feeder."""
    compatString = ["doof", "teapot"]

    # Components injected by the robot framework via these annotations.
    shooterMotors: ShooterMotors
    intakeMotor: IntakeMotor
    hopperMotor: HopperMotor
    xboxMap: XboxMap

    # Dashboard-tunable motor speeds.
    loaderMotorSpeed = tunable(.2)
    intakeMotorSpeed = tunable(.5)

    def on_enable(self):
        pass
        # log.setLevel(logging.DEBUG)

    def run(self, loaderFunc):
        """Called when execution of a feeder element is desired.

        :param loaderFunc: which element to drive (Type.kIntake or Type.kHopper)

        BUG FIX: every log.debug below previously passed the trigger value as a
        bare extra argument with no %s placeholder, which makes logging raise a
        string-formatting error when the record is emitted. Lazy %s args added.
        NOTE(review): the conditions read the *drive* triggers but the messages
        log the *mech* triggers -- confirm which controller is intended.
        """
        if loaderFunc == Type.kIntake:
            if self.xboxMap.getDriveLeftTrig() > 0 and self.xboxMap.getDriveRightTrig() == 0:
                self.intakeMotor.runIntake(self.intakeMotorSpeed, Direction.kForwards)
                log.debug("right trig intake %s", self.xboxMap.getMechRightTrig())
            elif self.xboxMap.getDriveRightTrig() > 0 and self.xboxMap.getDriveLeftTrig() == 0:
                self.intakeMotor.runIntake(self.intakeMotorSpeed, Direction.kBackwards)
                log.debug("left trig intake %s", self.xboxMap.getMechLeftTrig())
            else:
                self.intakeMotor.runIntake(0, Direction.kForwards)

        if loaderFunc == Type.kHopper:
            if self.xboxMap.getDriveLeftTrig() > 0 and self.xboxMap.getDriveRightTrig() == 0:
                self.hopperMotor.runHopperMotorForeside(self.loaderMotorSpeed, Direction.kForwards)
                self.hopperMotor.runHopperMotorBackside(self.loaderMotorSpeed, Direction.kForwards)
                log.debug("right trig manual %s", self.xboxMap.getMechRightTrig())
            elif self.xboxMap.getDriveRightTrig() > 0 and self.xboxMap.getDriveLeftTrig() == 0:
                self.hopperMotor.runHopperMotorForeside(self.loaderMotorSpeed, Direction.kBackwards)
                self.hopperMotor.runHopperMotorBackside(self.loaderMotorSpeed, Direction.kBackwards)
                log.debug("left trig manual %s", self.xboxMap.getMechLeftTrig())
            else:
                self.hopperMotor.stopHopperMotorBackside()
                self.hopperMotor.stopHopperMotorForeside()

    def execute(self):
        pass
| components/Actuators/HighLevel/feederMap.py | 2,584 | Simple map that holds the logic for running elements of the feeder.
Enumeration for the two types within the feeder.
Called when execution of a feeder element is desired.
log.setLevel(logging.DEBUG) | 200 | en | 0.875602 |
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
# Shared RDF namespace for all CASE vocabulary terms emitted by this API.
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
    """Wraps an rdflib Graph and provides factory helpers for building a CASE document."""

    def __init__(self, graph=None):
        """
        Initializes the CASE document.

        Args:
            graph: The graph to populate (instance of rdflib.Graph)
                If not provided, a graph in memory will be used.
        """
        if not graph:
            graph = rdflib.Graph()
        graph.namespace_manager.bind('case', CASE)
        self.graph = graph

    def _sanitize_triple(self, triple):
        """Sanitizes the triple so it contains pure rdflib terms.

        Wrapped Node objects are unwrapped; plain Python values become Literals
        and bare property names are resolved in the CASE namespace.
        """
        s, p, o = triple
        if isinstance(s, Node):
            s = s._node
        if isinstance(o, Node):
            o = o._node
        elif o is not None and not isinstance(o, rdflib.term.Node):
            o = rdflib.Literal(o)
        if p is not None and not isinstance(p, rdflib.term.Node):
            p = CASE[p]
        return s, p, o

    def __iter__(self):
        """Wrapper for iterating over all triples in the graph"""
        return iter(self.graph)

    def __contains__(self, triple):
        """Wrapper for checking if triple is contained in the graph."""
        return self._sanitize_triple(triple) in self.graph

    def triples(self, triple):
        """Generator over the triple store in graph."""
        return self.graph.triples(self._sanitize_triple(triple))

    def _json_ld_context(self):
        # Build a JSON-LD @context from the graph's bound prefixes, skipping the
        # implicit XML namespace; CASE serves as the default vocabulary.
        context = dict(
            (pfx, str(ns))
            for (pfx, ns) in self.graph.namespaces() if pfx and
            str(ns) != u"http://www.w3.org/XML/1998/namespace")
        context['@vocab'] = str(CASE)
        return context

    # Manually specify properties to help inforce both properties are supplied.
    def create_hash(self, hashMethod, hashValue):
        # Hashes are blank nodes: they are only ever referenced from their owner.
        return self.create_Node(
            CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)

    # We are going to default to json-ld instead of rdflib's default of xml.
    def serialize(self, format='json-ld', **kwargs):
        """Serializes the document's graph to a destination.
        (Follows same arguments as rdflib.Graph().serialize())"""
        if format == 'json-ld':
            if 'context' not in kwargs:
                kwargs['context'] = self._json_ld_context()
            if 'auto_compact' not in kwargs:
                kwargs['auto_compact'] = True
        return self.graph.serialize(format=format, **kwargs)

    # def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
    #     """
    #     Serializes the document's graph to append to a destination file.
    #     """
    #     if format == 'json-ld':
    #         if 'context' not in kwargs:
    #             kwargs['context'] = self._json_ld_context()
    #         if 'auto_compact' not in kwargs:
    #             kwargs['auto_compact'] = True
    #     graph = self.graph.serialize(format=format, **kwargs)
    #     with open(destination, "a") as fin:
    #         fin.write(graph)
    #         fin.close()

    #====================================================
    #-- CREATE A CASE OBJECT

    def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
        # Generic factory; prefer the typed helpers below for CASE classes.
        return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)

    def create_CoreObject(self, _type=None, **kwargs):
        """
        Creates and returns a CoreObject.
        """
        return CoreObject(self.graph, rdf_type=_type, **kwargs)

    def create_ContextObject(self, _type=None, **kwargs):
        """
        Creates and returns a Context.
        This class may not have PropertyBundles.
        """
        return ContextObject(self.graph, rdf_type=_type, **kwargs)

    def create_SubObject(self, _type=None, **kwargs):
        """
        Creates and returns a Sub.
        This class is for children of one of the above CASE classes.
        This class may not have PropertyBundles.
        """
        return SubObject(self.graph, rdf_type=_type, **kwargs)

    def create_DuckObject(self, _type=None, **kwargs):
        """
        Creates and returns a Duck.
        These lonely Ducks have no parents and are fully duck-typed.
        This class may not have PropertyBundles.
        """
        return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
    """Implements a generic node in the graph."""

    # Default RDF type applied when the caller supplies none; overridden by subclasses.
    RDF_TYPE = None

    # Namespace to use when adding properties that are not of type rdflib.URIRef.
    NAMESPACE = CASE

    def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
        """Initializes and adds a node to the graph.

        NOTE: At least the type or a property must be supplied for the Node
        to exist in the graph.

        Args:
            graph: The graph to add this node to. (instance of rdflib.Graph)
            uri: Optional string to set th URI to. (If not provided a UUID will be generated.)
            bnode: Whether to create a blank node or a uri reference.
            rdf_type: The RDF type to set this node to.
            properties: Extra properties to add to this node.
                (More properties can be set after initialization by using the add() function.)
        """
        super(Node, self).__init__()
        if uri:
            self.uri = uri
        else:
            self.uri = str(uuid.uuid4())
        if bnode:
            self._node = rdflib.BNode(self.uri)
        else:
            self._node = rdflib.URIRef(self.uri)
        self._graph = graph
        if not rdf_type:
            rdf_type = self.RDF_TYPE
        # Add namespace prefix to non URIRef to allow abstraction from rdflib.
        if not isinstance(rdf_type, rdflib.term.Node):
            rdf_type = self.NAMESPACE[rdf_type]
        self.add(RDF.type, rdf_type)
        for key, value in iter(kwargs.items()):
            self.add(key, value)

    def add(self, property, value):
        """Adds a property and its value to the node."""
        # type: (object, object) -> object
        # Ignore setting properties with a None value.
        if value is None:
            return

        # Lists and other iterables as values are the equivelent of having multiple properties.
        # NOTE: Lists obviously lose their order.
        # TODO: Add support for ordered lists.
        if isinstance(value, (list, tuple, set)):
            for item in value:
                self.add(property, item)
            return

        if isinstance(value, Node):
            value = value._node
        # Convert basic python datatypes to literals.
        elif not isinstance(value, rdflib.term.Node):
            value = rdflib.Literal(value)

        # Automatically convert non-node properties to URIRef using default prefix.
        if not isinstance(property, rdflib.term.Node):
            property = self.NAMESPACE[property]

        self._graph.add((self._node, property, value))
class CoreObject(Node):

    RDF_TYPE = CASE.CoreObject

    def __init__(self, graph, rdf_type=None, **kwargs):
        """Initializes and adds a node to the graph.

        NOTE: At least the type or a property must be supplied for the Node
        to exist in the graph.

        Args:
            graph: The graph to add this node to. (instance of rdflib.Graph)
            rdf_type: The RDF type to set this node to.
            properties: Extra properties to add to this node.
                (More properties can be set after initialization by using the add() function.)
        """
        self.type = rdf_type
        super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
        self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
        # Placeholder until create_PropertyBundle() stores the last-created bundle here.
        self.pb = ""

    def create_PropertyBundle(self, prop_type=None, **kwargs):
        """Convenience function for adding property bundles to this Trace.

        Args:
            type: The @type of property bundle (can be of type rdflib.URIRef or string).
            properties: Properties to add to the created property bundle.

        Returns:
            The property bundle created (instance of PropertyBundle).
        """
        self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
        self.add(CASE.propertyBundle, self.pb)
        return self.pb
class PropertyBundle(Node):
    RDF_TYPE = CASE.PropertyBundle

    def __init__(self, graph, rdf_type=None, **kwargs):
        """Initializes and adds a node to the graph.

        NOTE: At least the type or a property must be supplied for the Node
        to exist in the graph.

        Args:
            graph: The graph to add this node to. (instance of rdflib.Graph)
            rdf_type: The RDF type to set this node to.
            properties: Extra properties to add to this node.
                (More properties can be set after initialization by using the add() function.)
        """
        self.type = rdf_type
        # Property bundles should be blank nodes because we should be referencing them
        # through CoreObjects.
        self.propObj = kwargs
        super(PropertyBundle, self).__init__(
            graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
    # Default RDF type applied to instances of this class.
    RDF_TYPE = CASE.ContextObject

    def __init__(self, graph, rdf_type=None, **kwargs):
        """Initializes and adds a node to the graph.

        NOTE: At least the type must be supplied for the Node
        to exist in the graph.

        Args:
            graph: The graph to add this node to. (instance of rdflib.Graph)
            rdf_type: The RDF type to set this node to.
            kwargs: Extra properties to add to this node.
                (More properties can be set after initialization by using the add() function.)
        """
        self.type = rdf_type
        super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
        # Timestamp the object's creation (UTC).
        self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
    # Default RDF type applied to instances of this class.
    RDF_TYPE = CASE.SubObject

    def __init__(self, graph, rdf_type=None, **kwargs):
        """Initializes and adds a node to the graph.

        NOTE: At least the type must be supplied for the Node
        to exist in the graph.

        Args:
            graph: The graph to add this node to. (instance of rdflib.Graph)
            rdf_type: The RDF type to set this node to.
            kwargs: Extra properties to add to this node.
                (More properties can be set after initialization by using the add() function.)
        """
        self.type = rdf_type
        super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
        # Timestamp the object's creation (UTC).
        self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
    # Default RDF type applied to instances of this class.
    RDF_TYPE = CASE.DuckObject

    def __init__(self, graph, rdf_type=None, **kwargs):
        """Initializes and adds a node to the graph.

        NOTE: At least the type must be supplied for the Node
        to exist in the graph.

        Args:
            graph: The graph to add this node to. (instance of rdflib.Graph)
            rdf_type: The RDF type to set this node to.
            kwargs: Extra properties to add to this node.
                (More properties can be set after initialization by using the add() function.)
        """
        self.type = rdf_type
        super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
        # Timestamp the object's creation (UTC).
        self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
| example/case_example.py | 11,982 | Implements a generic node in the graph.
Wrapper for checking if triple is contained in the graph.
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
uri: Optional string to set the URI to. (If not provided a UUID will be generated.)
bnode: Whether to create a blank node or a uri reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
Wrapper for iterating over all triples in the graph
Sanitizes the triple so it contains pure rdflib terms.
Adds a property and its value to the node.
Creates and returns a Context.
This class may not have PropertyBundles.
Creates and returns a CoreObject.
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())
Generator over the triple store in graph.
NOTICE This software was produced for the U.S. Government under contract SB-1341-14-CQ-0010, and is subject to the Rights in Data-General Clause 52.227-14, Alt. IV (DEC 2007) (c) 2018 The MITRE Corporation. All Rights Reserved.==================================================== CASE API!/usr/bin/env python====================================================-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT Manually specify properties to help inforce both properties are supplied. We are going to default to json-ld instead of rdflib's default of xml. def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs): """ Serializes the document's graph to append to a destination file. """ if format == 'json-ld': if 'context' not in kwargs: kwargs['context'] = self._json_ld_context() if 'auto_compact' not in kwargs: kwargs['auto_compact'] = True graph = self.graph.serialize(format=format, **kwargs) with open(destination, "a") as fin: fin.write(graph) fin.close()====================================================-- CREATE A CASE OBJECT====================================================-- CASE OBJECT CLASSES Namespace to use when adding properties that are not of type rdflib.URIRef. Add namespace prefix to non URIRef to allow abstraction from rdflib. type: (object, object) -> object Ignore setting properties with a None value. Lists and other iterables as values are the equivelent of having multiple properties. NOTE: Lists obviously lose their order. TODO: Add support for ordered lists. Convert basic python datatypes to literals. Automatically convert non-node properties to URIRef using default prefix. Property bundles should be blank nodes because we should be referencing them through CoreObjects. | 5,537 | en | 0.754369 |
#
# Copyright 2018 PyWren Team
# (C) Copyright IBM Corp. 2020
# (C) Copyright Cloudlab URV 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import pickle
import logging
from lithops import utils
from lithops.job.partitioner import create_partitions
from lithops.utils import is_object_processing_function, sizeof_fmt
from lithops.storage.utils import create_func_key, create_agg_data_key
from lithops.job.serialize import SerializeIndependent, create_module_data
from lithops.constants import MAX_AGG_DATA_SIZE, JOBS_PREFIX, LOCALHOST,\
SERVERLESS, STANDALONE, LITHOPS_TEMP_DIR
from types import SimpleNamespace
import os
import hashlib
import inspect
from lithops.utils import b64str_to_bytes
logger = logging.getLogger(__name__)
def create_map_job(config, internal_storage, executor_id, job_id, map_function,
                   iterdata, runtime_meta, runtime_memory, extra_env,
                   include_modules, exclude_modules, execution_timeout,
                   extra_args=None, obj_chunk_size=None, obj_chunk_number=None,
                   invoke_pool_threads=128):
    """
    Wrapper to create a map job. It integrates COS logic to process objects.
    """
    job_metadata = {'host_job_create_tstamp': time.time()}
    checked_iterdata = utils.verify_args(map_function, iterdata, extra_args)

    if config['lithops'].get('rabbitmq_monitor', False):
        # Provision the RabbitMQ resources used for job monitoring.
        amqp_url = config['rabbitmq'].get('amqp_url')
        utils.create_rabbitmq_resources(amqp_url, executor_id, job_id)

    # Object processing functionality: partition object-storage inputs
    # according to chunk_size / chunk_number when the map function expects it.
    parts_per_object = None
    if is_object_processing_function(map_function):
        partitions_start = time.time()
        logger.debug('ExecutorID {} | JobID {} - Calling map on partitions '
                     'from object storage flow'.format(executor_id, job_id))
        checked_iterdata, parts_per_object = create_partitions(
            config, internal_storage, checked_iterdata,
            obj_chunk_size, obj_chunk_number)
        job_metadata['host_job_create_partitions_time'] = round(time.time() - partitions_start, 6)

    job = _create_job(config=config,
                      internal_storage=internal_storage,
                      executor_id=executor_id,
                      job_id=job_id,
                      func=map_function,
                      iterdata=checked_iterdata,
                      runtime_meta=runtime_meta,
                      runtime_memory=runtime_memory,
                      extra_env=extra_env,
                      include_modules=include_modules,
                      exclude_modules=exclude_modules,
                      execution_timeout=execution_timeout,
                      host_job_meta=job_metadata,
                      invoke_pool_threads=invoke_pool_threads)

    if parts_per_object:
        # Remember the per-object partition counts for reducer_one_per_object.
        job.parts_per_object = parts_per_object

    return job
def create_reduce_job(config, internal_storage, executor_id, reduce_job_id,
                      reduce_function, map_job, map_futures, runtime_meta,
                      runtime_memory, reducer_one_per_object, extra_env,
                      include_modules, exclude_modules, execution_timeout=None):
    """
    Wrapper to create a reduce job. Apply a function across all map futures.
    """
    job_metadata = {'host_job_create_tstamp': time.time()}

    if hasattr(map_job, 'parts_per_object') and reducer_one_per_object:
        # One reducer per original object: slice the futures per object.
        iterdata = []
        offset = 0
        for n_parts in map_job.parts_per_object:
            iterdata.append((map_futures[offset:offset + n_parts],))
            offset += n_parts
    else:
        # Single reducer over every map future.
        iterdata = [(map_futures,)]

    env = {'__LITHOPS_REDUCE_JOB': True}
    if extra_env is not None:
        merged = extra_env.copy()
        merged.update(env)
        env = merged

    iterdata = utils.verify_args(reduce_function, iterdata, None)

    return _create_job(config=config,
                       internal_storage=internal_storage,
                       executor_id=executor_id,
                       job_id=reduce_job_id,
                       func=reduce_function,
                       iterdata=iterdata,
                       runtime_meta=runtime_meta,
                       runtime_memory=runtime_memory,
                       extra_env=env,
                       include_modules=include_modules,
                       exclude_modules=exclude_modules,
                       execution_timeout=execution_timeout,
                       host_job_meta=job_metadata)
def _store_func_and_modules(func_key, func_str, module_data):
    """
    Stores function and modules in a temporary directory to be used later
    in an optimized runtime.

    :param func_key: storage key of the function, reused as the local sub-path
        under LITHOPS_TEMP_DIR
    :param func_str: serialized function bytes
    :param module_data: mapping of module filename -> base64-encoded content
    """
    # Save the serialized function under LITHOPS_TEMP_DIR/<func_key>.
    func_path = '/'.join([LITHOPS_TEMP_DIR, func_key])
    os.makedirs(os.path.dirname(func_path), exist_ok=True)
    with open(func_path, "wb") as f:
        f.write(func_str)

    if module_data:
        logger.debug("Writing Function dependencies to local disk")
        modules_path = '/'.join([os.path.dirname(func_path), 'modules'])
        for m_filename, m_data in module_data.items():
            m_path = os.path.dirname(m_filename)
            # Strip a leading '/' so absolute module paths land inside modules_path.
            if len(m_path) > 0 and m_path[0] == "/":
                m_path = m_path[1:]
            to_make = os.path.join(modules_path, m_path)
            # exist_ok replaces the previous try/except on errno 17 (EEXIST).
            os.makedirs(to_make, exist_ok=True)
            full_filename = os.path.join(to_make, os.path.basename(m_filename))
            with open(full_filename, 'wb') as fid:
                fid.write(b64str_to_bytes(m_data))

    logger.debug("Finished storing function and modules")
def _create_job(config, internal_storage, executor_id, job_id, func,
                iterdata, runtime_meta, runtime_memory, extra_env,
                include_modules, exclude_modules, execution_timeout,
                host_job_meta, invoke_pool_threads=128):
    """
    Builds a job description and uploads the function, modules and data.

    :param func: the function to map over the data
    :param iterdata: An iterable of input data
    :param extra_env: Additional environment variables for CF environment. Default None.
    :param include_modules: Explicitly include these modules in the pickled dependencies.
    :param exclude_modules: Explicitly keep these modules from pickled dependencies.
    :param execution_timeout: Per-call timeout; falls back to the config value.
    :param host_job_meta: dict of host-side timing metadata, updated in place.
    :param invoke_pool_threads: Number of threads to use to invoke.
    :return: A job description (SimpleNamespace) consumed by the invoker.
    """
    ext_env = {} if extra_env is None else extra_env.copy()
    if ext_env:
        ext_env = utils.convert_bools_to_string(ext_env)
        logger.debug("Extra environment vars {}".format(ext_env))

    job = SimpleNamespace()
    job.executor_id = executor_id
    job.job_id = job_id
    job.extra_env = ext_env
    job.execution_timeout = execution_timeout or config['lithops']['execution_timeout']
    job.function_name = func.__name__
    job.total_calls = len(iterdata)

    mode = config['lithops']['mode']

    if mode == SERVERLESS:
        job.invoke_pool_threads = invoke_pool_threads
        job.runtime_memory = runtime_memory or config['serverless']['runtime_memory']
        job.runtime_timeout = config['serverless']['runtime_timeout']
        # Keep a small margin so the call times out before the runtime does.
        if job.execution_timeout >= job.runtime_timeout:
            job.execution_timeout = job.runtime_timeout - 5

    elif mode == STANDALONE:
        job.runtime_memory = None
        runtime_timeout = config['standalone']['hard_dismantle_timeout']
        if job.execution_timeout >= runtime_timeout:
            job.execution_timeout = runtime_timeout - 10

    elif mode == LOCALHOST:
        job.runtime_memory = None
        job.runtime_timeout = execution_timeout

    # Merge include/exclude module lists coming from config and arguments.
    # NOTE(review): inc_modules == None appears to mean "no explicit include
    # list" for SerializeIndependent -- confirm before simplifying this logic.
    exclude_modules_cfg = config['lithops'].get('exclude_modules', [])
    include_modules_cfg = config['lithops'].get('include_modules', [])
    exc_modules = set()
    inc_modules = set()
    if exclude_modules_cfg:
        exc_modules.update(exclude_modules_cfg)
    if exclude_modules:
        exc_modules.update(exclude_modules)
    if include_modules_cfg is not None:
        inc_modules.update(include_modules_cfg)
    if include_modules_cfg is None and not include_modules:
        inc_modules = None
    if include_modules is not None and include_modules:
        inc_modules.update(include_modules)
    if include_modules is None:
        inc_modules = None

    logger.debug('ExecutorID {} | JobID {} - Serializing function and data'.format(executor_id, job_id))
    job_serialize_start = time.time()
    serializer = SerializeIndependent(runtime_meta['preinstalls'])
    func_and_data_ser, mod_paths = serializer([func] + iterdata, inc_modules, exc_modules)
    data_strs = func_and_data_ser[1:]
    data_size_bytes = sum(len(x) for x in data_strs)
    module_data = create_module_data(mod_paths)
    func_str = func_and_data_ser[0]
    func_module_str = pickle.dumps({'func': func_str, 'module_data': module_data}, -1)
    func_module_size_bytes = len(func_module_str)
    total_size = utils.sizeof_fmt(data_size_bytes + func_module_size_bytes)
    host_job_meta['host_job_serialize_time'] = round(time.time() - job_serialize_start, 6)
    host_job_meta['data_size_bytes'] = data_size_bytes
    host_job_meta['func_module_size_bytes'] = func_module_size_bytes

    # Abort early if the aggregated data exceeds the configured limit (MiB).
    if 'data_limit' in config['lithops']:
        data_limit = config['lithops']['data_limit']
    else:
        data_limit = MAX_AGG_DATA_SIZE

    if data_limit and data_size_bytes > data_limit * 1024 ** 2:
        log_msg = ('ExecutorID {} | JobID {} - Total data exceeded maximum size '
                   'of {}'.format(executor_id, job_id, sizeof_fmt(data_limit * 1024 ** 2)))
        raise Exception(log_msg)

    logger.info('ExecutorID {} | JobID {} - Uploading function and data '
                '- Total: {}'.format(executor_id, job_id, total_size))

    # Upload data
    data_key = create_agg_data_key(JOBS_PREFIX, executor_id, job_id)
    job.data_key = data_key
    data_bytes, data_ranges = utils.agg_data(data_strs)
    job.data_ranges = data_ranges
    data_upload_start = time.time()
    internal_storage.put_data(data_key, data_bytes)
    data_upload_end = time.time()
    host_job_meta['host_data_upload_time'] = round(data_upload_end - data_upload_start, 6)

    func_upload_start = time.time()
    # Upload function and modules
    if config[mode].get('customized_runtime'):
        # Prepare function and modules locally to store in the runtime image later.
        function_file = func.__code__.co_filename
        # Fix: close the file handle after hashing (was open(...).read(), which leaked it).
        with open(function_file, 'rb') as f_obj:
            function_hash = hashlib.md5(f_obj.read()).hexdigest()[:16]
        mod_hash = hashlib.md5(repr(sorted(mod_paths)).encode('utf-8')).hexdigest()[:16]
        uuid = f'{function_hash}{mod_hash}'
        func_key = create_func_key(JOBS_PREFIX, uuid, "")
        _store_func_and_modules(func_key, func_str, module_data)
        job.ext_runtime_uuid = uuid
    else:
        func_key = create_func_key(JOBS_PREFIX, executor_id, job_id)
        internal_storage.put_func(func_key, func_module_str)
    job.func_key = func_key
    func_upload_end = time.time()
    host_job_meta['host_func_upload_time'] = round(func_upload_end - func_upload_start, 6)
    host_job_meta['host_job_created_time'] = round(time.time() - host_job_meta['host_job_create_tstamp'], 6)

    job.metadata = host_job_meta

    return job
| lithops/job/job.py | 12,417 | :param func: the function to map over the data
:param iterdata: An iterable of input data
:param extra_env: Additional environment variables for CF environment. Default None.
:param extra_meta: Additional metadata to pass to CF. Default None.
:param remote_invocation: Enable remote invocation. Default False.
:param invoke_pool_threads: Number of threads to use to invoke.
:param data_all_as_one: upload the data as a single object. Default True
:param overwrite_invoke_args: Overwrite other args. Mainly used for testing.
:param exclude_modules: Explicitly keep these modules from pickled dependencies.
:return: A list with size `len(iterdata)` of futures for each job
:rtype: list of futures.
Wrapper to create a map job. It integrates COS logic to process objects.
Wrapper to create a reduce job. Apply a function across all map futures.
Copyright 2018 PyWren Team (C) Copyright IBM Corp. 2020 (C) Copyright Cloudlab URV 2020 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Object processing functionality Create partitions according chunk_size or chunk_number save function Upload data Upload function and modules Prepare function and modules locally to store in the runtime image later | 1,670 | en | 0.742161 |
import sys
import time
import os
import os.path as osp
import requests
import shutil
import tqdm
import pickle
import numpy as np
import torch
from cogdl.data import Data, Dataset, download_url
from . import register_dataset
def untar(path, fname, deleteTar=True):
    """
    Unpacks the given archive file to the same directory, then (by default)
    deletes the archive file.
    """
    archive = os.path.join(path, fname)
    print('unpacking ' + fname)
    shutil.unpack_archive(archive, path)
    if deleteTar:
        os.remove(archive)
class GTNDataset(Dataset):
    r"""The network datasets "ACM", "DBLP" and "IMDB" from the
    `"Graph Transformer Networks"
    <https://arxiv.org/abs/1911.06455>`_ paper.

    Args:
        root (string): Root directory where the dataset should be saved.
        name (string): The name of the dataset (:obj:`"gtn-acm"`,
            :obj:`"gtn-dblp"`, :obj:`"gtn-imdb"`).
    """

    def __init__(self, root, name):
        self.name = name
        self.url = f'https://github.com/cenyk1230/gtn-data/blob/master/{name}.zip?raw=true'
        super(GTNDataset, self).__init__(root)
        # process() has produced processed_paths[0] by the time we get here.
        self.data = torch.load(self.processed_paths[0])
        self.num_classes = torch.max(self.data.train_target).item() + 1
        self.num_edge = len(self.data.adj)
        self.num_nodes = self.data.x.shape[0]

    @property
    def raw_file_names(self):
        """Raw pickle files expected in ``raw_dir`` after download."""
        return ["edges.pkl", "labels.pkl", "node_features.pkl"]

    @property
    def processed_file_names(self):
        return ["data.pt"]

    def read_gtn_data(self, folder):
        """Builds ``self.data`` (a ``Data`` object) from the raw pickles in *folder*."""
        # Fix: use context managers so the pickle files are closed promptly
        # (the previous bare open(...) calls leaked the file handles).
        with open(osp.join(folder, 'edges.pkl'), 'rb') as f:
            edges = pickle.load(f)
        with open(osp.join(folder, 'labels.pkl'), 'rb') as f:
            labels = pickle.load(f)
        with open(osp.join(folder, 'node_features.pkl'), 'rb') as f:
            node_features = pickle.load(f)

        data = Data()
        data.x = torch.from_numpy(node_features).type(torch.FloatTensor)

        num_nodes = edges[0].shape[0]
        # Derive a node-type id (0, 1 or 2) from which adjacency matrices
        # each node participates in. Exactly 4 edge types are expected.
        node_type = np.zeros((num_nodes), dtype=int)
        assert len(edges) == 4
        assert len(edges[0].nonzero()) == 2
        node_type[edges[0].nonzero()[0]] = 0
        node_type[edges[0].nonzero()[1]] = 1
        node_type[edges[1].nonzero()[0]] = 1
        node_type[edges[1].nonzero()[1]] = 0
        node_type[edges[2].nonzero()[0]] = 0
        node_type[edges[2].nonzero()[1]] = 2
        node_type[edges[3].nonzero()[0]] = 2
        node_type[edges[3].nonzero()[1]] = 0
        # NOTE(review): debug print kept for behavior parity; consider logging.
        print(node_type)
        data.pos = torch.from_numpy(node_type)

        # Concatenate all edge types into a single edge_index tensor.
        edge_list = []
        for edge in edges:
            edge_tmp = torch.from_numpy(np.vstack((edge.nonzero()[0], edge.nonzero()[1]))).type(torch.LongTensor)
            edge_list.append(edge_tmp)
        data.edge_index = torch.cat(edge_list, 1)

        # Per-edge-type (index, value) pairs, plus a final self-loop adjacency.
        A = []
        for edge in edges:
            edge_tmp = torch.from_numpy(np.vstack((edge.nonzero()[0], edge.nonzero()[1]))).type(torch.LongTensor)
            value_tmp = torch.ones(edge_tmp.shape[1]).type(torch.FloatTensor)
            A.append((edge_tmp, value_tmp))
        edge_tmp = torch.stack((torch.arange(0, num_nodes), torch.arange(0, num_nodes))).type(torch.LongTensor)
        value_tmp = torch.ones(num_nodes).type(torch.FloatTensor)
        A.append((edge_tmp, value_tmp))
        data.adj = A

        # labels is [train, valid, test]; each entry is (node, target) rows.
        data.train_node = torch.from_numpy(np.array(labels[0])[:, 0]).type(torch.LongTensor)
        data.train_target = torch.from_numpy(np.array(labels[0])[:, 1]).type(torch.LongTensor)
        data.valid_node = torch.from_numpy(np.array(labels[1])[:, 0]).type(torch.LongTensor)
        data.valid_target = torch.from_numpy(np.array(labels[1])[:, 1]).type(torch.LongTensor)
        data.test_node = torch.from_numpy(np.array(labels[2])[:, 0]).type(torch.LongTensor)
        data.test_target = torch.from_numpy(np.array(labels[2])[:, 1]).type(torch.LongTensor)

        # Dense label vector over all nodes (0 for unlabeled nodes).
        y = np.zeros((num_nodes), dtype=int)
        x_index = torch.cat((data.train_node, data.valid_node, data.test_node))
        y_index = torch.cat((data.train_target, data.valid_target, data.test_target))
        y[x_index.numpy()] = y_index.numpy()
        data.y = torch.from_numpy(y)

        self.data = data

    def get(self, idx):
        # The dataset holds a single graph.
        assert idx == 0
        return self.data

    def apply_to_device(self, device):
        """Moves every tensor of the dataset to *device* in place."""
        self.data.x = self.data.x.to(device)

        self.data.train_node = self.data.train_node.to(device)
        self.data.valid_node = self.data.valid_node.to(device)
        self.data.test_node = self.data.test_node.to(device)

        self.data.train_target = self.data.train_target.to(device)
        self.data.valid_target = self.data.valid_target.to(device)
        self.data.test_target = self.data.test_target.to(device)

        new_adj = []
        for (t1, t2) in self.data.adj:
            new_adj.append((t1.to(device), t2.to(device)))
        self.data.adj = new_adj

    def download(self):
        download_url(self.url, self.raw_dir, name=self.name + '.zip')
        untar(self.raw_dir, self.name + '.zip')

    def process(self):
        self.read_gtn_data(self.raw_dir)
        torch.save(self.data, self.processed_paths[0])

    def __repr__(self):
        return "{}()".format(self.name)
@register_dataset("gtn-acm")
class ACM_GTNDataset(GTNDataset):
def __init__(self):
dataset = "gtn-acm"
path = osp.join(osp.dirname(osp.realpath(__file__)), "../..", "data", dataset)
super(ACM_GTNDataset, self).__init__(path, dataset)
@register_dataset("gtn-dblp")
class DBLP_GTNDataset(GTNDataset):
def __init__(self):
dataset = "gtn-dblp"
path = osp.join(osp.dirname(osp.realpath(__file__)), "../..", "data", dataset)
super(DBLP_GTNDataset, self).__init__(path, dataset)
@register_dataset("gtn-imdb")
class IMDB_GTNDataset(GTNDataset):
def __init__(self):
dataset = "gtn-imdb"
path = osp.join(osp.dirname(osp.realpath(__file__)), "../..", "data", dataset)
super(IMDB_GTNDataset, self).__init__(path, dataset)
| cogdl/datasets/gtn_data.py | 6,101 | The network datasets "ACM", "DBLP" and "IMDB" from the
`"Graph Transformer Networks"
<https://arxiv.org/abs/1911.06455>`_ paper.
Args:
root (string): Root directory where the dataset should be saved.
name (string): The name of the dataset (:obj:`"gtn-acm"`,
:obj:`"gtn-dblp"`, :obj:`"gtn-imdb"`).
Unpacks the given archive file to the same directory, then (by default)
deletes the archive file. | 411 | en | 0.715103 |
#!/usr/bin/env python3
#
# Convert full firmware binary to rwd patch.
# Supported models:
# CR-V 5g (part num: 39990-TLA), tested
# Civic 2016 sedan (part num: 39990-TBA), tested
# Civic 2016 hatchback Australia (part num: 39990-TEA), tested
# Civic 2016 hatchback (part num: 39990-TGG), tested
#
import os
import sys
import argparse
import subprocess
import struct
# Decryption lookup table built from Civic 2016 sedan bin/rwd, also apply to CR-V 5g.
# Maps each encrypted byte value (0-255) to its plaintext byte value;
# main() inverts this mapping to build the encryption table.
default_decrypt_lookup_table = {144: 72, 218: 55, 255: 255, 164: 1, 195: 26, 99: 2, 28: 178, 205: 158, 125: 138, 45: 118, 222: 98, 142: 78, 62: 58, 243: 38, 163: 18, 83: 254, 3: 234, 172: 214, 92: 194, 12: 174, 189: 154, 109: 134, 29: 114, 206: 94, 126: 74, 46: 54, 227: 34, 147: 14, 113: 0, 67: 250, 236: 230, 156: 210, 76: 190, 252: 170, 173: 150, 93: 130, 13: 110, 148: 253, 120: 159, 199: 148, 198: 137, 77: 126, 23: 104, 73: 83, 203: 73, 78: 62, 123: 53, 254: 42, 43: 33, 90: 23, 161: 12, 10: 3, 132: 249, 191: 239, 226: 220, 197: 201, 248: 191, 117: 181, 34: 172, 37: 161, 88: 151, 141: 142, 8: 131, 134: 121, 185: 111, 54: 101, 190: 90, 57: 79, 128: 68, 139: 57, 14: 46, 138: 35, 131: 10, 100: 241, 1: 228, 146: 200, 133: 185, 168: 171, 104: 155, 40: 139, 251: 85, 94: 66, 91: 45, 103: 124, 55: 112, 231: 156, 80: 56, 224: 92, 102: 113, 96: 60, 98: 188, 97: 252, 140: 206, 122: 31, 232: 187, 16: 40, 202: 51, 26: 7, 239: 251, 5: 153, 219: 77, 119: 128, 21: 157, 238: 102, 180: 5, 217: 119, 30: 50, 7: 100, 32: 44, 183: 144, 50: 176, 110: 70, 157: 146, 2: 164, 44: 182, 145: 8, 58: 15, 27: 29, 64: 52, 9: 67, 31: 199, 179: 22, 42: 11, 193: 20, 211: 30, 129: 4, 241: 32, 74: 19, 178: 208, 247: 160, 112: 64, 242: 224, 114: 192, 165: 193, 0: 36, 59: 37, 196: 9, 154: 39, 75: 41, 72: 147, 249: 127, 162: 204, 130: 196, 229: 209, 182: 133, 48: 48, 86: 109, 240: 96, 137: 99, 151: 136, 209: 24, 108: 198, 181: 197, 212: 13, 244: 21, 11: 25, 118: 117, 228: 17, 214: 141, 52: 229, 160: 76, 115: 6, 106: 27, 56: 143, 25: 71, 36: 225, 194: 212, 208: 88, 187: 69, 171: 65, 153: 103, 38: 97, 207: 243, 82: 184, 184: 175, 188: 218, 213: 205, 121: 95, 15: 195, 81: 248, 24: 135, 70: 105, 150: 125, 174: 86, 158: 82, 220: 226, 201: 115, 71: 116, 51: 246, 177: 16, 176: 80, 22: 93, 39: 108, 159: 231, 223: 247, 186: 47, 169: 107, 245: 213, 235: 81, 192: 84, 124: 202, 175: 235, 84: 237, 79: 211, 234: 59, 143: 227, 237: 166, 33: 236, 253: 106, 65: 244, 111: 219, 200: 179, 101: 177, 17: 232, 20: 221, 166: 129, 60: 186, 61: 122, 167: 140, 204: 222, 87: 120, 41: 75, 135: 132, 136: 163, 49: 240, 250: 63, 107: 49, 170: 43, 18: 168, 221: 162, 35: 242, 225: 28, 149: 189, 85: 173, 152: 167, 95: 215, 53: 165, 89: 87, 66: 180, 6: 89, 47: 203, 210: 216, 215: 152, 233: 123, 116: 245, 127: 223, 19: 238, 69: 169, 105: 91, 4: 217, 216: 183, 68: 233, 63: 207, 155: 61, 246: 149, 230: 145}
def checksum_by_sum(fw, start, end):
    """Sum of big-endian unsigned shorts in fw[start:end].

    ``start`` and ``end`` are byte offsets; the span length must be even.
    The result is unmasked; callers truncate with ``& 0xFFFF``.
    """
    s = 0
    # Fix: iterate range(start, end, 2). The previous range(start, end - start, 2)
    # only covered [start, end) when start == 0 (the only way callers used it),
    # so existing call sites are unaffected.
    for i in range(start, end, 2):
        s += struct.unpack('!H', fw[i:i + 2])[0]
    return s
def checksum_by_negative_sum(fw, start, end):
    """Negated sum of big-endian unsigned shorts in fw[start:end].

    ``start`` and ``end`` are byte offsets; the span length must be even.
    The result is unmasked; callers truncate with ``& 0xFFFF``.
    """
    s = 0
    # Fix: iterate range(start, end, 2). The previous range(start, end - start, 2)
    # only covered [start, end) when start == 0 (the only way callers used it),
    # so existing call sites are unaffected.
    for i in range(start, end, 2):
        s += -struct.unpack('!H', fw[i:i + 2])[0]
    return s
# Checksum algorithms, referenced by index from car_models 'checksum-offsets'.
checksum_funcs = [checksum_by_sum, checksum_by_negative_sum]
# EPS part number -> flashing parameters. Schema per entry:
#   can-address:        CAN address passed to rwd-builder.py
#   supported-versions: firmware versions this patch may be applied to
#   security-key:       security key per supported version (parallel list)
#   encryption-key:     encryption key passed to rwd-builder.py
#   start-address:      offset of the patchable region in the full binary
#   data-size:          length of the patchable region
#   checksum-offsets:   (checksum func idx, offset-within-region) pairs
car_models = {
    '39990-TLA-A030': {  # CR-V thanks to joe1
        'can-address': '0x18DA30F1',
        'supported-versions': ['39990-TLA-A030', '39990-TLA-A040', '39990-TLA,A030', '39990-TLA,A040'],
        'security-key': ['0x011101121120', '0x011101121120', '0x011101121120', '0x011101121120'],
        'encryption-key': '0x010203',
        'start-address': 0x4000,
        'data-size': 0x6c000,
        # (checksum func idx, offset)
        'checksum-offsets': [(0, 0x6bf80), (1, 0x6bffe)]  # original bin checksums are 0x419b at offset 0x6FF80 and 0x24ef at 0x6FFFE, but since we start the bin from 0x4000 after bootloader, we offset the checksum accordingly
    },
    '39990-TBA-A030': {  # civic sedan thanks to mystery leaker
        'can-address': '0x18DA30F1',
        'supported-versions': ['39990-TBA-A000', '39990-TBA-A010', '39990-TBA-A020', '39990-TBA-A030'],
        'security-key': ['0x011100121020', '0x011100121020', '0x011101121120', '0x011101121120'],
        'encryption-key': '0x010203',
        'start-address': 0x4000,
        'data-size': 0x4c000,
        # (checksum func idx, offset)
        'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]  # original bin checksums are 0xDD23 at offset 0x4FF80 and 0xEDDF at 0x4FFFE, but since we start the bin from 0x4000 after bootloader, we offset the checksum accordingly
    },
    '39990-TEA-T330': {  # civic hatch au thanks to ming
        'can-address': '0x18DA30F1',
        'supported-versions': ['39990-TEA-T330'],
        'security-key': ['0x011101121120'],
        'encryption-key': '0x010203',
        'start-address': 0x4000,
        'data-size': 0x4c000,
        # (checksum func idx, offset)
        'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
    },
    '39990-TEA-H010': {  # bccw test
        'can-address': '0x18DA30F1',
        'supported-versions': ['39990-TEA-H010', '39990-TEA-H020', '39990-TEA,H020'],
        'security-key': ['0x0111011211', '0x0111011211', '0x0111011211'],
        'encryption-key': '0x010203',
        'start-address': 0x4000,
        'data-size': 0x4c000,
        # (checksum func idx, offset)
        'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
    },
    '39990-TGG-A120': {  # civic hatch thanks to R3DLOBST3R
        'can-address': '0x18DA30F1',
        'supported-versions': ['39990-TGG-A120'],
        'security-key': ['0x011101121120'],
        'encryption-key': '0x010203',
        'start-address': 0x4000,
        'data-size': 0x4c000,
        # (checksum func idx, offset)
        'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
    },
    '39990-TRW-A020': {  # clarity thanks to wirelessnet2
        'can-address': '0x18DA30F1',
        'supported-versions': ['39990-TRW-A010', '39990-TRW-A020', '39990-TRW,A010', '39990-TRW,A020'],
        'security-key': ['0x011101121120', '0x011101121120', '0x011101121120', '0x011101121120'],
        'encryption-key': '0x010203',
        'start-address': 0x4000,
        'data-size': 0x4c000,
        # (checksum func idx, offset)
        'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
    },
    '39990-TBX-3050': {  # civic sedan thanks to mystery leaker
        'can-address': '0x18DA30F1',
        'supported-versions': ['39990-TBX-H110', '39990-TBX-H120', '39990-TBX-3050'],
        'security-key': ['0x0211021212', '0x0211021212', '0x0211021212'],
        'encryption-key': '0xbf109e',
        'start-address': 0x13000,
        'data-size': 0xed000,
        # (checksum func idx, offset)
        # NOTE(review): these offsets match the TBA entry although start-address
        # and data-size differ -- verify they are correct for the TBX image.
        'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]  # original bin checksums are 0xDD23 at offset 0x4FF80 and 0xEDDF at 0x4FFFE, but since we start the bin from 0x4000 after bootloader, we offset the checksum accordingly
    },
}
def main():
    """Convert a full firmware binary into an encrypted ``.enc`` blob and build a ``.rwd`` patch.

    Example:
        python3 bin_to_rwd.py --input_bin crv_5g_user_patched.bin --model 39990-TLA-A030
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_bin", required=True, help="Full firmware binary file")
    parser.add_argument("--model", default='39990-TLA-A030', help="EPS part number")
    args = parser.parse_args()

    if args.model not in car_models:
        print('Car model %s not found' % args.model)
        sys.exit(-1)

    print('Creating rwd for model %s' % args.model)
    m = car_models[args.model]

    if not os.path.exists(args.input_bin):
        print('%s not found' % args.input_bin)
        sys.exit(-1)

    # Invert the decryption table to obtain the encryption mapping (plain byte -> cipher byte).
    encrypt_lookup_table = {v: k for k, v in default_decrypt_lookup_table.items()}

    with open(args.input_bin, 'rb') as f:
        full_fw = f.read()

    # Cut out the flashable region (skip the bootloader that precedes start-address).
    patch_fw = full_fw[m['start-address']:(m['start-address'] + m['data-size'])]

    # Recompute and patch each checksum in place; offsets are relative to start-address.
    for func_idx, off in m['checksum-offsets']:
        old_checksum = struct.unpack('!H', patch_fw[off:off + 2])[0] & 0xFFFF
        new_checksum = checksum_funcs[func_idx](patch_fw, 0, off) & 0xFFFF
        print('Update checksum at offset %s from %s to %s' % (hex(off), hex(old_checksum), hex(new_checksum)))
        patch_fw = patch_fw[:off] + struct.pack('!H', new_checksum & 0xFFFF) + patch_fw[off + 2:]

    # Substitute every byte through the encryption lookup table.
    encrypted = bytearray(encrypt_lookup_table[b] for b in patch_fw)

    out_enc_path = args.input_bin + '.enc'
    with open(out_enc_path, 'wb') as out_f:
        out_f.write(encrypted)
    print('Encryption done, saved to %s.' % out_enc_path)

    # Delegate .rwd container assembly to the (python2) rwd-builder script next to this file.
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    cmds = [
        'python2',
        'rwd-builder.py',
        '--can-address', m['can-address'],
        '--supported-versions', *m['supported-versions'],
        '--security-key', *m['security-key'],
        '--encryption-key', m['encryption-key'],
        '--encrypted-file', out_enc_path,
        '--start-address', hex(m['start-address']),
        '--data-size', hex(m['data-size'])
    ]
    subprocess.check_call(cmds, cwd=cur_dir)
    print('RWD file %s created.' % (out_enc_path[:-4] + '.rwd'))


if __name__ == "__main__":
    main()
| tools/bin_to_rwd.py | 8,871 | !/usr/bin/env python3 Convert full firmware binary to rwd patch. Supported models: CR-V 5g (part num: 39990-TLA), tested Civic 2016 sedan (part num: 39990-TBA), tested Civic 2016 hatchback Australia (part num: 39990-TEA), tested Civic 2016 hatchback (part num: 39990-TGG), tested Decryption lookup table built from Civic 2016 sedan bin/rwd, also apply to CR-V 5g. sum of x, x is unsigned shorts sum of -x, x is unsigned shortsCR-V thanks to joe1 (checksum func idx, offset)original bin checksums are 0x419b at offset 0x6FF80 and 0x24ef at 0x6FFFE, but since we start the bin from 0x4000 after bootloader, we offset the checksum accordinglycivic sedan thanks to mystery leaker (checksum func idx, offset)original bin checksums are 0xDD23 at offset 0x4FF80 and 0xEDDF at 0x4FFFE, but since we start the bin from 0x4000 after bootloader, we offset the checksum accordinglycivic hatch au thanks to ming (checksum func idx, offset) bccw test (checksum func idx, offset)civic hatch thanks to R3DLOBST3R (checksum func idx, offset)clarity thanks to wirelessnet2(checksum func idx, offset)civic sedan thanks to mystery leaker (checksum func idx, offset)original bin checksums are 0xDD23 at offset 0x4FF80 and 0xEDDF at 0x4FFFE, but since we start the bin from 0x4000 after bootloader, we offset the checksum accordingly example: python3 bin_to_rwd.py --input_bin crv_5g_user_patched.bin --model 39990-TLA-A030 | 1,409 | en | 0.682646 |
from django.contrib.staticfiles.storage import staticfiles_storage
from django.urls import reverse
from ManagementStudents.jinja2 import Environment
def environment(**options):
    """Create the Jinja2 environment for Django.

    Exposes Django's ``{% static %}`` and ``{% url %}`` equivalents as the
    Jinja2 globals ``static`` and ``url``, so templates can call them directly.
    """
    jinja_env = Environment(**options)
    jinja_env.globals['static'] = staticfiles_storage.url
    jinja_env.globals['url'] = reverse
    return jinja_env
| 14_Tran_An_Thien/ManagementStudents/ManagementStudents/customsettings.py | 469 | This enables us to use Django template tags like {% url ‘index’ %} or {% static ‘path/to/static/file.js’ %} in our Jinja2 templates. | 132 | en | 0.144252 |
"""
This module used for serializing data
CategorySchema - data from Category model
VacancySchema - data from Vacancy model
"""
# pylint: disable=too-many-ancestors
# pylint: disable=missing-class-docstring
# pylint: disable=too-few-public-methods
from app import ma
from app.models.model import Category, Vacancy
class CategorySchema(ma.SQLAlchemyAutoSchema):
    """
    Serializes ``Category`` model instances; only the ``name`` field is exposed.
    """
    class Meta:
        model = Category
        fields = ("name", )
class VacancySchema(ma.SQLAlchemyAutoSchema):
    """
    Serializes ``Vacancy`` model instances.

    Exposes name, salary, info and contacts; ``ordered = True`` keeps the
    declared field order in the serialized output.
    """
    class Meta:
        model = Vacancy
        fields = ("name", "salary", "info", "contacts")
        ordered = True
# Shared, ready-to-use schema instances for serializing lists of records.
categories_schema = CategorySchema(many=True)
vacancies_schema = VacancySchema(many=True)
| app/rest/serializers.py | 798 | Used for serialize Category data
Used for serialize Vacancy data
This module used for serializing data
CategorySchema - data from Category model
VacancySchema - data from Vacancy model
pylint: disable=too-many-ancestors pylint: disable=missing-class-docstring pylint: disable=too-few-public-methods | 301 | en | 0.562361 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class RouteType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The routing methodology to where the ICE server will be located from the client. "any" will
    have higher reliability while "nearest" will have lower latency. It is recommended to default
    to use the "any" routing method unless there are specific scenarios which minimizing latency is
    critical.
    """
    # NOTE: this file is AutoRest-generated; edits here are lost on regeneration.
    # Route via any available server (most reliable).
    ANY = "any"
    # Route via the nearest server (lowest latency).
    NEAREST = "nearest"
| sdk/communication/azure-communication-networktraversal/azure/communication/networktraversal/_generated/models/_communication_network_traversal_client_enums.py | 999 | The routing methodology to where the ICE server will be located from the client. "any" will
have higher reliability while "nearest" will have lower latency. It is recommended to default
to use the "any" routing method unless there are specific scenarios which minimizing latency is
critical.
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- | 746 | en | 0.805632 |
# -*- coding: utf-8 -*-
"""Class that defines the abstract interface for an object repository.
The scope of this class is intentionally very narrow. Any backend implementation should merely provide the methods to
store binary blobs, or "objects", and return a string-based key that unique identifies the object that was just created.
This key should then be able to be used to retrieve the bytes of the corresponding object or to delete it.
"""
import abc
import contextlib
import hashlib
import io
import pathlib
from typing import BinaryIO, Iterable, Iterator, List, Optional, Tuple, Union
from aiida.common.hashing import chunked_file_hash
__all__ = ('AbstractRepositoryBackend',)
class AbstractRepositoryBackend(metaclass=abc.ABCMeta):
    """Class that defines the abstract interface for an object repository.
    The repository backend only deals with raw bytes, both when creating new objects as well as when returning a stream
    or the content of an existing object. The encoding and decoding of the byte content should be done by the client
    upstream. The file repository backend is also not expected to keep any kind of file hierarchy but must be assumed
    to be a simple flat data store. When files are created in the file object repository, the implementation will return
    a string-based key with which the content of the stored object can be addressed. This key is guaranteed to be unique
    and persistent. Persisting the key or mapping it onto a virtual file hierarchy is again up to the client upstream.
    """
    @property
    @abc.abstractmethod
    def uuid(self) -> Optional[str]:
        """Return the unique identifier of the repository."""
    @property
    @abc.abstractmethod
    def key_format(self) -> Optional[str]:
        """Return the format for the keys of the repository.
        Important for when migrating between backends (e.g. archive -> main), as if they are not equal then it is
        necessary to re-compute all the `Node.repository_metadata` before importing (otherwise they will not match
        with the repository).
        """
    @abc.abstractmethod
    def initialise(self, **kwargs) -> None:
        """Initialise the repository if it hasn't already been initialised.
        :param kwargs: parameters for the initialisation.
        """
    @property
    @abc.abstractmethod
    def is_initialised(self) -> bool:
        """Return whether the repository has been initialised."""
    @abc.abstractmethod
    def erase(self) -> None:
        """Delete the repository itself and all its contents.
        .. note:: This should not merely delete the contents of the repository but any resources it created. For
            example, if the repository is essentially a folder on disk, the folder itself should also be deleted, not
            just its contents.
        """
    @staticmethod
    def is_readable_byte_stream(handle) -> bool:
        """Return whether ``handle`` looks like a readable binary stream (has ``read`` and a ``'b'`` mode)."""
        return hasattr(handle, 'read') and hasattr(handle, 'mode') and 'b' in handle.mode
    def put_object_from_filelike(self, handle: BinaryIO) -> str:
        """Store the byte contents of a file in the repository.
        :param handle: filelike object with the byte content to be stored.
        :return: the generated fully qualified identifier for the object within the repository.
        :raises TypeError: if the handle is not a byte stream.
        """
        # Accept buffered binary streams as well as anything exposing `read` plus a binary `mode`.
        if not isinstance(handle, io.BufferedIOBase) and not self.is_readable_byte_stream(handle):
            raise TypeError(f'handle does not seem to be a byte stream: {type(handle)}.')
        return self._put_object_from_filelike(handle)
    @abc.abstractmethod
    def _put_object_from_filelike(self, handle: BinaryIO) -> str:
        """Backend-specific storage of ``handle``'s bytes; returns the generated key."""
        pass
    def put_object_from_file(self, filepath: Union[str, pathlib.Path]) -> str:
        """Store a new object with contents of the file located at `filepath` on this file system.
        :param filepath: absolute path of file whose contents to copy to the repository.
        :return: the generated fully qualified identifier for the object within the repository.
        :raises TypeError: if the handle is not a byte stream.
        """
        with open(filepath, mode='rb') as handle:
            return self.put_object_from_filelike(handle)
    @abc.abstractmethod
    def has_objects(self, keys: List[str]) -> List[bool]:
        """Return whether the repository has an object with the given key.
        :param keys:
            list of fully qualified identifiers for objects within the repository.
        :return:
            list of logicals, in the same order as the keys provided, with value True if the respective
            object exists and False otherwise.
        """
    def has_object(self, key: str) -> bool:
        """Return whether the repository has an object with the given key.
        :param key: fully qualified identifier for the object within the repository.
        :return: True if the object exists, False otherwise.
        """
        return self.has_objects([key])[0]
    @abc.abstractmethod
    def list_objects(self) -> Iterable[str]:
        """Return iterable that yields all available objects by key.
        :return: An iterable for all the available object keys.
        """
    @contextlib.contextmanager
    def open(self, key: str) -> Iterator[BinaryIO]:
        """Open a file handle to an object stored under the given key.
        .. note:: this should only be used to open a handle to read an existing file. To write a new file use the method
            ``put_object_from_filelike`` instead.
        :param key: fully qualified identifier for the object within the repository.
        :return: yield a byte stream object.
        :raise FileNotFoundError: if the file does not exist.
        :raise OSError: if the file could not be opened.
        """
        # NOTE(review): nothing is yielded here, so this base implementation is not a usable
        # context manager by itself; concrete backends presumably override ``open`` and reuse
        # only this existence check — confirm against a concrete backend.
        if not self.has_object(key):
            raise FileNotFoundError(f'object with key `{key}` does not exist.')
    def get_object_content(self, key: str) -> bytes:
        """Return the content of a object identified by key.
        :param key: fully qualified identifier for the object within the repository.
        :raise FileNotFoundError: if the file does not exist.
        :raise OSError: if the file could not be opened.
        """
        with self.open(key) as handle:  # pylint: disable=not-context-manager
            return handle.read()
    @abc.abstractmethod
    def iter_object_streams(self, keys: List[str]) -> Iterator[Tuple[str, BinaryIO]]:
        """Return an iterator over the (read-only) byte streams of objects identified by key.
        .. note:: handles should only be read within the context of this iterator.
        :param keys: fully qualified identifiers for the objects within the repository.
        :return: an iterator over the object byte streams.
        :raise FileNotFoundError: if the file does not exist.
        :raise OSError: if a file could not be opened.
        """
    def get_object_hash(self, key: str) -> str:
        """Return the SHA-256 hash of an object stored under the given key.
        .. important::
            A SHA-256 hash should always be returned,
            to ensure consistency across different repository implementations.
        :param key: fully qualified identifier for the object within the repository.
        :raise FileNotFoundError: if the file does not exist.
        :raise OSError: if the file could not be opened.
        """
        with self.open(key) as handle:  # pylint: disable=not-context-manager
            return chunked_file_hash(handle, hashlib.sha256)
    @abc.abstractmethod
    def delete_objects(self, keys: List[str]) -> None:
        """Delete the objects from the repository.
        :param keys: list of fully qualified identifiers for the objects within the repository.
        :raise FileNotFoundError: if any of the files does not exist.
        :raise OSError: if any of the files could not be deleted.
        """
        # This base body only validates that every key exists; overriding implementations
        # perform the actual deletion and can call ``super().delete_objects(keys)`` for the check.
        keys_exist = self.has_objects(keys)
        if not all(keys_exist):
            error_message = 'some of the keys provided do not correspond to any object in the repository:\n'
            for indx, key_exists in enumerate(keys_exist):
                if not key_exists:
                    error_message += f' > object with key `{keys[indx]}` does not exist.\n'
            raise FileNotFoundError(error_message)
    def delete_object(self, key: str) -> None:
        """Delete the object from the repository.
        :param key: fully qualified identifier for the object within the repository.
        :raise FileNotFoundError: if the file does not exist.
        :raise OSError: if the file could not be deleted.
        """
        return self.delete_objects([key])
| aiida/repository/backend/abstract.py | 8,766 | Class that defines the abstract interface for an object repository.
The repository backend only deals with raw bytes, both when creating new objects as well as when returning a stream
or the content of an existing object. The encoding and decoding of the byte content should be done by the client
upstream. The file repository backend is also not expected to keep any kind of file hierarchy but must be assumed
to be a simple flat data store. When files are created in the file object repository, the implementation will return
a string-based key with which the content of the stored object can be addressed. This key is guaranteed to be unique
and persistent. Persisting the key or mapping it onto a virtual file hierarchy is again up to the client upstream.
Delete the object from the repository.
:param key: fully qualified identifier for the object within the repository.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be deleted.
Delete the objects from the repository.
:param keys: list of fully qualified identifiers for the objects within the repository.
:raise FileNotFoundError: if any of the files does not exist.
:raise OSError: if any of the files could not be deleted.
Delete the repository itself and all its contents.
.. note:: This should not merely delete the contents of the repository but any resources it created. For
example, if the repository is essentially a folder on disk, the folder itself should also be deleted, not
just its contents.
Return the content of a object identified by key.
:param key: fully qualified identifier for the object within the repository.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be opened.
Return the SHA-256 hash of an object stored under the given key.
.. important::
A SHA-256 hash should always be returned,
to ensure consistency across different repository implementations.
:param key: fully qualified identifier for the object within the repository.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be opened.
Return whether the repository has an object with the given key.
:param key: fully qualified identifier for the object within the repository.
:return: True if the object exists, False otherwise.
Return whether the repository has an object with the given key.
:param keys:
list of fully qualified identifiers for objects within the repository.
:return:
list of logicals, in the same order as the keys provided, with value True if the respective
object exists and False otherwise.
Initialise the repository if it hasn't already been initialised.
:param kwargs: parameters for the initialisation.
Return whether the repository has been initialised.
Return an iterator over the (read-only) byte streams of objects identified by key.
.. note:: handles should only be read within the context of this iterator.
:param keys: fully qualified identifiers for the objects within the repository.
:return: an iterator over the object byte streams.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if a file could not be opened.
Return the format for the keys of the repository.
Important for when migrating between backends (e.g. archive -> main), as if they are not equal then it is
necessary to re-compute all the `Node.repository_metadata` before importing (otherwise they will not match
with the repository).
Return iterable that yields all available objects by key.
:return: An iterable for all the available object keys.
Open a file handle to an object stored under the given key.
.. note:: this should only be used to open a handle to read an existing file. To write a new file use the method
``put_object_from_filelike`` instead.
:param key: fully qualified identifier for the object within the repository.
:return: yield a byte stream object.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be opened.
Store a new object with contents of the file located at `filepath` on this file system.
:param filepath: absolute path of file whose contents to copy to the repository.
:return: the generated fully qualified identifier for the object within the repository.
:raises TypeError: if the handle is not a byte stream.
Store the byte contents of a file in the repository.
:param handle: filelike object with the byte content to be stored.
:return: the generated fully qualified identifier for the object within the repository.
:raises TypeError: if the handle is not a byte stream.
Return the unique identifier of the repository.
Class that defines the abstract interface for an object repository.
The scope of this class is intentionally very narrow. Any backend implementation should merely provide the methods to
store binary blobs, or "objects", and return a string-based key that unique identifies the object that was just created.
This key should then be able to be used to retrieve the bytes of the corresponding object or to delete it.
-*- coding: utf-8 -*- pylint: disable=not-context-manager pylint: disable=not-context-manager | 5,153 | en | 0.847417 |
"""personal_gallery URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# Route the admin site explicitly; every other URL is delegated to the gallery app.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'',include('gallery.urls'))
]
| personal_gallery/urls.py | 819 | personal_gallery URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) | 642 | en | 0.681664 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .feature_client import FeatureClient
from .version import VERSION
# Public API surface of this generated package.
__all__ = ['FeatureClient']
__version__ = VERSION
| azure-mgmt-resource/azure/mgmt/resource/features/__init__.py | 1,049 | coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft and contributors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- | 883 | en | 0.767341 |
import sqlite3
import os
MSG_HELP = """List of commands:
!help
List commands
!listAll
List all animals
!show <animal>
Give description
!getFlag
Give flag (Admin only)
!serverInfo
Give server info (Dragonite only)
!addAdmin <id>
Make user an admin (Dragonite only)
!hint
Give you a hint.
Source_code:
https://github.com/Bankde/Hack-me-bot"""
# Canned replies for permission failures and bad input.
MSG_NO_DRAGONITE = "You're not Dragonite. Go away !!"
MSG_SEARCH_ERROR = "We cannot find this animal in our database"
MSG_NO_ADMIN = "You are not Admin. Go away !!"
MSG_ANIMAL_CMD = "Please specify animal: e.g. !show dog"
# SQLite database file shared by every command handler.
APP_DB = "app.db"
# Image URL returned by the !hint command.
HINT_URL = "https://i.imgur.com/QPKpeJL.jpg"
def init():
    """Seed the database: store SERVER_INFO from the environment and insert a test log row.

    Fix: the connection is now closed via ``finally`` so it is not leaked if a
    statement or commit raises.
    """
    serverInfo = os.getenv('SERVER_INFO', None)
    conn = sqlite3.connect(APP_DB)
    try:
        cursor = conn.cursor()
        # CREATE TABLE ServerInfo (info TEXT);
        cursor.execute("UPDATE ServerInfo SET info=?", (serverInfo,))
        # CREATE TABLE MsgLog (user TEXT, msg TEXT);
        cursor.execute("INSERT INTO MsgLog VALUES (?,?)", ("TestLogUser", "TestLogMsg"))
        conn.commit()
    finally:
        conn.close()
# Log userId and their msg here
def _msgLog(user, msg):
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
values = (user, msg,)
# CREATE TABLE MsgLog (user TEXT, msg TEXT);
cursor.execute("INSERT INTO MsgLog VALUES (?,?)", values)
conn.commit()
conn.close()
# Show animal description
def _showAnimal(animal):
    """Return the stored description for ``animal``, or an error message / None.

    NOTE(review): the query is built by string formatting, so ``animal`` is
    SQL-injectable. Given this repo is a "Hack-me-bot" CTF challenge, this
    appears deliberate — confirm before "fixing" with a parameterized query.
    """
    try:
        conn = sqlite3.connect(APP_DB)
        cursor = conn.cursor()
        # CREATE TABLE Animals (animal TEXT UNIQUE, description TEXT);
        cursor.execute("SELECT description FROM Animals WHERE animal='%s'" % (animal))
        all_data = cursor.fetchone()
        conn.close()
        if all_data == None or len(all_data) == 0:
            return MSG_SEARCH_ERROR
        else:
            return all_data[0]
    except:
        # Any failure (including malformed injected SQL) is reported and swallowed.
        print("SQL error for arg: %s" % (animal))
        return None
# List every animals
def _listAnimal():
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
# CREATE TABLE Animals (animal TEXT UNIQUE, description TEXT);
cursor.execute("SELECT animal FROM Animals")
all_data = cursor.fetchall()
conn.close()
return ", ".join([data[0] for data in all_data])
# My own reminder
def _getServerInfo(user):
if user.lower() == "dragonite":
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
# CREATE TABLE ServerInfo (info TEXT);
cursor.execute("SELECT info FROM ServerInfo")
all_data = cursor.fetchone()
conn.close()
return all_data[0]
else:
return MSG_NO_DRAGONITE
# You should ask Dragonite to add you to admin list
def _addAdmin(user, arg):
if user.lower() == "dragonite":
try:
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
values = (arg,)
# CREATE TABLE Admins (user TEXT PRIMARY KEY);
cursor.execute("INSERT INTO Admins VALUES (?)", values)
conn.commit()
conn.close()
return "Successfully add %s into admin" % (arg)
except:
return "You're already an admin"
else:
return MSG_NO_DRAGONITE
# Flag is secret. No one besides admin should see it.
def _getFlag(user):
    """Return the FLAG env var if ``user`` is in the Admins table, else a refusal.

    NOTE(review): the query is built by string formatting, so ``user`` is
    SQL-injectable (e.g. crafting a value that makes the SELECT return a row).
    Given this repo is a "Hack-me-bot" CTF challenge, this looks deliberate —
    confirm before replacing with a parameterized query.
    """
    conn = sqlite3.connect(APP_DB)
    cursor = conn.cursor()
    # CREATE TABLE Admins (user TEXT PRIMARY KEY);
    cursor.execute("SELECT user FROM Admins WHERE user='%s'" % (user))
    all_data = cursor.fetchone()
    conn.close()
    if all_data != None and len(all_data) == 1:
        flag = os.getenv('FLAG', None)
        return flag
    else:
        print("Alert: %s is not admin." % (user))
        return MSG_NO_ADMIN
def runCmd(message, user):
    """Dispatch one chat command and return the bot's reply text.

    :param message: raw message text from the chat.
    :param user: identifier of the sender (used for logging and permission checks).
    :return: reply string; empty string for unrecognized input.
    """
    _msgLog(user, message)  # audit every incoming message
    if message.lower() in ("help", "!help"):
        return MSG_HELP
    elif message == "!listAll":
        return _listAnimal()
    elif message == "!show":
        # bare "!show" without an argument
        return MSG_ANIMAL_CMD
    elif message.startswith("!show "):
        return _showAnimal(message[6:])
    elif message == "!serverInfo":
        return _getServerInfo(user)
    elif message == "!getFlag":
        return _getFlag(user)
    elif message.startswith("!addAdmin "):
        # startswith() for consistency with the "!show " handler (was `message[:10] == ...`)
        return _addAdmin(user, message[10:])
    elif message == "!hint":
        return HINT_URL
    else:
        return ""
| botCmd.py | 4,322 | Log userId and their msg here CREATE TABLE MsgLog (user TEXT, msg TEXT); Show animal description CREATE TABLE Animals (animal TEXT UNIQUE, description TEXT); List every animals CREATE TABLE Animals (animal TEXT UNIQUE, description TEXT); My own reminder CREATE TABLE ServerInfo (info TEXT); You should ask Dragonite to add you to admin list CREATE TABLE Admins (user TEXT PRIMARY KEY); Flag is secret. No one besides admin should see it. CREATE TABLE Admins (user TEXT PRIMARY KEY); | 482 | en | 0.402912 |
import unittest
import xmlrunner
# from selenium import webdriver
import pagemodels.headerpage
import tests.pickledlogin
import browserconfig
# VIDEO OF EXECUTION
# https://gyazo.com/b20fd223076bf34c1f2c9b94a4f1fe0a
# 2020-04-20 All tests passing, refactor complete
# All tests passed 5 executions in a row. v1 ready to ship.
# BUG- First execution will murder the cookies and break the following tests.
# interestingly, every subsequent test will pass once cookies are hard reset.
class HeaderPageTests(unittest.TestCase):
    """Test cases for the use of the header features atop most netflix pages."""
    @classmethod
    def setUpClass(cls):
        """Launch the webdriver of choice with selected options(see browserconfig.py).
        Then login using pickled cookies(see tests/pickledlogin.py)."""
        if browserconfig.current_browser in ['chrome', 'firefox']:
            cls.driver = browserconfig.driver_runner(
                executable_path=browserconfig.driver_path,
                desired_capabilities=browserconfig.capabilities
            )
        elif browserconfig.current_browser == 'edge':
            # Edge's driver constructor takes `capabilities` rather than `desired_capabilities`.
            cls.driver = browserconfig.driver_runner(
                executable_path=browserconfig.driver_path,
                capabilities=browserconfig.capabilities
            )
        tests.pickledlogin.pickled_login(cls.driver)
    @classmethod
    def tearDownClass(cls):
        """Closes the browser and shuts down the driver executable."""
        cls.driver.quit()
    def setUp(self):
        """Return to the home page, netflix.com/browse, the staging place for header tests."""
        self.driver.get("https://netflix.com/browse")
    def test_logout_from_header(self):
        """Logout from the header."""
        header_page = pagemodels.headerpage.HeaderPage(self.driver)
        header_page.logout()
        # user is redirected to https://www.netflix.com/logout after loging out
        self.assertIn('logout', self.driver.current_url)
        # CLEANUP
        # log back in using the pickled cookies
        tests.pickledlogin.pickled_login(self.driver)
    def test_navigate_home_from_my_list(self):
        """Using the giant Netflix logo in the top left, navigate to the home page /browse/
        from the my-list page."""
        self.driver.get("https://www.netflix.com/browse/my-list")
        header_page = pagemodels.headerpage.HeaderPage(self.driver)
        header_page.navigate_to_home()
        self.assertEqual("https://www.netflix.com/browse", self.driver.current_url)
    def test_navigate_to_manage_profile(self):
        """Using the header account dropdown, navigate to the manage profile page."""
        header_page = pagemodels.headerpage.HeaderPage(self.driver)
        header_page.navigate_to_manage_profile()
        # user is redirected to https://www.netflix.com/profiles/manage
        self.assertIn('profiles/manage', self.driver.current_url)
    def test_search_for_shawshank(self):
        """Using the search field, search for 'shawshank' and assert that shawshank was found."""
        header_page = pagemodels.headerpage.HeaderPage(self.driver)
        header_page.search("shawshank")
        self.assertIn("The Shawshank Redemption", self.driver.page_source)
        # I kind of like this assert now that I think about it. Its testing both the search
        # function and Netflix's search algorithm.
        # NOTE- test will not fail if "The Shawkshank Redemeption" is removed. Netflix displays
        # "similar to {title_name}" for titles its search algorithm recognizes
    def test_click_top_notification(self):
        """Click the top notification and assert that the page has changed."""
        header_page = pagemodels.headerpage.HeaderPage(self.driver)
        header_page.click_top_notification()
        # Assert that we navigated to a notification page or a title page(only 2 options)
        self.assertTrue(
            'title' in self.driver.current_url or 'notification' in self.driver.current_url
        )
    # DIDNT MAKE THE FIRST CUT OF TESTS
    # I could have 5 more test here for each one of the header buttons.
    # Those are about as elementary of tests as possible. Skipping them but TODO- OKAY TO HAVE
    # def test_clear_all_notifications(self):
    #     """ this is easy to do, but impossible to perfect. Netflix doesnt allow any sort of
    #     'mark notification as unread' so I have no way of generating notifications. Since I have
    #     no way of managing the state, THIS TEST CAN NEVER BE RAN MORE THAN ONCE A DAY. Thus I am
    #     forced to leave it out in order to avoid inconsistent test results"""
    #     header_page = pagemodels.headerpage.HeaderPage(self.driver)
    #     header_page.clear_notifications()
if __name__ == '__main__':
    # Run the suite and emit JUnit-style XML results for CI consumption.
    with open(r'xmltestresults\pretestresults.xml', 'wb') as output:
        unittest.main(
            testRunner=xmlrunner.XMLTestRunner(output=output),
            failfast=False, buffer=False, catchbreak=False)
Return to the home page, netflix.com/browse, the staging place for header tests.
Launch the webdriver of choice with selected options(see browserconfig.py).
Then login using pickled cookies(see tests/pickledlogin.py).
Closes the browser and shuts down the driver executable.
Click the top notification and assert that the page has changed.
Logout from the header.
Using the giant Netflix logo in the top left, navigate to the home page /browse/
from the my-list page.
Using the header account dropdown, navigate to the manage profile page.
Using the search field, search for 'shawshank' and assert that shawshank was found.
from selenium import webdriver VIDEO OF EXECUTION https://gyazo.com/b20fd223076bf34c1f2c9b94a4f1fe0a 2020-04-20 All tests passing, refactor complete All tests passed 5 executions in a row. v1 ready to ship. BUG- First execution will murder the cookies and break the following tests. interestingly, every subsequent test will pass once cookies are hard reset. user is redirected to https://www.netflix.com/logout after loging out CLEANUP log back in using the pickled cookies user is redirected to https://www.netflix.com/profiles/manage I kind of like this assert now that I think about it. Its testing both the search function and Netflix's search algorithm. NOTE- test will not fail if "The Shawkshank Redemeption" is removed. Netflix displays "similar to {title_name}" for titles its search algorithm recognizes Assert that we navigated to a notification page or a title page(only 2 options) DIDNT MAKE THE FIRST CUT OF TESTS I could have 5 more test here for each one of the header buttons. Those are about as elementary of tests as possible. Skipping them but TODO- OKAY TO HAVE def test_clear_all_notifications(self): """ this is easy to do, but impossible to perfect. Netflix doesnt allow any sort of 'mark notification as unread' so I have no way of generating notifications. Since I have no way of managing the state, THIS TEST CAN NEVER BE RAN MORE THAN ONCE A DAY. Thus I am forced to leave it out in order to avoid inconsistent test results""" header_page = pagemodels.headerpage.HeaderPage(self.driver) header_page.clear_notifications() | 2,270 | en | 0.866819 |
import os
import sys
import copy as copy
from tensor_view_1d import TensorView1D
from tensor_view_2d import TensorView2D
from tensor_view_act import TensorViewAct
from tensor_view_filter import TensorViewFilter
from tensor_data import TensorData
import inspect
from PyQt4 import QtGui, QtCore
from pyqt_env import PyQTEnv
import xml.etree.ElementTree as ET
TEST_WATERFALL_VIEW = False
# Absolute, symlink-resolved directory that contains this module.
_this_file = inspect.getfile(inspect.currentframe())
gui_root_folder = os.path.realpath(os.path.abspath(os.path.split(_this_file)[0]))
class MainWindow(QtGui.QMainWindow):
    """Top-level Qt window of the tensor visualization control panel.

    All interaction is delegated through ``args``, a dict of callbacks
    provided by the controller (ControlPanel), keeping this class a thin
    view layer with no tensor logic of its own.
    """

    def __init__(self, args):
        super(MainWindow, self).__init__()
        self.setGeometry(1400, 70, 600, 370)
        self.setWindowTitle("VISUALIZATION")
        self.action_cb = args  # controller callback table

        # Menu / toolbar actions.
        quitAction = QtGui.QAction('Quit', self)
        quitAction.triggered.connect(self.close_application)
        saveAction = QtGui.QAction('Save', self)
        saveAction.setShortcut('Ctrl+S')
        saveAction.triggered.connect(self.save_WatchList)
        loadAction = QtGui.QAction('Open File...', self)
        loadAction.setShortcut('Ctrl+O')
        loadAction.triggered.connect(self.action_cb['load_WatchList'])
        input_file = QtGui.QAction('Open input file', self)
        input_file.setShortcut('Ctrl+I')
        input_file.triggered.connect(self.open_input_file)
        menu = self.menuBar()
        filemenu = menu.addMenu('&File')
        filemenu.addAction(saveAction)
        filemenu.addAction(loadAction)
        filemenu.addAction(input_file)
        self.toolBar = self.addToolBar("ToolBar")
        self.toolBar.addAction(quitAction)
        self.create_sub_windows()

    def create_sub_windows(self):
        """Create and position all control widgets (fixed-pixel layout)."""
        # Execution control: pause checkbox (starts checked) + single-step button.
        pausecheck = QtGui.QCheckBox('Pause', self)
        pausecheck.move(520, 120)
        pausecheck.toggle()
        pausecheck.stateChanged.connect(self.action_cb['on_pause'])
        self.step_btn = QtGui.QPushButton("Step", self)
        self.step_btn.setStyleSheet("color: blue; font: bold 14px")
        self.step_btn.resize(50, 25)
        self.step_btn.move(520, 80)
        self.step_btn.clicked.connect(self.action_cb['on_step'])
        # Select-list mode: chooser for the kind of view to open when watching.
        self.watch_com = QtGui.QLabel(self)
        self.watch_com.setText('Watch :')
        self.watch_com.move(520, 244)
        self.watch_com.setFont(QtGui.QFont("Times", 13, weight=QtGui.QFont.Bold))
        self.watch_choice = QtGui.QComboBox(self)
        self.watch_choice.setStyleSheet("font: bold 14px")
        self.watch_choice.move(520, 280)
        self.watch_choice.addItem('1-DIM')
        self.watch_choice.addItem('2-DIM')
        self.watch_choice.addItem('Activation')
        self.watch_choice.addItem('Filter')
        self.watch_choice.resize(70, 30)
        self.watch_choice.show()
        self.watch_choice.activated[str].connect(self.action_cb['on_add_watch'])
        # Watch-list mode controls; hidden until that mode is active.
        self.showbtn = QtGui.QCheckBox('Show', self)
        self.showbtn.move(520, 195)
        self.showbtn.toggle()
        self.showbtn.hide()
        self.showbtn.stateChanged.connect(self.action_cb['on_set_show'])
        self.show_remove_btn = QtGui.QPushButton("Remove", self)
        self.show_remove_btn.setStyleSheet("color: red; font: bold 14px")
        self.show_remove_btn.resize(70, 30)
        self.show_remove_btn.move(520, 240)
        self.show_remove_btn.hide()
        self.show_remove_btn.clicked.connect(self.action_cb['on_remove_watch'])
        self.hd_all_btn = QtGui.QPushButton("Hide All", self)
        self.hd_all_btn.setStyleSheet("color: red; font: bold 14px")
        self.hd_all_btn.resize(84, 30)
        self.hd_all_btn.move(510, 280)
        self.hd_all_btn.hide()
        self.hd_all_btn.clicked.connect(self.action_cb['on_hide_all'])
        # Shape display and reshape entry for the selected tensor.
        self.tensor_label = QtGui.QLabel(self)
        self.tensor_label.setAlignment(QtCore.Qt.AlignCenter)
        self.tensor_label.setGeometry(QtCore.QRect(80, 180, 200, 20))
        self.tensor_label.setFont(QtGui.QFont("Times", 12, weight=QtGui.QFont.Bold))
        self.tensor_reshape_label = QtGui.QLabel(self)
        self.tensor_reshape_label.setAlignment(QtCore.Qt.AlignCenter)
        self.tensor_reshape_label.setGeometry(QtCore.QRect(80, 220, 200, 20))
        self.tensor_reshape_label.setFont(QtGui.QFont("Times", 12, weight=QtGui.QFont.Bold))
        self.reshape_inlb = QtGui.QLabel(self)
        self.reshape_inlb.move(80, 220)
        self.reshape_inlb.setText('Reshape: ')
        self.reshape_inlb.setFont(QtGui.QFont('Times', 12, weight=QtGui.QFont.Bold))
        self.tensor_shape_input = QtGui.QLineEdit(self)
        self.tensor_shape_input.textChanged.connect(self.action_cb['on_tensor_shape_input'])
        self.tensor_shape_input.move(160, 220)
        # Input-source selector.
        self.sourceInput_list = QtGui.QComboBox(self)
        self.sourceInput_list.move(160, 270)
        self.sourceInput_list.activated[str].connect(self.action_cb['on_input_select'])
        # List-mode and filter-type selectors.
        listcombo = QtGui.QComboBox(self)
        listcombo.addItem("Select List")
        listcombo.addItem("Watch List")
        listcombo.move(50, 100)
        subcombo = QtGui.QComboBox(self)
        subcombo.addItem('USER_LIST')
        subcombo.addItem('TRAINABLE_VARIABLES')
        subcombo.addItem('ACTIVATIONS')
        subcombo.addItem('GLOBAL_VARIABLES')
        subcombo.addItem('ALL_OPS')
        subcombo.move(180, 100)
        listcombo.activated[str].connect(self.action_cb['on_list_type_select'])
        subcombo.activated[str].connect(self.action_cb['on_filter_type_select'])
        self.create_list_view()
        # Free-text search filter (applies to ALL_OPS only).
        fontset = QtGui.QFont()
        fontset.setPointSize(12)
        self.filter_comment = QtGui.QLabel(self)
        self.filter_comment.setText('Search Only in ALL_OPS:')
        self.filter_comment.setGeometry(QtCore.QRect(100, 34, 180, 25))
        self.filter_comment.setFont(fontset)
        self.filter_in = QtGui.QLineEdit(self)
        self.filter_in.textChanged.connect(self.action_cb['on_filter_str_input'])
        self.filter_in.move(290, 30)
        self.filter_in.resize(190, 40)
        self.show()

    def create_list_view(self):
        """Build the tensor list view, filled with placeholder rows '0'..'49'."""
        self.list_view = QtGui.QListView(self)
        self.list_view.main = self
        self.list_view.setEditTriggers(QtGui.QListView.NoEditTriggers)
        self.list_view.setMouseTracking(True)
        self.list_model = QtGui.QStandardItemModel()
        self.list_view.setModel(self.list_model)
        for entry in range(50):
            self.list_model.appendRow(QtGui.QStandardItem(str(entry)))
        self.list_view.setMinimumSize(170, 200)
        self.list_view.move(310, 130)
        self.list_view.clicked.connect(self.action_cb['on_tensor_select'])

    def close_application(self):
        """Ask for confirmation, then delegate shutdown to the controller."""
        choice = QtGui.QMessageBox.question(self, 'Warning',
                                            "Do you want to quit?",
                                            QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
        if choice == QtGui.QMessageBox.Yes:
            self.action_cb['on_close']()

    def save_WatchList(self):
        """Ask for confirmation, then delegate saving the watch list."""
        choice = QtGui.QMessageBox.question(self, '',
                                            "Do you want to save the watch_list?",
                                            QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
        if choice == QtGui.QMessageBox.Yes:
            self.action_cb['on_save']()

    def update_tensor_list(self, list_type, list, pos, reset_pos):
        """Repopulate the list view with the display names from *list*.

        ``list_type``, ``pos`` and ``reset_pos`` are currently unused but are
        kept because the controller calls this method with those keywords.
        """
        self.list_model.clear()
        for entry in list:
            self.list_model.appendRow(QtGui.QStandardItem(entry.disp_name))

    def open_input_file(self):
        """Prompt for an input file, name it, and register it with the controller."""
        name = QtGui.QFileDialog.getOpenFileName(self, 'Open input file')
        # BUG FIX: open briefly just to validate/normalise the path; the
        # original leaked this file handle (never closed it).
        with open(name, 'r') as input_file:
            file_path = input_file.name
        DIYname = QtGui.QInputDialog.getText(self, 'Name your input choice', None)
        self.action_cb['add_input'](DIYname[0], file_path)

    def update_input_list(self, input_list):
        """Refresh the input-source combo box from *input_list*."""
        self.sourceInput_list.clear()
        for item in input_list:
            self.sourceInput_list.addItem(item.name)

    def enable_filter_input(self, enable):
        """Enable or disable the free-text search field."""
        self.filter_in.setDisabled(enable is False)
class TensorItem(object):
    """Descriptive record for one tensor shown in the visualization panel.

    Holds the tensor's name, the op object it came from, the input source
    feeding it, and display metadata (shape string, user-requested reshape).
    """

    def __init__(self, name, shape, op, input_name):
        self.name = name
        self.op = op
        self.input_name = input_name
        self.disp_name = name  # name shown in the list view
        try:
            self.shape_str = '(' + ', '.join(map(str, shape)) + ')'
        except TypeError:
            # BUG FIX: was a bare ``except``.  *shape* may be None or otherwise
            # non-iterable (dynamic shapes under python3), which raises
            # TypeError in the join above; anything else should propagate.
            self.shape_str = ""
        self.reshape = []  # user-requested reshape dims; empty means "as is"

    def copy(self, obj):
        """Copy the descriptive fields of *obj* into self (op is shared, not copied)."""
        self.name = copy.copy(obj.name)
        self.input_name = copy.copy(obj.input_name)
        self.op = obj.op
        self.disp_name = copy.copy(obj.disp_name)
        self.shape_str = copy.copy(obj.shape_str)
        self.reshape = copy.copy(obj.reshape)

    def get_reshape_str(self):
        """Return the reshape dims as a comma-separated string, e.g. '2, 3'."""
        return ', '.join(map(str, self.reshape))
class ControlPanel(object):
    """Controller mediating between the Qt view (MainWindow) and tensor data.

    NOTE(review): all of the state below is *class-level* and therefore shared
    by every instance; the panel appears to be used as a singleton -- confirm
    before instantiating it more than once.
    """
    quit = False               # set by __on_close; polled by beat()
    pause = True               # execution paused (mirrors the Pause checkbox)
    single_step_flag = False   # one-shot advance requested via the Step button
    step_count = 0             # global step counter, advanced in beat()
    cur_list_type = 0          # 0 = select list, 1 = watch list
    cur_filter_type_index = 0
    tensor_select_list = []
    select_list_cur_pos = 0
    tensor_watch_list = []
    watch_list_cur_pos = 0
    tensor_input_list = []
    console_cmd_list = []
    pyqt_env = None

    class TensorSelectItem(TensorItem):
        """A tensor offered for selection; a plain TensorItem."""
        def __init__(self, name, shape, op, input_name):
            TensorItem.__init__(self, name, shape, op, input_name)

    class TensorWatchItem(TensorItem):
        """A watched tensor: adds visibility, a data buffer and its view window."""
        def __init__(self, tensor_select_item):
            self.showstate = True
            self.copy(tensor_select_item)
            self.data_source = TensorData(start_step=ControlPanel.step_count)
            self.pyqt_window_id = None
            self.picDIM = '1-DIM'  # view kind currently/last opened

    class TensorInputItem(object):
        """Named input source; input_obj is lazily loaded (may be None)."""
        def __init__(self, name, input_obj):
            self.name = name
            self.input_obj = input_obj

    # ------------------------------------------------------------------
    # tensor view windows
    # ------------------------------------------------------------------
    # View classes that take shape/reshape in addition to the data source.
    _VIEW_CLASSES = {
        '2-DIM': TensorView2D,
        'Activation': TensorViewAct,
        'Filter': TensorViewFilter,
    }

    def __open_tensor_view(self, index, text):
        """Open a view window of kind *text* for the watch item at *index*."""
        tensor_item = self.tensor_watch_list[index]
        tensor_item.pyqt_window_id = self.pyqt_env.get_free_identity()
        if text == '1-DIM':
            self.pyqt_env.create_window(
                tensor_item.pyqt_window_id, TensorView1D,
                {'data_source': tensor_item.data_source, 'name': tensor_item.name})
        elif text in self._VIEW_CLASSES:
            self.pyqt_env.create_window(
                tensor_item.pyqt_window_id, self._VIEW_CLASSES[text],
                {'data_source': tensor_item.data_source, 'name': tensor_item.name,
                 'shape': tensor_item.shape_str, 'reshape': tensor_item.reshape})
        else:
            return  # unknown kind: silently ignored (matches original behaviour)
        tensor_item.picDIM = text

    def __close_tensor_view(self, index):
        """Close the view window of the watch item at *index*, if one is open."""
        tensor_item = self.tensor_watch_list[index]
        if tensor_item.pyqt_window_id is not None:
            self.pyqt_env.close(tensor_item.pyqt_window_id)
            tensor_item.pyqt_window_id = None

    def __close_all_tensor_views(self):
        for i in range(len(self.tensor_watch_list)):
            self.__close_tensor_view(i)

    def __on_tensor_shape_input(self, text):
        """Parse the comma-separated reshape field into the selected item."""
        titem = self.tensor_select_list[self.select_list_cur_pos]
        titem.reshape = []
        for dim in text.split(','):
            try:
                titem.reshape.append(int(dim))
            except ValueError:
                pass  # ignore partial/invalid entries while the user types

    def __on_add_watch(self, text):
        """Promote the selected tensor to the watch list and open a *text* view."""
        titem = self.tensor_select_list[self.select_list_cur_pos]
        self.tensor_watch_list.append(self.TensorWatchItem(titem))
        self.__open_tensor_view(len(self.tensor_watch_list) - 1, text)

    def __on_remove_watch(self):
        """Remove the current watch item, then refresh the visible list."""
        self.__close_tensor_view(self.watch_list_cur_pos)
        del self.tensor_watch_list[self.watch_list_cur_pos]
        item_num = len(self.tensor_watch_list)
        if self.watch_list_cur_pos >= item_num and item_num > 0:
            self.watch_list_cur_pos = item_num - 1
        if self.cur_list_type == 0:
            items, pos = self.tensor_select_list, self.select_list_cur_pos
        else:
            items, pos = self.tensor_watch_list, self.watch_list_cur_pos
        self.main_window.update_tensor_list(list_type=self.cur_list_type,
                                            list=items, pos=pos, reset_pos=False)

    def __on_set_show(self, state):
        """Open/close the current item's view to match the Show checkbox."""
        item = self.tensor_watch_list[self.watch_list_cur_pos]
        checked = state == QtCore.Qt.Checked
        if checked and not item.showstate:
            self.__open_tensor_view(self.watch_list_cur_pos, item.picDIM)
            item.showstate = True
        if not checked and item.showstate:
            self.__close_tensor_view(self.watch_list_cur_pos)
            item.showstate = False

    def __on_input_select(self, text):
        """Bind the selected tensor to input source *text* and show that source."""
        titem = self.tensor_select_list[self.select_list_cur_pos]
        titem.input_name = text
        input_obj = self.__get_input_obj(text)
        if input_obj is not None:
            input_obj.show()

    def __on_tensor_select(self, index):
        """Handle a click in the list view; refresh labels for the clicked item."""
        row = index.row()
        if self.cur_list_type == 0:
            self.select_list_cur_pos = row
            items = self.tensor_select_list
            print(items[row].shape_str)
        else:
            self.watch_list_cur_pos = row
            items = self.tensor_watch_list
            self.main_window.showbtn.setChecked(bool(items[row].showstate))
        self.main_window.tensor_reshape_label.setText(
            'Reshape: (' + str(items[row].get_reshape_str()) + ')')
        self.main_window.tensor_label.setText('Shape: ' + items[row].shape_str)

    # ------------------------------------------------------------------
    # global control
    # ------------------------------------------------------------------
    def __on_list_type_select(self, text):
        """Switch between 'Select List' (0) and 'Watch List' (1) display modes."""
        index = 0 if text == 'Select List' else 1
        if index != self.cur_list_type:
            # The free-text search filter only applies to the select list.
            self.main_window.enable_filter_input(index == 0)
            self.cur_list_type = index
            self.on_switch_btn(self.cur_list_type)
            if self.cur_list_type == 0:
                self.main_window.update_tensor_list(
                    list_type=self.cur_list_type, list=self.tensor_select_list,
                    pos=self.select_list_cur_pos, reset_pos=False)
            else:
                self.main_window.update_tensor_list(
                    list_type=self.cur_list_type, list=self.tensor_watch_list,
                    pos=self.watch_list_cur_pos, reset_pos=False)

    def on_switch_btn(self, index):
        """Show/hide the widgets appropriate for the active list (0=select, 1=watch)."""
        if index == 0:
            self.main_window.watch_choice.show()
            self.main_window.show_remove_btn.hide()
            self.main_window.hd_all_btn.hide()
            self.main_window.showbtn.hide()
            self.main_window.watch_com.show()
            self.main_window.tensor_label.show()
            # NOTE(review): raises IndexError if the select list is empty --
            # confirm the list can never be empty when switching modes.
            self.main_window.tensor_label.setText(
                'Shape: ' + self.tensor_select_list[0].shape_str)
            self.main_window.tensor_shape_input.show()
            self.main_window.reshape_inlb.show()
            self.main_window.tensor_shape_input.clear()
            self.main_window.tensor_reshape_label.hide()
        else:
            self.main_window.watch_choice.hide()
            self.main_window.show_remove_btn.show()
            self.main_window.hd_all_btn.show()
            self.main_window.watch_com.hide()
            self.main_window.tensor_shape_input.hide()
            if self.tensor_watch_list:
                first = self.tensor_watch_list[0]
                self.main_window.showbtn.show()
                self.main_window.tensor_label.show()
                self.main_window.tensor_reshape_label.show()
                self.main_window.tensor_label.setText('Shape: ' + first.shape_str)
                self.main_window.tensor_reshape_label.setText(
                    'Reshape: (' + str(first.get_reshape_str()) + ')')
                self.main_window.showbtn.setChecked(bool(first.showstate))
            else:
                self.main_window.showbtn.hide()
                self.main_window.tensor_label.hide()
                self.main_window.tensor_reshape_label.hide()
                self.main_window.reshape_inlb.hide()

    def __on_filter_type_select(self, text):
        """Record which tensor collection the list should be filtered to."""
        type_index = {'USER_LIST': 0, 'TRAINABLE_VARIABLES': 1, 'ACTIVATIONS': 2,
                      'GLOBAL_VARIABLES': 3, 'ALL_OPS': 4}
        self.cur_filter_type_index = type_index[text]

    def __on_filter_str_input(self, text):
        """Store the stripped free-text search string."""
        self.filter_str = str(text).strip()

    def __on_pause(self, state):
        """Mirror the Pause checkbox into the shared pause flag."""
        self.pause = state == QtCore.Qt.Checked
        print(self.pause)

    def __on_step(self):
        """Request a single step: pause and raise the one-shot step flag."""
        self.pause = True
        self.single_step_flag = True

    def __on_hide_all(self):
        """Close every open view window and hide the Show checkbox."""
        self.__close_all_tensor_views()
        self.main_window.showbtn.hide()

    def __on_console_str_input(self):
        # Console input is currently disabled.  (The original queued commands
        # into console_cmd_list via dead code that referenced an undefined
        # variable; that dead code has been removed.)
        return

    def __on_close(self):
        self.quit = True

    def __on_save(self):
        """Serialise the watch list (name/shape/reshape/view kind/geometry) to XML."""
        root = ET.Element('root')
        for i, item in enumerate(self.tensor_watch_list):
            entry = ET.SubElement(root, 'Item' + str(i + 1))
            # Child order is relied upon by __load_WatchList (indices 0..7).
            ET.SubElement(entry, 'name').text = item.name
            ET.SubElement(entry, 'shape').text = item.shape_str
            # BUG FIX: Element.text must be a string; the original assigned the
            # raw reshape list, which made ET.tostring() raise.
            ET.SubElement(entry, 'reshape').text = item.get_reshape_str()
            ET.SubElement(entry, 'visType').text = item.picDIM
            (x, y, w, h) = self.pyqt_env.get_win_pos_size(item.pyqt_window_id)
            for tag, value in (('win_x', x), ('win_y', y), ('win_w', w), ('win_h', h)):
                ET.SubElement(entry, tag).text = str(value)
        # BUG FIX: the output file was never closed.
        with open('Saved_WatchList.xml', 'wb') as out_file:
            out_file.write(ET.tostring(root))

    def __load_WatchList(self):
        """Restore watched tensors (and their windows) from Saved_WatchList.xml."""
        tree = ET.parse('Saved_WatchList.xml')
        root = tree.getroot()
        count = len(self.tensor_watch_list)
        print(count)
        for elem in root:
            # Child order written by __on_save: name, shape, reshape, visType,
            # win_x, win_y, win_w, win_h.
            saved_name = elem[0].text
            for t in self.all_ops:
                if t.name == saved_name:
                    tem_select = self.TensorSelectItem(
                        t.name, t.shape, t.op, self.tensor_input_list[0].name)
                    self.tensor_watch_list.append(self.TensorWatchItem(tem_select))
                    print('now', len(self.tensor_watch_list), 'but count: ', count)
                    self.__open_tensor_view(count, elem[3].text)
                    self.pyqt_env.set_win_pos_size(
                        self.tensor_watch_list[count].pyqt_window_id,
                        int(elem[4].text), int(elem[5].text),
                        int(elem[6].text), int(elem[7].text))
                    break
            # NOTE(review): count advances even when no matching op was found,
            # which would desynchronise it from the list -- confirm intended.
            count += 1

    def __create_main_window(self, args):
        """PyQTEnv callback: build the MainWindow wired to this controller."""
        self.main_window = MainWindow(
            {
                'filter_type_list': self.filter_type_list,
                'tensor_input_list': self.tensor_input_list,
                'on_close': self.__on_close,
                'on_save': self.__on_save,
                # global control
                'on_pause': self.__on_pause,
                'on_step': self.__on_step,
                'on_hide_all': self.__on_hide_all,
                'on_console_str_input': self.__on_console_str_input,
                'on_filter_type_select': self.__on_filter_type_select,
                'on_filter_str_input': self.__on_filter_str_input,
                'on_list_type_select': self.__on_list_type_select,
                'on_tensor_select': self.__on_tensor_select,
                # tensor select panel
                'on_tensor_shape_input': self.__on_tensor_shape_input,
                'on_input_select': self.__on_input_select,
                # tensor watch panel
                'on_remove_watch': self.__on_remove_watch,
                'on_add_watch': self.__on_add_watch,
                'on_set_show': self.__on_set_show,
                'load_WatchList': self.__load_WatchList,
                'add_input': self.__add_input,
            }
        )
        return None

    def __init__(self, filter_type_list, input_list, loaded_list):
        """Create the panel, its Qt environment and the main window.

        filter_type_list: collections selectable in the filter combo box.
        input_list: names of the initially known input sources.
        loaded_list: all known ops, searched by __load_WatchList.
        """
        for input_name in input_list:
            self.tensor_input_list.append(self.TensorInputItem(input_name, None))
        self.filter_str = ""
        self.filter_type_list = filter_type_list
        self.pyqt_env = PyQTEnv()
        self.pyqt_env.run(self.__create_main_window, None)
        self.main_window.update_input_list(self.tensor_input_list)
        print('control_panel _init')
        self.all_ops = loaded_list

    def __get_input_obj(self, name):
        """Return the input object registered under *name*, or None."""
        for input_item in self.tensor_input_list:
            if input_item.name == name:
                return input_item.input_obj
        return None

    def __add_input(self, input_name, filename, config_dict=None):
        """Dynamically import *filename* as an input module and register it.

        The module must define ``TensorInput(placeholder_dict, config_dict)``.
        Failures are reported to stdout and otherwise ignored (best effort).
        """
        import importlib
        # BUG FIX: mutable default argument replaced with a None sentinel.
        if config_dict is None:
            config_dict = {}
        try:
            placeholder_dict = {}
            for t in self.all_ops:
                if t.op.op.type == 'Placeholder':
                    placeholder_dict[t.name] = t.op
            path, module_file = os.path.split(os.path.abspath(filename))
            module_name = module_file.split('.')[-2]
            print('* input_name is: %s, filename is: %s' % (input_name, filename))
            print('* config_dict is:', config_dict)
            print('* module path is: %s, name is: %s' % (path, module_name))
            sys.path.append(path)  # make the module importable by name
            temp_module = importlib.import_module(module_name)
            input_obj = temp_module.TensorInput(placeholder_dict, config_dict)
            input_obj.show()
            self.tensor_input_list.append(self.TensorInputItem(input_name, input_obj))
            self.main_window.update_input_list(self.tensor_input_list)
        except Exception as e:
            print('Add_input error:', e)

    # ------------------------------------------------------------------
    # public methods
    # ------------------------------------------------------------------
    def update_tensor_list(self, tensor_list):
        """Replace the select list with (name, shape, op) tuples from *tensor_list*."""
        default_input = self.tensor_input_list[0].name if self.tensor_input_list else ''
        self.tensor_select_list = [
            self.TensorSelectItem(t[0], t[1], t[2], default_input)
            for t in tensor_list]
        if self.cur_list_type == 0:
            self.select_list_cur_pos = 0
            self.main_window.update_tensor_list(
                list_type=self.cur_list_type, list=self.tensor_select_list,
                pos=0, reset_pos=True)

    def get_tensor_watch_list(self):
        """Map each input item to its visible watch entries.

        Returns {TensorInputItem: [(name, reshape, op, data_source, input_name), ...]}
        with inputs that have no visible entries omitted.
        """
        result = {}
        for input_item in self.tensor_input_list:
            entries = [(t.name, t.reshape, t.op, t.data_source, t.input_name)
                       for t in self.tensor_watch_list
                       if t.pyqt_window_id is not None
                       and input_item.name == t.input_name]
            if entries:
                result[input_item] = entries
        return result

    def beat(self, update_step_flag):
        """Advance one heartbeat; returns False once quit was requested."""
        if update_step_flag:
            self.single_step_flag = False
            ControlPanel.step_count += 1
        if self.quit:
            self.pyqt_env.quit()
        return not self.quit

    def is_pause(self):
        return self.pause

    def is_step(self):
        return self.single_step_flag

    def get_filter_type(self):
        """Return [active filter collection name, current search string]."""
        return [self.filter_type_list[self.cur_filter_type_index], self.filter_str]

    def get_console_command(self):
        """Pop and return the newest queued console command, or None if empty."""
        if self.console_cmd_list:
            return self.console_cmd_list.pop()
| TensorMonitor/control_panel.py | 26,784 | self.tensor_input_list = args['tensor_input_list']self.data_source = TensorData(start_step=ControlPanel.step_count)TypeError: fix for python3self.pyqt_window_id = Noneself.view = None global control tensor select panel tensor watch panel add_input testfor test/alexnetself.__add_input('img_input')for test/basic_testself.__add_input('test_input')self.pyqt_env.run(self.__load_input, None)add module search path | 410 | en | 0.175219 |
"""TensorFlow ops for deep neural networks."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import nn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope as vs
from tensorflow.contrib.learn.python.learn.ops import dropout_ops
def dnn(tensor_in, hidden_units, activation=nn.relu, dropout=None):
  """Builds a stack of fully connected layers on top of *tensor_in*.

  Args:
    tensor_in: tensor or placeholder holding the input features.
    hidden_units: iterable giving the number of units for each hidden layer.
    activation: activation applied after each linear layer; None disables it.
    dropout: if not None, a dropout layer with this drop probability is
      appended after each hidden layer.

  Returns:
    The output tensor of the final layer.
  """
  with vs.variable_scope('dnn'):
    layer_out = tensor_in
    for layer_idx, layer_size in enumerate(hidden_units):
      with vs.variable_scope('layer%d' % layer_idx):
        layer_out = rnn_cell.linear(layer_out, layer_size, True)
        if activation is not None:
          layer_out = activation(layer_out)
        if dropout is not None:
          layer_out = dropout_ops.dropout(layer_out, prob=(1.0 - dropout))
    return layer_out
| tensorflow/contrib/learn/python/learn/ops/dnn_ops.py | 1,844 | Creates fully connected deep neural network subgraph.
Args:
tensor_in: tensor or placeholder for input features.
hidden_units: list of counts of hidden units in each layer.
activation: activation function between layers. Can be None.
dropout: if not None, will add a dropout layer with given probability.
Returns:
A tensor which would be a deep neural network.
TensorFlow ops for deep neural networks.
Copyright 2015-present The Scikit Flow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 1,013 | en | 0.857006 |
"""
This code was created by Tyler Adam Martinez for the BMEN3310 Final
#This are the varibles and what they stand for.
Hemodynamic Parameter Analysis
CS // Cross-Sectional Area of the heart valve
vR // Radius of Valve
DR // Disk Radius
TiA // Area of the Titanium wire
TiV // Volume of the Titanium wire
IRV // Inner Ring Volume
ORV // Outer Ring Volume
DR // Disk Volume
NS // Cost of Nitinol Stent per signal unit
PPC // Pure Pyrolytic Carbon per unit volume
Tf // Teflon Fabric per unit volume
Ti // Titanium Wire per unit volume
Hemodynamic Calculations
SVR // Systemic Vascular Resistance or Afterload on the heart
MAP // Mean Arterial Pressure
CVP // Central Venous Pressure
CO // Cardiac Output
SV // Stroke Volume
HR // Heart Rate
SBP // Systomic Blood Pressure
DBP // Diastolic Blood Pressure
"""
import math
pi = 3.14159265359;
## Hemodynamic Parameter Analysis
CS = input("The cross-sectional area of the valve: ");
CS = int(CS);
vR = math.sqrt(CS/pi); #Convert CS to v radius
height = 5.0; #mm
thinkness = 1.5; #mm
DR = vR - (2*thinkness); #calculating for the two outer disks
Diskheight = 1.5; #mm
#calculating the volumes of each material
TiA = 0.1024 * pi; #.32mm is radius of Titanium wire, and .1024 is r^2
TiV = 2*vR *TiA; #mm^3
IRV = pi * pow((DR + thinkness), 2) - (pi * pow(DR, 2)) * height; #mm^3
ORV = pi * pow((DR + (2*thinkness)), 2) - pi * pow((DR + thinkness),2) * height; #mm^3
DV = pi * pow(DR, 2) * Diskheight; #mm^3
#Constant Cost per volume values
NS = 100; # $ per unit
PPC = 0.00052; # $ per 1 mm^3
TF = 0.00014; # $ per 1 mm^3
Ti = 0.00064; # $ per 1 mm^3
#Material Cost = Volume of Material * Cost per Unit Volume
ORcost = ORV * TF + NS;
IRcost = IRV * PPC;
Dcost = (DV*(.9)*PPC) + (DV*(.1)*TF) + TiV*Ti;
TotalCost = ORcost + IRcost + Dcost;
#Outputting result to user
print("The total cost of your heart valve is $",format(TotalCost,'.2f'));
## Hemodynamic Calculations
SV = input("Enter in the Stroke Volume of the patient: ");
SV = int(SV);
HR = input("Enter in the Heart Rate of the patient: ");
HR = int(HR);
CO = SV * HR;
print("The Cardiac Output of the patient is ",CO);
SBP = input("Enter in the Systomic Blood Pressure of the patient: ");
SBP = int(SBP);
DBP = input("Enter in the Diastolic Blood Pressure of the patient: ");
DBP = int(DBP);
MAP = (((SBP) + (2 *(DBP)))/ 3);
print("The Mean Arterial Pressure of the patient is ",format(MAP, '.3f'));
CVP = input("Enter in the Central Venous Pressure of the patient: ");
CVP = int(CVP);
SVR = ((MAP - CVP)/(CO)) * 80;
print("The Systemic Vascular Resistance of the patient is ",format(SVR,'.3f'));
| Hemodynamic_Parameter_Analysis.py | 2,707 | This code was created by Tyler Adam Martinez for the BMEN3310 Final
#These are the variables and what they stand for.
Hemodynamic Parameter Analysis
CS // Cross-Sectional Area of the heart valve
vR // Radius of Valve
DR // Disk Radius
TiA // Area of the Titanium wire
TiV // Volume of the Titanium wire
IRV // Inner Ring Volume
ORV // Outer Ring Volume
DR // Disk Volume
NS // Cost of Nitinol Stent per signal unit
PPC // Pure Pyrolytic Carbon per unit volume
Tf // Teflon Fabric per unit volume
Ti // Titanium Wire per unit volume
Hemodynamic Calculations
SVR // Systemic Vascular Resistance or Afterload on the heart
MAP // Mean Arterial Pressure
CVP // Central Venous Pressure
CO // Cardiac Output
SV // Stroke Volume
HR // Heart Rate
SBP // Systomic Blood Pressure
DBP // Diastolic Blood Pressure
Hemodynamic Parameter AnalysisConvert CS to v radius mmmmcalculating for the two outer disksmmcalculating the volumes of each material .32mm is radius of Titanium wire, and .1024 is r^2mm^3mm^3mm^3mm^3Constant Cost per volume values $ per unit $ per 1 mm^3 $ per 1 mm^3 $ per 1 mm^3Material Cost = Volume of Material * Cost per Unit Volume Outputting result to user Hemodynamic Calculations | 1,205 | en | 0.374302 |
# -*- coding: utf-8 -*-
from ..Qt import QtGui, QtCore
from .GraphicsView import GraphicsView
from ..graphicsItems.GradientEditorItem import GradientEditorItem
import weakref
import numpy as np
__all__ = ['GradientWidget']
class GradientWidget(GraphicsView):
"""
Widget displaying an editable color gradient. The user may add, move, recolor,
or remove colors from the gradient. Additionally, a context menu allows the
user to select from pre-defined gradients.
"""
sigGradientChanged = QtCore.Signal(object)
sigGradientChangeFinished = QtCore.Signal(object)
def __init__(self, parent=None, orientation='bottom', *args, **kargs):
"""
The *orientation* argument may be 'bottom', 'top', 'left', or 'right'
indicating whether the gradient is displayed horizontally (top, bottom)
or vertically (left, right) and on what side of the gradient the editable
ticks will appear.
All other arguments are passed to
:func:`GradientEditorItem.__init__ <pyqtgraph.GradientEditorItem.__init__>`.
Note: For convenience, this class wraps methods from
:class:`GradientEditorItem <pyqtgraph.GradientEditorItem>`.
"""
GraphicsView.__init__(self, parent, useOpenGL=False, background=None)
self.maxDim = 31
kargs['tickPen'] = 'k'
self.item = GradientEditorItem(*args, **kargs)
self.item.sigGradientChanged.connect(self.sigGradientChanged)
self.item.sigGradientChangeFinished.connect(self.sigGradientChangeFinished)
self.setCentralItem(self.item)
self.setOrientation(orientation)
self.setCacheMode(self.CacheNone)
self.setRenderHints(QtGui.QPainter.Antialiasing | QtGui.QPainter.TextAntialiasing)
self.setFrameStyle(QtGui.QFrame.NoFrame | QtGui.QFrame.Plain)
#self.setBackgroundRole(QtGui.QPalette.NoRole)
#self.setBackgroundBrush(QtGui.QBrush(QtCore.Qt.NoBrush))
#self.setAutoFillBackground(False)
#self.setAttribute(QtCore.Qt.WA_PaintOnScreen, False)
#self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent, True)
def setOrientation(self, ort):
"""Set the orientation of the widget. May be one of 'bottom', 'top',
'left', or 'right'."""
self.item.setOrientation(ort)
self.orientation = ort
self.setMaxDim()
def setMaxDim(self, mx=None):
if mx is None:
mx = self.maxDim
else:
self.maxDim = mx
if self.orientation in ['bottom', 'top']:
self.setFixedHeight(mx)
self.setMaximumWidth(16777215)
else:
self.setFixedWidth(mx)
self.setMaximumHeight(16777215)
    def __getattr__(self, attr):
        ### wrap methods from GradientEditorItem
        # Only invoked when normal attribute lookup fails, so GraphicsView's
        # own attributes still take precedence over the wrapped item's.
        return getattr(self.item, attr)
| scripts/pyqtgraph-develop/pyqtgraph/widgets/GradientWidget.py | 2,975 | Widget displaying an editable color gradient. The user may add, move, recolor,
or remove colors from the gradient. Additionally, a context menu allows the
user to select from pre-defined gradients.
The *orientation* argument may be 'bottom', 'top', 'left', or 'right'
indicating whether the gradient is displayed horizontally (top, bottom)
or vertically (left, right) and on what side of the gradient the editable
ticks will appear.
All other arguments are passed to
:func:`GradientEditorItem.__init__ <pyqtgraph.GradientEditorItem.__init__>`.
Note: For convenience, this class wraps methods from
:class:`GradientEditorItem <pyqtgraph.GradientEditorItem>`.
Set the orientation of the widget. May be one of 'bottom', 'top',
'left', or 'right'.
-*- coding: utf-8 -*-self.setBackgroundRole(QtGui.QPalette.NoRole)self.setBackgroundBrush(QtGui.QBrush(QtCore.Qt.NoBrush))self.setAutoFillBackground(False)self.setAttribute(QtCore.Qt.WA_PaintOnScreen, False)self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent, True) wrap methods from GradientEditorItem | 1,051 | en | 0.450397 |
from typing_extensions import Final # noqa: F401
# Azure storage container names (presumably blob containers -- confirm
# against the storage client code that uses them).
CONTAINER_CLIENT_PACKAGES = 'compressedpackages'  # type: Final
CONTAINER_EMAILS = 'emails'  # type: Final
CONTAINER_MAILBOX = 'mailbox'  # type: Final
CONTAINER_SENDGRID_MIME = 'sendgridinboundemails'  # type: Final
# Azure table-storage table names.
TABLE_DOMAIN_X_DELIVERED = 'emaildomainxdelivered'  # type: Final
TABLE_AUTH = 'clientsauth'  # type: Final
# Azure queue names.
# NOTE(review): the 'sengrid...' spellings below look like typos for
# 'sendgrid', but these are live resource names -- renaming them would break
# existing deployments, so they must stay as-is.
QUEUE_CLIENT_PACKAGE = 'lokoleinboundemails'  # type: Final
QUEUE_EMAIL_SEND = 'sengridoutboundemails'  # type: Final
QUEUE_SENDGRID_MIME = 'sengridinboundemails'  # type: Final
| opwen_email_server/constants/azure.py | 554 | noqa: F401 type: Final type: Final type: Final type: Final type: Final type: Final type: Final type: Final type: Final | 118 | en | 0.473341 |
#
#
# Copyright 2020-21 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ..interface.graph import Graph as AbstractGraph
from typing import TypeVar, Callable, Awaitable, Set
import neo4j
R = TypeVar('R')
class Graph (AbstractGraph[neo4j.graph.Graph]):
    """A conceptual wrapper for a neo4j query which will return a neo4j.graph.Graph object.
    To execute the query and return the underlying object await this object. But the
    returned neo4j.graph.Graph is unlikely to be very useful outside of the context managers
    in which it was created.
    A better way to use this object is to use the 'nodes' coroutine property.
    """
    def __init__(
        self,
        execute: Callable[[Callable[[neo4j.Transaction], neo4j.graph.Graph]], Awaitable[neo4j.graph.Graph]],
        func: Callable[[neo4j.Transaction], neo4j.graph.Graph]
    ):
        """Store the transaction function and the executor strategy.

        :param execute: callable which runs a transaction function and
            returns an awaitable of its result
        :param func: transaction function performing the actual query
        """
        self._func = func
        self._execute = execute
    def __await__(self):
        # Awaiting the wrapper is what actually runs the deferred query.
        return self._execute(self._func).__await__()
    @property
    async def nodes(self) -> Set[neo4j.graph.Node]:
        """This property is a Coroutine, which is weird, but better matches the neo4j interface.
        When awaited this property will execute the query and return you a Set[neo4j.graph.Node]
        containing all of the nodes which the query matched.
        """
        # Materialize the node view into a set while the transaction is live.
        return await self._execute(lambda tx: set(self._func(tx).nodes))
    @property
    async def relationships(self) -> Set[neo4j.graph.Relationship]:
        """This property is a Coroutine, which is weird, but better matches the neo4j interface.
        When awaited this property will execute the query and return you a Set[neo4j.graph.Relationship]
        containing all of the relationships which the query matched.
        """
        # Materialize the relationship view into a set while the transaction is live.
        return await self._execute(lambda tx: set(self._func(tx).relationships))
| aiocypher/aioneo4j/graph.py | 2,387 | A conceptual wrapper for a neo4j query which will return a neo4j.graph.Graph object.
To execute the query and return the underlying object await this object. But the
returned neo4j.graph.Graph is unlikely to be very useful outside of the context managers
in which it was created.
A better way to use this object is to use the 'nodes' coroutine property.
Copyright 2020-21 British Broadcasting Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 930 | en | 0.905433 |
# -*- coding: utf-8 -*-
# Package version string (PEP 440); bump on release.
__version__ = "0.1.0"
| resources_crawler/__init__.py | 48 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import io
import json
import os
import unittest
import mock
import six
from six.moves import http_client
def _make_credentials():
    """Return a mock that passes isinstance checks for google-auth Credentials."""
    import google.auth.credentials
    return mock.Mock(spec=google.auth.credentials.Credentials)
class Test_Blob(unittest.TestCase):
    @staticmethod
    def _make_one(*args, **kw):
        """Build a Blob under test, seeding ``_properties`` from the
        ``properties`` keyword (popped before it reaches the constructor)."""
        from google.cloud.storage.blob import Blob
        properties = kw.pop('properties', None)
        blob = Blob(*args, **kw)
        blob._properties = properties or {}
        return blob
    def test_ctor_wo_encryption_key(self):
        """Constructor wires bucket/name/properties/ACL; key defaults to None."""
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        properties = {'key': 'value'}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertIs(blob.bucket, bucket)
        self.assertEqual(blob.name, BLOB_NAME)
        self.assertEqual(blob._properties, properties)
        self.assertFalse(blob._acl.loaded)
        self.assertIs(blob._acl.blob, blob)
        self.assertEqual(blob._encryption_key, None)
    def test_ctor_with_encoded_unicode(self):
        """A UTF-8 encoded bytes name is decoded to text by the constructor."""
        blob_name = b'wet \xe2\x9b\xb5'
        blob = self._make_one(blob_name, bucket=None)
        unicode_name = u'wet \N{sailboat}'
        self.assertNotIsInstance(blob.name, bytes)
        self.assertIsInstance(blob.name, six.text_type)
        self.assertEqual(blob.name, unicode_name)
    def test_ctor_w_encryption_key(self):
        """An explicit customer-supplied encryption key is stored verbatim."""
        KEY = b'01234567890123456789012345678901'  # 32 bytes
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=KEY)
        self.assertEqual(blob._encryption_key, KEY)
    def test_chunk_size_ctor(self):
        """A chunk size given at construction time is stored on the blob."""
        from google.cloud.storage.blob import Blob
        BLOB_NAME = 'blob-name'
        BUCKET = object()
        chunk_size = 10 * Blob._CHUNK_SIZE_MULTIPLE
        blob = self._make_one(BLOB_NAME, bucket=BUCKET, chunk_size=chunk_size)
        self.assertEqual(blob._chunk_size, chunk_size)
    def test_chunk_size_getter(self):
        """``chunk_size`` defaults to None and reflects ``_chunk_size``."""
        BLOB_NAME = 'blob-name'
        BUCKET = object()
        blob = self._make_one(BLOB_NAME, bucket=BUCKET)
        self.assertIsNone(blob.chunk_size)
        VALUE = object()
        blob._chunk_size = VALUE
        self.assertIs(blob.chunk_size, VALUE)
    def test_chunk_size_setter(self):
        """The setter accepts multiples of ``_CHUNK_SIZE_MULTIPLE``."""
        BLOB_NAME = 'blob-name'
        BUCKET = object()
        blob = self._make_one(BLOB_NAME, bucket=BUCKET)
        self.assertIsNone(blob._chunk_size)
        blob._CHUNK_SIZE_MULTIPLE = 10
        blob.chunk_size = 20
        self.assertEqual(blob._chunk_size, 20)
    def test_chunk_size_setter_bad_value(self):
        """A chunk size that is not a multiple raises ValueError."""
        BLOB_NAME = 'blob-name'
        BUCKET = object()
        blob = self._make_one(BLOB_NAME, bucket=BUCKET)
        self.assertIsNone(blob._chunk_size)
        blob._CHUNK_SIZE_MULTIPLE = 10
        with self.assertRaises(ValueError):
            blob.chunk_size = 11
    def test_acl_property(self):
        """``acl`` exposes the blob's lazily-built ObjectACL instance."""
        from google.cloud.storage.acl import ObjectACL
        fake_bucket = _Bucket()
        blob = self._make_one(u'name', bucket=fake_bucket)
        acl = blob.acl
        self.assertIsInstance(acl, ObjectACL)
        self.assertIs(acl, blob._acl)
    def test_path_bad_bucket(self):
        """``path`` propagates the AttributeError from a pathless bucket."""
        fake_bucket = object()
        name = u'blob-name'
        blob = self._make_one(name, bucket=fake_bucket)
        self.assertRaises(AttributeError, getattr, blob, 'path')
    def test_path_no_name(self):
        """``path`` rejects an empty object name with ValueError."""
        bucket = _Bucket()
        blob = self._make_one(u'', bucket=bucket)
        self.assertRaises(ValueError, getattr, blob, 'path')
    def test_path_normal(self):
        """``path`` is ``/b/<bucket>/o/<name>`` for a plain name."""
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertEqual(blob.path, '/b/name/o/%s' % BLOB_NAME)
    def test_path_w_slash_in_name(self):
        """Slashes in the object name are percent-encoded in ``path``."""
        BLOB_NAME = 'parent/child'
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertEqual(blob.path, '/b/name/o/parent%2Fchild')
    def test_path_with_non_ascii(self):
        """Non-ASCII names are UTF-8 + percent-encoded in ``path``."""
        blob_name = u'Caf\xe9'
        bucket = _Bucket()
        blob = self._make_one(blob_name, bucket=bucket)
        self.assertEqual(blob.path, '/b/name/o/Caf%C3%A9')
    def test_public_url(self):
        """``public_url`` is the anonymous storage.googleapis.com URL."""
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertEqual(blob.public_url,
                         'https://storage.googleapis.com/name/%s' %
                         BLOB_NAME)
    def test_public_url_w_slash_in_name(self):
        """Slashes in the object name are percent-encoded in the public URL."""
        BLOB_NAME = 'parent/child'
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertEqual(
            blob.public_url,
            'https://storage.googleapis.com/name/parent%2Fchild')
    def test_public_url_with_non_ascii(self):
        """Non-ASCII names are UTF-8 + percent-encoded in the public URL."""
        blob_name = u'winter \N{snowman}'
        bucket = _Bucket()
        blob = self._make_one(blob_name, bucket=bucket)
        expected_url = 'https://storage.googleapis.com/name/winter%20%E2%98%83'
        self.assertEqual(blob.public_url, expected_url)
    def _basic_generate_signed_url_helper(self, credentials=None):
        """Patch the signer, call ``generate_signed_url``, and verify that the
        signer received the expected args (connection credentials are used
        when no explicit ``credentials`` are passed)."""
        BLOB_NAME = 'blob-name'
        EXPIRATION = '2014-10-16T20:34:37.000Z'
        connection = _Connection()
        client = _Client(connection)
        bucket = _Bucket(client)
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
               '&Expiration=2014-10-16T20:34:37.000Z')
        SIGNER = _Signer()
        with mock.patch('google.cloud.storage.blob.generate_signed_url',
                        new=SIGNER):
            signed_uri = blob.generate_signed_url(EXPIRATION,
                                                  credentials=credentials)
        self.assertEqual(signed_uri, URI)
        PATH = '/name/%s' % (BLOB_NAME,)
        if credentials is None:
            EXPECTED_ARGS = (_Connection.credentials,)
        else:
            EXPECTED_ARGS = (credentials,)
        EXPECTED_KWARGS = {
            'api_access_endpoint': 'https://storage.googleapis.com',
            'expiration': EXPIRATION,
            'method': 'GET',
            'resource': PATH,
            'content_type': None,
            'response_type': None,
            'response_disposition': None,
            'generation': None,
        }
        self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)])
    def test_generate_signed_url_w_default_method(self):
        """Default HTTP method for a signed URL is GET."""
        self._basic_generate_signed_url_helper()
    def test_generate_signed_url_w_content_type(self):
        """An explicit content type is forwarded to the signer."""
        BLOB_NAME = 'blob-name'
        EXPIRATION = '2014-10-16T20:34:37.000Z'
        connection = _Connection()
        client = _Client(connection)
        bucket = _Bucket(client)
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
               '&Expiration=2014-10-16T20:34:37.000Z')
        SIGNER = _Signer()
        CONTENT_TYPE = "text/html"
        with mock.patch('google.cloud.storage.blob.generate_signed_url',
                        new=SIGNER):
            signed_url = blob.generate_signed_url(EXPIRATION,
                                                  content_type=CONTENT_TYPE)
        self.assertEqual(signed_url, URI)
        PATH = '/name/%s' % (BLOB_NAME,)
        EXPECTED_ARGS = (_Connection.credentials,)
        EXPECTED_KWARGS = {
            'api_access_endpoint': 'https://storage.googleapis.com',
            'expiration': EXPIRATION,
            'method': 'GET',
            'resource': PATH,
            'content_type': CONTENT_TYPE,
            'response_type': None,
            'response_disposition': None,
            'generation': None,
        }
        self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)])
    def test_generate_signed_url_w_credentials(self):
        """Explicit credentials override the connection's credentials."""
        credentials = object()
        self._basic_generate_signed_url_helper(credentials=credentials)
    def test_generate_signed_url_w_slash_in_name(self):
        """Slashes in the name are percent-encoded in the signed resource."""
        BLOB_NAME = 'parent/child'
        EXPIRATION = '2014-10-16T20:34:37.000Z'
        connection = _Connection()
        client = _Client(connection)
        bucket = _Bucket(client)
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
               '&Expiration=2014-10-16T20:34:37.000Z')
        SIGNER = _Signer()
        with mock.patch('google.cloud.storage.blob.generate_signed_url',
                        new=SIGNER):
            signed_url = blob.generate_signed_url(EXPIRATION)
        self.assertEqual(signed_url, URI)
        EXPECTED_ARGS = (_Connection.credentials,)
        EXPECTED_KWARGS = {
            'api_access_endpoint': 'https://storage.googleapis.com',
            'expiration': EXPIRATION,
            'method': 'GET',
            'resource': '/name/parent%2Fchild',
            'content_type': None,
            'response_type': None,
            'response_disposition': None,
            'generation': None,
        }
        self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)])
    def test_generate_signed_url_w_method_arg(self):
        """An explicit HTTP method is forwarded to the signer."""
        BLOB_NAME = 'blob-name'
        EXPIRATION = '2014-10-16T20:34:37.000Z'
        connection = _Connection()
        client = _Client(connection)
        bucket = _Bucket(client)
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
               '&Expiration=2014-10-16T20:34:37.000Z')
        SIGNER = _Signer()
        with mock.patch('google.cloud.storage.blob.generate_signed_url',
                        new=SIGNER):
            signed_uri = blob.generate_signed_url(EXPIRATION, method='POST')
        self.assertEqual(signed_uri, URI)
        PATH = '/name/%s' % (BLOB_NAME,)
        EXPECTED_ARGS = (_Connection.credentials,)
        EXPECTED_KWARGS = {
            'api_access_endpoint': 'https://storage.googleapis.com',
            'expiration': EXPIRATION,
            'method': 'POST',
            'resource': PATH,
            'content_type': None,
            'response_type': None,
            'response_disposition': None,
            'generation': None,
        }
        self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)])
    def test_exists_miss(self):
        """``exists()`` is False when the API answers 404."""
        NONESUCH = 'nonesuch'
        not_found_response = ({'status': http_client.NOT_FOUND}, b'')
        connection = _Connection(not_found_response)
        client = _Client(connection)
        bucket = _Bucket(client)
        blob = self._make_one(NONESUCH, bucket=bucket)
        self.assertFalse(blob.exists())
    def test_exists_hit(self):
        """``exists()`` is True when the API answers 200."""
        BLOB_NAME = 'blob-name'
        found_response = ({'status': http_client.OK}, b'')
        connection = _Connection(found_response)
        client = _Client(connection)
        bucket = _Bucket(client)
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        bucket._blobs[BLOB_NAME] = 1
        self.assertTrue(blob.exists())
    def test_delete(self):
        """``delete()`` removes the blob via the bucket and records the call."""
        BLOB_NAME = 'blob-name'
        not_found_response = ({'status': http_client.NOT_FOUND}, b'')
        connection = _Connection(not_found_response)
        client = _Client(connection)
        bucket = _Bucket(client)
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        bucket._blobs[BLOB_NAME] = 1
        blob.delete()
        self.assertFalse(blob.exists())
        self.assertEqual(bucket._deleted, [(BLOB_NAME, None)])
    @mock.patch('google.auth.transport.requests.AuthorizedSession')
    def test__make_transport(self, fake_session_factory):
        """``_make_transport`` builds an AuthorizedSession from client creds."""
        client = mock.Mock(spec=[u'_credentials'])
        blob = self._make_one(u'blob-name', bucket=None)
        transport = blob._make_transport(client)
        self.assertIs(transport, fake_session_factory.return_value)
        fake_session_factory.assert_called_once_with(client._credentials)
    def test__get_download_url_with_media_link(self):
        """A known ``mediaLink`` is returned verbatim as the download URL."""
        blob_name = 'something.txt'
        bucket = mock.Mock(spec=[])
        blob = self._make_one(blob_name, bucket=bucket)
        media_link = 'http://test.invalid'
        # Set the media link on the blob
        blob._properties['mediaLink'] = media_link
        download_url = blob._get_download_url()
        self.assertEqual(download_url, media_link)
    def test__get_download_url_on_the_fly(self):
        """Without ``mediaLink`` the URL is synthesized from the bucket path."""
        blob_name = 'bzzz-fly.txt'
        bucket = mock.Mock(path='/b/buhkit', spec=['path'])
        blob = self._make_one(blob_name, bucket=bucket)
        self.assertIsNone(blob.media_link)
        download_url = blob._get_download_url()
        expected_url = (
            'https://www.googleapis.com/download/storage/v1/b/'
            'buhkit/o/bzzz-fly.txt?alt=media')
        self.assertEqual(download_url, expected_url)
    def test__get_download_url_on_the_fly_with_generation(self):
        """A known generation is appended to the synthesized download URL."""
        blob_name = 'pretend.txt'
        bucket = mock.Mock(path='/b/fictional', spec=['path'])
        blob = self._make_one(blob_name, bucket=bucket)
        generation = 1493058489532987
        # Set the generation on the blob (the media link stays unset).
        blob._properties['generation'] = str(generation)
        self.assertIsNone(blob.media_link)
        download_url = blob._get_download_url()
        expected_url = (
            'https://www.googleapis.com/download/storage/v1/b/'
            'fictional/o/pretend.txt?alt=media&generation=1493058489532987')
        self.assertEqual(download_url, expected_url)
@staticmethod
def _mock_requests_response(status_code, headers, content=b''):
import requests
response = requests.Response()
response.status_code = status_code
response.headers.update(headers)
response._content = content
response.request = requests.Request(
'POST', 'http://example.com').prepare()
return response
    def _mock_download_transport(self):
        """Return a transport whose ``request`` yields two 3-byte 206 chunks
        (``b'abc'`` then ``b'def'``, covering bytes 0-5 of 6)."""
        fake_transport = mock.Mock(spec=['request'])
        # Give the transport two fake responses.
        chunk1_response = self._mock_requests_response(
            http_client.PARTIAL_CONTENT,
            {'content-length': '3', 'content-range': 'bytes 0-2/6'},
            content=b'abc')
        chunk2_response = self._mock_requests_response(
            http_client.PARTIAL_CONTENT,
            {'content-length': '3', 'content-range': 'bytes 3-5/6'},
            content=b'def')
        fake_transport.request.side_effect = [chunk1_response, chunk2_response]
        return fake_transport
    def _check_session_mocks(self, client, fake_session_factory,
                             expected_url, headers=None):
        """Assert a single transport was built and made exactly two chunked
        GET calls to *expected_url*."""
        # Check that exactly one transport was created.
        fake_session_factory.assert_called_once_with(client._credentials)
        fake_transport = fake_session_factory.return_value
        # Check that the transport was called exactly twice.
        self.assertEqual(fake_transport.request.call_count, 2)
        if headers is None:
            headers = {}
        # NOTE: bytes=0-2 never shows up because the mock was called with
        #       **MUTABLE** headers and it was mutated before the
        #       second request.
        headers['range'] = 'bytes=3-5'
        call = mock.call(
            'GET', expected_url, data=None, headers=headers)
        self.assertEqual(fake_transport.request.mock_calls, [call, call])
    def test__do_download_simple(self):
        """Without a chunk size, ``_do_download`` issues one full-body GET."""
        blob_name = 'blob-name'
        # Create a fake client/bucket and use them in the Blob() constructor.
        client = mock.Mock(
            _credentials=_make_credentials(), spec=['_credentials'])
        bucket = _Bucket(client)
        blob = self._make_one(blob_name, bucket=bucket)
        # Make sure this will not be chunked.
        self.assertIsNone(blob.chunk_size)
        transport = mock.Mock(spec=['request'])
        transport.request.return_value = self._mock_requests_response(
            http_client.OK,
            {'content-length': '6', 'content-range': 'bytes 0-5/6'},
            content=b'abcdef')
        file_obj = io.BytesIO()
        download_url = 'http://test.invalid'
        headers = {}
        blob._do_download(transport, file_obj, download_url, headers)
        # Make sure the download was as expected.
        self.assertEqual(file_obj.getvalue(), b'abcdef')
        transport.request.assert_called_once_with(
            'GET', download_url, data=None, headers=headers)
    def test__do_download_chunked(self):
        """With a chunk size set, ``_do_download`` issues one GET per chunk."""
        blob_name = 'blob-name'
        # Create a fake client/bucket and use them in the Blob() constructor.
        client = mock.Mock(
            _credentials=_make_credentials(), spec=['_credentials'])
        bucket = _Bucket(client)
        blob = self._make_one(blob_name, bucket=bucket)
        # Modify the blob so there there will be 2 chunks of size 3.
        blob._CHUNK_SIZE_MULTIPLE = 1
        blob.chunk_size = 3
        transport = self._mock_download_transport()
        file_obj = io.BytesIO()
        download_url = 'http://test.invalid'
        headers = {}
        blob._do_download(transport, file_obj, download_url, headers)
        # Make sure the download was as expected.
        self.assertEqual(file_obj.getvalue(), b'abcdef')
        # Check that the transport was called exactly twice.
        self.assertEqual(transport.request.call_count, 2)
        # ``headers`` was modified (in place) once for each API call.
        self.assertEqual(headers, {'range': 'bytes=3-5'})
        call = mock.call(
            'GET', download_url, data=None, headers=headers)
        self.assertEqual(transport.request.mock_calls, [call, call])
    @mock.patch('google.auth.transport.requests.AuthorizedSession')
    def test_download_to_file_with_failure(self, fake_session_factory):
        """A 404 from the server raises NotFound and writes nothing."""
        from google.cloud import exceptions
        blob_name = 'blob-name'
        transport = mock.Mock(spec=['request'])
        bad_response_headers = {
            'Content-Length': '9',
            'Content-Type': 'text/html; charset=UTF-8',
        }
        transport.request.return_value = self._mock_requests_response(
            http_client.NOT_FOUND, bad_response_headers, content=b'Not found')
        fake_session_factory.return_value = transport
        # Create a fake client/bucket and use them in the Blob() constructor.
        client = mock.Mock(
            _credentials=_make_credentials(), spec=['_credentials'])
        bucket = _Bucket(client)
        blob = self._make_one(blob_name, bucket=bucket)
        # Set the media link on the blob
        blob._properties['mediaLink'] = 'http://test.invalid'
        file_obj = io.BytesIO()
        with self.assertRaises(exceptions.NotFound):
            blob.download_to_file(file_obj)
        self.assertEqual(file_obj.tell(), 0)
        # Check that exactly one transport was created.
        fake_session_factory.assert_called_once_with(client._credentials)
        # Check that the transport was called once.
        transport.request.assert_called_once_with(
            'GET', blob.media_link, data=None, headers={})
    @mock.patch('google.auth.transport.requests.AuthorizedSession')
    def test_download_to_file_wo_media_link(self, fake_session_factory):
        """Without ``mediaLink`` the download URL is built on the fly."""
        blob_name = 'blob-name'
        fake_session_factory.return_value = self._mock_download_transport()
        # Create a fake client/bucket and use them in the Blob() constructor.
        client = mock.Mock(
            _credentials=_make_credentials(), spec=['_credentials'])
        bucket = _Bucket(client)
        blob = self._make_one(blob_name, bucket=bucket)
        # Modify the blob so there there will be 2 chunks of size 3.
        blob._CHUNK_SIZE_MULTIPLE = 1
        blob.chunk_size = 3
        file_obj = io.BytesIO()
        blob.download_to_file(file_obj)
        self.assertEqual(file_obj.getvalue(), b'abcdef')
        # Make sure the media link is still unknown.
        self.assertIsNone(blob.media_link)
        expected_url = (
            'https://www.googleapis.com/download/storage/v1/b/'
            'name/o/blob-name?alt=media')
        self._check_session_mocks(client, fake_session_factory, expected_url)
    @mock.patch('google.auth.transport.requests.AuthorizedSession')
    def _download_to_file_helper(self, fake_session_factory, use_chunks=False):
        """Exercise ``download_to_file`` via ``mediaLink``, chunked or not."""
        blob_name = 'blob-name'
        fake_transport = self._mock_download_transport()
        fake_session_factory.return_value = fake_transport
        # Create a fake client/bucket and use them in the Blob() constructor.
        client = mock.Mock(
            _credentials=_make_credentials(), spec=['_credentials'])
        bucket = _Bucket(client)
        media_link = 'http://example.com/media/'
        properties = {'mediaLink': media_link}
        blob = self._make_one(blob_name, bucket=bucket, properties=properties)
        if use_chunks:
            # Modify the blob so there there will be 2 chunks of size 3.
            blob._CHUNK_SIZE_MULTIPLE = 1
            blob.chunk_size = 3
        else:
            # Modify the response.
            single_chunk_response = self._mock_requests_response(
                http_client.OK,
                {'content-length': '6', 'content-range': 'bytes 0-5/6'},
                content=b'abcdef')
            fake_transport.request.side_effect = [single_chunk_response]
        file_obj = io.BytesIO()
        blob.download_to_file(file_obj)
        self.assertEqual(file_obj.getvalue(), b'abcdef')
        if use_chunks:
            self._check_session_mocks(client, fake_session_factory, media_link)
        else:
            # Check that exactly one transport was created.
            fake_session_factory.assert_called_once_with(client._credentials)
            fake_transport.request.assert_called_once_with(
                'GET', media_link, data=None, headers={})
    def test_download_to_file_default(self):
        """Single-request download when no chunk size is set."""
        self._download_to_file_helper()
    def test_download_to_file_with_chunk_size(self):
        """Chunked download when a chunk size is set."""
        self._download_to_file_helper(use_chunks=True)
    def _download_to_filename_helper(self, fake_session_factory, updated=None):
        """Exercise ``download_to_filename``; when *updated* is given, the
        file's mtime is expected to be set to the blob's updated time."""
        import os
        import time
        from google.cloud._testing import _NamedTemporaryFile
        blob_name = 'blob-name'
        fake_session_factory.return_value = self._mock_download_transport()
        # Create a fake client/bucket and use them in the Blob() constructor.
        client = mock.Mock(
            _credentials=_make_credentials(), spec=['_credentials'])
        bucket = _Bucket(client)
        media_link = 'http://example.com/media/'
        properties = {'mediaLink': media_link}
        if updated is not None:
            properties['updated'] = updated
        blob = self._make_one(blob_name, bucket=bucket, properties=properties)
        # Modify the blob so there there will be 2 chunks of size 3.
        blob._CHUNK_SIZE_MULTIPLE = 1
        blob.chunk_size = 3
        with _NamedTemporaryFile() as temp:
            blob.download_to_filename(temp.name)
            with open(temp.name, 'rb') as file_obj:
                wrote = file_obj.read()
                if updated is None:
                    self.assertIsNone(blob.updated)
                else:
                    mtime = os.path.getmtime(temp.name)
                    updated_time = time.mktime(blob.updated.timetuple())
                    self.assertEqual(mtime, updated_time)
        self.assertEqual(wrote, b'abcdef')
        self._check_session_mocks(client, fake_session_factory, media_link)
    @mock.patch('google.auth.transport.requests.AuthorizedSession')
    def test_download_to_filename(self, fake_session_factory):
        """With an ``updated`` property, the local mtime is back-dated."""
        updated = '2014-12-06T13:13:50.690Z'
        self._download_to_filename_helper(
            fake_session_factory, updated=updated)
    @mock.patch('google.auth.transport.requests.AuthorizedSession')
    def test_download_to_filename_wo_updated(self, fake_session_factory):
        """Without an ``updated`` property, no mtime adjustment happens."""
        self._download_to_filename_helper(fake_session_factory)
    @mock.patch('google.auth.transport.requests.AuthorizedSession')
    def test_download_to_filename_w_key(self, fake_session_factory):
        """A customer-supplied key adds the X-Goog-Encryption-* headers."""
        import os
        import time
        from google.cloud._testing import _NamedTemporaryFile
        blob_name = 'blob-name'
        fake_session_factory.return_value = self._mock_download_transport()
        # Create a fake client/bucket and use them in the Blob() constructor.
        client = mock.Mock(
            _credentials=_make_credentials(), spec=['_credentials'])
        bucket = _Bucket(client)
        media_link = 'http://example.com/media/'
        properties = {'mediaLink': media_link,
                      'updated': '2014-12-06T13:13:50.690Z'}
        key = b'aa426195405adee2c8081bb9e7e74b19'
        blob = self._make_one(
            blob_name, bucket=bucket, properties=properties, encryption_key=key)
        # Modify the blob so there there will be 2 chunks of size 3.
        blob._CHUNK_SIZE_MULTIPLE = 1
        blob.chunk_size = 3
        with _NamedTemporaryFile() as temp:
            blob.download_to_filename(temp.name)
            with open(temp.name, 'rb') as file_obj:
                wrote = file_obj.read()
                mtime = os.path.getmtime(temp.name)
                updated_time = time.mktime(blob.updated.timetuple())
        self.assertEqual(wrote, b'abcdef')
        self.assertEqual(mtime, updated_time)
        # base64 of the raw key / of its SHA-256 digest, respectively.
        header_key_value = 'YWE0MjYxOTU0MDVhZGVlMmM4MDgxYmI5ZTdlNzRiMTk='
        header_key_hash_value = 'V3Kwe46nKc3xLv96+iJ707YfZfFvlObta8TQcx2gpm0='
        key_headers = {
            'X-Goog-Encryption-Key-Sha256': header_key_hash_value,
            'X-Goog-Encryption-Algorithm': 'AES256',
            'X-Goog-Encryption-Key': header_key_value,
        }
        self._check_session_mocks(
            client, fake_session_factory, media_link, headers=key_headers)
    @mock.patch('google.auth.transport.requests.AuthorizedSession')
    def test_download_as_string(self, fake_session_factory):
        """``download_as_string`` returns the full payload as bytes."""
        blob_name = 'blob-name'
        fake_session_factory.return_value = self._mock_download_transport()
        # Create a fake client/bucket and use them in the Blob() constructor.
        client = mock.Mock(
            _credentials=_make_credentials(), spec=['_credentials'])
        bucket = _Bucket(client)
        media_link = 'http://example.com/media/'
        properties = {'mediaLink': media_link}
        blob = self._make_one(blob_name, bucket=bucket, properties=properties)
        # Modify the blob so there there will be 2 chunks of size 3.
        blob._CHUNK_SIZE_MULTIPLE = 1
        blob.chunk_size = 3
        fetched = blob.download_as_string()
        self.assertEqual(fetched, b'abcdef')
        self._check_session_mocks(client, fake_session_factory, media_link)
    def test__get_content_type_explicit(self):
        """An explicit content type wins over everything else."""
        blob = self._make_one(u'blob-name', bucket=None)
        content_type = u'text/plain'
        return_value = blob._get_content_type(content_type)
        self.assertEqual(return_value, content_type)
    def test__get_content_type_from_blob(self):
        """Falls back to the blob's stored ``content_type``."""
        blob = self._make_one(u'blob-name', bucket=None)
        blob.content_type = u'video/mp4'
        return_value = blob._get_content_type(None)
        self.assertEqual(return_value, blob.content_type)
    def test__get_content_type_from_filename(self):
        """Falls back to guessing from the filename extension."""
        blob = self._make_one(u'blob-name', bucket=None)
        return_value = blob._get_content_type(None, filename='archive.tar')
        self.assertEqual(return_value, 'application/x-tar')
    def test__get_content_type_default(self):
        """Falls back to ``application/octet-stream`` when nothing is known."""
        blob = self._make_one(u'blob-name', bucket=None)
        return_value = blob._get_content_type(None)
        self.assertEqual(return_value, u'application/octet-stream')
    def test__get_writable_metadata_no_changes(self):
        """With no tracked changes only the name is sent."""
        name = u'blob-name'
        blob = self._make_one(name, bucket=None)
        object_metadata = blob._get_writable_metadata()
        expected = {'name': name}
        self.assertEqual(object_metadata, expected)
    def test__get_writable_metadata_with_changes(self):
        """Changed writable fields are included alongside the name."""
        name = u'blob-name'
        blob = self._make_one(name, bucket=None)
        blob.storage_class = 'NEARLINE'
        blob.cache_control = 'max-age=3600'
        blob.metadata = {'color': 'red'}
        object_metadata = blob._get_writable_metadata()
        expected = {
            'cacheControl': blob.cache_control,
            'metadata': blob.metadata,
            'name': name,
            'storageClass': blob.storage_class,
        }
        self.assertEqual(object_metadata, expected)
    def test__get_writable_metadata_unwritable_field(self):
        """Server-owned fields are excluded even if marked changed."""
        name = u'blob-name'
        properties = {'updated': '2016-10-16T18:18:18.181Z'}
        blob = self._make_one(name, bucket=None, properties=properties)
        # Fake that `updated` is in changes.
        blob._changes.add('updated')
        object_metadata = blob._get_writable_metadata()
        expected = {'name': name}
        self.assertEqual(object_metadata, expected)
    def test__get_upload_arguments(self):
        """Upload arguments carry encryption headers, metadata and type."""
        name = u'blob-name'
        key = b'[pXw@,p@@AfBfrR3x-2b2SCHR,.?YwRO'
        blob = self._make_one(name, bucket=None, encryption_key=key)
        blob.content_disposition = 'inline'
        content_type = u'image/jpeg'
        info = blob._get_upload_arguments(content_type)
        headers, object_metadata, new_content_type = info
        # base64 of the raw key / of its SHA-256 digest, respectively.
        header_key_value = 'W3BYd0AscEBAQWZCZnJSM3gtMmIyU0NIUiwuP1l3Uk8='
        header_key_hash_value = 'G0++dxF4q5rG4o9kE8gvEKn15RH6wLm0wXV1MgAlXOg='
        expected_headers = {
            'X-Goog-Encryption-Algorithm': 'AES256',
            'X-Goog-Encryption-Key': header_key_value,
            'X-Goog-Encryption-Key-Sha256': header_key_hash_value,
        }
        self.assertEqual(headers, expected_headers)
        expected_metadata = {
            'contentDisposition': blob.content_disposition,
            'name': name,
        }
        self.assertEqual(object_metadata, expected_metadata)
        self.assertEqual(new_content_type, content_type)
    def _mock_transport(self, status_code, headers, content=b''):
        """Return a transport whose ``request`` always yields one canned response."""
        fake_transport = mock.Mock(spec=['request'])
        fake_response = self._mock_requests_response(
            status_code, headers, content=content)
        fake_transport.request.return_value = fake_response
        return fake_transport
    def _do_multipart_success(self, mock_get_boundary, size=None,
                              num_retries=None):
        """Run ``_do_multipart_upload`` and verify the single POST carries the
        expected multipart payload (metadata part + data part)."""
        bucket = mock.Mock(path='/b/w00t', spec=[u'path'])
        blob = self._make_one(u'blob-name', bucket=bucket)
        self.assertIsNone(blob.chunk_size)
        # Create mocks to be checked for doing transport.
        fake_transport = self._mock_transport(http_client.OK, {})
        blob._make_transport = mock.Mock(return_value=fake_transport, spec=[])
        # Create some mock arguments.
        client = mock.sentinel.client
        data = b'data here hear hier'
        stream = io.BytesIO(data)
        content_type = u'application/xml'
        response = blob._do_multipart_upload(
            client, stream, content_type, size, num_retries)
        # Check the mocks and the returned value.
        self.assertIs(response, fake_transport.request.return_value)
        if size is None:
            data_read = data
            self.assertEqual(stream.tell(), len(data))
        else:
            data_read = data[:size]
            self.assertEqual(stream.tell(), size)
        blob._make_transport.assert_called_once_with(client)
        mock_get_boundary.assert_called_once_with()
        upload_url = (
            'https://www.googleapis.com/upload/storage/v1' +
            bucket.path +
            '/o?uploadType=multipart')
        payload = (
            b'--==0==\r\n' +
            b'content-type: application/json; charset=UTF-8\r\n\r\n' +
            b'{"name": "blob-name"}\r\n' +
            b'--==0==\r\n' +
            b'content-type: application/xml\r\n\r\n' +
            data_read +
            b'\r\n--==0==--')
        headers = {'content-type': b'multipart/related; boundary="==0=="'}
        fake_transport.request.assert_called_once_with(
            'POST', upload_url, data=payload, headers=headers)
@mock.patch(u'google.resumable_media._upload.get_boundary',
            return_value=b'==0==')
def test__do_multipart_upload_no_size(self, mock_get_boundary):
    """Multipart upload with no explicit size reads the whole stream."""
    self._do_multipart_success(mock_get_boundary)
@mock.patch(u'google.resumable_media._upload.get_boundary',
            return_value=b'==0==')
def test__do_multipart_upload_with_size(self, mock_get_boundary):
    """Multipart upload with an explicit size reads only that many bytes."""
    self._do_multipart_success(mock_get_boundary, size=10)
@mock.patch(u'google.resumable_media._upload.get_boundary',
            return_value=b'==0==')
def test__do_multipart_upload_with_retry(self, mock_get_boundary):
    """Multipart upload accepts a ``num_retries`` argument."""
    self._do_multipart_success(mock_get_boundary, num_retries=8)
def test__do_multipart_upload_bad_size(self):
    """A declared size larger than the stream raises ``ValueError``."""
    blob = self._make_one(u'blob-name', bucket=None)
    payload = b'data here hear hier'
    stream = io.BytesIO(payload)
    too_big = 50
    self.assertGreater(too_big, len(payload))
    with self.assertRaises(ValueError) as exc_info:
        blob._do_multipart_upload(None, stream, None, too_big, None)
    self.assertIn(
        'was specified but the file-like object only had',
        str(exc_info.exception))
    # The stream was still consumed up to its actual end.
    self.assertEqual(stream.tell(), len(payload))
def _initiate_resumable_helper(self, size=None, extra_headers=None,
                               chunk_size=None, num_retries=None):
    """Exercise ``_initiate_resumable_upload`` and verify the request.

    Checks the configuration of the returned ``ResumableUpload``
    (headers, chunk size, total bytes, retry strategy) and the single
    POST made against the resumable-upload URL.
    """
    from google.resumable_media.requests import ResumableUpload
    bucket = mock.Mock(path='/b/whammy', spec=[u'path'])
    blob = self._make_one(u'blob-name', bucket=bucket)
    blob.metadata = {'rook': 'takes knight'}
    blob.chunk_size = 3 * blob._CHUNK_SIZE_MULTIPLE
    self.assertIsNotNone(blob.chunk_size)
    # Need to make sure **same** dict is used because ``json.dumps()``
    # will depend on the hash order.
    object_metadata = blob._get_writable_metadata()
    blob._get_writable_metadata = mock.Mock(
        return_value=object_metadata, spec=[])
    # Create mocks to be checked for doing transport.
    resumable_url = 'http://test.invalid?upload_id=hey-you'
    response_headers = {'location': resumable_url}
    fake_transport = self._mock_transport(
        http_client.OK, response_headers)
    blob._make_transport = mock.Mock(return_value=fake_transport, spec=[])
    # Create some mock arguments and call the method under test.
    client = mock.sentinel.client
    data = b'hello hallo halo hi-low'
    stream = io.BytesIO(data)
    content_type = u'text/plain'
    upload, transport = blob._initiate_resumable_upload(
        client, stream, content_type, size, num_retries,
        extra_headers=extra_headers, chunk_size=chunk_size)
    # Check the returned values.
    self.assertIsInstance(upload, ResumableUpload)
    upload_url = (
        'https://www.googleapis.com/upload/storage/v1' +
        bucket.path +
        '/o?uploadType=resumable')
    self.assertEqual(upload.upload_url, upload_url)
    if extra_headers is None:
        self.assertEqual(upload._headers, {})
    else:
        self.assertEqual(upload._headers, extra_headers)
        # The upload must keep its own copy, not the caller's dict.
        self.assertIsNot(upload._headers, extra_headers)
    self.assertFalse(upload.finished)
    if chunk_size is None:
        self.assertEqual(upload._chunk_size, blob.chunk_size)
    else:
        self.assertNotEqual(blob.chunk_size, chunk_size)
        self.assertEqual(upload._chunk_size, chunk_size)
    self.assertIs(upload._stream, stream)
    if size is None:
        self.assertIsNone(upload._total_bytes)
    else:
        self.assertEqual(upload._total_bytes, size)
    self.assertEqual(upload._content_type, content_type)
    self.assertEqual(upload.resumable_url, resumable_url)
    retry_strategy = upload._retry_strategy
    self.assertEqual(retry_strategy.max_sleep, 64.0)
    if num_retries is None:
        self.assertEqual(retry_strategy.max_cumulative_retry, 600.0)
        self.assertIsNone(retry_strategy.max_retries)
    else:
        self.assertIsNone(retry_strategy.max_cumulative_retry)
        self.assertEqual(retry_strategy.max_retries, num_retries)
    self.assertIs(transport, fake_transport)
    # Make sure we never read from the stream.
    self.assertEqual(stream.tell(), 0)
    # Check the mocks.
    blob._get_writable_metadata.assert_called_once_with()
    blob._make_transport.assert_called_once_with(client)
    payload = json.dumps(object_metadata).encode('utf-8')
    expected_headers = {
        'content-type': 'application/json; charset=UTF-8',
        'x-upload-content-type': content_type,
    }
    if size is not None:
        expected_headers['x-upload-content-length'] = str(size)
    if extra_headers is not None:
        expected_headers.update(extra_headers)
    fake_transport.request.assert_called_once_with(
        'POST', upload_url, data=payload, headers=expected_headers)
def test__initiate_resumable_upload_no_size(self):
    """Without a size the ``x-upload-content-length`` header is omitted."""
    self._initiate_resumable_helper()
def test__initiate_resumable_upload_with_size(self):
    """An explicit size is sent as ``x-upload-content-length``."""
    self._initiate_resumable_helper(size=10000)
def test__initiate_resumable_upload_with_chunk_size(self):
    """An explicit chunk size overrides the blob's own chunk size."""
    one_mb = 1048576
    self._initiate_resumable_helper(chunk_size=one_mb)
def test__initiate_resumable_upload_with_extra_headers(self):
    """Extra headers are merged into the initiation request headers."""
    extra_headers = {'origin': 'http://not-in-kansas-anymore.invalid'}
    self._initiate_resumable_helper(extra_headers=extra_headers)
def test__initiate_resumable_upload_with_retry(self):
    """``num_retries`` switches the retry strategy to a retry-count cap."""
    self._initiate_resumable_helper(num_retries=11)
def _make_resumable_transport(self, headers1, headers2,
                              headers3, total_bytes):
    """Fake transport for a three-request resumable upload.

    Returns the transport plus the list of canned responses it will
    produce, in order: initiation (OK), mid-upload (permanent
    redirect) and final chunk (OK, with a JSON body carrying ``size``).
    """
    from google import resumable_media
    transport = mock.Mock(spec=['request'])
    final_body = '{{"size": "{:d}"}}'.format(total_bytes)
    responses = [
        self._mock_requests_response(http_client.OK, headers1),
        self._mock_requests_response(
            resumable_media.PERMANENT_REDIRECT, headers2),
        self._mock_requests_response(
            http_client.OK, headers3,
            content=final_body.encode('utf-8')),
    ]
    transport.request.side_effect = responses
    return transport, responses
@staticmethod
def _do_resumable_upload_call0(blob, content_type, size=None):
    # First mock transport.request() call: initiates the upload.
    """Expected ``mock.call`` for the upload-initiation POST."""
    upload_url = (
        'https://www.googleapis.com/upload/storage/v1' +
        blob.bucket.path +
        '/o?uploadType=resumable')
    expected_headers = {
        'content-type': 'application/json; charset=UTF-8',
        'x-upload-content-type': content_type,
    }
    if size is not None:
        expected_headers['x-upload-content-length'] = str(size)
    payload = json.dumps({'name': blob.name}).encode('utf-8')
    return mock.call(
        'POST', upload_url, data=payload, headers=expected_headers)
@staticmethod
def _do_resumable_upload_call1(blob, content_type, data,
                               resumable_url, size=None):
    # Second mock transport.request() call: sends the first chunk.
    """Expected ``mock.call`` for the PUT carrying the first chunk."""
    if size is None:
        # Unknown total: open-ended '/*' content range.
        content_range = 'bytes 0-{:d}/*'.format(blob.chunk_size - 1)
    else:
        content_range = 'bytes 0-{:d}/{:d}'.format(
            blob.chunk_size - 1, size)
    expected_headers = {
        'content-type': content_type,
        'content-range': content_range,
    }
    payload = data[:blob.chunk_size]
    return mock.call(
        'PUT', resumable_url, data=payload, headers=expected_headers)
@staticmethod
def _do_resumable_upload_call2(blob, content_type, data,
                               resumable_url, total_bytes):
    # Third mock transport.request() call: sends the last chunk.
    """Expected ``mock.call`` for the PUT carrying the final chunk."""
    content_range = 'bytes {:d}-{:d}/{:d}'.format(
        blob.chunk_size, total_bytes - 1, total_bytes)
    expected_headers = {
        'content-type': content_type,
        'content-range': content_range,
    }
    payload = data[blob.chunk_size:]
    return mock.call(
        'PUT', resumable_url, data=payload, headers=expected_headers)
def _do_resumable_helper(self, use_size=False, num_retries=None):
    """Drive ``_do_resumable_upload`` through a full two-chunk upload."""
    bucket = mock.Mock(path='/b/yesterday', spec=[u'path'])
    blob = self._make_one(u'blob-name', bucket=bucket)
    blob.chunk_size = blob._CHUNK_SIZE_MULTIPLE
    self.assertIsNotNone(blob.chunk_size)
    # Data to be uploaded (deliberately spills into a second chunk).
    data = b'<html>' + (b'A' * blob.chunk_size) + b'</html>'
    total_bytes = len(data)
    if use_size:
        size = total_bytes
    else:
        size = None
    # Create mocks to be checked for doing transport.
    resumable_url = 'http://test.invalid?upload_id=and-then-there-was-1'
    headers1 = {'location': resumable_url}
    headers2 = {'range': 'bytes=0-{:d}'.format(blob.chunk_size - 1)}
    fake_transport, responses = self._make_resumable_transport(
        headers1, headers2, {}, total_bytes)
    blob._make_transport = mock.Mock(return_value=fake_transport, spec=[])
    # Create some mock arguments and call the method under test.
    client = mock.sentinel.client
    stream = io.BytesIO(data)
    content_type = u'text/html'
    response = blob._do_resumable_upload(
        client, stream, content_type, size, num_retries)
    # Check the returned values.
    self.assertIs(response, responses[2])
    self.assertEqual(stream.tell(), total_bytes)
    # Check the mocks: exactly three requests, in initiation /
    # first-chunk / final-chunk order.
    blob._make_transport.assert_called_once_with(client)
    call0 = self._do_resumable_upload_call0(blob, content_type, size=size)
    call1 = self._do_resumable_upload_call1(
        blob, content_type, data, resumable_url, size=size)
    call2 = self._do_resumable_upload_call2(
        blob, content_type, data, resumable_url, total_bytes)
    self.assertEqual(
        fake_transport.request.mock_calls, [call0, call1, call2])
def test__do_resumable_upload_no_size(self):
    """Resumable upload works without declaring the total size."""
    self._do_resumable_helper()
def test__do_resumable_upload_with_size(self):
    """Resumable upload works with the total size declared up front."""
    self._do_resumable_helper(use_size=True)
def test__do_resumable_upload_with_retry(self):
    """Resumable upload accepts a ``num_retries`` argument."""
    self._do_resumable_helper(num_retries=6)
def _do_upload_helper(self, chunk_size=None, num_retries=None):
    """Check ``_do_upload`` dispatch between multipart and resumable.

    Without a chunk size the multipart helper must be called; with a
    chunk size, the resumable helper must be called instead.
    """
    blob = self._make_one(u'blob-name', bucket=None)
    # Create a fake response.
    response = mock.Mock(spec=[u'json'])
    response.json.return_value = mock.sentinel.json
    # Mock **both** helpers.
    blob._do_multipart_upload = mock.Mock(return_value=response, spec=[])
    blob._do_resumable_upload = mock.Mock(return_value=response, spec=[])
    if chunk_size is None:
        self.assertIsNone(blob.chunk_size)
    else:
        blob.chunk_size = chunk_size
        self.assertIsNotNone(blob.chunk_size)
    client = mock.sentinel.client
    stream = mock.sentinel.stream
    content_type = u'video/mp4'
    size = 12345654321
    # Make the request and check the mocks.
    created_json = blob._do_upload(
        client, stream, content_type, size, num_retries)
    self.assertIs(created_json, mock.sentinel.json)
    response.json.assert_called_once_with()
    if chunk_size is None:
        blob._do_multipart_upload.assert_called_once_with(
            client, stream, content_type, size, num_retries)
        blob._do_resumable_upload.assert_not_called()
    else:
        blob._do_multipart_upload.assert_not_called()
        blob._do_resumable_upload.assert_called_once_with(
            client, stream, content_type, size, num_retries)
def test__do_upload_without_chunk_size(self):
    """No chunk size set: the multipart path is taken."""
    self._do_upload_helper()
def test__do_upload_with_chunk_size(self):
    """Chunk size set: the resumable path is taken."""
    chunk_size = 1024 * 1024 * 1024  # 1GB
    self._do_upload_helper(chunk_size=chunk_size)
def test__do_upload_with_retry(self):
    """``num_retries`` is forwarded to the chosen upload helper."""
    self._do_upload_helper(num_retries=20)
def _upload_from_file_helper(self, side_effect=None, **kwargs):
    """Call ``upload_from_file`` against a mocked ``_do_upload``.

    Returns the stream that was passed in, so callers can check the
    resulting stream position (e.g. after ``rewind=True``).
    """
    from google.cloud._helpers import UTC
    blob = self._make_one('blob-name', bucket=None)
    # Mock low-level upload helper on blob (it is tested elsewhere).
    created_json = {'updated': '2017-01-01T09:09:09.081Z'}
    blob._do_upload = mock.Mock(return_value=created_json, spec=[])
    if side_effect is not None:
        blob._do_upload.side_effect = side_effect
    # Make sure `updated` is empty before the request.
    self.assertIsNone(blob.updated)
    data = b'data is here'
    stream = io.BytesIO(data)
    stream.seek(2)  # Not at zero.
    content_type = u'font/woff'
    client = mock.sentinel.client
    ret_val = blob.upload_from_file(
        stream, size=len(data), content_type=content_type,
        client=client, **kwargs)
    # Check the response and side-effects.
    self.assertIsNone(ret_val)
    new_updated = datetime.datetime(
        2017, 1, 1, 9, 9, 9, 81000, tzinfo=UTC)
    self.assertEqual(blob.updated, new_updated)
    # Check the mock.
    num_retries = kwargs.get('num_retries')
    blob._do_upload.assert_called_once_with(
        client, stream, content_type, len(data), num_retries)
    return stream
def test_upload_from_file_success(self):
    """A plain upload leaves the stream position where it started (2)."""
    stream = self._upload_from_file_helper()
    # Use the unittest-style assertion for consistency with the rest of
    # this test case; a bare ``assert`` is also stripped under
    # ``python -O``.
    self.assertEqual(stream.tell(), 2)
@mock.patch('warnings.warn')
def test_upload_from_file_with_retries(self, mock_warn):
    """Passing ``num_retries`` emits the deprecation warning."""
    from google.cloud.storage import blob as blob_module
    self._upload_from_file_helper(num_retries=20)
    mock_warn.assert_called_once_with(
        blob_module._NUM_RETRIES_MESSAGE, DeprecationWarning)
def test_upload_from_file_with_rewind(self):
    """``rewind=True`` seeks the stream back to position 0 first."""
    stream = self._upload_from_file_helper(rewind=True)
    # Use the unittest-style assertion for consistency with the rest of
    # this test case; a bare ``assert`` is also stripped under
    # ``python -O``.
    self.assertEqual(stream.tell(), 0)
def test_upload_from_file_failure(self):
    """An ``InvalidResponse`` (409) from the upload surfaces as ``Conflict``."""
    import requests
    from google.resumable_media import InvalidResponse
    from google.cloud import exceptions
    message = b'Someone is already in this spot.'
    response = requests.Response()
    response._content = message
    response.status_code = http_client.CONFLICT
    response.request = requests.Request(
        'POST', 'http://example.com').prepare()
    side_effect = InvalidResponse(response)
    with self.assertRaises(exceptions.Conflict) as exc_info:
        self._upload_from_file_helper(side_effect=side_effect)
    # The response body becomes the exception message.
    self.assertIn(message.decode('utf-8'), exc_info.exception.message)
    self.assertEqual(exc_info.exception.errors, [])
def _do_upload_mock_call_helper(self, blob, client, content_type, size):
    """Verify the single ``_do_upload`` call and return its stream arg."""
    self.assertEqual(blob._do_upload.call_count, 1)
    name, args, kwargs = blob._do_upload.mock_calls[0]
    self.assertEqual(name, '')
    self.assertEqual(len(args), 5)
    used_client, stream, used_type, used_size, num_retries = args
    self.assertEqual(used_client, client)
    self.assertEqual(used_type, content_type)
    self.assertEqual(used_size, size)
    self.assertIsNone(num_retries)
    self.assertEqual(kwargs, {})
    return stream
def test_upload_from_filename(self):
    """``upload_from_filename`` opens the file (binary) and delegates."""
    from google.cloud._testing import _NamedTemporaryFile
    blob = self._make_one('blob-name', bucket=None)
    # Mock low-level upload helper on blob (it is tested elsewhere).
    created_json = {'metadata': {'mint': 'ice-cream'}}
    blob._do_upload = mock.Mock(return_value=created_json, spec=[])
    # Make sure `metadata` is empty before the request.
    self.assertIsNone(blob.metadata)
    data = b'soooo much data'
    content_type = u'image/svg+xml'
    client = mock.sentinel.client
    with _NamedTemporaryFile() as temp:
        with open(temp.name, 'wb') as file_obj:
            file_obj.write(data)
        ret_val = blob.upload_from_filename(
            temp.name, content_type=content_type, client=client)
    # Check the response and side-effects.
    self.assertIsNone(ret_val)
    self.assertEqual(blob.metadata, created_json['metadata'])
    # Check the mock.
    stream = self._do_upload_mock_call_helper(
        blob, client, content_type, len(data))
    # The file handle was opened in 'rb' mode and closed afterwards.
    self.assertTrue(stream.closed)
    self.assertEqual(stream.mode, 'rb')
    self.assertEqual(stream.name, temp.name)
def _upload_from_string_helper(self, data, **kwargs):
    """Call ``upload_from_string`` and verify the generated byte stream.

    ``data`` may be bytes or text; text is expected to be UTF-8 encoded
    before upload.
    """
    from google.cloud._helpers import _to_bytes
    blob = self._make_one('blob-name', bucket=None)
    # Mock low-level upload helper on blob (it is tested elsewhere).
    created_json = {'componentCount': '5'}
    blob._do_upload = mock.Mock(return_value=created_json, spec=[])
    # Make sure `metadata` is empty before the request.
    self.assertIsNone(blob.component_count)
    client = mock.sentinel.client
    ret_val = blob.upload_from_string(data, client=client, **kwargs)
    # Check the response and side-effects.
    self.assertIsNone(ret_val)
    self.assertEqual(blob.component_count, 5)
    # Check the mock.
    payload = _to_bytes(data, encoding='utf-8')
    stream = self._do_upload_mock_call_helper(
        blob, client, 'text/plain', len(payload))
    self.assertIsInstance(stream, io.BytesIO)
    self.assertEqual(stream.getvalue(), payload)
def test_upload_from_string_w_bytes(self):
    """Byte payloads are uploaded as-is."""
    data = b'XB]jb\xb8tad\xe0'
    self._upload_from_string_helper(data)
def test_upload_from_string_w_text(self):
    """Text payloads (incl. non-ASCII) are UTF-8 encoded before upload."""
    data = u'\N{snowman} \N{sailboat}'
    self._upload_from_string_helper(data)
def _create_resumable_upload_session_helper(self, origin=None,
                                            side_effect=None):
    """Exercise ``create_resumable_upload_session`` and check the POST."""
    bucket = mock.Mock(path='/b/alex-trebek', spec=[u'path'])
    blob = self._make_one('blob-name', bucket=bucket)
    chunk_size = 99 * blob._CHUNK_SIZE_MULTIPLE
    blob.chunk_size = chunk_size
    # Create mocks to be checked for doing transport.
    resumable_url = 'http://test.invalid?upload_id=clean-up-everybody'
    response_headers = {'location': resumable_url}
    fake_transport = self._mock_transport(
        http_client.OK, response_headers)
    blob._make_transport = mock.Mock(return_value=fake_transport, spec=[])
    if side_effect is not None:
        fake_transport.request.side_effect = side_effect
    # Create some mock arguments and call the method under test.
    content_type = u'text/plain'
    size = 10000
    client = mock.sentinel.client
    new_url = blob.create_resumable_upload_session(
        content_type=content_type, size=size,
        origin=origin, client=client)
    # Check the returned value and (lack of) side-effect.
    self.assertEqual(new_url, resumable_url)
    self.assertEqual(blob.chunk_size, chunk_size)
    # Check the mocks.
    blob._make_transport.assert_called_once_with(client)
    upload_url = (
        'https://www.googleapis.com/upload/storage/v1' +
        bucket.path +
        '/o?uploadType=resumable')
    payload = b'{"name": "blob-name"}'
    expected_headers = {
        'content-type': 'application/json; charset=UTF-8',
        'x-upload-content-length': str(size),
        'x-upload-content-type': content_type,
    }
    if origin is not None:
        # A requested website origin is forwarded as the Origin header.
        expected_headers['Origin'] = origin
    fake_transport.request.assert_called_once_with(
        'POST', upload_url, data=payload, headers=expected_headers)
def test_create_resumable_upload_session(self):
    """Session creation without an origin works."""
    self._create_resumable_upload_session_helper()
def test_create_resumable_upload_session_with_origin(self):
    """An origin argument adds an ``Origin`` header to the request."""
    self._create_resumable_upload_session_helper(
        origin='http://google.com')
def test_create_resumable_upload_session_with_failure(self):
    """An ``InvalidResponse`` (503) surfaces as ``ServiceUnavailable``."""
    from google.resumable_media import InvalidResponse
    from google.cloud import exceptions
    message = b'5-oh-3 woe is me.'
    response = self._mock_requests_response(
        content=message, status_code=http_client.SERVICE_UNAVAILABLE,
        headers={})
    side_effect = InvalidResponse(response)
    with self.assertRaises(exceptions.ServiceUnavailable) as exc_info:
        self._create_resumable_upload_session_helper(
            side_effect=side_effect)
    self.assertIn(message.decode('utf-8'), exc_info.exception.message)
    self.assertEqual(exc_info.exception.errors, [])
def test_get_iam_policy(self):
    """``get_iam_policy`` GETs ``<blob path>/iam`` and parses bindings."""
    from google.cloud.storage.iam import STORAGE_OWNER_ROLE
    from google.cloud.storage.iam import STORAGE_EDITOR_ROLE
    from google.cloud.storage.iam import STORAGE_VIEWER_ROLE
    from google.cloud.iam import Policy
    BLOB_NAME = 'blob-name'
    PATH = '/b/name/o/%s' % (BLOB_NAME,)
    ETAG = 'DEADBEEF'
    VERSION = 17
    OWNER1 = 'user:phred@example.com'
    OWNER2 = 'group:cloud-logs@google.com'
    EDITOR1 = 'domain:google.com'
    EDITOR2 = 'user:phred@example.com'
    VIEWER1 = 'serviceAccount:1234-abcdef@service.example.com'
    VIEWER2 = 'user:phred@example.com'
    RETURNED = {
        'resourceId': PATH,
        'etag': ETAG,
        'version': VERSION,
        'bindings': [
            {'role': STORAGE_OWNER_ROLE, 'members': [OWNER1, OWNER2]},
            {'role': STORAGE_EDITOR_ROLE, 'members': [EDITOR1, EDITOR2]},
            {'role': STORAGE_VIEWER_ROLE, 'members': [VIEWER1, VIEWER2]},
        ],
    }
    after = ({'status': http_client.OK}, RETURNED)
    # Each returned binding becomes a role -> set-of-members mapping.
    EXPECTED = {
        binding['role']: set(binding['members'])
        for binding in RETURNED['bindings']}
    connection = _Connection(after)
    client = _Client(connection)
    bucket = _Bucket(client=client)
    blob = self._make_one(BLOB_NAME, bucket=bucket)
    policy = blob.get_iam_policy()
    self.assertIsInstance(policy, Policy)
    self.assertEqual(policy.etag, RETURNED['etag'])
    self.assertEqual(policy.version, RETURNED['version'])
    self.assertEqual(dict(policy), EXPECTED)
    # Exactly one GET against the blob's /iam sub-resource.
    kw = connection._requested
    self.assertEqual(len(kw), 1)
    self.assertEqual(kw[0]['method'], 'GET')
    self.assertEqual(kw[0]['path'], '%s/iam' % (PATH,))
def test_set_iam_policy(self):
    """``set_iam_policy`` PUTs the bindings and returns the new policy."""
    import operator
    from google.cloud.storage.iam import STORAGE_OWNER_ROLE
    from google.cloud.storage.iam import STORAGE_EDITOR_ROLE
    from google.cloud.storage.iam import STORAGE_VIEWER_ROLE
    from google.cloud.iam import Policy
    BLOB_NAME = 'blob-name'
    PATH = '/b/name/o/%s' % (BLOB_NAME,)
    ETAG = 'DEADBEEF'
    VERSION = 17
    OWNER1 = 'user:phred@example.com'
    OWNER2 = 'group:cloud-logs@google.com'
    EDITOR1 = 'domain:google.com'
    EDITOR2 = 'user:phred@example.com'
    VIEWER1 = 'serviceAccount:1234-abcdef@service.example.com'
    VIEWER2 = 'user:phred@example.com'
    BINDINGS = [
        {'role': STORAGE_OWNER_ROLE, 'members': [OWNER1, OWNER2]},
        {'role': STORAGE_EDITOR_ROLE, 'members': [EDITOR1, EDITOR2]},
        {'role': STORAGE_VIEWER_ROLE, 'members': [VIEWER1, VIEWER2]},
    ]
    RETURNED = {
        'etag': ETAG,
        'version': VERSION,
        'bindings': BINDINGS,
    }
    after = ({'status': http_client.OK}, RETURNED)
    policy = Policy()
    for binding in BINDINGS:
        policy[binding['role']] = binding['members']
    connection = _Connection(after)
    client = _Client(connection)
    bucket = _Bucket(client=client)
    blob = self._make_one(BLOB_NAME, bucket=bucket)
    returned = blob.set_iam_policy(policy)
    self.assertEqual(returned.etag, ETAG)
    self.assertEqual(returned.version, VERSION)
    self.assertEqual(dict(returned), dict(policy))
    # Exactly one PUT against the blob's /iam sub-resource.
    kw = connection._requested
    self.assertEqual(len(kw), 1)
    self.assertEqual(kw[0]['method'], 'PUT')
    self.assertEqual(kw[0]['path'], '%s/iam' % (PATH,))
    sent = kw[0]['data']
    self.assertEqual(sent['resourceId'], PATH)
    self.assertEqual(len(sent['bindings']), len(BINDINGS))
    # Compare bindings role-by-role; member order is not significant.
    key = operator.itemgetter('role')
    for found, expected in zip(
            sorted(sent['bindings'], key=key),
            sorted(BINDINGS, key=key)):
        self.assertEqual(found['role'], expected['role'])
        self.assertEqual(
            sorted(found['members']), sorted(expected['members']))
def test_test_iam_permissions(self):
    """``test_iam_permissions`` GETs ``/iam/testPermissions`` with a query."""
    from google.cloud.storage.iam import STORAGE_OBJECTS_LIST
    from google.cloud.storage.iam import STORAGE_BUCKETS_GET
    from google.cloud.storage.iam import STORAGE_BUCKETS_UPDATE
    BLOB_NAME = 'blob-name'
    PATH = '/b/name/o/%s' % (BLOB_NAME,)
    PERMISSIONS = [
        STORAGE_OBJECTS_LIST,
        STORAGE_BUCKETS_GET,
        STORAGE_BUCKETS_UPDATE,
    ]
    # The server "grants" only a subset of the requested permissions.
    ALLOWED = PERMISSIONS[1:]
    RETURNED = {'permissions': ALLOWED}
    after = ({'status': http_client.OK}, RETURNED)
    connection = _Connection(after)
    client = _Client(connection)
    bucket = _Bucket(client=client)
    blob = self._make_one(BLOB_NAME, bucket=bucket)
    allowed = blob.test_iam_permissions(PERMISSIONS)
    self.assertEqual(allowed, ALLOWED)
    kw = connection._requested
    self.assertEqual(len(kw), 1)
    self.assertEqual(kw[0]['method'], 'GET')
    self.assertEqual(kw[0]['path'], '%s/iam/testPermissions' % (PATH,))
    self.assertEqual(kw[0]['query_params'], {'permissions': PERMISSIONS})
def test_make_public(self):
    """``make_public`` PATCHes an ``allUsers`` READER ACL entry."""
    from google.cloud.storage.acl import _ACLEntity
    BLOB_NAME = 'blob-name'
    permissive = [{'entity': 'allUsers', 'role': _ACLEntity.READER_ROLE}]
    after = ({'status': http_client.OK}, {'acl': permissive})
    connection = _Connection(after)
    client = _Client(connection)
    bucket = _Bucket(client=client)
    blob = self._make_one(BLOB_NAME, bucket=bucket)
    # Pretend the ACL was already fetched so no extra GET is issued.
    blob.acl.loaded = True
    blob.make_public()
    self.assertEqual(list(blob.acl), permissive)
    kw = connection._requested
    self.assertEqual(len(kw), 1)
    self.assertEqual(kw[0]['method'], 'PATCH')
    self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME)
    self.assertEqual(kw[0]['data'], {'acl': permissive})
    self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_compose_wo_content_type_set(self):
    """``compose`` refuses to run when the destination has no content type."""
    bucket = _Bucket(client=_Client(_Connection()))
    sources = [
        self._make_one('source-1', bucket=bucket),
        self._make_one('source-2', bucket=bucket),
    ]
    destination = self._make_one('destinaton', bucket=bucket)
    # No ``content_type`` was ever assigned on the destination blob.
    with self.assertRaises(ValueError):
        destination.compose(sources=sources)
def test_compose_minimal(self):
    """``compose`` POSTs source names plus the destination content type."""
    SOURCE_1 = 'source-1'
    SOURCE_2 = 'source-2'
    DESTINATION = 'destinaton'
    RESOURCE = {
        'etag': 'DEADBEEF'
    }
    after = ({'status': http_client.OK}, RESOURCE)
    connection = _Connection(after)
    client = _Client(connection)
    bucket = _Bucket(client=client)
    source_1 = self._make_one(SOURCE_1, bucket=bucket)
    source_2 = self._make_one(SOURCE_2, bucket=bucket)
    destination = self._make_one(DESTINATION, bucket=bucket)
    destination.content_type = 'text/plain'
    destination.compose(sources=[source_1, source_2])
    # The response resource is folded back into the destination blob.
    self.assertEqual(destination.etag, 'DEADBEEF')
    SENT = {
        'sourceObjects': [
            {'name': source_1.name},
            {'name': source_2.name},
        ],
        'destination': {
            'contentType': 'text/plain',
        },
    }
    kw = connection._requested
    self.assertEqual(len(kw), 1)
    self.assertEqual(kw[0]['method'], 'POST')
    self.assertEqual(kw[0]['path'], '/b/name/o/%s/compose' % DESTINATION)
    self.assertEqual(kw[0]['data'], SENT)
def test_compose_w_additional_property_changes(self):
    """Pending property changes are included in the compose destination."""
    SOURCE_1 = 'source-1'
    SOURCE_2 = 'source-2'
    DESTINATION = 'destinaton'
    RESOURCE = {
        'etag': 'DEADBEEF'
    }
    after = ({'status': http_client.OK}, RESOURCE)
    connection = _Connection(after)
    client = _Client(connection)
    bucket = _Bucket(client=client)
    source_1 = self._make_one(SOURCE_1, bucket=bucket)
    source_2 = self._make_one(SOURCE_2, bucket=bucket)
    destination = self._make_one(DESTINATION, bucket=bucket)
    destination.content_type = 'text/plain'
    destination.content_language = 'en-US'
    destination.metadata = {'my-key': 'my-value'}
    destination.compose(sources=[source_1, source_2])
    self.assertEqual(destination.etag, 'DEADBEEF')
    SENT = {
        'sourceObjects': [
            {'name': source_1.name},
            {'name': source_2.name},
        ],
        'destination': {
            'contentType': 'text/plain',
            'contentLanguage': 'en-US',
            'metadata': {
                'my-key': 'my-value',
            }
        },
    }
    kw = connection._requested
    self.assertEqual(len(kw), 1)
    self.assertEqual(kw[0]['method'], 'POST')
    self.assertEqual(kw[0]['path'], '/b/name/o/%s/compose' % DESTINATION)
    self.assertEqual(kw[0]['data'], SENT)
def test_rewrite_response_without_resource(self):
    """A partial rewrite response (no 'resource') still yields the
    token, bytes-rewritten and total-size triple."""
    SOURCE_BLOB = 'source'
    DEST_BLOB = 'dest'
    DEST_BUCKET = 'other-bucket'
    TOKEN = 'TOKEN'
    RESPONSE = {
        'totalBytesRewritten': 33,
        'objectSize': 42,
        'done': False,
        'rewriteToken': TOKEN,
    }
    response = ({'status': http_client.OK}, RESPONSE)
    connection = _Connection(response)
    client = _Client(connection)
    source_bucket = _Bucket(client=client)
    source_blob = self._make_one(SOURCE_BLOB, bucket=source_bucket)
    dest_bucket = _Bucket(client=client, name=DEST_BUCKET)
    dest_blob = self._make_one(DEST_BLOB, bucket=dest_bucket)
    token, rewritten, size = dest_blob.rewrite(source_blob)
    self.assertEqual(token, TOKEN)
    self.assertEqual(rewritten, 33)
    self.assertEqual(size, 42)
def test_rewrite_other_bucket_other_name_no_encryption_partial(self):
    """Cross-bucket rewrite without keys sends no encryption headers."""
    SOURCE_BLOB = 'source'
    DEST_BLOB = 'dest'
    DEST_BUCKET = 'other-bucket'
    TOKEN = 'TOKEN'
    RESPONSE = {
        'totalBytesRewritten': 33,
        'objectSize': 42,
        'done': False,
        'rewriteToken': TOKEN,
        'resource': {'etag': 'DEADBEEF'},
    }
    response = ({'status': http_client.OK}, RESPONSE)
    connection = _Connection(response)
    client = _Client(connection)
    source_bucket = _Bucket(client=client)
    source_blob = self._make_one(SOURCE_BLOB, bucket=source_bucket)
    dest_bucket = _Bucket(client=client, name=DEST_BUCKET)
    dest_blob = self._make_one(DEST_BLOB, bucket=dest_bucket)
    token, rewritten, size = dest_blob.rewrite(source_blob)
    self.assertEqual(token, TOKEN)
    self.assertEqual(rewritten, 33)
    self.assertEqual(size, 42)
    kw = connection._requested
    self.assertEqual(len(kw), 1)
    self.assertEqual(kw[0]['method'], 'POST')
    PATH = '/b/name/o/%s/rewriteTo/b/%s/o/%s' % (
        SOURCE_BLOB, DEST_BUCKET, DEST_BLOB)
    self.assertEqual(kw[0]['path'], PATH)
    self.assertEqual(kw[0]['query_params'], {})
    SENT = {}
    self.assertEqual(kw[0]['data'], SENT)
    # Normalize header names for case-insensitive comparison.
    headers = {
        key.title(): str(value) for key, value in kw[0]['headers'].items()}
    self.assertNotIn('X-Goog-Copy-Source-Encryption-Algorithm', headers)
    self.assertNotIn('X-Goog-Copy-Source-Encryption-Key', headers)
    self.assertNotIn('X-Goog-Copy-Source-Encryption-Key-Sha256', headers)
    self.assertNotIn('X-Goog-Encryption-Algorithm', headers)
    self.assertNotIn('X-Goog-Encryption-Key', headers)
    self.assertNotIn('X-Goog-Encryption-Key-Sha256', headers)
def test_rewrite_same_name_no_old_key_new_key_done(self):
    """Rewrite onto an encrypted blob sends only destination key headers."""
    import base64
    import hashlib
    KEY = b'01234567890123456789012345678901'  # 32 bytes
    KEY_B64 = base64.b64encode(KEY).rstrip().decode('ascii')
    KEY_HASH = hashlib.sha256(KEY).digest()
    KEY_HASH_B64 = base64.b64encode(KEY_HASH).rstrip().decode('ascii')
    BLOB_NAME = 'blob'
    RESPONSE = {
        'totalBytesRewritten': 42,
        'objectSize': 42,
        'done': True,
        'resource': {'etag': 'DEADBEEF'},
    }
    response = ({'status': http_client.OK}, RESPONSE)
    connection = _Connection(response)
    client = _Client(connection)
    bucket = _Bucket(client=client)
    plain = self._make_one(BLOB_NAME, bucket=bucket)
    encrypted = self._make_one(BLOB_NAME, bucket=bucket,
                               encryption_key=KEY)
    token, rewritten, size = encrypted.rewrite(plain)
    # 'done' is True, so no continuation token is returned.
    self.assertIsNone(token)
    self.assertEqual(rewritten, 42)
    self.assertEqual(size, 42)
    kw = connection._requested
    self.assertEqual(len(kw), 1)
    self.assertEqual(kw[0]['method'], 'POST')
    PATH = '/b/name/o/%s/rewriteTo/b/name/o/%s' % (BLOB_NAME, BLOB_NAME)
    self.assertEqual(kw[0]['path'], PATH)
    self.assertEqual(kw[0]['query_params'], {})
    SENT = {}
    self.assertEqual(kw[0]['data'], SENT)
    headers = {
        key.title(): str(value) for key, value in kw[0]['headers'].items()}
    # Source blob has no key: no copy-source encryption headers.
    self.assertNotIn('X-Goog-Copy-Source-Encryption-Algorithm', headers)
    self.assertNotIn('X-Goog-Copy-Source-Encryption-Key', headers)
    self.assertNotIn('X-Goog-Copy-Source-Encryption-Key-Sha256', headers)
    self.assertEqual(headers['X-Goog-Encryption-Algorithm'], 'AES256')
    self.assertEqual(headers['X-Goog-Encryption-Key'], KEY_B64)
    self.assertEqual(headers['X-Goog-Encryption-Key-Sha256'], KEY_HASH_B64)
def test_rewrite_same_name_no_key_new_key_w_token(self):
    """Continuing a rewrite with a token sends both source and
    destination key headers.

    NOTE(review): despite the 'no_key' in the method name, a source
    encryption key *is* supplied here — the name looks stale; confirm.
    """
    import base64
    import hashlib
    SOURCE_KEY = b'01234567890123456789012345678901'  # 32 bytes
    SOURCE_KEY_B64 = base64.b64encode(SOURCE_KEY).rstrip().decode('ascii')
    SOURCE_KEY_HASH = hashlib.sha256(SOURCE_KEY).digest()
    SOURCE_KEY_HASH_B64 = base64.b64encode(
        SOURCE_KEY_HASH).rstrip().decode('ascii')
    DEST_KEY = b'90123456789012345678901234567890'  # 32 bytes
    DEST_KEY_B64 = base64.b64encode(DEST_KEY).rstrip().decode('ascii')
    DEST_KEY_HASH = hashlib.sha256(DEST_KEY).digest()
    DEST_KEY_HASH_B64 = base64.b64encode(
        DEST_KEY_HASH).rstrip().decode('ascii')
    BLOB_NAME = 'blob'
    TOKEN = 'TOKEN'
    RESPONSE = {
        'totalBytesRewritten': 42,
        'objectSize': 42,
        'done': True,
        'resource': {'etag': 'DEADBEEF'},
    }
    response = ({'status': http_client.OK}, RESPONSE)
    connection = _Connection(response)
    client = _Client(connection)
    bucket = _Bucket(client=client)
    source = self._make_one(
        BLOB_NAME, bucket=bucket, encryption_key=SOURCE_KEY)
    dest = self._make_one(BLOB_NAME, bucket=bucket,
                          encryption_key=DEST_KEY)
    token, rewritten, size = dest.rewrite(source, token=TOKEN)
    self.assertIsNone(token)
    self.assertEqual(rewritten, 42)
    self.assertEqual(size, 42)
    kw = connection._requested
    self.assertEqual(len(kw), 1)
    self.assertEqual(kw[0]['method'], 'POST')
    PATH = '/b/name/o/%s/rewriteTo/b/name/o/%s' % (BLOB_NAME, BLOB_NAME)
    self.assertEqual(kw[0]['path'], PATH)
    # The continuation token is forwarded as a query parameter.
    self.assertEqual(kw[0]['query_params'], {'rewriteToken': TOKEN})
    SENT = {}
    self.assertEqual(kw[0]['data'], SENT)
    headers = {
        key.title(): str(value) for key, value in kw[0]['headers'].items()}
    self.assertEqual(
        headers['X-Goog-Copy-Source-Encryption-Algorithm'], 'AES256')
    self.assertEqual(
        headers['X-Goog-Copy-Source-Encryption-Key'], SOURCE_KEY_B64)
    self.assertEqual(
        headers['X-Goog-Copy-Source-Encryption-Key-Sha256'],
        SOURCE_KEY_HASH_B64)
    self.assertEqual(
        headers['X-Goog-Encryption-Algorithm'], 'AES256')
    self.assertEqual(
        headers['X-Goog-Encryption-Key'], DEST_KEY_B64)
    self.assertEqual(
        headers['X-Goog-Encryption-Key-Sha256'], DEST_KEY_HASH_B64)
def test_update_storage_class_invalid(self):
    """An unknown storage class name is rejected with ``ValueError``."""
    blob = self._make_one('blob-name', bucket=_Bucket())
    self.assertRaises(ValueError, blob.update_storage_class, u'BOGUS')
def test_update_storage_class_wo_encryption_key(self):
    """Storage-class change on an unencrypted blob: no key headers sent."""
    BLOB_NAME = 'blob-name'
    STORAGE_CLASS = u'NEARLINE'
    RESPONSE = {
        'resource': {'storageClass': STORAGE_CLASS},
    }
    response = ({'status': http_client.OK}, RESPONSE)
    connection = _Connection(response)
    client = _Client(connection)
    bucket = _Bucket(client=client)
    blob = self._make_one(BLOB_NAME, bucket=bucket)
    blob.update_storage_class('NEARLINE')
    self.assertEqual(blob.storage_class, 'NEARLINE')
    # The change is implemented as a rewrite onto the same name.
    kw = connection._requested
    self.assertEqual(len(kw), 1)
    self.assertEqual(kw[0]['method'], 'POST')
    PATH = '/b/name/o/%s/rewriteTo/b/name/o/%s' % (BLOB_NAME, BLOB_NAME)
    self.assertEqual(kw[0]['path'], PATH)
    self.assertNotIn('query_params', kw[0])
    SENT = {'storageClass': STORAGE_CLASS}
    self.assertEqual(kw[0]['data'], SENT)
    headers = {
        key.title(): str(value) for key, value in kw[0]['headers'].items()}
    # Blob has no key, and therefore the relevant headers are not sent.
    self.assertNotIn('X-Goog-Copy-Source-Encryption-Algorithm', headers)
    self.assertNotIn('X-Goog-Copy-Source-Encryption-Key', headers)
    self.assertNotIn('X-Goog-Copy-Source-Encryption-Key-Sha256', headers)
    self.assertNotIn('X-Goog-Encryption-Algorithm', headers)
    self.assertNotIn('X-Goog-Encryption-Key', headers)
    self.assertNotIn('X-Goog-Encryption-Key-Sha256', headers)
def test_update_storage_class_w_encryption_key(self):
import base64
import hashlib
BLOB_NAME = 'blob-name'
BLOB_KEY = b'01234567890123456789012345678901' # 32 bytes
BLOB_KEY_B64 = base64.b64encode(BLOB_KEY).rstrip().decode('ascii')
BLOB_KEY_HASH = hashlib.sha256(BLOB_KEY).digest()
BLOB_KEY_HASH_B64 = base64.b64encode(
BLOB_KEY_HASH).rstrip().decode('ascii')
STORAGE_CLASS = u'NEARLINE'
RESPONSE = {
'resource': {'storageClass': STORAGE_CLASS},
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(
BLOB_NAME, bucket=bucket, encryption_key=BLOB_KEY)
blob.update_storage_class('NEARLINE')
self.assertEqual(blob.storage_class, 'NEARLINE')
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
PATH = '/b/name/o/%s/rewriteTo/b/name/o/%s' % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]['path'], PATH)
self.assertNotIn('query_params', kw[0])
SENT = {'storageClass': STORAGE_CLASS}
self.assertEqual(kw[0]['data'], SENT)
headers = {
key.title(): str(value) for key, value in kw[0]['headers'].items()}
# Blob has key, and therefore the relevant headers are sent.
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Algorithm'], 'AES256')
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Key'], BLOB_KEY_B64)
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Key-Sha256'],
BLOB_KEY_HASH_B64)
self.assertEqual(
headers['X-Goog-Encryption-Algorithm'], 'AES256')
self.assertEqual(
headers['X-Goog-Encryption-Key'], BLOB_KEY_B64)
self.assertEqual(
headers['X-Goog-Encryption-Key-Sha256'], BLOB_KEY_HASH_B64)
    def test_cache_control_getter(self):
        """``cache_control`` reflects the ``cacheControl`` resource field."""
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        CACHE_CONTROL = 'no-cache'
        properties = {'cacheControl': CACHE_CONTROL}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.cache_control, CACHE_CONTROL)
    def test_cache_control_setter(self):
        """Assigning ``cache_control`` updates the (initially None) value."""
        BLOB_NAME = 'blob-name'
        CACHE_CONTROL = 'no-cache'
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertIsNone(blob.cache_control)
        blob.cache_control = CACHE_CONTROL
        self.assertEqual(blob.cache_control, CACHE_CONTROL)
    def test_component_count(self):
        """``component_count`` reflects the ``componentCount`` field."""
        BUCKET = object()
        COMPONENT_COUNT = 42
        blob = self._make_one('blob-name', bucket=BUCKET,
                              properties={'componentCount': COMPONENT_COUNT})
        self.assertEqual(blob.component_count, COMPONENT_COUNT)
    def test_component_count_unset(self):
        """``component_count`` is None when the property is absent."""
        BUCKET = object()
        blob = self._make_one('blob-name', bucket=BUCKET)
        self.assertIsNone(blob.component_count)
    def test_component_count_string_val(self):
        """A JSON string value is coerced to ``int``."""
        BUCKET = object()
        COMPONENT_COUNT = 42
        blob = self._make_one(
            'blob-name', bucket=BUCKET,
            properties={'componentCount': str(COMPONENT_COUNT)})
        self.assertEqual(blob.component_count, COMPONENT_COUNT)
    def test_content_disposition_getter(self):
        """``content_disposition`` reflects ``contentDisposition``."""
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        CONTENT_DISPOSITION = 'Attachment; filename=example.jpg'
        properties = {'contentDisposition': CONTENT_DISPOSITION}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION)
    def test_content_disposition_setter(self):
        """Assigning ``content_disposition`` updates the (initially None) value."""
        BLOB_NAME = 'blob-name'
        CONTENT_DISPOSITION = 'Attachment; filename=example.jpg'
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertIsNone(blob.content_disposition)
        blob.content_disposition = CONTENT_DISPOSITION
        self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION)
    def test_content_encoding_getter(self):
        """``content_encoding`` reflects ``contentEncoding``."""
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        CONTENT_ENCODING = 'gzip'
        properties = {'contentEncoding': CONTENT_ENCODING}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.content_encoding, CONTENT_ENCODING)
    def test_content_encoding_setter(self):
        """Assigning ``content_encoding`` updates the (initially None) value."""
        BLOB_NAME = 'blob-name'
        CONTENT_ENCODING = 'gzip'
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertIsNone(blob.content_encoding)
        blob.content_encoding = CONTENT_ENCODING
        self.assertEqual(blob.content_encoding, CONTENT_ENCODING)
    def test_content_language_getter(self):
        """``content_language`` reflects ``contentLanguage``."""
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        CONTENT_LANGUAGE = 'pt-BR'
        properties = {'contentLanguage': CONTENT_LANGUAGE}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.content_language, CONTENT_LANGUAGE)
    def test_content_language_setter(self):
        """Assigning ``content_language`` updates the (initially None) value."""
        BLOB_NAME = 'blob-name'
        CONTENT_LANGUAGE = 'pt-BR'
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertIsNone(blob.content_language)
        blob.content_language = CONTENT_LANGUAGE
        self.assertEqual(blob.content_language, CONTENT_LANGUAGE)
    def test_content_type_getter(self):
        """``content_type`` reflects ``contentType``."""
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        CONTENT_TYPE = 'image/jpeg'
        properties = {'contentType': CONTENT_TYPE}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.content_type, CONTENT_TYPE)
    def test_content_type_setter(self):
        """Assigning ``content_type`` updates the (initially None) value."""
        BLOB_NAME = 'blob-name'
        CONTENT_TYPE = 'image/jpeg'
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertIsNone(blob.content_type)
        blob.content_type = CONTENT_TYPE
        self.assertEqual(blob.content_type, CONTENT_TYPE)
    def test_crc32c_getter(self):
        """``crc32c`` reflects the ``crc32c`` checksum field."""
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        CRC32C = 'DEADBEEF'
        properties = {'crc32c': CRC32C}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.crc32c, CRC32C)
    def test_crc32c_setter(self):
        """Assigning ``crc32c`` updates the (initially None) value."""
        BLOB_NAME = 'blob-name'
        CRC32C = 'DEADBEEF'
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertIsNone(blob.crc32c)
        blob.crc32c = CRC32C
        self.assertEqual(blob.crc32c, CRC32C)
    def test_etag(self):
        """``etag`` reflects the ``etag`` resource field."""
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        ETAG = 'ETAG'
        properties = {'etag': ETAG}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.etag, ETAG)
    def test_generation(self):
        """``generation`` reflects the ``generation`` field."""
        BUCKET = object()
        GENERATION = 42
        blob = self._make_one('blob-name', bucket=BUCKET,
                              properties={'generation': GENERATION})
        self.assertEqual(blob.generation, GENERATION)
    def test_generation_unset(self):
        """``generation`` is None when the property is absent."""
        BUCKET = object()
        blob = self._make_one('blob-name', bucket=BUCKET)
        self.assertIsNone(blob.generation)
    def test_generation_string_val(self):
        """A JSON string value is coerced to ``int``."""
        BUCKET = object()
        GENERATION = 42
        blob = self._make_one('blob-name', bucket=BUCKET,
                              properties={'generation': str(GENERATION)})
        self.assertEqual(blob.generation, GENERATION)
    def test_id(self):
        """``id`` reflects the ``id`` resource field."""
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        ID = 'ID'
        properties = {'id': ID}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.id, ID)
    def test_md5_hash_getter(self):
        """``md5_hash`` reflects the ``md5Hash`` field."""
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        MD5_HASH = 'DEADBEEF'
        properties = {'md5Hash': MD5_HASH}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.md5_hash, MD5_HASH)
    def test_md5_hash_setter(self):
        """Assigning ``md5_hash`` updates the (initially None) value."""
        BLOB_NAME = 'blob-name'
        MD5_HASH = 'DEADBEEF'
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertIsNone(blob.md5_hash)
        blob.md5_hash = MD5_HASH
        self.assertEqual(blob.md5_hash, MD5_HASH)
    def test_media_link(self):
        """``media_link`` reflects the ``mediaLink`` field."""
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        MEDIA_LINK = 'http://example.com/media/'
        properties = {'mediaLink': MEDIA_LINK}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.media_link, MEDIA_LINK)
    def test_metadata_getter(self):
        """``metadata`` reflects the user-metadata mapping."""
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        METADATA = {'foo': 'Foo'}
        properties = {'metadata': METADATA}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.metadata, METADATA)
    def test_metadata_setter(self):
        """Assigning ``metadata`` updates the (initially None) mapping."""
        BLOB_NAME = 'blob-name'
        METADATA = {'foo': 'Foo'}
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertIsNone(blob.metadata)
        blob.metadata = METADATA
        self.assertEqual(blob.metadata, METADATA)
    def test_metageneration(self):
        """``metageneration`` reflects the ``metageneration`` field."""
        BUCKET = object()
        METAGENERATION = 42
        blob = self._make_one('blob-name', bucket=BUCKET,
                              properties={'metageneration': METAGENERATION})
        self.assertEqual(blob.metageneration, METAGENERATION)
    def test_metageneration_unset(self):
        """``metageneration`` is None when the property is absent."""
        BUCKET = object()
        blob = self._make_one('blob-name', bucket=BUCKET)
        self.assertIsNone(blob.metageneration)
    def test_metageneration_string_val(self):
        """A JSON string value is coerced to ``int``."""
        BUCKET = object()
        METAGENERATION = 42
        blob = self._make_one(
            'blob-name', bucket=BUCKET,
            properties={'metageneration': str(METAGENERATION)})
        self.assertEqual(blob.metageneration, METAGENERATION)
    def test_owner(self):
        """``owner`` exposes the entity/entityId mapping from the resource."""
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        OWNER = {'entity': 'project-owner-12345', 'entityId': '23456'}
        properties = {'owner': OWNER}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        owner = blob.owner
        self.assertEqual(owner['entity'], 'project-owner-12345')
        self.assertEqual(owner['entityId'], '23456')
    def test_self_link(self):
        """``self_link`` reflects the ``selfLink`` field."""
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        SELF_LINK = 'http://example.com/self/'
        properties = {'selfLink': SELF_LINK}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.self_link, SELF_LINK)
    def test_size(self):
        """``size`` reflects the ``size`` field."""
        BUCKET = object()
        SIZE = 42
        blob = self._make_one('blob-name', bucket=BUCKET,
                              properties={'size': SIZE})
        self.assertEqual(blob.size, SIZE)
    def test_size_unset(self):
        """``size`` is None when the property is absent."""
        BUCKET = object()
        blob = self._make_one('blob-name', bucket=BUCKET)
        self.assertIsNone(blob.size)
    def test_size_string_val(self):
        """A JSON string value is coerced to ``int``."""
        BUCKET = object()
        SIZE = 42
        blob = self._make_one('blob-name', bucket=BUCKET,
                              properties={'size': str(SIZE)})
        self.assertEqual(blob.size, SIZE)
    def test_storage_class_getter(self):
        """``storage_class`` reflects the ``storageClass`` field."""
        blob_name = 'blob-name'
        bucket = _Bucket()
        storage_class = 'MULTI_REGIONAL'
        properties = {'storageClass': storage_class}
        blob = self._make_one(blob_name, bucket=bucket, properties=properties)
        self.assertEqual(blob.storage_class, storage_class)
    def test_storage_class_setter(self):
        """Assigning ``storage_class`` writes ``storageClass`` locally
        (no API call is made)."""
        blob_name = 'blob-name'
        bucket = _Bucket()
        storage_class = 'COLDLINE'
        blob = self._make_one(blob_name, bucket=bucket)
        self.assertIsNone(blob.storage_class)
        blob.storage_class = storage_class
        self.assertEqual(blob.storage_class, storage_class)
        self.assertEqual(blob._properties, {'storageClass': storage_class})
    def test_time_deleted(self):
        """``time_deleted`` parses the RFC3339 string into a UTC datetime."""
        from google.cloud._helpers import _RFC3339_MICROS
        from google.cloud._helpers import UTC
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
        TIME_DELETED = TIMESTAMP.strftime(_RFC3339_MICROS)
        properties = {'timeDeleted': TIME_DELETED}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.time_deleted, TIMESTAMP)
    def test_time_deleted_unset(self):
        """``time_deleted`` is None when the property is absent."""
        BUCKET = object()
        blob = self._make_one('blob-name', bucket=BUCKET)
        self.assertIsNone(blob.time_deleted)
    def test_time_created(self):
        """``time_created`` parses the RFC3339 string into a UTC datetime."""
        from google.cloud._helpers import _RFC3339_MICROS
        from google.cloud._helpers import UTC
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
        TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS)
        properties = {'timeCreated': TIME_CREATED}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.time_created, TIMESTAMP)
    def test_time_created_unset(self):
        """``time_created`` is None when the property is absent."""
        BUCKET = object()
        blob = self._make_one('blob-name', bucket=BUCKET)
        self.assertIsNone(blob.time_created)
    def test_updated(self):
        """``updated`` parses the RFC3339 string into a UTC datetime."""
        from google.cloud._helpers import _RFC3339_MICROS
        from google.cloud._helpers import UTC
        BLOB_NAME = 'blob-name'
        bucket = _Bucket()
        TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
        UPDATED = TIMESTAMP.strftime(_RFC3339_MICROS)
        properties = {'updated': UPDATED}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.updated, TIMESTAMP)
    def test_updated_unset(self):
        """``updated`` is None when the property is absent."""
        BUCKET = object()
        blob = self._make_one('blob-name', bucket=BUCKET)
        self.assertIsNone(blob.updated)
class Test__quote(unittest.TestCase):
    """Unit tests for the private ``_quote`` URL-escaping helper."""

    @staticmethod
    def _call_fut(value):
        from google.cloud.storage.blob import _quote

        return _quote(value)

    def test_bytes(self):
        """Raw bytes are percent-encoded byte-by-byte."""
        self.assertEqual(self._call_fut(b'\xDE\xAD\xBE\xEF'), '%DE%AD%BE%EF')

    def test_unicode(self):
        """Non-ASCII text is percent-encoded via its UTF-8 bytes."""
        helicopter = u'\U0001f681'
        self.assertEqual(self._call_fut(helicopter), '%F0%9F%9A%81')

    def test_bad_type(self):
        """Values that are neither bytes nor text raise ``TypeError``."""
        with self.assertRaises(TypeError):
            self._call_fut(None)
class Test__maybe_rewind(unittest.TestCase):
    """Unit tests for the private ``_maybe_rewind`` stream helper."""

    @staticmethod
    def _call_fut(*args, **kwargs):
        from google.cloud.storage.blob import _maybe_rewind

        return _maybe_rewind(*args, **kwargs)

    def _verify(self, ret_val, stream, rewound):
        """Helper must return None and seek(0) exactly when *rewound*."""
        self.assertIsNone(ret_val)
        if rewound:
            stream.seek.assert_called_once_with(0, os.SEEK_SET)
        else:
            stream.seek.assert_not_called()

    def test_default(self):
        stream = mock.Mock(spec=[u'seek'])
        self._verify(self._call_fut(stream), stream, rewound=False)

    def test_do_not_rewind(self):
        stream = mock.Mock(spec=[u'seek'])
        self._verify(
            self._call_fut(stream, rewind=False), stream, rewound=False)

    def test_do_rewind(self):
        stream = mock.Mock(spec=[u'seek'])
        self._verify(
            self._call_fut(stream, rewind=True), stream, rewound=True)
class Test__raise_from_invalid_response(unittest.TestCase):
    """Unit tests for ``_raise_from_invalid_response`` error translation."""

    @staticmethod
    def _call_fut(*args, **kwargs):
        from google.cloud.storage.blob import _raise_from_invalid_response

        return _raise_from_invalid_response(*args, **kwargs)

    def _helper(self, message, **kwargs):
        """Wrap *message* in an ``InvalidResponse`` with a 400 status and
        assert it is re-raised as ``BadRequest``; return the exc_info."""
        import requests
        from google.resumable_media import InvalidResponse
        from google.cloud import exceptions

        response = requests.Response()
        response.request = requests.Request(
            'GET', 'http://example.com').prepare()
        response.status_code = http_client.BAD_REQUEST
        response._content = message
        with self.assertRaises(exceptions.BadRequest) as exc_info:
            self._call_fut(InvalidResponse(response), **kwargs)
        return exc_info

    def test_default(self):
        message = b'Failure'
        exc_info = self._helper(message)
        expected = 'GET http://example.com/: {}'.format(
            message.decode('utf-8'))
        self.assertEqual(exc_info.exception.message, expected)
        self.assertEqual(exc_info.exception.errors, [])
class _Connection(object):
API_BASE_URL = 'http://example.com'
USER_AGENT = 'testing 1.2.3'
credentials = object()
def __init__(self, *responses):
self._responses = responses[:]
self._requested = []
self._signed = []
def _respond(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
def api_request(self, **kw):
from google.cloud.exceptions import NotFound
info, content = self._respond(**kw)
if info.get('status') == http_client.NOT_FOUND:
raise NotFound(info)
return content
class _Bucket(object):
def __init__(self, client=None, name='name'):
if client is None:
connection = _Connection()
client = _Client(connection)
self.client = client
self._blobs = {}
self._copied = []
self._deleted = []
self.name = name
self.path = '/b/' + name
def delete_blob(self, blob_name, client=None):
del self._blobs[blob_name]
self._deleted.append((blob_name, client))
class _Signer(object):
def __init__(self):
self._signed = []
def __call__(self, *args, **kwargs):
self._signed.append((args, kwargs))
return ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
'&Expiration=%s' % kwargs.get('expiration'))
class _Client(object):
def __init__(self, connection):
self._base_connection = connection
@property
def _connection(self):
return self._base_connection
@property
def _credentials(self):
return self._base_connection.credentials
| storage/tests/unit/test_blob.py | 91,732 | Copyright 2014 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 32 bytes Set the media link on the blob Set the media link on the blob Give the transport two fake responses. Check that exactly one transport was created. Check that the transport was called exactly twice. NOTE: bytes=0-2 never shows up because the mock was called with **MUTABLE** headers and it was mutated before the second request. Create a fake client/bucket and use them in the Blob() constructor. Make sure this will not be chunked. Make sure the download was as expected. Create a fake client/bucket and use them in the Blob() constructor. Modify the blob so there there will be 2 chunks of size 3. Make sure the download was as expected. Check that the transport was called exactly twice. ``headers`` was modified (in place) once for each API call. Create a fake client/bucket and use them in the Blob() constructor. Set the media link on the blob Check that exactly one transport was created. Check that the transport was called once. Create a fake client/bucket and use them in the Blob() constructor. Modify the blob so there there will be 2 chunks of size 3. Make sure the media link is still unknown. Create a fake client/bucket and use them in the Blob() constructor. Modify the blob so there there will be 2 chunks of size 3. Modify the response. Check that exactly one transport was created. Create a fake client/bucket and use them in the Blob() constructor. 
Modify the blob so there there will be 2 chunks of size 3. Create a fake client/bucket and use them in the Blob() constructor. Modify the blob so there there will be 2 chunks of size 3. Create a fake client/bucket and use them in the Blob() constructor. Modify the blob so there there will be 2 chunks of size 3. Fake that `updated` is in changes. Create mocks to be checked for doing transport. Create some mock arguments. Check the mocks and the returned value. Need to make sure **same** dict is used because ``json.dumps()`` will depend on the hash order. Create mocks to be checked for doing transport. Create some mock arguments and call the method under test. Check the returned values. Make sure we never read from the stream. Check the mocks. First mock transport.request() does initiates upload. Second mock transport.request() does sends first chunk. Third mock transport.request() does sends last chunk. Data to be uploaded. Create mocks to be checked for doing transport. Create some mock arguments and call the method under test. Check the returned values. Check the mocks. Create a fake response. Mock **both** helpers. Make the request and check the mocks. 1GB Mock low-level upload helper on blob (it is tested elsewhere). Make sure `updated` is empty before the request. Not at zero. Check the response and side-effects. Check the mock. num_retries Mock low-level upload helper on blob (it is tested elsewhere). Make sure `metadata` is empty before the request. Check the response and side-effects. Check the mock. Mock low-level upload helper on blob (it is tested elsewhere). Make sure `metadata` is empty before the request. Check the response and side-effects. Check the mock. Create mocks to be checked for doing transport. Create some mock arguments and call the method under test. Check the returned value and (lack of) side-effect. Check the mocks. 32 bytes 32 bytes 32 bytes Blob has no key, and therefore the relevant headers are not sent. 
32 bytes Blob has key, and therefore the relevant headers are sent. | 3,974 | en | 0.89547 |
class Hero:
    """A simple game character with private name, health and attack power."""

    def __init__(self, name, health, attackPower):
        # Double-underscore attributes are name-mangled (pseudo-private).
        self.__name = name
        self.__health = health
        self.__attPower = attackPower

    # --- getters ---
    def getName(self):
        return self.__name

    def getHealth(self):
        return self.__health

    # --- setters ---
    def diserang(self, serangPower):
        """Take a hit: subtract the attacker's power from health."""
        self.__health -= serangPower

    def setAttPower(self, nilaibaru):
        """Replace the hero's attack power with the new value."""
        self.__attPower = nilaibaru
# game setup: create the hero
earthshaker = Hero("earthshaker",50, 5)
# game running: show state, take 5 damage, show state again
print(earthshaker.getName())
print(earthshaker.getHealth())
earthshaker.diserang(5)
print(earthshaker.getHealth())
import argparse
import datasets
import matplotlib.pyplot as plt
import numpy as np
def parse_args():
    """Parse command-line arguments; ``--input`` (dataset dir) is required."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input', type=str, required=True,
        help='Path to the directory with input dataset')
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    # Shuffle so the 10k-example sample below is drawn across the split.
    dataset = datasets.load_from_disk(args.input).shuffle()
    for part in dataset:
        print()
        print('part', part)
        xs = []  # NOTE(review): collected but never plotted below
        ys = []
        for i, x in enumerate(dataset[part]):
            print(x['tse'], len(x['input_ids']))
            xs.append(len(x['input_ids']))
            ys.append(x['tse'])
            # Cap the scan at ~10k examples per split.
            if i >= 10000:
                break
        # Reset pyplot state between splits (figures are reused).
        plt.clf()
        plt.cla()
        plt.title(f'{part} CDF')
        # plt.xlabel('len')
        # plt.ylabel('tse / len')
        # plt.scatter(xs, ys)
        # plt.hist(ys, bins=5000)
        # Empirical CDF of 'tse': sorted values on x, rank on y.
        ys.sort()
        ys = np.array(ys)
        plt.plot(ys, np.arange(len(ys)))
        plt.savefig(f'{part}.png')
# coding: utf-8
"""
API's OpenData do Open Banking Brasil
As API's descritas neste documento são referentes as API's da fase OpenData do Open Banking Brasil. # noqa: E501
OpenAPI spec version: 1.0.0-rc5.2
Contact: apiteam@swagger.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from products_and_services_client.api_client import ApiClient
class InvoiceFinancingsApi(object):
    """Client for the Open Banking Brasil invoice-financings endpoints.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """
    def __init__(self, api_client=None):
        # Fall back to a default-configured ApiClient when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def get_business_invoice_financings(self, **kwargs):  # noqa: E501
        """Retrieve the list of receivables-advance (invoice financing) products for legal entities (Pessoa Jurídica).  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_business_invoice_financings(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int page: Number of the page being requested (the first page is 1).
        :param int page_size: Total number of records per page.
        :return: ResponseBusinessInvoiceFinancings
            If the method is called asynchronously,
            returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_business_invoice_financings_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.get_business_invoice_financings_with_http_info(**kwargs)  # noqa: E501
            return data
    def get_business_invoice_financings_with_http_info(self, **kwargs):  # noqa: E501
        """Retrieve the list of receivables-advance products for legal entities, with HTTP response info.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_business_invoice_financings_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int page: Number of the page being requested (the first page is 1).
        :param int page_size: Total number of records per page.
        :return: ResponseBusinessInvoiceFinancings
            If the method is called asynchronously,
            returns the request thread.
        """
        # Reject any keyword argument the generated API does not know about.
        all_params = ['page', 'page_size']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_business_invoice_financings" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        if 'page' in params:
            query_params.append(('page', params['page']))  # noqa: E501
        if 'page_size' in params:
            # The wire name for this query parameter is hyphenated.
            query_params.append(('page-size', params['page_size']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        return self.api_client.call_api(
            '/business-invoice-financings', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ResponseBusinessInvoiceFinancings',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def get_personal_invoice_financings(self, **kwargs):  # noqa: E501
        """Retrieve the list of receivables-advance (invoice financing) products for natural persons (Pessoa Natural).  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_personal_invoice_financings(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int page: Number of the page being requested (the first page is 1).
        :param int page_size: Total number of records per page.
        :return: ResponsePersonalInvoiceFinancings
            If the method is called asynchronously,
            returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_personal_invoice_financings_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.get_personal_invoice_financings_with_http_info(**kwargs)  # noqa: E501
            return data
    def get_personal_invoice_financings_with_http_info(self, **kwargs):  # noqa: E501
        """Retrieve the list of receivables-advance products for natural persons, with HTTP response info.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_personal_invoice_financings_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int page: Number of the page being requested (the first page is 1).
        :param int page_size: Total number of records per page.
        :return: ResponsePersonalInvoiceFinancings
            If the method is called asynchronously,
            returns the request thread.
        """
        # Reject any keyword argument the generated API does not know about.
        all_params = ['page', 'page_size']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_personal_invoice_financings" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        if 'page' in params:
            query_params.append(('page', params['page']))  # noqa: E501
        if 'page_size' in params:
            # The wire name for this query parameter is hyphenated.
            query_params.append(('page-size', params['page_size']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        return self.api_client.call_api(
            '/personal-invoice-financings', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ResponsePersonalInvoiceFinancings',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
Obtém a lista de Adiantamento de Recebíveis de Pessoa Jurídica. # noqa: E501
Obtém a lista de Adiantamento de Recebíveis de Pessoa Jurídica. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_business_invoice_financings(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: Número da página que está sendo requisitada (o valor da primeira página é 1).
:param int page_size: Quantidade total de registros por páginas.
:return: ResponseBusinessInvoiceFinancings
If the method is called asynchronously,
returns the request thread.
Obtém a lista de Adiantamento de Recebíveis de Pessoa Jurídica. # noqa: E501
Obtém a lista de Adiantamento de Recebíveis de Pessoa Jurídica. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_business_invoice_financings_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: Número da página que está sendo requisitada (o valor da primeira página é 1).
:param int page_size: Quantidade total de registros por páginas.
:return: ResponseBusinessInvoiceFinancings
If the method is called asynchronously,
returns the request thread.
Obtém a lista de Adiantamento de Recebíveis de Pessoa Natural. # noqa: E501
Obtém a lista de Adiantamento de Recebíveis de Pessoa Natural. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_personal_invoice_financings(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: Número da página que está sendo requisitada (o valor da primeira página é 1).
:param int page_size: Quantidade total de registros por páginas.
:return: ResponsePersonalInvoiceFinancings
If the method is called asynchronously,
returns the request thread.
Obtém a lista de Adiantamento de Recebíveis de Pessoa Natural. # noqa: E501
Obtém a lista de Adiantamento de Recebíveis de Pessoa Natural. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_personal_invoice_financings_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: Número da página que está sendo requisitada (o valor da primeira página é 1).
:param int page_size: Quantidade total de registros por páginas.
:return: ResponsePersonalInvoiceFinancings
If the method is called asynchronously,
returns the request thread.
API's OpenData do Open Banking Brasil
As API's descritas neste documento são referentes as API's da fase OpenData do Open Banking Brasil. # noqa: E501
OpenAPI spec version: 1.0.0-rc5.2
Contact: apiteam@swagger.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 noqa: F401 python 2 and python 3 compatibility library noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 | 3,572 | pt | 0.503537 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from warnings import catch_warnings
from datetime import datetime
import itertools
import pytest
from numpy.random import randn
from numpy import nan
import numpy as np
from pandas.compat import u
from pandas import (DataFrame, Index, Series, MultiIndex, date_range,
Timedelta, Period)
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameReshape(TestData):
    def test_pivot(self):
        """Basic pivot: values land at (index, columns) cells and names are kept."""
        data = {
            'index': ['A', 'B', 'C', 'C', 'B', 'A'],
            'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
            'values': [1., 2., 3., 3., 2., 1.]
        }
        frame = DataFrame(data)
        pivoted = frame.pivot(
            index='index', columns='columns', values='values')
        expected = DataFrame({
            'One': {'A': 1., 'B': 2., 'C': 3.},
            'Two': {'A': 1., 'B': 2., 'C': 3.}
        })
        expected.index.name, expected.columns.name = 'index', 'columns'
        tm.assert_frame_equal(pivoted, expected)
        # name tracking
        assert pivoted.index.name == 'index'
        assert pivoted.columns.name == 'columns'
        # don't specify values
        # Omitting values= keeps every remaining column, so the result grows a
        # (value-name, pivoted-label) column MultiIndex.
        pivoted = frame.pivot(index='index', columns='columns')
        assert pivoted.index.name == 'index'
        assert pivoted.columns.names == (None, 'columns')
        with catch_warnings(record=True):
            # pivot multiple columns
            # NOTE(review): tm.makePanel()/Panel were removed in pandas 0.25;
            # this branch only runs against the older pandas this file targets.
            wp = tm.makePanel()
            lp = wp.to_frame()
            df = lp.reset_index()
            tm.assert_frame_equal(df.pivot('major', 'minor'), lp.unstack())
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with tm.assert_raises_regex(ValueError, 'duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame({})
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
    def test_pivot_index_none(self):
        """pivot without an explicit index uses the existing index (GH 3962)."""
        # gh-3962
        data = {
            'index': ['A', 'B', 'C', 'C', 'B', 'A'],
            'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
            'values': [1., 2., 3., 3., 2., 1.]
        }
        frame = DataFrame(data).set_index('index')
        result = frame.pivot(columns='columns', values='values')
        expected = DataFrame({
            'One': {'A': 1., 'B': 2., 'C': 3.},
            'Two': {'A': 1., 'B': 2., 'C': 3.}
        })
        expected.index.name, expected.columns.name = 'index', 'columns'
        assert_frame_equal(result, expected)
        # omit values
        # Without values= every remaining column is kept, producing a
        # (value-name, pivoted-label) column MultiIndex.
        result = frame.pivot(columns='columns')
        expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
                                                      ('values', 'Two')],
                                                     names=[None, 'columns'])
        expected.index.name = 'index'
        tm.assert_frame_equal(result, expected, check_names=False)
        assert result.index.name == 'index'
        assert result.columns.names == (None, 'columns')
        expected.columns = expected.columns.droplevel(0)
        result = frame.pivot(columns='columns', values='values')
        expected.columns.name = 'columns'
        tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
df = self.frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, df)
assert_frame_equal(unstacked_df['bar'], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, df)
assert_frame_equal(unstacked_cols_df['bar'].T, df)
    def test_stack_mixed_level(self):
        """stack works when column levels hold values of mixed types (GH 18310)."""
        # GH 18310
        levels = [range(3), [3, 'a', 'b'], [1, 2]]
        # flat columns:
        df = DataFrame(1, index=levels[0], columns=levels[1])
        result = df.stack()
        expected = Series(1, index=MultiIndex.from_product(levels[:2]))
        assert_series_equal(result, expected)
        # MultiIndex columns:
        df = DataFrame(1, index=levels[0],
                       columns=MultiIndex.from_product(levels[1:]))
        result = df.stack(1)
        expected = DataFrame(1, index=MultiIndex.from_product([levels[0],
                                                               levels[2]]),
                             columns=levels[1])
        assert_frame_equal(result, expected)
        # as above, but used labels in level are actually of homogeneous type
        result = df[['a', 'b']].stack(1)
        expected = expected[['a', 'b']]
        assert_frame_equal(result, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack(fill_value=-1)
expected = DataFrame({'a': [1, -1, 5], 'b': [2, 4, -1]},
index=['x', 'y', 'z'], dtype=np.int16)
assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame({'a': [1, 0.5, 5], 'b': [2, 4, 0.5]},
index=['x', 'y', 'z'], dtype=np.float)
assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame({'x': ['a', 'a', 'b'],
'y': ['j', 'k', 'j'],
'z': [0, 1, 2],
'w': [0, 1, 2]}).set_index(['x', 'y', 'z'])
unstacked = df.unstack(['x', 'y'], fill_value=0)
key = ('w', 'b', 'j')
expected = unstacked[key]
result = pd.Series([0, 0, 2], index=unstacked.index, name=key)
assert_series_equal(result, expected)
stacked = unstacked.stack(['x', 'y'])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
assert_frame_equal(result, df)
# From a series
s = df['w']
result = s.unstack(['x', 'y'], fill_value=0)
expected = unstacked['w']
assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list('AB'), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
# From a mixed type dataframe
df['A'] = df['A'].astype(np.int16)
df['B'] = df['B'].astype(np.float64)
result = df.unstack(fill_value=-1)
expected['A'] = expected['A'].astype(np.int16)
expected['B'] = expected['B'].astype(np.float64)
assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range('2012-01-01', periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [dv[0], pd.NaT, dv[3]],
'b': [dv[1], dv[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame({'a': [dv[0], dv[0], dv[3]],
'b': [dv[1], dv[2], dv[0]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [td[0], pd.NaT, td[3]],
'b': [td[1], td[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame({'a': [td[0], td[1], td[3]],
'b': [td[1], td[2], td[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [Period('2012-01'), Period('2012-02'), Period('2012-03'),
Period('2012-04')]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [periods[0], None, periods[3]],
'b': [periods[1], periods[2], None]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame({'a': [periods[0], periods[1], periods[3]],
'b': [periods[1], periods[2], periods[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
    def test_unstack_fill_frame_categorical(self):
        """unstack on categorical data: only in-category fill values apply."""
        # Test unstacking with categorical
        data = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
        data.index = pd.MultiIndex.from_tuples(
            [('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
        # By default missing values will be NaN
        # ('x' is not in categories 'abc', so pd.Categorical maps it to NaN —
        # that is how the expectation encodes the missing cells.)
        result = data.unstack()
        expected = DataFrame({'a': pd.Categorical(list('axa'),
                                                  categories=list('abc')),
                              'b': pd.Categorical(list('bcx'),
                                                  categories=list('abc'))},
                             index=list('xyz'))
        assert_frame_equal(result, expected)
        # Fill with non-category results in NaN entries similar to above
        result = data.unstack(fill_value='d')
        assert_frame_equal(result, expected)
        # Fill with category value replaces missing values as expected
        result = data.unstack(fill_value='c')
        expected = DataFrame({'a': pd.Categorical(list('aca'),
                                                  categories=list('abc')),
                              'b': pd.Categorical(list('bcc'),
                                                  categories=list('abc'))},
                             index=list('xyz'))
        assert_frame_equal(result, expected)
    def test_unstack_preserve_dtypes(self):
        """unstack(list-of-one-level) matches unstack(level) per dtype (GH 11847)."""
        # Checks fix for #11847
        df = pd.DataFrame(dict(state=['IL', 'MI', 'NC'],
                               index=['a', 'b', 'c'],
                               some_categories=pd.Series(['a', 'b', 'c']
                                                         ).astype('category'),
                               A=np.random.rand(3),
                               B=1,
                               C='foo',
                               D=pd.Timestamp('20010102'),
                               E=pd.Series([1.0, 50.0, 100.0]
                                           ).astype('float32'),
                               F=pd.Series([3.0, 4.0, 5.0]).astype('float64'),
                               G=False,
                               H=pd.Series([1, 200, 923442], dtype='int8')))

        def unstack_and_compare(df, column_name):
            # The single-level list form and the scalar form must agree.
            unstacked1 = df.unstack([column_name])
            unstacked2 = df.unstack(column_name)
            assert_frame_equal(unstacked1, unstacked2)

        # Exercise index levels of several dtypes: object, category, float,
        # bool/int, float32 — and finally a Series instead of a frame.
        df1 = df.set_index(['state', 'index'])
        unstack_and_compare(df1, 'index')
        df1 = df.set_index(['state', 'some_categories'])
        unstack_and_compare(df1, 'some_categories')
        df1 = df.set_index(['F', 'C'])
        unstack_and_compare(df1, 'F')
        df1 = df.set_index(['G', 'B', 'state'])
        unstack_and_compare(df1, 'B')
        df1 = df.set_index(['E', 'A'])
        unstack_and_compare(df1, 'E')
        df1 = df.set_index(['state', 'index'])
        s = df1['A']
        unstack_and_compare(s, 'index')
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3),
repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
assert_frame_equal(df.stack(level=[1, 2]),
df.stack(level=1).stack(level=1))
assert_frame_equal(df.stack(level=[-2, -1]),
df.stack(level=1).stack(level=1))
df_named = df.copy()
df_named.columns.set_names(range(3), inplace=True)
assert_frame_equal(df_named.stack(level=[1, 2]),
df_named.stack(level=1).stack(level=1))
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ['exp', 'animal', 1]
assert_frame_equal(df2.stack(level=['animal', 1]),
animal_hair_stacked, check_names=False)
assert_frame_equal(df2.stack(level=['exp', 1]),
exp_hair_stacked, check_names=False)
# When mixed types are passed and the ints are not level
# names, raise
pytest.raises(ValueError, df2.stack, level=['animal', 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ['exp', 'animal', 0]
assert_frame_equal(df3.stack(level=['animal', 0]),
animal_hair_stacked, check_names=False)
    def test_stack_int_level_names(self):
        """Integers in level= resolve as level *names* when names are ints."""
        columns = MultiIndex.from_tuples(
            [('A', 'cat', 'long'), ('B', 'cat', 'long'),
             ('A', 'dog', 'short'), ('B', 'dog', 'short')],
            names=['exp', 'animal', 'hair_length']
        )
        df = DataFrame(randn(4, 4), columns=columns)
        exp_animal_stacked = df.stack(level=['exp', 'animal'])
        animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
        exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
        # Integer names equal to the positional numbers.
        df2 = df.copy()
        df2.columns.names = [0, 1, 2]
        assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
                           check_names=False)
        assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
                           check_names=False)
        assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
                           check_names=False)
        # Out-of-order int column names
        # (the expectations show the ints are matched by name, not position)
        df3 = df.copy()
        df3.columns.names = [2, 0, 1]
        assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
                           check_names=False)
        assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
                           check_names=False)
        assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
                           check_names=False)
def test_unstack_bool(self):
df = DataFrame([False, False],
index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),
columns=['col'])
rs = df.unstack()
xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],
dtype=object),
index=['a', 'b'],
columns=MultiIndex.from_arrays([['col', 'col'],
['c', 'l']]))
assert_frame_equal(rs, xp)
    def test_unstack_level_binding(self):
        """Level numbers in chained unstack/stack bind to the new object (GH 9856)."""
        # GH9856
        # NOTE(review): MultiIndex(labels=...) was renamed to codes= in
        # pandas 0.24; this file targets the older API.
        mi = pd.MultiIndex(
            levels=[[u('foo'), u('bar')], [u('one'), u('two')],
                    [u('a'), u('b')]],
            labels=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
            names=[u('first'), u('second'), u('third')])
        s = pd.Series(0, index=mi)
        # .stack(0) must address level 0 of the *unstacked* result, not of mi.
        result = s.unstack([1, 2]).stack(0)
        expected_mi = pd.MultiIndex(
            levels=[['foo', 'bar'], ['one', 'two']],
            labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
            names=['first', 'second'])
        expected = pd.DataFrame(np.array([[np.nan, 0],
                                          [0, np.nan],
                                          [np.nan, 0],
                                          [0, np.nan]],
                                         dtype=np.float64),
                                index=expected_mi,
                                columns=pd.Index(['a', 'b'], name='third'))
        assert_frame_equal(result, expected)
def test_unstack_to_series(self):
# check reversibility
data = self.frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
assert_frame_equal(undo, self.frame)
# check NA handling
data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
data.index = Index(['a', 'b', 'c'])
result = data.unstack()
midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
assert_frame_equal(old_data, data)
    def test_unstack_dtypes(self):
        """unstack preserves block dtypes where possible (GH 2929, GH 7405)."""
        # GH 2929
        # NOTE(review): get_dtype_counts() was removed in pandas 1.0; this
        # file targets the older API.
        rows = [[1, 1, 3, 4],
                [1, 2, 3, 4],
                [2, 1, 3, 4],
                [2, 2, 3, 4]]
        df = DataFrame(rows, columns=list('ABCD'))
        result = df.get_dtype_counts()
        expected = Series({'int64': 4})
        assert_series_equal(result, expected)
        # single dtype
        df2 = df.set_index(['A', 'B'])
        df3 = df2.unstack('B')
        result = df3.get_dtype_counts()
        expected = Series({'int64': 4})
        assert_series_equal(result, expected)
        # mixed
        df2 = df.set_index(['A', 'B'])
        df2['C'] = 3.
        df3 = df2.unstack('B')
        result = df3.get_dtype_counts()
        expected = Series({'int64': 2, 'float64': 2})
        assert_series_equal(result, expected)
        df2['D'] = 'foo'
        df3 = df2.unstack('B')
        result = df3.get_dtype_counts()
        expected = Series({'float64': 2, 'object': 2})
        assert_series_equal(result, expected)
        # GH7405
        for c, d in (np.zeros(5), np.zeros(5)), \
                    (np.arange(5, dtype='f8'), np.arange(5, 10, dtype='f8')):
            df = DataFrame({'A': ['a'] * 5, 'C': c, 'D': d,
                            'B': pd.date_range('2012-01-01', periods=5)})
            right = df.iloc[:3].copy(deep=True)
            df = df.set_index(['A', 'B'])
            df['D'] = df['D'].astype('int64')
            left = df.iloc[:3].unstack(0)
            right = right.set_index(['A', 'B']).unstack(0)
            right[('D', 'a')] = right[('D', 'a')].astype('int64')
            assert left.shape == (3, 2)
            tm.assert_frame_equal(left, right)
    def test_unstack_unused_levels(self):
        """Unused index labels must not make unstack() cast int to float (GH 17845)."""
        # GH 17845: unused labels in index make unstack() cast int to float
        idx = pd.MultiIndex.from_product([['a'], ['A', 'B', 'C', 'D']])[:-1]
        df = pd.DataFrame([[1, 0]] * 3, index=idx)
        result = df.unstack()
        exp_col = pd.MultiIndex.from_product([[0, 1], ['A', 'B', 'C']])
        expected = pd.DataFrame([[1, 1, 1, 0, 0, 0]], index=['a'],
                                columns=exp_col)
        tm.assert_frame_equal(result, expected)
        assert((result.columns.levels[1] == idx.levels[1]).all())
        # Unused items on both levels
        levels = [[0, 1, 7], [0, 1, 2, 3]]
        labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
        idx = pd.MultiIndex(levels, labels)
        block = np.arange(4).reshape(2, 2)
        df = pd.DataFrame(np.concatenate([block, block + 4]), index=idx)
        result = df.unstack()
        expected = pd.DataFrame(np.concatenate([block * 2, block * 2 + 1],
                                               axis=1),
                                columns=idx)
        tm.assert_frame_equal(result, expected)
        assert((result.columns.levels[1] == idx.levels[1]).all())
        # With mixed dtype and NaN
        levels = [['a', 2, 'c'], [1, 3, 5, 7]]
        labels = [[0, -1, 1, 1], [0, 2, -1, 2]]
        idx = pd.MultiIndex(levels, labels)
        data = np.arange(8)
        df = pd.DataFrame(data.reshape(4, 2), index=idx)
        cases = ((0, [13, 16, 6, 9, 2, 5, 8, 11],
                  [np.nan, 'a', 2], [np.nan, 5, 1]),
                 (1, [8, 11, 1, 4, 12, 15, 13, 16],
                  [np.nan, 5, 1], [np.nan, 'a', 2]))
        for level, idces, col_level, idx_level in cases:
            result = df.unstack(level=level)
            exp_data = np.zeros(18) * np.nan
            exp_data[idces] = data
            cols = pd.MultiIndex.from_product([[0, 1], col_level])
            expected = pd.DataFrame(exp_data.reshape(3, 6),
                                    index=idx_level, columns=cols)
            # Broken (GH 18455):
            # tm.assert_frame_equal(result, expected)
            # Workaround: compare cell-wise via the difference.  diff sums to
            # 0 where values match, and (diff + 1) counts the 8 non-NaN cells.
            diff = result - expected
            assert(diff.sum().sum() == 0)
            assert((diff + 1).sum().sum() == 8)
            assert((result.columns.levels[1] == idx.levels[level]).all())
    @pytest.mark.parametrize("cols", [['A', 'C'], slice(None)])
    def test_unstack_unused_level(self, cols):
        """unstack drops labels unused on the unstacked level (GH 18562)."""
        # GH 18562 : unused labels on the unstacked level
        df = pd.DataFrame([[2010, 'a', 'I'],
                           [2011, 'b', 'II']],
                          columns=['A', 'B', 'C'])
        ind = df.set_index(['A', 'B', 'C'], drop=False)
        # Select only the 'I' rows; 'II' is then an unused label of level 'C'.
        selection = ind.loc[(slice(None), slice(None), 'I'), cols]
        result = selection.unstack()
        expected = ind.iloc[[0]][cols]
        expected.columns = MultiIndex.from_product([expected.columns, ['I']],
                                                   names=[None, 'C'])
        expected.index = expected.index.droplevel('C')
        tm.assert_frame_equal(result, expected)
def test_unstack_nan_index(self): # GH7466
cast = lambda val: '{0:1}'.format('' if val != val else val)
nan = np.nan
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split('.'))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(list(map(cast, right)))
assert left == right
df = DataFrame({'jim': ['a', 'b', nan, 'd'],
'joe': ['w', 'x', 'y', 'z'],
'jolie': ['a.w', 'b.x', ' .y', 'd.z']})
left = df.set_index(['jim', 'joe']).unstack()['jolie']
right = df.set_index(['joe', 'jim']).unstack()['jolie'].T
assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf['jolie'])
df = DataFrame({'1st': ['d'] * 3 + [nan] * 5 + ['a'] * 2 +
['c'] * 3 + ['e'] * 2 + ['b'] * 5,
'2nd': ['y'] * 2 + ['w'] * 3 + [nan] * 3 +
['z'] * 4 + [nan] * 3 + ['x'] * 3 + [nan] * 2,
'3rd': [67, 39, 53, 72, 57, 80, 31, 18, 11, 30, 59,
50, 62, 59, 76, 52, 14, 53, 60, 51]})
df['4th'], df['5th'] = \
df.apply(lambda r: '.'.join(map(cast, r)), axis=1), \
df.apply(lambda r: '.'.join(map(cast, r.iloc[::-1])), axis=1)
for idx in itertools.permutations(['1st', '2nd', '3rd']):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ['4th', '5th']:
verify(udf[col])
# GH7403
df = pd.DataFrame(
{'A': list('aaaabbbb'), 'B': range(8), 'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, 0, 1, 2, nan, nan, nan, nan],
[nan, nan, nan, nan, 4, 5, 6, 7]]
vals = list(map(list, zip(*vals)))
idx = Index([nan, 0, 1, 2, 4, 5, 6, 7], name='B')
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[2, nan], [0, 4], [1, 5], [nan, 6], [3, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = pd.DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, nan], [0, 4], [1, 5], [2, 6], [nan, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH7401
df = pd.DataFrame({'A': list('aaaaabbbbb'), 'C': np.arange(10),
'B': (date_range('2012-01-01', periods=5)
.tolist() * 2)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack()
vals = np.array([[3, 0, 1, 2, nan, 4], [nan, 5, 6, 7, 8, 9]])
idx = Index(['a', 'b'], name='A')
cols = MultiIndex(levels=[['C'], date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH4862
vals = [['Hg', nan, nan, 680585148],
['U', 0.0, nan, 680585148],
['Pb', 7.07e-06, nan, 680585148],
['Sn', 2.3614e-05, 0.0133, 680607017],
['Ag', 0.0, 0.0133, 680607017],
['Hg', -0.00015, 0.0133, 680607017]]
df = DataFrame(vals, columns=['agent', 'change', 'dosage', 's_id'],
index=[17263, 17264, 17265, 17266, 17267, 17268])
left = df.copy().set_index(['s_id', 'dosage', 'agent']).unstack()
vals = [[nan, nan, 7.07e-06, nan, 0.0],
[0.0, -0.00015, nan, 2.3614e-05, nan]]
idx = MultiIndex(levels=[[680585148, 680607017], [0.0133]],
labels=[[0, 1], [-1, 0]],
names=['s_id', 'dosage'])
cols = MultiIndex(levels=[['change'], ['Ag', 'Hg', 'Pb', 'Sn', 'U']],
labels=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, 'agent'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
left = df.loc[17264:].copy().set_index(['s_id', 'dosage', 'agent'])
assert_frame_equal(left.unstack(), right)
# GH9497 - multiple unstack with nulls
df = DataFrame({'1st': [1, 2, 1, 2, 1, 2],
'2nd': pd.date_range('2014-02-01', periods=6,
freq='D'),
'jim': 100 + np.arange(6),
'joe': (np.random.randn(6) * 10).round(2)})
df['3rd'] = df['2nd'] - pd.Timestamp('2014-02-02')
df.loc[1, '2nd'] = df.loc[3, '2nd'] = nan
df.loc[1, '3rd'] = df.loc[4, '3rd'] = nan
left = df.set_index(['1st', '2nd', '3rd']).unstack(['2nd', '3rd'])
assert left.notna().values.sum() == 2 * len(df)
for col in ['jim', 'joe']:
for _, r in df.iterrows():
key = r['1st'], (col, r['2nd'], r['3rd'])
assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame(
[1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, 'A', 'B')]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ('B',)])
ecols = MultiIndex.from_tuples([(t, 'A')])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
assert_frame_equal(result, expected)
    def test_stack_partial_multiIndex(self):
        """Stacking subsets of MultiIndex column levels (GH 8844)."""
        # GH 8844
        def _test_stack_with_multiindex(multiindex):
            df = DataFrame(np.arange(3 * len(multiindex))
                           .reshape(3, len(multiindex)),
                           columns=multiindex)
            for level in (-1, 0, 1, [0, 1], [1, 0]):
                result = df.stack(level=level, dropna=False)
                if isinstance(level, int):
                    # Stacking a single level should not make any all-NaN rows,
                    # so df.stack(level=level, dropna=False) should be the same
                    # as df.stack(level=level, dropna=True).
                    expected = df.stack(level=level, dropna=True)
                    if isinstance(expected, Series):
                        assert_series_equal(result, expected)
                    else:
                        assert_frame_equal(result, expected)
                # Rebuilding the columns from tuples must not change the
                # stacked result.
                df.columns = MultiIndex.from_tuples(df.columns.get_values(),
                                                    names=df.columns.names)
                expected = df.stack(level=level, dropna=False)
                if isinstance(expected, Series):
                    assert_series_equal(result, expected)
                else:
                    assert_frame_equal(result, expected)

        full_multiindex = MultiIndex.from_tuples([('B', 'x'), ('B', 'z'),
                                                  ('A', 'y'),
                                                  ('C', 'x'), ('C', 'u')],
                                                 names=['Upper', 'Lower'])
        # Exercise many column subsets, forwards and reversed.
        for multiindex_columns in ([0, 1, 2, 3, 4],
                                   [0, 1, 2, 3], [0, 1, 2, 4],
                                   [0, 1, 2], [1, 2, 3], [2, 3, 4],
                                   [0, 1], [0, 2], [0, 3],
                                   [0], [2], [4]):
            _test_stack_with_multiindex(full_multiindex[multiindex_columns])
            if len(multiindex_columns) > 1:
                multiindex_columns.reverse()
                _test_stack_with_multiindex(
                    full_multiindex[multiindex_columns])
        df = DataFrame(np.arange(6).reshape(2, 3),
                       columns=full_multiindex[[0, 1, 3]])
        result = df.stack(dropna=False)
        expected = DataFrame([[0, 2], [1, nan], [3, 5], [4, nan]],
                             index=MultiIndex(
                                 levels=[[0, 1], ['u', 'x', 'y', 'z']],
                                 labels=[[0, 0, 1, 1],
                                         [1, 3, 1, 3]],
                                 names=[None, 'Lower']),
                             columns=Index(['B', 'C'], name='Upper'),
                             dtype=df.dtypes[0])
        assert_frame_equal(result, expected)
def test_stack_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
for labels in [list("yxz"), list("yxy")]:
cidx = pd.CategoricalIndex(labels, categories=list("xyz"),
ordered=ordered)
df = DataFrame([[10, 11, 12]], columns=cidx)
result = df.stack()
# `MutliIndex.from_product` preserves categorical dtype -
# it's tested elsewhere.
midx = pd.MultiIndex.from_product([df.index, cidx])
expected = Series([10, 11, 12], index=midx)
tm.assert_series_equal(result, expected)
def test_unstack_fill_frame_object():
    """unstack on object data: NaN holes by default, fill_value otherwise (GH 12815)."""
    letters = pd.Series(['a', 'b', 'c', 'a'], dtype='object')
    letters.index = pd.MultiIndex.from_tuples(
        [('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
    # Default: holes in the reshaped frame become NaN.
    expected = pd.DataFrame(
        {'a': ['a', np.nan, 'a'], 'b': ['b', 'c', np.nan]},
        index=list('xyz')
    )
    assert_frame_equal(letters.unstack(), expected)
    # With fill_value, any object replaces the missing cells.
    expected = pd.DataFrame(
        {'a': ['a', 'd', 'a'], 'b': ['b', 'c', 'd']},
        index=list('xyz')
    )
    assert_frame_equal(letters.unstack(fill_value='d'), expected)
| pandas/tests/frame/test_reshape.py | 36,111 | -*- coding: utf-8 -*- name tracking don't specify values pivot multiple columns gh-3962 omit values GH 18310 flat columns: MultiIndex columns: as above, but used labels in level are actually of homogeneous type GH 9746: fill_value keyword argument for Series and DataFrame unstack From a series From a series with incorrect data type for fill_value GH 13971: fill_value when unstacking multiple levels: Workaround for GH 17886 (unnecessarily casts to float): From a series From a dataframe From a mixed type dataframe From a dataframe with incorrect data type for fill_value Test unstacking with date times Test unstacking with time deltas Test unstacking with period Test unstacking with categorical By default missing values will be NaN Fill with non-category results in NaN entries similar to above Fill with category value replaces missing values as expected Checks fix for 11847 GH 8584: Need to check that stacking works when a number is passed that is both a level name and in the range of the level numbers When mixed types are passed and the ints are not level names, raise GH 8584: Having 0 in the level names could raise a strange error about lexsort depth Out-of-order int column names GH9856 check reversibility check NA handling check composability of unstack GH 2929 single dtype mixed GH7405 GH 17845: unused labels in index make unstack() cast int to float Unused items on both levels With mixed dtype and NaN Broken (GH 18455): tm.assert_frame_equal(result, expected) GH 18562 : unused labels on the unstacked level GH7466 GH7403 GH7401 GH4862 GH9497 - multiple unstack with nulls GH 8039 GH 8844 Stacking a single level should not make any all-NaN rows, so df.stack(level=level, dropna=False) should be the same as df.stack(level=level, dropna=True). GH13854 `MutliIndex.from_product` preserves categorical dtype - it's tested elsewhere. GH12815 Test unstacking with object. 
By default, missing values will be NaN; filling with any value replaces missing values as expected.
#!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
# Parse the raw battery log given as argv[1] and save two voltage plots
# (<log>_total_voltage.png and <log>_cell_voltage.png) next to it.
all_raw = open(sys.argv[1], 'r')
# One sample list per log-line prefix; replaces five copy-pasted parse blocks.
series = {
    'voltage cell 0: ': [],
    'voltage cell 1: ': [],
    'voltage cell 2: ': [],
    'voltage cell 3: ': [],
    'voltage total: ': [],
}
# Process data into lists
for line in all_raw:
    for prefix, values in series.items():
        if prefix in line:
            try:
                # Strip the prefix, then the 4 trailing characters (unit
                # suffix + newline), keeping only the numeric reading.
                values.append(float(line.replace(prefix, '')[:-4]))
            except ValueError:
                # Narrowed from a bare `except:` so Ctrl-C still interrupts.
                print('Malformed data: ' + line)
all_raw.close()  # the original leaked the file handle
cell0v = series['voltage cell 0: ']
cell1v = series['voltage cell 1: ']
cell2v = series['voltage cell 2: ']
cell3v = series['voltage cell 3: ']
totalv = series['voltage total: ']
# Write images
# Total voltage of pack
plt.figure(figsize=(15, 15))
plt.tight_layout()
plt.plot(totalv)
plt.savefig(sys.argv[1] + '_total_voltage.png')
plt.clf()
# Cells: one fixed color per cell so the traces stay identifiable.
plt.figure(figsize=(15, 15))
plt.tight_layout()
plt.plot(cell0v, color='blue')
plt.plot(cell1v, color='red')
plt.plot(cell2v, color='green')
plt.plot(cell3v, color='cyan')
plt.xlabel('C0 = blue C1 = red C2 = green C3 = cyan')
plt.savefig(sys.argv[1] + '_cell_voltage.png')
| plot_battery.py | 1,626 | !/usr/bin/env python init empty lists Process data into lists Write images Total voltage of pack Cells | 102 | en | 0.529774 |
import time
from messaging_pyx import Context, Poller, SubSocket, PubSocket # pylint: disable=no-name-in-module, import-error
# Number of messages to push through the pub/sub pair for the benchmark.
MSGS = 1e5
if __name__ == "__main__":
  # Loopback throughput benchmark: publish on "controlsState" and read the
  # same bytes back through a subscriber connected to the same endpoint.
  c = Context()
  sub_sock = SubSocket()
  pub_sock = PubSocket()
  sub_sock.connect(c, "controlsState")
  pub_sock.connect(c, "controlsState")
  poller = Poller()
  poller.registerSocket(sub_sock)
  t = time.time()
  for i in range(int(MSGS)):
    # Encode the loop counter so the receive side can verify message identity.
    bts = i.to_bytes(4, 'little')
    pub_sock.send(bts)
    # poll(100): presumably a 100 ms timeout — confirm in messaging_pyx.
    for s in poller.poll(100):
      dat = s.receive()
      ii = int.from_bytes(dat, 'little')
      assert(i == ii)
  dt = time.time() - t
  # Report overall throughput for the run.
  print("%.1f msg/s" % (MSGS / dt))
| selfdrive/messaging/demo.py | 652 | pylint: disable=no-name-in-module, import-error | 47 | en | 0.234269 |
###########################
#
# #21 Amicable numbers - Project Euler
# https://projecteuler.net/problem=21
#
# Code by Kevin Marciniak
#
###########################
def sumproperdivisors(num):
    """Return the sum of the proper divisors of num (divisors strictly < num).

    Rewritten from the original O(num/2) trial loop (which also shadowed the
    builtin `sum`) to pair divisors around sqrt(num), an O(sqrt(num)) scan.
    Returns 0 for num < 2, matching the original's empty-range behavior.
    """
    if num < 2:
        return 0
    total = 1  # 1 divides every num >= 2
    root = int(num ** 0.5)
    for d in range(2, root + 1):
        if num % d == 0:
            total += d
            partner = num // d
            if partner != d:  # avoid double-counting a perfect-square root
                total += partner
    return total
# Collect every member of an amicable pair discovered below 10000, then
# print the total (Project Euler #21).
amicable = []
for n in range(10000):
    partner = sumproperdivisors(n)
    # Amicable: d(n) == partner, d(partner) == n, and n != partner.
    if partner != n and sumproperdivisors(partner) == n:
        # Record each pair only once, whichever member is seen first.
        if n not in amicable and partner not in amicable:
            amicable.append(n)
            amicable.append(partner)
print(sum(amicable))
| problem0021.py | 712 | 21 Amicable numbers - Project Euler https://projecteuler.net/problem=21 Code by Kevin Marciniak | 95 | en | 0.362048 |
import threading
from typing import Callable, List, MutableMapping, NamedTuple
from dagster import check
from dagster.core.events.log import EventLogEntry
from .sql_event_log import SqlEventLogStorage
# Poll interval in seconds (the inline comment says 100 ms); presumably
# consumed by the watcher threads — confirm, their loop is outside this view.
POLLING_CADENCE = 0.1  # 100 ms
class CallbackAfterCursor(NamedTuple):
    """Pairing of an event callback with the cursor it should start from.

    Attributes:
        start_cursor (int): Only process EventLogEntrys with an id >= start_cursor
            (earlier ones have presumably already been processed).
        callback (Callable[[EventLogEntry], None]): Callback passed from an
            Observer, invoked once per new EventLogEntry.
    """
    start_cursor: int
    callback: Callable[[EventLogEntry], None]
class SqlPollingEventWatcher:
    """Event Log Watcher that uses a multithreaded polling approach to retrieving new events for run_ids

    This class' job is to manage a collection of threads that each poll the event log for a given
    run_id. Uses one thread (SqlPollingRunIdEventWatcherThread) per watched run_id.

    LOCKING INFO:
        ORDER: _dict_lock -> run_id_thread.callback_fn_list_lock
        INVARIANTS: _dict_lock protects _run_id_to_watcher_dict
    """

    def __init__(self, event_log_storage: SqlEventLogStorage):
        self._event_log_storage = check.inst_param(
            event_log_storage, "event_log_storage", SqlEventLogStorage
        )
        # INVARIANT: dict_lock protects _run_id_to_watcher_dict
        self._dict_lock: threading.Lock = threading.Lock()
        self._run_id_to_watcher_dict: MutableMapping[str, SqlPollingRunIdEventWatcherThread] = {}
        # Guards close() against running twice (also invoked from __del__).
        self._disposed = False

    def has_run_id(self, run_id: str) -> bool:
        """Return True if a watcher thread is currently registered for run_id."""
        run_id = check.str_param(run_id, "run_id")
        with self._dict_lock:
            _has_run_id = run_id in self._run_id_to_watcher_dict
        return _has_run_id

    def watch_run(self, run_id: str, start_cursor: int, callback: Callable[[EventLogEntry], None]):
        """Register `callback` for new events of `run_id`, lazily spawning a
        daemon polling thread for the run if one does not already exist.

        NOTE(review): if the existing thread has already been signalled to
        exit (all previous callbacks removed) but not yet deleted from the
        dict, this attaches the new callback to a dying thread — confirm
        whether unwatch_run's cleanup closes that window.
        """
        run_id = check.str_param(run_id, "run_id")
        start_cursor = check.int_param(start_cursor, "start_cursor")
        callback = check.callable_param(callback, "callback")
        with self._dict_lock:
            if run_id not in self._run_id_to_watcher_dict:
                self._run_id_to_watcher_dict[run_id] = SqlPollingRunIdEventWatcherThread(
                    self._event_log_storage, run_id
                )
                # Daemon thread: does not block interpreter shutdown.
                self._run_id_to_watcher_dict[run_id].daemon = True
                self._run_id_to_watcher_dict[run_id].start()
            self._run_id_to_watcher_dict[run_id].add_callback(start_cursor, callback)

    def unwatch_run(self, run_id: str, handler: Callable[[EventLogEntry], None]):
        """Deregister `handler` for `run_id`; drop the watcher thread once it
        has no callbacks left (remove_callback sets its exit flag then).
        """
        run_id = check.str_param(run_id, "run_id")
        handler = check.callable_param(handler, "handler")
        with self._dict_lock:
            if run_id in self._run_id_to_watcher_dict:
                self._run_id_to_watcher_dict[run_id].remove_callback(handler)
                if self._run_id_to_watcher_dict[run_id].should_thread_exit.is_set():
                    del self._run_id_to_watcher_dict[run_id]

    def __del__(self):
        # Best-effort cleanup on garbage collection; close() is idempotent.
        self.close()

    def close(self):
        """Signal every watcher thread to exit, then join them all."""
        if not self._disposed:
            self._disposed = True
            with self._dict_lock:
                for watcher_thread in self._run_id_to_watcher_dict.values():
                    if not watcher_thread.should_thread_exit.is_set():
                        watcher_thread.should_thread_exit.set()
                for run_id in self._run_id_to_watcher_dict:
                    self._run_id_to_watcher_dict[run_id].join()
                del self._run_id_to_watcher_dict
class SqlPollingRunIdEventWatcherThread(threading.Thread):
    """Thread subclass that watches a single run_id for new events by polling every POLLING_CADENCE.

    Holds a list of callbacks (_callback_fn_list), each registered by an
    `Observer` together with a cursor: a callback should only be executed on
    EventLogEntrys with an associated id >= callback.start_cursor.
    Exits when `self.should_thread_exit` is set.

    LOCKING INFO:
        INVARIANTS: _callback_fn_list_lock protects _callback_fn_list
    """

    def __init__(self, event_log_storage: SqlEventLogStorage, run_id: str):
        super(SqlPollingRunIdEventWatcherThread, self).__init__()
        self._event_log_storage = check.inst_param(
            event_log_storage, "event_log_storage", SqlEventLogStorage
        )
        self._run_id = check.str_param(run_id, "run_id")
        self._callback_fn_list_lock: threading.Lock = threading.Lock()
        self._callback_fn_list: List[CallbackAfterCursor] = []
        # Cooperative shutdown flag; Event.wait() in run() doubles as the
        # polling timer.
        self._should_thread_exit = threading.Event()
        self.name = f"mysql-event-watch-run-id-{self._run_id}"

    @property
    def should_thread_exit(self) -> threading.Event:
        # Exposed so the owning watcher can request shutdown / probe liveness.
        return self._should_thread_exit

    def add_callback(self, start_cursor: int, callback: Callable[[EventLogEntry], None]):
        """Observer has started watching this run.
        Add a callback to execute on new EventLogEntrys st. id >= start_cursor

        Args:
            start_cursor (int): minimum event_id for the callback to execute
            callback (Callable[[EventLogEntry], None]): callback to update the Dagster UI
        """
        start_cursor = check.int_param(start_cursor, "start_cursor")
        callback = check.callable_param(callback, "callback")
        with self._callback_fn_list_lock:
            self._callback_fn_list.append(CallbackAfterCursor(start_cursor, callback))

    def remove_callback(self, callback: Callable[[EventLogEntry], None]):
        """Observer has stopped watching this run;
        Remove a callback from the list of callbacks to execute on new EventLogEntrys.
        Also kill thread if no callbacks remaining (i.e. no Observers are watching this run_id)

        Args:
            callback (Callable[[EventLogEntry], None]): callback to remove from list of callbacks
        """
        callback = check.callable_param(callback, "callback")
        with self._callback_fn_list_lock:
            # Rebuild the list without the given callback (all registrations
            # of it are dropped, regardless of their cursors).
            self._callback_fn_list = [
                callback_with_cursor
                for callback_with_cursor in self._callback_fn_list
                if callback_with_cursor.callback != callback
            ]
            if not self._callback_fn_list:
                self._should_thread_exit.set()

    def run(self):
        """Polling function to update Observers with EventLogEntrys from Event Log DB.

        Wakes every POLLING_CADENCE &
        1. executes a SELECT query to get new EventLogEntrys
        2. fires each callback (taking into account the callback.cursor) on the new EventLogEntrys

        NOTE(review): `cursor` is advanced once per returned record and is
        compared directly against callback.start_cursor; this presumes
        get_logs_for_run(cursor=N) returns records strictly after N, in
        order — confirm against SqlEventLogStorage's contract.
        """
        cursor = -1
        while not self._should_thread_exit.wait(POLLING_CADENCE):
            events = self._event_log_storage.get_logs_for_run(self._run_id, cursor=cursor)
            for event_record in events:
                cursor += 1
                with self._callback_fn_list_lock:
                    for callback_with_cursor in self._callback_fn_list:
                        if callback_with_cursor.start_cursor < cursor:
                            callback_with_cursor.callback(event_record)
| python_modules/dagster/dagster/core/storage/event_log/polling_event_watcher.py | 7,354 | Callback passed from Observer class in event polling
start_cursor (int): Only process EventLogEntrys with an id >= start_cursor
(earlier ones have presumably already been processed)
callback (Callable[[EventLogEntry], None]): callback passed from Observer
to call on new EventLogEntrys
Event Log Watcher that uses a multithreaded polling approach to retrieving new events for run_ids
This class' job is to manage a collection of threads that each poll the event log for a given run_id
Uses one thread (SqlPollingRunIdEventWatcherThread) per watched run_id
LOCKING INFO:
ORDER: _dict_lock -> run_id_thread.callback_fn_list_lock
INVARIANTS: _dict_lock protects _run_id_to_watcher_dict
subclass of Thread that watches a given run_id for new Events by polling every POLLING_CADENCE
Holds a list of callbacks (_callback_fn_list) each passed in by an `Observer`. Note that
the callbacks have a cursor associated; this means that the callbacks should be
only executed on EventLogEntrys with an associated id >= callback.start_cursor
Exits when `self.should_thread_exit` is set.
LOCKING INFO:
INVARIANTS: _callback_fn_list_lock protects _callback_fn_list
Observer has started watching this run.
Add a callback to execute on new EventLogEntrys st. id >= start_cursor
Args:
start_cursor (int): minimum event_id for the callback to execute
callback (Callable[[EventLogEntry], None]): callback to update the Dagster UI
Observer has stopped watching this run;
Remove a callback from the list of callbacks to execute on new EventLogEntrys
Also kill thread if no callbacks remaining (i.e. no Observers are watching this run_id)
Args:
callback (Callable[[EventLogEntry], None]): callback to remove from list of callbacks
Polling function to update Observers with EventLogEntrys from Event Log DB.
Wakes every POLLING_CADENCE &
1. executes a SELECT query to get new EventLogEntrys
2. fires each callback (taking into account the callback.cursor) on the new EventLogEntrys
Uses max_index_so_far as a cursor in the DB to make sure that only new records are retrieved
100 ms INVARIANT: dict_lock protects _run_id_to_watcher_dict | 2,177 | en | 0.793207 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'bdg-sequila'
copyright = u'2019, biodatageeks.org'
author = u'biodatageeks.org'
# The short X.Y version
# NOTE(review): '|version|' looks like a placeholder substituted by the
# build/release tooling — confirm before hand-editing.
version = u'|version|'
# The full version, including alpha/beta/rc tags
release = u'|version|'
# Project name reused by custom templates / extensions.
project_name = u'bdg-sequila'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# NOTE(review): 'sphinxcontrib.github_ribbon', 'sphinxcontrib.bibtex' and
# 'rst2pdf.pdfbuilder' are third-party packages that must be installed in
# the docs build environment.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinxcontrib.github_ribbon',
    # 'sphinx.ext.ifconfig',
    'sphinxcontrib.bibtex',
    'sphinx.ext.autosectionlabel',
    'rst2pdf.pdfbuilder'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
#html_logo='sequila.png'
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'bdg-sequiladoc'
#--- Options for PDF ------------------------
pdf_documents = [('index', u'rst2pdf', u'SeQuiLa documentation', u'biodatageeks.org'),]
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'bdg-sequila.tex', u'SeQuiLa Documentation',
     u'biodatageeks.org', 'howto'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'bdg-sequila', u'bdg-spark-granges Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'bdg-spark-granges', u'bdg-spark-granges Documentation',
     author, 'bdg-spark-granges', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- GitHub ribbon (requires: pip install sphinxcontrib-github_ribbon) -------
github_ribbon_repo = 'ZSI-Bio/bdg-sequila'
github_ribbon_position = "right"
github_ribbon_color ="red"
# -- latexpdf options ---------------------------------------------------------
text_add_secnumbers = False
#latex_logo = "sequila.png"
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| docs/source/conf.py | 6,179 | -*- coding: utf-8 -*- Configuration file for the Sphinx documentation builder. This file does only contain a selection of the most common options. For a full list see the documentation: http://www.sphinx-doc.org/en/stable/config -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys sys.path.insert(0, os.path.abspath('.')) -- Project information ----------------------------------------------------- The short X.Y version The full version, including alpha/beta/rc tags -- General configuration --------------------------------------------------- If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.0' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 'sphinx.ext.ifconfig', Add any paths that contain templates here, relative to this directory. The suffix(es) of source filenames. You can specify multiple suffix as a list of string: source_suffix = ['.rst', '.md'] The master toctree document. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. This is also used if you do content translation via gettext catalogs. Usually you set "language" from the command line for these cases. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path . The name of the Pygments (syntax highlighting) style to use. -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. 
html_theme = 'alabaster' Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation. html_theme_options = {} Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". Custom sidebar templates, must be a dictionary that maps document names to template names. The default sidebars (for documents that don't match any pattern) are defined by theme itself. Builtin themes are using these templates by default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html']``. html_sidebars = {}html_logo='sequila.png' -- Options for HTMLHelp output --------------------------------------------- Output file base name for HTML help builder.--- Options for PDF ------------------------ -- Options for LaTeX output ------------------------------------------------ The paper size ('letterpaper' or 'a4paper'). 'papersize': 'letterpaper', The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', Additional stuff for the LaTeX preamble. 'preamble': '', Latex figure (float) alignment 'figure_align': 'htbp', Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). -- Options for manual page output ------------------------------------------ One entry per manual page. List of tuples (source start file, name, description, authors, manual section). -- Options for Texinfo output ---------------------------------------------- Grouping the document tree into Texinfo files. List of tuples (source start file, target name, title, author, dir menu entry, description, category) -- Options for Epub output ------------------------------------------------- Bibliographic Dublin Core info. The unique identifier of the text. 
This can be a ISBN number or the project homepage. epub_identifier = '' A unique identification for the text. epub_uid = '' A list of files that should not be packed into the epub file.githubpip install sphinxcontrib-github_ribbonlatexpdflatex_logo = "sequila.png" -- Extension configuration ------------------------------------------------- -- Options for todo extension ---------------------------------------------- If true, `todo` and `todoList` produce output, else they produce nothing. | 4,454 | en | 0.565657 |
from typing import Optional, Type
from pydantic import UUID4
from tortoise import fields, models
from tortoise.exceptions import DoesNotExist
from fastapi_users.db.base import BaseUserDatabase
from fastapi_users.models import UD
class TortoiseBaseUserModel(models.Model):
    """Abstract Tortoise ORM model with the base FastAPI Users fields."""

    # Primary key supplied by the application (a UUID), not generated by the DB.
    id = fields.UUIDField(pk=True, generated=False)
    email = fields.CharField(index=True, unique=True, null=False, max_length=255)
    hashed_password = fields.CharField(null=False, max_length=255)
    is_active = fields.BooleanField(default=True, null=False)
    is_superuser = fields.BooleanField(default=False, null=False)

    async def to_dict(self):
        """Serialize this row to a plain dict, including each backward FK
        relation (e.g. ``oauth_accounts``) as a list of value dicts.
        """
        d = {}
        for field in self._meta.db_fields:
            d[field] = getattr(self, field)
        for field in self._meta.backward_fk_fields:
            # Reverse relations require an async query to materialize.
            d[field] = await getattr(self, field).all().values()
        return d

    class Meta:
        abstract = True
class TortoiseBaseOAuthAccountModel(models.Model):
    """Abstract Tortoise ORM model with the base OAuth account fields."""

    # Application-supplied primary key.
    # NOTE(review): max_length on a UUIDField appears superfluous — confirm
    # whether Tortoise honors it before removing.
    id = fields.UUIDField(pk=True, generated=False, max_length=255)
    oauth_name = fields.CharField(null=False, max_length=255)
    access_token = fields.CharField(null=False, max_length=255)
    expires_at = fields.IntField(null=False)
    refresh_token = fields.CharField(null=True, max_length=255)
    account_id = fields.CharField(index=True, null=False, max_length=255)
    account_email = fields.CharField(null=False, max_length=255)

    class Meta:
        abstract = True
class TortoiseUserDatabase(BaseUserDatabase[UD]):
    """
    Database adapter for Tortoise ORM.

    :param user_db_model: Pydantic model of a DB representation of a user.
    :param model: Tortoise ORM model.
    :param oauth_account_model: Optional Tortoise ORM model of a OAuth account.
    """

    model: Type[TortoiseBaseUserModel]
    oauth_account_model: Optional[Type[TortoiseBaseOAuthAccountModel]]

    def __init__(
        self,
        user_db_model: Type[UD],
        model: Type[TortoiseBaseUserModel],
        oauth_account_model: Optional[Type[TortoiseBaseOAuthAccountModel]] = None,
    ):
        super().__init__(user_db_model)
        self.model = model
        self.oauth_account_model = oauth_account_model

    async def get(self, id: UUID4) -> Optional[UD]:
        """Fetch a user by primary key; return None if it does not exist."""
        try:
            query = self.model.get(id=id)
            if self.oauth_account_model is not None:
                # Eager-load OAuth accounts so to_dict() can include them.
                query = query.prefetch_related("oauth_accounts")
            user = await query
            user_dict = await user.to_dict()
            return self.user_db_model(**user_dict)
        except DoesNotExist:
            return None

    async def get_by_email(self, email: str) -> Optional[UD]:
        """Fetch a user by email (case-insensitive); return None if not found."""
        query = self.model.filter(email__iexact=email).first()
        if self.oauth_account_model is not None:
            query = query.prefetch_related("oauth_accounts")
        user = await query
        if user is None:
            return None
        user_dict = await user.to_dict()
        return self.user_db_model(**user_dict)

    async def get_by_oauth_account(self, oauth: str, account_id: str) -> Optional[UD]:
        """Fetch the user owning the given OAuth account; None if not found."""
        try:
            query = self.model.get(
                oauth_accounts__oauth_name=oauth, oauth_accounts__account_id=account_id
            ).prefetch_related("oauth_accounts")
            user = await query
            user_dict = await user.to_dict()
            return self.user_db_model(**user_dict)
        except DoesNotExist:
            return None

    async def create(self, user: UD) -> UD:
        """Insert a new user row plus any embedded OAuth account rows."""
        user_dict = user.dict()
        # OAuth accounts live in their own table; strip before creating the user.
        oauth_accounts = user_dict.pop("oauth_accounts", None)
        model = self.model(**user_dict)
        await model.save()
        if oauth_accounts and self.oauth_account_model:
            oauth_account_objects = []
            for oauth_account in oauth_accounts:
                oauth_account_objects.append(
                    self.oauth_account_model(user=model, **oauth_account)
                )
            await self.oauth_account_model.bulk_create(oauth_account_objects)
        return user

    async def update(self, user: UD) -> UD:
        """Persist changes to an existing user; OAuth accounts are replaced
        wholesale (delete-then-recreate) to mirror the in-memory model.
        """
        user_dict = user.dict()
        user_dict.pop("id")  # Tortoise complains if we pass the PK again
        oauth_accounts = user_dict.pop("oauth_accounts", None)
        model = await self.model.get(id=user.id)
        for field in user_dict:
            setattr(model, field, user_dict[field])
        await model.save()
        if oauth_accounts and self.oauth_account_model:
            await model.oauth_accounts.all().delete()
            oauth_account_objects = []
            for oauth_account in oauth_accounts:
                oauth_account_objects.append(
                    self.oauth_account_model(user=model, **oauth_account)
                )
            await self.oauth_account_model.bulk_create(oauth_account_objects)
        return user

    async def delete(self, user: UD) -> None:
        """Remove the user row matching the given user's primary key."""
        await self.model.filter(id=user.id).delete()
| fastapi_users/db/tortoise.py | 4,952 | Database adapter for Tortoise ORM.
:param user_db_model: Pydantic model of a DB representation of a user.
:param model: Tortoise ORM model.
:param oauth_account_model: Optional Tortoise ORM model of a OAuth account.
Tortoise complains if we pass the PK again | 261 | en | 0.465863 |
import time

from datetime import datetime
from typing import List, Optional
from uuid import getnode
from .ballot import (
CiphertextBallot,
CiphertextBallotContest,
CiphertextBallotSelection,
PlaintextBallot,
PlaintextBallotContest,
PlaintextBallotSelection,
make_ciphertext_ballot_contest,
make_ciphertext_ballot_selection,
make_ciphertext_ballot,
)
from .ballot_code import get_hash_for_device
from .election import CiphertextElectionContext
from .elgamal import elgamal_encrypt
from .group import ElementModP, ElementModQ, rand_q
from .logs import log_info, log_warning
from .manifest import (
InternalManifest,
ContestDescription,
ContestDescriptionWithPlaceholders,
SelectionDescription,
)
from .nonces import Nonces
from .serializable import Serializable
from .utils import get_optional, get_or_else_optional_func
class EncryptionDevice(Serializable):
    """
    Metadata for encryption device
    """

    device_id: int
    """Unique identifier for device"""

    session_id: int
    """Used to identify session and protect the timestamp"""

    launch_code: int
    """Election initialization value"""

    location: str
    """Arbitary string to designate the location of device"""

    def __init__(
        self,
        device_id: int,
        session_id: int,
        launch_code: int,
        location: str,
    ) -> None:
        self.device_id = device_id
        self.session_id = session_id
        self.launch_code = launch_code
        self.location = location
        log_info(f": EncryptionDevice: Created: UUID: {device_id} at: {location}")

    def get_hash(self) -> ElementModQ:
        """
        Get hash for encryption device
        :return: Starting hash
        """
        return get_hash_for_device(
            self.device_id, self.session_id, self.launch_code, self.location
        )

    # pylint: disable=no-self-use
    def get_timestamp(self) -> int:
        """
        Get the current UTC timestamp in whole seconds since the epoch.

        BUGFIX: the previous implementation used
        ``datetime.utcnow().timestamp()``.  ``utcnow()`` returns a *naive*
        datetime, and ``timestamp()`` interprets naive datetimes as local
        time, so the result was skewed by the machine's UTC offset on any
        non-UTC host.  ``time.time()`` is always the UTC epoch.
        """
        return int(time.time())
class EncryptionMediator:
    """
    An object for caching election and encryption state.

    It composes Elections and Ballots.
    """

    _internal_manifest: InternalManifest
    _context: CiphertextElectionContext
    _encryption_seed: ElementModQ

    def __init__(
        self,
        internal_manifest: InternalManifest,
        context: CiphertextElectionContext,
        encryption_device: EncryptionDevice,
    ):
        self._internal_manifest = internal_manifest
        self._context = context
        # Seed the first encryption with the device hash; it is then
        # re-seeded from each encrypted ballot's code (see encrypt()).
        self._encryption_seed = encryption_device.get_hash()

    def encrypt(self, ballot: PlaintextBallot) -> Optional[CiphertextBallot]:
        """
        Encrypt the specified ballot using the cached election context.

        Returns None when encryption fails.  On success the ballot's code
        becomes the seed for the next call, chaining ballots together.
        """
        log_info(f" encrypt: objectId: {ballot.object_id}")
        encrypted_ballot = encrypt_ballot(
            ballot, self._internal_manifest, self._context, self._encryption_seed
        )
        if encrypted_ballot is not None and encrypted_ballot.code is not None:
            self._encryption_seed = encrypted_ballot.code
        return encrypted_ballot
def generate_device_uuid() -> int:
    """Return a unique identifier for this device.

    The value is derived from the host hardware (MAC address) via
    ``uuid.getnode``.

    :return: Unique identifier
    """
    hardware_id = getnode()
    return hardware_id
def selection_from(
    description: SelectionDescription,
    is_placeholder: bool = False,
    is_affirmative: bool = False,
) -> PlaintextBallotSelection:
    """Build a `PlaintextBallotSelection` for the given `SelectionDescription`.

    Handy for padding out undervoted ballots with explicit `no` selections,
    and for creating the placeholder representations used when generating
    the `ConstantChaumPedersenProof`.

    :param description: supplies the relevant `object_id` for the selection
    :param is_placeholder: mark this selection as a placeholder value
    :param is_affirmative: mark this selection as `yes`
    :return: the constructed `PlaintextBallotSelection`
    """
    vote_value = 1 if is_affirmative else 0
    return PlaintextBallotSelection(
        description.object_id,
        vote=vote_value,
        is_placeholder_selection=is_placeholder,
    )
def contest_from(description: ContestDescription) -> PlaintextBallotContest:
    """
    Construct a `BallotContest` from a specific `ContestDescription` with all false fields.

    This function is useful for filling contests and selections when a voter undervotes a ballot.

    :param description: The `ContestDescription` used to derive the well-formed `BallotContest`
    :return: a `BallotContest`
    """
    # One all-false (vote=0) selection per description, built idiomatically
    # with a comprehension instead of a manual append loop.
    selections: List[PlaintextBallotSelection] = [
        selection_from(selection_description)
        for selection_description in description.ballot_selections
    ]
    return PlaintextBallotContest(description.object_id, selections)
def encrypt_selection(
    selection: PlaintextBallotSelection,
    selection_description: SelectionDescription,
    elgamal_public_key: ElementModP,
    crypto_extended_base_hash: ElementModQ,
    nonce_seed: ElementModQ,
    is_placeholder: bool = False,
    should_verify_proofs: bool = True,
) -> Optional[CiphertextBallotSelection]:
    """
    Encrypt a specific `BallotSelection` in the context of a specific `BallotContest`

    :param selection: the selection in the valid input form
    :param selection_description: the `SelectionDescription` from the
        `ContestDescription` which defines this selection's structure
    :param elgamal_public_key: the public key (K) used to encrypt the ballot
    :param crypto_extended_base_hash: the extended base hash of the election
    :param nonce_seed: an `ElementModQ` used as a header to seed the `Nonce` generated for this selection.
        this value can be (or derived from) the BallotContest nonce, but no relationship is required
    :param is_placeholder: specifies if this is a placeholder selection
    :param should_verify_proofs: specify if the proofs should be verified prior to returning (default True)
    :return: the encrypted selection, or None on malformed input, encryption
        failure, or (when verification is enabled) an invalid proof
    """

    # Validate Input
    if not selection.is_valid(selection_description.object_id):
        log_warning(f"malformed input selection: {selection}")
        return None

    selection_description_hash = selection_description.crypto_hash()
    # Derive this selection's nonce deterministically from the description
    # hash and the caller-provided seed; a second draw from the same
    # sequence seeds the disjunctive Chaum-Pedersen proof.
    nonce_sequence = Nonces(selection_description_hash, nonce_seed)
    selection_nonce = nonce_sequence[selection_description.sequence_order]
    disjunctive_chaum_pedersen_nonce = next(iter(nonce_sequence))

    log_info(
        f": encrypt_selection: for {selection_description.object_id} hash: {selection_description_hash.to_hex()}"
    )

    selection_representation = selection.vote

    # Generate the encryption
    elgamal_encryption = elgamal_encrypt(
        selection_representation, selection_nonce, elgamal_public_key
    )

    if elgamal_encryption is None:
        # will have logged about the failure earlier, so no need to log anything here
        return None

    # TODO: ISSUE #35: encrypt/decrypt: encrypt the extended_data field

    # Create the return object
    encrypted_selection = make_ciphertext_ballot_selection(
        object_id=selection.object_id,
        description_hash=selection_description_hash,
        ciphertext=get_optional(elgamal_encryption),
        elgamal_public_key=elgamal_public_key,
        crypto_extended_base_hash=crypto_extended_base_hash,
        proof_seed=disjunctive_chaum_pedersen_nonce,
        selection_representation=selection_representation,
        is_placeholder_selection=is_placeholder,
        nonce=selection_nonce,
    )

    if encrypted_selection.proof is None:
        return None  # log will have happened earlier

    # optionally, skip the verification step
    if not should_verify_proofs:
        return encrypted_selection

    # verify the selection.
    if encrypted_selection.is_valid_encryption(
        selection_description_hash, elgamal_public_key, crypto_extended_base_hash
    ):
        return encrypted_selection
    log_warning(
        f"mismatching selection proof for selection {encrypted_selection.object_id}"
    )
    return None
# pylint: disable=too-many-return-statements
def encrypt_contest(
    contest: PlaintextBallotContest,
    contest_description: ContestDescriptionWithPlaceholders,
    elgamal_public_key: ElementModP,
    crypto_extended_base_hash: ElementModQ,
    nonce_seed: ElementModQ,
    should_verify_proofs: bool = True,
) -> Optional[CiphertextBallotContest]:
    """
    Encrypt a specific `BallotContest` in the context of a specific `Ballot`.
    This method accepts a contest representation that only includes `True` selections.
    It will fill missing selections for a contest with `False` values, and generate `placeholder`
    selections to represent the number of seats available for a given contest. By adding `placeholder`
    votes
    :param contest: the contest in the valid input form
    :param contest_description: the `ContestDescriptionWithPlaceholders`
    from the `ContestDescription` which defines this contest's structure
    :param elgamal_public_key: the public key (k) used to encrypt the ballot
    :param crypto_extended_base_hash: the extended base hash of the election
    :param nonce_seed: an `ElementModQ` used as a header to seed the `Nonce` generated for this contest.
    this value can be (or derived from) the Ballot nonce, but no relationship is required
    :param should_verify_proofs: specify if the proofs should be verified prior to returning (default True)
    :return: the encrypted contest, or `None` on any validation or encryption failure
    """
    # Validate Input
    if not contest.is_valid(
        contest_description.object_id,
        len(contest_description.ballot_selections),
        contest_description.number_elected,
        contest_description.votes_allowed,
    ):
        log_warning(f"malformed input contest: {contest}")
        return None
    if not contest_description.is_valid():
        log_warning(f"malformed contest description: {contest_description}")
        return None
    # account for sequence id
    contest_description_hash = contest_description.crypto_hash()
    # Derive a deterministic nonce sequence from the contest hash and the caller's
    # seed; the contest nonce is selected by the contest's sequence order
    nonce_sequence = Nonces(contest_description_hash, nonce_seed)
    contest_nonce = nonce_sequence[contest_description.sequence_order]
    chaum_pedersen_nonce = next(iter(nonce_sequence))
    encrypted_selections: List[CiphertextBallotSelection] = list()
    # Running tally of affirmative votes, used below to decide how many
    # placeholder selections must be marked True
    selection_count = 0
    # TODO: ISSUE #54 this code could be inefficient if we had a contest
    # with a lot of choices, although the O(n^2) iteration here is small
    # compared to the huge cost of doing the cryptography.
    # Generate the encrypted selections
    for description in contest_description.ballot_selections:
        has_selection = False
        encrypted_selection = None
        # iterate over the actual selections for each contest description
        # and apply the selected value if it exists. If it does not, an explicit
        # false is entered instead and the selection_count is not incremented
        # this allows consumers to only pass in the relevant selections made by a voter
        for selection in contest.ballot_selections:
            if selection.object_id == description.object_id:
                # track the selection count so we can append the
                # appropriate number of true placeholder votes
                has_selection = True
                selection_count += selection.vote
                encrypted_selection = encrypt_selection(
                    selection,
                    description,
                    elgamal_public_key,
                    crypto_extended_base_hash,
                    contest_nonce,
                )
                break
        if not has_selection:
            # No selection was made for this possible value
            # so we explicitly set it to false
            encrypted_selection = encrypt_selection(
                selection_from(description),
                description,
                elgamal_public_key,
                crypto_extended_base_hash,
                contest_nonce,
            )
        if encrypted_selection is None:
            return None # log will have happened earlier
        encrypted_selections.append(get_optional(encrypted_selection))
    # Handle Placeholder selections
    # After we loop through all of the real selections on the ballot,
    # we loop through each placeholder value and determine if it should be filled in
    # Add a placeholder selection for each possible seat in the contest
    for placeholder in contest_description.placeholder_selections:
        # for undervotes, select the placeholder value as true for each available seat
        # note this pattern is used since DisjunctiveChaumPedersen expects a 0 or 1
        # so each seat can only have a maximum value of 1 in the current implementation
        select_placeholder = False
        if selection_count < contest_description.number_elected:
            select_placeholder = True
            selection_count += 1
        encrypted_selection = encrypt_selection(
            selection=selection_from(
                description=placeholder,
                is_placeholder=True,
                is_affirmative=select_placeholder,
            ),
            selection_description=placeholder,
            elgamal_public_key=elgamal_public_key,
            crypto_extended_base_hash=crypto_extended_base_hash,
            nonce_seed=contest_nonce,
            is_placeholder=True,
            should_verify_proofs=True,
        )
        if encrypted_selection is None:
            return None # log will have happened earlier
        encrypted_selections.append(get_optional(encrypted_selection))
    # TODO: ISSUE #33: support other cases such as cumulative voting
    # (individual selections being an encryption of > 1)
    # NOTE(review): an undervote relative to votes_allowed only warns rather than
    # failing — presumably intentional for n-of-m contests; confirm
    if (
        contest_description.votes_allowed is not None
        and selection_count < contest_description.votes_allowed
    ):
        log_warning(
            "mismatching selection count: only n-of-m style elections are currently supported"
        )
    # Create the return object
    encrypted_contest = make_ciphertext_ballot_contest(
        object_id=contest.object_id,
        description_hash=contest_description_hash,
        ballot_selections=encrypted_selections,
        elgamal_public_key=elgamal_public_key,
        crypto_extended_base_hash=crypto_extended_base_hash,
        proof_seed=chaum_pedersen_nonce,
        number_elected=contest_description.number_elected,
        nonce=contest_nonce,
    )
    if encrypted_contest is None or encrypted_contest.proof is None:
        return None # log will have happened earlier
    # optionally, skip the verification step
    if not should_verify_proofs:
        return encrypted_contest
    # Verify the proof
    if encrypted_contest.is_valid_encryption(
        contest_description_hash, elgamal_public_key, crypto_extended_base_hash
    ):
        return encrypted_contest
    log_warning(f"mismatching contest proof for contest {encrypted_contest.object_id}")
    return None
# TODO: ISSUE #57: add the device hash to the function interface so it can be propagated with the ballot.
# also propagate the seed so that the ballot codes can be regenerated
# by traversing the collection of ballots encrypted by a specific device
def encrypt_ballot(
    ballot: PlaintextBallot,
    internal_manifest: InternalManifest,
    context: CiphertextElectionContext,
    encryption_seed: ElementModQ,
    nonce: Optional[ElementModQ] = None,
    should_verify_proofs: bool = True,
) -> Optional[CiphertextBallot]:
    """
    Encrypt a specific `Ballot` in the context of a specific `CiphertextElectionContext`.
    This method accepts a ballot representation that only includes `True` selections.
    It will fill missing selections for a contest with `False` values, and generate `placeholder`
    selections to represent the number of seats available for a given contest.
    This method also allows for ballots to exclude passing contests for which the voter made no selections.
    It will fill missing contests with `False` selections and generate `placeholder` selections that are marked `True`.
    :param ballot: the ballot in the valid input form
    :param internal_manifest: the `InternalManifest` which defines this ballot's structure
    :param context: all the cryptographic context for the election
    :param encryption_seed: Hash from previous ballot or starting hash from device
    :param nonce: an optional `int` used to seed the `Nonce` generated for this contest
    if this value is not provided, the secret generating mechanism of the OS provides its own
    :param should_verify_proofs: specify if the proofs should be verified prior to returning (default True)
    :return: the encrypted ballot, or `None` on any validation or encryption failure
    """
    # Determine the relevant range of contests for this ballot style
    style = internal_manifest.get_ballot_style(ballot.style_id)
    # Validate Input
    if not ballot.is_valid(style.object_id):
        log_warning(f"malformed input ballot: {ballot}")
        return None
    # Generate a random master nonce to use for the contest and selection nonce's on the ballot
    random_master_nonce = get_or_else_optional_func(nonce, lambda: rand_q())
    # Include a representation of the election and the external Id in the nonce's used
    # to derive other nonce values on the ballot
    nonce_seed = CiphertextBallot.nonce_seed(
        internal_manifest.manifest_hash,
        ballot.object_id,
        random_master_nonce,
    )
    log_info(f": manifest_hash : {internal_manifest.manifest_hash.to_hex()}")
    log_info(f": encryption_seed : {encryption_seed.to_hex()}")
    encrypted_contests = encrypt_ballot_contests(
        ballot, internal_manifest, context, nonce_seed
    )
    if encrypted_contests is None:
        return None
    # Create the return object
    encrypted_ballot = make_ciphertext_ballot(
        ballot.object_id,
        ballot.style_id,
        internal_manifest.manifest_hash,
        encryption_seed,
        encrypted_contests,
        random_master_nonce,
    )
    # presumably `code` is the ballot tracking code; a missing code makes the
    # ballot unusable, so bail out — TODO confirm against make_ciphertext_ballot
    if not encrypted_ballot.code:
        return None
    if not should_verify_proofs:
        return encrypted_ballot
    # Verify the proofs
    if encrypted_ballot.is_valid_encryption(
        internal_manifest.manifest_hash,
        context.elgamal_public_key,
        context.crypto_extended_base_hash,
    ):
        return encrypted_ballot
    return None # log will have happened earlier
def encrypt_ballot_contests(
    ballot: PlaintextBallot,
    description: InternalManifest,
    context: CiphertextElectionContext,
    nonce_seed: ElementModQ,
) -> Optional[List[CiphertextBallotContest]]:
    """
    Encrypt contests from a plaintext ballot with a specific style.

    :param ballot: the plaintext ballot providing the voter's selections
    :param description: the `InternalManifest` defining the contests per style
    :param context: the cryptographic context for the election
    :param nonce_seed: seed used to derive per-contest nonces
    :return: the encrypted contests, or `None` if any contest fails to encrypt
    """
    encrypted: List[CiphertextBallotContest] = []
    # Only iterate on contests for this specific ballot style
    for style_contest in description.get_contests_for(ballot.style_id):
        # First contest on the ballot matching this description, if any
        plaintext_contest = next(
            (
                candidate
                for candidate in ballot.contests
                if candidate.object_id == style_contest.object_id
            ),
            None,
        )
        if not plaintext_contest:
            # Voter made no selections in this contest: encrypt an all-false contest
            plaintext_contest = contest_from(style_contest)
        encrypted_contest = encrypt_contest(
            plaintext_contest,
            style_contest,
            context.elgamal_public_key,
            context.crypto_extended_base_hash,
            nonce_seed,
        )
        if encrypted_contest is None:
            return None
        encrypted.append(get_optional(encrypted_contest))
    return encrypted
| src/electionguard/encrypt.py | 19,691 | Metadata for encryption device
An object for caching election and encryption state.
It composes Elections and Ballots.
Construct a `BallotContest` from a specific `ContestDescription` with all false fields.
This function is useful for filling contests and selections when a voter undervotes a ballot.
:param description: The `ContestDescription` used to derive the well-formed `BallotContest`
:return: a `BallotContest`
Encrypt the specified ballot using the cached election context.
Encrypt a specific `Ballot` in the context of a specific `CiphertextElectionContext`.
This method accepts a ballot representation that only includes `True` selections.
It will fill missing selections for a contest with `False` values, and generate `placeholder`
selections to represent the number of seats available for a given contest.
This method also allows for ballots to exclude passing contests for which the voter made no selections.
It will fill missing contests with `False` selections and generate `placeholder` selections that are marked `True`.
:param ballot: the ballot in the valid input form
:param internal_manifest: the `InternalManifest` which defines this ballot's structure
:param context: all the cryptographic context for the election
:param encryption_seed: Hash from previous ballot or starting hash from device
:param nonce: an optional `int` used to seed the `Nonce` generated for this contest
if this value is not provided, the secret generating mechanism of the OS provides its own
:param should_verify_proofs: specify if the proofs should be verified prior to returning (default True)
Encrypt contests from a plaintext ballot with a specific style
Encrypt a specific `BallotContest` in the context of a specific `Ballot`.
This method accepts a contest representation that only includes `True` selections.
It will fill missing selections for a contest with `False` values, and generate `placeholder`
selections to represent the number of seats available for a given contest. By adding `placeholder`
votes
:param contest: the contest in the valid input form
:param contest_description: the `ContestDescriptionWithPlaceholders`
from the `ContestDescription` which defines this contest's structure
:param elgamal_public_key: the public key (k) used to encrypt the ballot
:param crypto_extended_base_hash: the extended base hash of the election
:param nonce_seed: an `ElementModQ` used as a header to seed the `Nonce` generated for this contest.
this value can be (or derived from) the Ballot nonce, but no relationship is required
:param should_verify_proofs: specify if the proofs should be verified prior to returning (default True)
Encrypt a specific `BallotSelection` in the context of a specific `BallotContest`
:param selection: the selection in the valid input form
:param selection_description: the `SelectionDescription` from the
`ContestDescription` which defines this selection's structure
:param elgamal_public_key: the public key (K) used to encrypt the ballot
:param crypto_extended_base_hash: the extended base hash of the election
:param nonce_seed: an `ElementModQ` used as a header to seed the `Nonce` generated for this selection.
this value can be (or derived from) the BallotContest nonce, but no relationship is required
:param is_placeholder: specifies if this is a placeholder selection
:param should_verify_proofs: specify if the proofs should be verified prior to returning (default True)
Get unique identifier for device
:return: Unique identifier
Get hash for encryption device
:return: Starting hash
Get the current timestamp in utc
Construct a `BallotSelection` from a specific `SelectionDescription`.
This function is useful for filling selections when a voter undervotes a ballot.
It is also used to create placeholder representations when generating the `ConstantChaumPedersenProof`
:param description: The `SelectionDescription` which provides the relevant `object_id`
:param is_placeholder: Mark this selection as a placeholder value
:param is_affirmative: Mark this selection as `yes`
:return: A BallotSelection
pylint: disable=no-self-use Validate Input Generate the encryption will have logged about the failure earlier, so no need to log anything here TODO: ISSUE 35: encrypt/decrypt: encrypt the extended_data field Create the return object log will have happened earlier optionally, skip the verification step verify the selection. pylint: disable=too-many-return-statements Validate Input account for sequence id TODO: ISSUE 54 this code could be inefficient if we had a contest with a lot of choices, although the O(n^2) iteration here is small compared to the huge cost of doing the cryptography. Generate the encrypted selections iterate over the actual selections for each contest description and apply the selected value if it exists. If it does not, an explicit false is entered instead and the selection_count is not incremented this allows consumers to only pass in the relevant selections made by a voter track the selection count so we can append the appropriate number of true placeholder votes No selection was made for this possible value so we explicitly set it to false log will have happened earlier Handle Placeholder selections After we loop through all of the real selections on the ballot, we loop through each placeholder value and determine if it should be filled in Add a placeholder selection for each possible seat in the contest for undervotes, select the placeholder value as true for each available seat note this pattern is used since DisjunctiveChaumPedersen expects a 0 or 1 so each seat can only have a maximum value of 1 in the current implementation log will have happened earlier TODO: ISSUE 33: support other cases such as cumulative voting (individual selections being an encryption of > 1) Create the return object log will have happened earlier Verify the proof TODO: ISSUE 57: add the device hash to the function interface so it can be propagated with the ballot. 
also propagate the seed so that the ballot codes can be regenerated by traversing the collection of ballots encrypted by a specific device Determine the relevant range of contests for this ballot style Validate Input Generate a random master nonce to use for the contest and selection nonce's on the ballot Include a representation of the election and the external Id in the nonce's used to derive other nonce values on the ballot Create the return object Verify the proofs log will have happened earlier Only iterate on contests for this specific ballot style no selections provided for the contest, so create a placeholder contest | 6,647 | en | 0.812783 |
import argparse
import logging
import sys
import pyphen
import nltk
# Fall back to US English hyphenation patterns if a requested language is unavailable
pyphen.language_fallback("en_US")
# Root logger accepts INFO and above; the stdout handler echoes every record it receives
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console_out = logging.StreamHandler(sys.stdout)
console_out.setLevel(logging.DEBUG)
logger.addHandler(console_out)
def parse_arguments():
    """
    Simple argument parser for the command line
    :return: The text to be edited
    """
    arg_parser = argparse.ArgumentParser(description="Receive text to be edited")
    # Single positional argument: the raw text to analyze
    arg_parser.add_argument("text", metavar="input text", type=str)
    return arg_parser.parse_args().text
def clean_input(text):
    """
    Text sanitization function
    :param text: User input text
    :return: Sanitized text, without non ascii characters
    """
    # To keep things simple at the start, let's only keep ASCII characters.
    # Encode straight to ASCII with errors="ignore" (dropping anything outside
    # the range) instead of round-tripping through UTF-8 bytes; the result is
    # identical and the redundant str() wrapper is gone.
    return text.encode("ascii", errors="ignore").decode("ascii")
def preprocess_input(text):
    """
    Tokenizes text that has been sanitized
    :param text: Sanitized text
    :return: Text ready to be fed to analysis, by having sentences and words tokenized
    """
    # Split into sentences first, then break each sentence into word tokens
    return [nltk.word_tokenize(sentence) for sentence in nltk.sent_tokenize(text)]
def compute_flesch_reading_ease(total_syllables, total_words, total_sentences):
    """
    Computes readability score from summary statistics
    :param total_syllables: number of syllables in input text
    :param total_words: number of words in input text
    :param total_sentences: number of sentences in input text
    :return: A readability score: the lower the score, the more complex the text is deemed to be
    """
    # Flesch reading-ease: 206.835 - 1.015 (words/sentence) - 84.6 (syllables/word).
    # The previous constant 206.85 was a typo for the standard 206.835 used by the
    # formula this module's thresholds (get_reading_level_from_flesch) are based on.
    return (
        206.835
        - 1.015 * (total_words / total_sentences)
        - 84.6 * (total_syllables / total_words)
    )
def get_reading_level_from_flesch(flesch_score):
    """
    Thresholds taken from https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests
    :param flesch_score:
    :return: A reading level and difficulty for a given flesch score
    """
    # Upper-bound thresholds paired with their difficulty labels, checked in order
    thresholds = (
        (30, "Very difficult to read"),
        (50, "Difficult to read"),
        (60, "Fairly difficult to read"),
        (70, "Plain English"),
        (80, "Fairly easy to read"),
        (90, "Easy to read"),
    )
    for upper_bound, label in thresholds:
        if flesch_score < upper_bound:
            return label
    return "Very easy to read"
def compute_average_word_length(tokens):
    """
    Calculate word length for a sentence
    :param tokens: a list of words
    :return: The average length of words in this list
    """
    # Mean character count across all tokens
    return sum(len(word) for word in tokens) / len(tokens)
def compute_total_average_word_length(sentence_list):
    """
    Calculate average word length for multiple sentences
    :param sentence_list: a list of sentences, each being a list of words
    :return: The average length of words in this list of sentences
    """
    # Average of per-sentence averages: every sentence is weighted equally,
    # regardless of how many words it contains
    total = 0
    for sentence in sentence_list:
        total += compute_average_word_length(sentence)
    return total / len(sentence_list)
def compute_total_unique_words_fraction(sentence_list):
    """
    Compute fraction of unique words
    :param sentence_list: a list of sentences, each being a list of words
    :return: the fraction of unique words in the sentences
    """
    # Flatten all sentences into one word list (duplicates preserved)
    all_words = []
    for word_list in sentence_list:
        all_words.extend(word_list)
    # Distinct words relative to the total word count
    return len(set(all_words)) / len(all_words)
def count_word_usage(tokens, word_list):
    """
    Counts occurrences of a given list of words
    :param tokens: a list of tokens for one sentence
    :param word_list: a list of words to search for
    :return: the number of times the words appear in the list
    """
    # Case-insensitive membership test against the target word list
    return sum(1 for word in tokens if word.lower() in word_list)
def count_word_syllables(word):
    """
    Count syllables in a word
    :param word: a one word string
    :return: the number of syllables according to pyphen
    """
    # Building a Pyphen dictionary is relatively expensive and this function is
    # called once per word, so cache a single instance on the function object
    # instead of reconstructing it on every call.
    dic = getattr(count_word_syllables, "_dic", None)
    if dic is None:
        dic = pyphen.Pyphen(lang="en_US")
        count_word_syllables._dic = dic
    # this returns our word, with hyphens ("-") inserted in between each syllable
    hyphenated = dic.inserted(word)
    return len(hyphenated.split("-"))
def count_sentence_syllables(tokens):
    """
    Count syllables in a sentence
    :param tokens: a list of words and potentially punctuation
    :return: the number of syllables in the sentence
    """
    # Our tokenizer leaves punctuation as a separate token, so skip those here
    punctuation = ".,!?/"
    total = 0
    for word in tokens:
        if word not in punctuation:
            total += count_word_syllables(word)
    return total
def count_total_syllables(sentence_list):
    """
    Count syllables in a list of sentences
    :param sentence_list: a list of sentences, each being a list of words
    :return: the number of syllables in the sentences
    """
    # Aggregate the per-sentence syllable counts over the whole text
    return sum(count_sentence_syllables(sentence) for sentence in sentence_list)
def count_words_per_sentence(sentence_tokens):
    """
    Count words in a sentence
    :param sentence_tokens: a list of words and potentially punctuation
    :return: the number of words in the sentence
    """
    # Punctuation tokens do not count as words
    punctuation = ".,!?/"
    return sum(1 for word in sentence_tokens if word not in punctuation)
def count_total_words(sentence_list):
    """
    Count words in a list of sentences
    :param sentence_list: a list of sentences, each being a list of words
    :return: the number of words in the sentences
    """
    # Aggregate the per-sentence word counts over the whole text
    total = 0
    for sentence in sentence_list:
        total += count_words_per_sentence(sentence)
    return total
def get_suggestions(sentence_list):
    """
    Returns a string containing our suggestions
    :param sentence_list: a list of sentences, each being a list of words
    :return: suggestions to improve the input
    """
    # Heuristic word-usage counters
    told_said_usage = sum(
        count_word_usage(tokens, ["told", "said"]) for tokens in sentence_list
    )
    but_and_usage = sum(
        count_word_usage(tokens, ["but", "and"]) for tokens in sentence_list
    )
    wh_adverb_words = [
        "when",
        "where",
        "why",
        "whence",
        "whereby",
        "wherein",
        "whereupon",
    ]
    wh_adverbs_usage = sum(
        count_word_usage(tokens, wh_adverb_words) for tokens in sentence_list
    )

    # Summary statistics feeding the readability score
    average_word_length = compute_total_average_word_length(sentence_list)
    unique_words_fraction = compute_total_unique_words_fraction(sentence_list)
    number_of_syllables = count_total_syllables(sentence_list)
    number_of_words = count_total_words(sentence_list)
    number_of_sentences = len(sentence_list)
    flesch_score = compute_flesch_reading_ease(
        number_of_syllables, number_of_words, number_of_sentences
    )

    # Assemble the report one line at a time; "<br/>" separates the lines
    # so the result displays correctly on the web app
    report_lines = [
        f"Adverb usage: {told_said_usage} told/said, "
        f"{but_and_usage} but/and, {wh_adverbs_usage} wh adverbs",
        f"Average word length {average_word_length:.2f}, "
        f"fraction of unique words {unique_words_fraction:.2f}",
        f"{number_of_syllables} syllables, {number_of_words} words, "
        f"{number_of_sentences} sentences",
        f"{number_of_syllables} syllables, {flesch_score:.2f} flesch score: "
        f"{get_reading_level_from_flesch(flesch_score)}",
    ]
    return "<br/>".join(report_lines)
def get_recommendations_from_input(txt):
    """
    Cleans, preprocesses, and generates heuristic suggestion for input string
    :param txt: Input text
    :return: Suggestions for a given text input
    """
    # Pipeline: sanitize -> tokenize -> heuristic analysis
    return get_suggestions(preprocess_input(clean_input(txt)))
if __name__ == "__main__":
input_text = parse_arguments()
print(get_recommendations_from_input(input_text))
| ml_editor/ml_editor.py | 8,408 | Text sanitization function
:param text: User input text
:return: Sanitized text, without non ascii characters
Calculate word length for a sentence
:param tokens: a list of words
:return: The average length of words in this list
Computes readability score from summary statistics
:param total_syllables: number of syllables in input text
:param total_words: number of words in input text
:param total_sentences: number of sentences in input text
:return: A readability score: the lower the score, the more complex the text is deemed to be
Calculate average word length for multiple sentences
:param sentence_list: a list of sentences, each being a list of words
:return: The average length of words in this list of sentences
Compute fraction of unique words
:param sentence_list: a list of sentences, each being a list of words
:return: the fraction of unique words in the sentences
Count syllables in a sentence
:param tokens: a list of words and potentially punctuation
:return: the number of syllables in the sentence
Count syllables in a list of sentences
:param sentence_list: a list of sentences, each being a list of words
:return: the number of syllables in the sentences
Count words in a list of sentences
:param sentence_list: a list of sentences, each being a list of words
:return: the number of words in the sentences
Count syllables in a word
:param word: a one word string
:return: the number of syllables according to pyphen
Counts occurrences of a given list of words
:param tokens: a list of tokens for one sentence
:param word_list: a list of words to search for
:return: the number of times the words appear in the list
Count words in a sentence
:param sentence_tokens: a list of words and potentially punctuation
:return: the number of words in the sentence
Thresholds taken from https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests
:param flesch_score:
:return: A reading level and difficulty for a given flesch score
Cleans, preprocesses, and generates heuristic suggestion for input string
:param txt: Input text
:return: Suggestions for a given text input
Returns a string containing our suggestions
:param sentence_list: a list of sentences, each being a list of words
:return: suggestions to improve the input
Simple argument parser for the command line
:return: The text to be edited
Tokenizes text that has been sanitized
:param text: Sanitized text
:return: Text ready to be fed to analysis, by having sentences and words tokenized
To keep things simple at the start, let's only keep ASCII characters this returns our word, with hyphens ("-") inserted in between each syllable Our tokenizer leaves punctuation as a separate word, so we filter for it here Using HTML break to later display on a webapp | 2,751 | en | 0.827244 |
# -*- coding: utf-8 -*-
import six
from flask import Blueprint, jsonify, current_app
from ..utils import MountTree
from .utils import is_testing
# Blueprint named after the second component of this module's dotted path
# (presumably the app section name — TODO confirm against the package layout)
api_bp = Blueprint('api', __name__.rsplit('.')[1])
# The hello-world probe route is only registered when running under tests
if is_testing():
    @api_bp.route('/_hello/')
    def api_hello():
        return jsonify('api hello')
@api_bp.route('/all')
def all_storage():
    """Get all storage in JSON.

    Mounts every storage of every registered tree at ``<prefix>/<path>``
    and returns a compressed, nested representation of the mount tree.
    """
    trees = current_app.trees
    mounts = MountTree()
    for prefix, tree in six.iteritems(trees):
        for path, storage in tree.iter_storage():
            mounts.mount(prefix + '/' + path, storage)
    # get a compressed representation of the tree
    def dfs(node):
        # Inner nodes become lists of (name, subtree) pairs in sorted name order;
        # empty subtrees are pruned (falsy child results are dropped)
        children = node.children
        if children:
            ret = []
            for name in sorted(six.iterkeys(children)):
                child = children[name]
                child_ret = dfs(child)
                if child_ret:
                    ret.append((name, child_ret))
            if ret:
                return ret
        # Leaf (or fully pruned node): serialize the mounted storage if present,
        # otherwise implicitly return None so the parent prunes this branch
        data = node.data
        if data:
            return data.to_dict()
    return jsonify(dfs(mounts.root) or [])
| mlcomp/board/views/api.py | 1,114 | Get all storage in JSON.
-*- coding: utf-8 -*- get a compressed representation of the tree | 92 | en | 0.848112 |
#!/usr/bin/python3
# -*- coding:utf-8 -*-
from copy import deepcopy
import torch
from cvpods.checkpoint import DefaultCheckpointer
from cvpods.data import build_transform_gens
__all__ = ["DefaultPredictor"]
class DefaultPredictor:
    """
    Create a simple end-to-end predictor with the given config that runs on
    single device for a single input image.
    Compared to using the model directly, this class does the following additions:
    1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
    2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
    3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
    4. Take one input image and produce a single output, instead of a batch.
    If you'd like to do anything more fancy, please refer to its source code
    as examples to build and use the model manually.
    Attributes:
        metadata (Metadata): the metadata of the underlying dataset, obtained from
            cfg.DATASETS.TEST.
    Examples:
    .. code-block:: python
        pred = DefaultPredictor(cfg)
        inputs = cv2.imread("input.jpg")
        outputs = pred(inputs)
    """
    def __init__(self, cfg, meta):
        # Work on a copy so the caller's config is never mutated
        self.cfg = deepcopy(cfg)
        if self.cfg.MODEL.DEVICE.startswith("cuda:"):
            # Pin the requested GPU, then normalize the device string so the
            # model builds on the current CUDA device
            torch.cuda.set_device(self.cfg.MODEL.DEVICE)
            self.cfg.MODEL.DEVICE = "cuda"
        self.model = cfg.build_model(self.cfg)
        # Inference only: switch off training-mode behavior
        self.model.eval()
        self.metadata = meta
        # Load the weights referenced by the config
        checkpointer = DefaultCheckpointer(self.model)
        checkpointer.load(cfg.MODEL.WEIGHTS)
        # Test-time preprocessing pipeline (e.g. resizing), applied in __call__
        self.transform_gen = build_transform_gens(cfg.INPUT.AUG.TEST_PIPELINES)
        self.input_format = cfg.INPUT.FORMAT
        assert self.input_format in ["RGB", "BGR"], self.input_format
    def __call__(self, original_image):
        """
        Args:
            original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
        Returns:
            predictions (dict):
                the output of the model for one image only.
                See :doc:`/tutorials/models` for details about the format.
        """
        with torch.no_grad(
        ): # https://github.com/sphinx-doc/sphinx/issues/4258
            # Apply pre-processing to image.
            if self.input_format == "RGB":
                # whether the model expects BGR inputs or RGB
                original_image = original_image[:, :, ::-1]
            height, width = original_image.shape[:2]
            image = original_image
            for tfm_gen in self.transform_gen:
                image = tfm_gen.get_transform(image).apply_image(image)
            # HWC -> CHW float32 tensor, as expected by the model
            image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
            inputs = {"image": image, "height": height, "width": width}
            # The model consumes a batch; wrap the single input and unwrap the result
            predictions = self.model([inputs])[0]
            return predictions
| cvpods/engine/predictor.py | 2,871 | Create a simple end-to-end predictor with the given config that runs on
single device for a single input image.
Compared to using the model directly, this class does the following additions:
1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
4. Take one input image and produce a single output, instead of a batch.
If you'd like to do anything more fancy, please refer to its source code
as examples to build and use the model manually.
Attributes:
metadata (Metadata): the metadata of the underlying dataset, obtained from
cfg.DATASETS.TEST.
Examples:
.. code-block:: python
pred = DefaultPredictor(cfg)
inputs = cv2.imread("input.jpg")
outputs = pred(inputs)
Args:
original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
Returns:
predictions (dict):
the output of the model for one image only.
See :doc:`/tutorials/models` for details about the format.
!/usr/bin/python3 -*- coding:utf-8 -*- https://github.com/sphinx-doc/sphinx/issues/4258 Apply pre-processing to image. whether the model expects BGR inputs or RGB | 1,235 | en | 0.738415 |
# coding: utf-8
from pytdx.hq import TdxHq_API
from pytdx.params import TDXParams
import pandas as pd
import numpy as np
import re
import csv
import io
import time
import traceback
if __name__ == '__main__':
    # Load the symbol list (e.g. "SH600000" / "SZ000001"), one symbol per line
    with io.open(r'..\all_other_data\symbol.txt', 'r', encoding='utf-8') as f:
        symbol = [s.strip() for s in f.readlines()]
    TDXHQ = TdxHq_API(raise_exception=True, auto_retry=True)
    if not TDXHQ.connect('121.14.110.200', 443):
        raise Exception("Can't connect.")
    # Collect the latest close price for every symbol into a single DataFrame.
    # Building the frame up front (instead of lazily on the first row) also
    # keeps the script from crashing when the symbol list is empty.
    columns = ['code', 'price']
    quote_df = pd.DataFrame(columns=columns)
    for code in symbol:
        # Symbols are prefixed with the exchange: SH -> market 1, otherwise market 0
        if code[0:2] == 'SH':
            market = 1
        else:
            market = 0
        code = code[2:]
        # category 9 requests K-line bars; fetch only the most recent bar
        quote_info = TDXHQ.get_security_bars(9, market, code, 0, 1)
        try:
            values = [code, quote_info[0]['close']]
            quote_df.loc[quote_df.shape[0]] = values
        except Exception as e:
            # Best effort: report the bad symbol and keep going.
            # print() calls replace the Python-2-only print statements, and
            # str(e) replaces the deprecated (py3-removed) e.message.
            print("code {}, process bars error, skipped.".format(code))
            print(str(e))
            print(quote_info)
    quote_df = quote_df.rename(columns={
        'code': '代码',
        'price': '价格',
    })
    quote_df.to_csv(r"..\all_other_data\all_last_price.csv", encoding="gbk", quoting=csv.QUOTE_NONE, index=False)
    TDXHQ.disconnect()
| get_data/get_last_price.py | 1,672 | coding: utf-8symbol = symbol[0:5]quote_info = TDXHQ.get_security_quotes([(market, code)]) string_columns = ['代码'] quote_df[string_columns] = quote_df[string_columns].applymap( lambda x: '=""' if type(x) is float else '="' + str(x) + '"') | 241 | en | 0.234239 |
#! /usr/bin/env python3
from ssedata import FunctionType
from google.protobuf.json_format import MessageToDict
import grpc
import argparse
import json
import logging
import logging.config
import os
import sys
import inspect
import time
from websocket import create_connection
import socket
import re
from concurrent import futures
from datetime import datetime
import requests
import configparser
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(PARENT_DIR, 'generated'))
sys.path.append(os.path.join(PARENT_DIR, 'helper_functions'))
import qlist
import pysize
from ssedata import FunctionType
import ServerSideExtension_pb2 as SSE
# import helper .py files
import qlist
import pysize
import ServerSideExtension_pb2 as SSE
# Sleep quantum for the blocking loop in ExtensionService.Serve().
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# Populated from config/qrag.ini in the __main__ block; the _rest_*/_ws_*
# handlers read per-function sections from it at call time.
config = configparser.ConfigParser()
class ExtensionService(SSE.ConnectorServicer):
"""
A simple SSE-plugin created for the HelloWorld example.
"""
    def __init__(self, funcdef_file):
        """
        Class initializer: store the definition file path and configure logging.

        :param funcdef_file: path to a function definition JSON file
        """
        self._function_definitions = funcdef_file
        #self.ScriptEval = ScriptEval()
        # Ensure the log directory exists before logging is configured
        # (presumably the handlers in logger.config write under logs/ — confirm).
        os.makedirs('logs', exist_ok=True)
        # logger.config is expected one directory above this module.
        log_file = os.path.join(os.path.dirname(
            os.path.dirname(os.path.abspath(__file__))), 'logger.config')
        print(log_file)
        logging.config.fileConfig(log_file)
        logging.info(self._function_definitions)
        logging.info('Logging enabled')
function_name = "none"
@property
def function_definitions(self):
"""
:return: json file with function definitions
"""
return self._function_definitions
@property
def functions(self):
"""
:return: Mapping of function id and implementation
"""
return {
0: '_rest_single',
1: '_rest_30',
2: '_ws_single',
3: '_ws_batch',
4: '_gcp_bq'
}
@staticmethod
def _get_function_id(context):
"""
Retrieve function id from header.
:param context: context
:return: function id
"""
metadata = dict(context.invocation_metadata())
header = SSE.FunctionRequestHeader()
header.ParseFromString(metadata['qlik-functionrequestheader-bin'])
return header.functionId
    @staticmethod
    def _rest_single(request, context):
        """
        Forward each incoming row to a REST endpoint, one POST per row.

        Reads `url` and `cache` from the config section named by the
        module-level global `q_function_name` (set in ExecuteFunction).
        :param request: iterable sequence of bundled rows
        :param context: gRPC context, used to disable Qlik result caching
        :return: generator of SSE.BundledRows with one string result column
        """
        logging.info('Entering {} TimeStamp: {}' .format(
            function_name, datetime.now().strftime("%H:%M:%S.%f")))
        url = config.get(q_function_name, 'url')
        logging.debug("Rest Url is set to {}" .format(url))
        bCache = config.get(q_function_name, 'cache')
        logging.debug("Caching is set to {}" .format(bCache))
        if (bCache.lower() == "true"):
            logging.info(
                "Caching ****Enabled*** for {}" .format(q_function_name))
        else:
            # Send the qlik-cache header so the engine does not cache results.
            logging.info(
                "Caching ****Disabled**** for {}" .format(q_function_name))
            md = (('qlik-cache', 'no-store'),)
            context.send_initial_metadata(md)
        # NOTE(review): response_rows is never cleared, so each yielded bundle
        # re-sends all rows accumulated so far — confirm this is intended.
        response_rows = []
        request_counter = 1
        for request_rows in request:
            logging.debug(
                'Printing Request Rows - Request Counter {}' .format(request_counter))
            request_counter = request_counter + 1
            for row in request_rows.rows:
                # One input column expected: take the first dual's string value.
                param = [d.strData for d in row.duals][0]
                if (len(param) == 0):
                    # Empty input: log and skip the POST (no result row added).
                    logging.info('Exiting {} TimeStamp: {} due to Data being Empty ' .format(
                        function_name, datetime.now().strftime("%H:%M:%S.%f")))
                else:
                    payload = '{"data":"' + param + '"}'
                    logging.debug('Showing Payload: {}'.format(payload))
                    resp = requests.post(url, data=payload)
                    logging.debug(
                        'Show Payload Response as Text: {}'.format(resp.text))
                    # Strip quotes and surrounding whitespace from the body.
                    result = resp.text
                    result = result.replace('"', '')
                    result = result.strip()
                    logging.debug('Show Result: {}'.format(result))
                    # Create an iterable of dual with the result
                    duals = iter([SSE.Dual(strData=result)])
                    response_rows.append(SSE.Row(duals=duals))
            # Yield the row data as bundled rows
            yield SSE.BundledRows(rows=response_rows)
        logging.info('Exiting {} TimeStamp: {}' .format(
            function_name, datetime.now().strftime("%H:%M:%S.%f")))
    @staticmethod
    def _ws_single(request, context):
        """
        Single-row processing over a websocket: one send/recv per input row.

        Reads ws_url/token/username/ws_route/cache from the config section
        named by the module-level global `q_function_name`.
        :param request: iterable sequence of bundled rows
        :param context: gRPC context, used to disable Qlik result caching
        :return: generator of SSE.BundledRows with one string result column
        """
        logging.info('Entering {} TimeStamp: {}' .format(
            function_name, datetime.now().strftime("%H:%M:%S.%f")))
        # Gather environment values used to build the client identifier.
        host = socket.gethostname()
        ip_addr = socket.gethostbyname(host)
        ws_url = config.get(q_function_name, 'ws_url')
        token = config.get(q_function_name, 'token')
        user_name = config.get(q_function_name, 'username')
        ws_route = config.get(q_function_name, 'ws_route')
        bCache = config.get(q_function_name, 'cache')
        logging.debug('Pringint Route for WS {}' .format(ws_route))
        logging.debug("Caching is set to {}" .format(bCache))
        if (bCache.lower() == "true"):
            logging.info(
                "Caching ****Enabled*** for {}" .format(q_function_name))
        else:
            # Send the qlik-cache header so the engine does not cache results.
            logging.info(
                "Caching ****Disabled**** for {}" .format(q_function_name))
            md = (('qlik-cache', 'no-store'),)
            context.send_initial_metadata(md)
        # In Future we will use the Token for Liencensing and Throttling
        # Currently we are using Comblination of host+ipaddr+username for Client Identification
        ws_url = ws_url + host + '_' + ip_addr+'_' + user_name+'_'
        logging.debug('Websocket URL : {}' .format(ws_url))
        ws = create_connection(ws_url)
        # NOTE(review): response_rows accumulates across bundles (never reset),
        # so each yield re-sends all previous rows — confirm this is intended.
        response_rows = []
        for request_rows in request:
            for row in request_rows.rows:
                # One input column expected: take the first dual's string value.
                param = [d.strData for d in row.duals][0]
                result = ''
                if (len(param) == 0):
                    # Empty input still produces an output row with 'Error'.
                    logging.debug('Parameters are Empty')
                    result = 'Error'
                else:
                    payload = '{"action":"' + ws_route + \
                        '","data":"' + param + '"}'
                    logging.debug('Showing Payload: {}'.format(payload))
                    ws.send(payload)
                    #logging.info('Show Payload Response: {}'.format(resp.text))
                    # Blocking receive; response is JSON with a 'result' key.
                    resp = json.loads(ws.recv())
                    logging.debug(resp)
                    result = resp['result']
                    logging.debug('Show Result: {}'.format(result))
                # Create an iterable of dual with the result
                duals = iter([SSE.Dual(strData=result)])
                response_rows.append(SSE.Row(duals=duals))
            # Yield the row data as bundled rows
            yield SSE.BundledRows(rows=response_rows)
        ws.close()
        logging.info('Exiting {} TimeStamp: {}' .format(
            function_name, datetime.now().strftime("%H:%M:%S.%f")))
    @staticmethod
    def _ws_batch(request, context):
        """
        Batched websocket processing: rows are chunked into `batch_size`
        groups, each chunk is sent as one message, then one response is
        received per row in the chunk.
        :param request: iterable sequence of bundled rows
        :param context: gRPC context, used to disable Qlik result caching
        :return: generator of SSE.BundledRows with one string result column
        """
        logging.info('Entering {} TimeStamp: {}' .format(
            function_name, datetime.now().strftime("%H:%M:%S.%f")))
        host = socket.gethostname()
        ip_addr = socket.gethostbyname(host)
        logging.debug('Calling qrag.ini section "{}' .format(q_function_name))
        ws_url = config.get(q_function_name, 'ws_url')
        token = config.get(q_function_name, 'token')
        user_name = config.get(q_function_name, 'username')
        batch_size = int(config.get(q_function_name, 'batch_size'))
        logging.debug('Batch Size {}' .format(batch_size))
        ws_route = config.get(q_function_name, 'ws_route')
        logging.info('API Route : {}' .format(ws_route))
        # setup Caching
        bCache = config.get(q_function_name, 'cache')
        logging.debug("Caching is set to {}" .format(bCache))
        if (bCache.lower() == "true"):
            logging.info(
                "Caching ****Enabled*** for {}" .format(q_function_name))
        else:
            # Send the qlik-cache header so the engine does not cache results.
            logging.info(
                "Caching ****Disabled**** for {}" .format(q_function_name))
            md = (('qlik-cache', 'no-store'),)
            context.send_initial_metadata(md)
        # Client identifier appended to the websocket URL (host/ip/user).
        ws_url = ws_url + host + '_' + ip_addr+'_' + user_name+'_'
        logging.debug('Full url for ws: {} '.format(ws_url))
        ws = create_connection(ws_url)
        # NOTE(review): response_rows accumulates across bundles (never reset),
        # so each yield re-sends all previous rows — confirm this is intended.
        response_rows = []
        outer_counter = 1
        inner_counter = 1
        request_counter = 1
        for request_rows in request:
            logging.debug(
                'Printing Request Rows - Request Counter {}' .format(request_counter))
            request_counter += 1
            # Convert the protobuf bundle to plain dicts for JSON transport.
            temp = MessageToDict(request_rows)
            logging.debug('Temp Message to Dict {}' .format(temp))
            test_rows = temp['rows']
            logging.debug('Test Rows: {}' .format(test_rows))
            request_size = len(test_rows)
            logging.debug(
                'Bundled Row Number of Rows - {}' .format(request_size))
            # Chunk the rows into batches of at most batch_size.
            batches = list(qlist.divide_chunks(test_rows, batch_size))
            for i in batches:
                payload_t = {"action": ws_route}
                logging.debug('PreFix Route Seletection {}' .format(payload_t))
                logging.debug(len(batches))
                payload_t["data"] = i
                logging.debug('Size of payload {}' .format(
                    pysize.get_size(payload_t)))
                logging.debug('Showing Payload: {}'.format(payload_t))
                logging.debug('batch number {}'.format(outer_counter))
                ws.send(json.dumps(payload_t))
                logging.debug('message sent WS')
                outer_counter += 1
                payload_t.clear()
                # One response message is expected back per row in this batch.
                for j in i:
                    #logging.debug("Priniting i {}" .format(i))
                    resp = json.loads(ws.recv())
                    #logging.debug('Response Type : {}' .format(type(resp)))
                    logging.debug('Counter: {} Payload Size: {} Payload Response: {}'.format(
                        inner_counter, pysize.get_size(resp), resp))
                    inner_counter += 1
                    result = resp['result']
                    logging.debug('Log Resulst: {}' .format(result))
                    duals = iter([SSE.Dual(strData=result)])
                    response_rows.append(SSE.Row(duals=duals))
                    logging.debug(
                        'Exiting Inner Loop: Printing j {}' .format(j))
            yield SSE.BundledRows(rows=response_rows)
        ws.close()
        logging.info('Exiting {} TimeStamp: {}' .format(
            function_name, datetime.now().strftime("%H:%M:%S.%f")))
    @staticmethod
    def _rest_30(request, context):
        """
        Join all of a row's column values into one comma-separated string and
        POST it to the configured REST endpoint; emit one result row per input
        row.
        :param request: iterable sequence of bundled rows
        :param context: gRPC context, used to disable Qlik result caching
        :return: generator of SSE.BundledRows with one string result column
        """
        logging.info('Entering {} TimeStamp: {}' .format(
            function_name, datetime.now().strftime("%H:%M:%S.%f")))
        url = config.get(q_function_name, 'url')
        bCache = config.get(q_function_name, 'cache')
        logging.debug("Caching is set to {}" .format(bCache))
        if (bCache.lower() == "true"):
            logging.info(
                "Caching ****Enabled*** for {}" .format(q_function_name))
        else:
            # Send the qlik-cache header so the engine does not cache results.
            logging.info(
                "Caching ****Disabled**** for {}" .format(q_function_name))
            md = (('qlik-cache', 'no-store'),)
            context.send_initial_metadata(md)
        # Iterate over bundled rows
        # NOTE(review): response_rows accumulates across bundles (never reset),
        # so each yield re-sends all previous rows — confirm this is intended.
        response_rows = []
        for request_rows in request:
            # Iterate over rows
            for row in request_rows.rows:
                # Collect every column's string value for this row.
                param = [d.strData for d in row.duals]
                if (len(param) == 0):
                    logging.debug('Parameters are Empty')
                    result = 'Error'
                #logging.info('Showing Payload: {}'.format(param))
                # Aggregate parameters to a single string
                # Join payload via =','.join(param)
                else:
                    payload = '{"data":"' + (','.join(param)) + '"}'
                    logging.debug('Showing Payload: {}'.format(payload))
                    resp = requests.post(url, data=payload)
                    logging.debug(
                        'Show Payload Response: {}'.format(resp.text))
                    # Strip quotes and surrounding whitespace from the body.
                    result = resp.text
                    result = result.replace('"', '')
                    result = result.strip()
                    logging.debug('Show Result: {}'.format(result))
                # Create an iterable of dual with the result
                duals = iter([SSE.Dual(strData=result)])
                response_rows.append(SSE.Row(duals=duals))
            # Yield the row data as bundled rows
            yield SSE.BundledRows(rows=response_rows)
        logging.info('Exiting Predict v2 TimeStamp: {}' .format(
            datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _gcp_bq(request, context)
"""
Google Cloud Big Query Client Integration
November 2020
john.park@qlik.com
"""
logging.info('Entering {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
url = config.get(q_function_name, 'url')
bCache = config.get(q_function_name, 'cache')
logging.debug("Caching is set to {}" .format(bCache))
if (bCache.lower() == "true"):
logging.info(
"Caching ****Enabled*** for {}" .format(q_function_name))
else:
logging.info(
"Caching ****Disabled**** for {}" .format(q_function_name))
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
# Iterate over bundled rows
response_rows = []
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals]
if (len(param) == 0):
logging.debug('Parameters are Empty')
result = 'Error'
#logging.info('Showing Payload: {}'.format(param))
# Aggregate parameters to a single string
# Join payload via =','.join(param)
else:
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
response_rows.append(SSE.Row(duals=duals))
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=response_rows)
logging.info('Exiting gcp_bq TimeStamp: {}' .format(
datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _cache(request, context):
"""
Cache enabled. Add the datetime stamp to the end of each string value.
:param request: iterable sequence of bundled rows
:param context: not used.
:return: string
"""
# Iterate over bundled rows
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals][0]
# Join with current timedate stamp
result = param + ' ' + datetime.now().isoformat()
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=[SSE.Row(duals=duals)])
@staticmethod
def _no_cache(request, context):
"""
Cache disabled. Add the datetime stamp to the end of each string value.
:param request:
:param context: used for disabling the cache in the header.
:return: string
"""
# Disable caching.
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
# Iterate over bundled rows
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals][0]
# Join with current timedate stamp
result = param + ' ' + datetime.now().isoformat()
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=[SSE.Row(duals=duals)])
    def _get_call_info(self, context):
        """
        Retrieve useful information for the function call, for logging.
        :param context: gRPC context carrying invocation metadata
        :return: string containing peer, capability name, user id and app id
        """
        # Get metadata for the call from the context
        metadata = dict(context.invocation_metadata())
        # Get the function ID
        func_header = SSE.FunctionRequestHeader()
        func_header.ParseFromString(metadata['qlik-functionrequestheader-bin'])
        func_id = func_header.functionId
        # Get the common request header
        common_header = SSE.CommonRequestHeader()
        common_header.ParseFromString(metadata['qlik-commonrequestheader-bin'])
        # Get capabilities (computed once and memoized on the instance)
        if not hasattr(self, 'capabilities'):
            self.capabilities = self.GetCapabilities(None, context)
        # Get the name of the capability called in the function
        capability = [
            function.name for function in self.capabilities.functions if function.functionId == func_id][0]
        # Get the user ID using a regular expression
        # (falls back to the raw userId string when the pattern does not match)
        match = re.match(r"UserDirectory=(?P<UserDirectory>\w*)\W+UserId=(?P<UserId>\w*)",
                         common_header.userId, re.IGNORECASE)
        if match:
            userId = match.group('UserDirectory') + '/' + match.group('UserId')
        else:
            userId = common_header.userId
        # Get the app ID
        appId = common_header.appId
        # Get the call's origin
        peer = context.peer()
        return "{0} - Capability '{1}' called by user {2} from app {3}".format(peer, capability, userId, appId)
@staticmethod
def _echo_table(request, context):
"""
Echo the input table.
:param request:
:param context:
:return:
"""
for request_rows in request:
response_rows = []
for row in request_rows.rows:
response_rows.append(row)
yield SSE.BundledRows(rows=response_rows)
    def GetCapabilities(self, request, context):
        """
        Get capabilities.
        Note that neither request nor context is used in the implementation of this method, but still added as
        parameters. The reason is that gRPC always sends both when making a function call and therefore we must include
        them to avoid error messages regarding too many parameters provided from the client.
        :param request: the request, not used in this method.
        :param context: the context, not used in this method.
        :return: the capabilities.
        """
        logging.info('GetCapabilities')
        # Create an instance of the Capabilities grpc message
        # Enable(or disable) script evaluation
        # Set values for pluginIdentifier and pluginVersion
        capabilities = SSE.Capabilities(allowScript=True,
                                        pluginIdentifier='Qlik Rapid API Gateway - Partner Engineering',
                                        pluginVersion='v0.1.0')
        # If user defined functions supported, add the definitions to the message
        with open(self.function_definitions) as json_file:
            # Iterate over each function definition and add data to the capabilities grpc message
            for definition in json.load(json_file)['Functions']:
                function = capabilities.functions.add()
                function.name = definition['Name']
                function.functionId = definition['Id']
                function.functionType = definition['Type']
                function.returnType = definition['ReturnType']
                # Retrieve name and type of each parameter
                # (sorted so parameter order is deterministic)
                for param_name, param_type in sorted(definition['Params'].items()):
                    function.params.add(name=param_name, dataType=param_type)
                logging.info('Adding to capabilities: {}({})'.format(function.name,
                                                                     [p.name for p in function.params]))
        return capabilities
    def ExecuteFunction(self, request_iterator, context):
        """
        Execute function call: look up the function definition by id, record
        its names in module-level globals, and dispatch to the handler.
        :param request_iterator: an iterable sequence of Row.
        :param context: the context.
        :return: an iterable sequence of Row.
        """
        func_id = self._get_function_id(context)
        logging.info(self._get_call_info(context))
        # Call corresponding function
        logging.info('ExecuteFunctions (functionId: {})' .format(func_id))
        # self.functions[func_id]))
        # Re-read the JSON definition file on every call and pick the entry
        # whose list index equals the function id.
        current_function_def = (json.load(open(self.function_definitions))[
            'Functions'])[func_id]
        logging.debug(current_function_def)
        # Handlers read these module-level globals for config lookups/logging.
        global q_function_name
        q_function_name = current_function_def["Name"]
        logging.debug('Logical Method Called is: {}' .format(q_function_name))
        current_qrap_type = current_function_def["QRAP_Type"]
        qrag_function_name = '_' + current_qrap_type
        logging.debug(
            'This is the type of QRAG Method Name: {}' .format(current_qrap_type))
        logging.debug(
            'Physical Method Called is: {}' .format(qrag_function_name))
        # Convers to Method Name to Physical Main Function
        qrag_id = qlist.find_key(self.functions, qrag_function_name)
        logging.debug('QRAG ID: {}' .format(qrag_id))
        global function_name
        function_name = self.functions[qrag_id]
        return getattr(self, self.functions[qrag_id])(request_iterator, context)
    def Serve(self, port, pem_dir):
        """
        Sets up the gRPC Server and blocks until interrupted.
        :param port: port to listen on.
        :param pem_dir: Directory including certificates; falsy for insecure.
        :return: None
        """
        # Create gRPC server
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
        SSE.add_ConnectorServicer_to_server(self, server)
        if pem_dir:
            # Secure connection; the trailing True to ssl_server_credentials
            # requires client certificate authentication.
            with open(os.path.join(pem_dir, 'sse_server_key.pem'), 'rb') as f:
                private_key = f.read()
            with open(os.path.join(pem_dir, 'sse_server_cert.pem'), 'rb') as f:
                cert_chain = f.read()
            with open(os.path.join(pem_dir, 'root_cert.pem'), 'rb') as f:
                root_cert = f.read()
            credentials = grpc.ssl_server_credentials(
                [(private_key, cert_chain)], root_cert, True)
            server.add_secure_port('[::]:{}'.format(port), credentials)
            logging.info(
                '*** Running server in secure mode on port: {} ***'.format(port))
        else:
            # Insecure connection
            server.add_insecure_port('[::]:{}'.format(port))
            logging.info(
                '*** Running server in insecure mode on port: {} ***'.format(port))
        # Start gRPC server; worker threads serve requests while the main
        # thread just sleeps until Ctrl-C.
        server.start()
        try:
            while True:
                time.sleep(_ONE_DAY_IN_SECONDS)
        except KeyboardInterrupt:
            server.stop(0)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Read the gateway configuration first so its port value can serve as
    # the --port default.
    qrag_file = os.path.join(os.path.dirname(__file__), 'config', 'qrag.ini')
    config.read(qrag_file)
    print(qrag_file)
    print(config.sections())
    port = config.get('base', 'port')
    parser.add_argument('--port', nargs='?', default=port)
    parser.add_argument('--pem_dir', nargs='?')
    parser.add_argument('--definition_file', nargs='?',
                        default='functions.json')
    args = parser.parse_args()
    # need to locate the file when script is called from outside it's location dir.
    def_file = os.path.join(os.path.dirname(
        os.path.abspath(__file__)), args.definition_file)
    print(def_file)
    # NOTE(review): logging is only configured inside ExtensionService.__init__,
    # so this record uses root-logger defaults and may be lost — confirm.
    logging.info('*** Server Configurations Port: {}, Pem_Dir: {}, def_file {} TimeStamp: {} ***'.format(
        args.port, args.pem_dir, def_file, datetime.now().isoformat()))
    calc = ExtensionService(def_file)
    calc.Serve(args.port, args.pem_dir)
| gcp/__main__.py | 26,947 | ! /usr/bin/env python3 import helper .py filesself.ScriptEval = ScriptEval() Retrieve string value of parameter and append to the params variable Length of param is 1 since one column is received, the [0] collects the first value in the list Join with current timedate stamp Create an iterable of dual with the result Yield the row data as bundled rows Start by Gathering Environmental Varaiable In Future we will use the Token for Liencensing and Throttling Currently we are using Comblination of host+ipaddr+username for Client Identification Iterate over rows Default code Retrieve string value of parameter and append to the params variable Length of param is 1 since one column is received, the [0] collects the first value in the listlogging.info('Show Payload Response: {}'.format(resp.text)) Create an iterable of dual with the result Yield the row data as bundled rows setup Cachinglogging.debug("Priniting i {}" .format(i))logging.debug('Response Type : {}' .format(type(resp))) logging.debug(duals)logging.debug('Printing Duals {}' .format(duals)) Yield the row data as bundled rows Iterate over bundled rows Iterate over rows Retrieve string value of parameter and append to the params variable Length of param is 1 since one column is received, the [0] collects the first value in the listlogging.info('Showing Payload: {}'.format(param)) Aggregate parameters to a single string Join payload via =','.join(param) Create an iterable of dual with the result Yield the row data as bundled rows Iterate over bundled rows Iterate over rows Retrieve string value of parameter and append to the params variable Length of param is 1 since one column is received, the [0] collects the first value in the listlogging.info('Showing Payload: {}'.format(param)) Aggregate parameters to a single string Join payload via =','.join(param) Create an iterable of dual with the result Yield the row data as bundled rows Iterate over bundled rows Iterate over rows Retrieve 
string value of parameter and append to the params variable Length of param is 1 since one column is received, the [0] collects the first value in the list Join with current timedate stamp Create an iterable of dual with the result Yield the row data as bundled rows Disable caching. Iterate over bundled rows Iterate over rows Retrieve string value of parameter and append to the params variable Length of param is 1 since one column is received, the [0] collects the first value in the list Join with current timedate stamp Create an iterable of dual with the result Yield the row data as bundled rows Get metadata for the call from the context Get the function ID Get the common request header Get capabilities Get the name of the capability called in the function Get the user ID using a regular expression Get the app ID Get the call's origin Create an instance of the Capabilities grpc message Enable(or disable) script evaluation Set values for pluginIdentifier and pluginVersion If user defined functions supported, add the definitions to the message Iterate over each function definition and add data to the capabilities grpc message Retrieve name and type of each parameter Call corresponding function self.functions[func_id])) Convers to Method Name to Physical Main Function Create gRPC server Secure connection Insecure connection Start gRPC server need to locate the file when script is called from outside it's location dir. | 3,407 | en | 0.698575 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read functionality for OGR EDIGEO driver.
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2011, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
from osgeo import ogr
sys.path.append('../pymod')
import gdaltest
import ogrtest
###############################################################################
def ogr_edigeo_1():
    """Download the EDIGEO sample dataset (cached under tmp/cache/), open it
    with the OGR EDIGEO driver and verify layer count, geometry types,
    feature counts, SRS and a few concrete geometries.

    Returns 'success', 'fail', or 'skip' (download unavailable), following
    the gdaltest runner convention.
    """
    filelist = ['E000AB01.THF',
                'EDAB01S1.VEC',
                'EDAB01SE.DIC',
                'EDAB01SE.GEN',
                'EDAB01SE.GEO',
                'EDAB01SE.QAL',
                'EDAB01SE.SCD',
                'EDAB01T1.VEC',
                'EDAB01T2.VEC',
                'EDAB01T3.VEC']
    # base_url = 'http://svn.geotools.org/trunk/modules/unsupported/edigeo/src/test/resources/org/geotools/data/edigeo/test-data/'
    base_url = 'https://raw.githubusercontent.com/geotools/geotools/master/modules/unsupported/edigeo/src/test/resources/org/geotools/data/edigeo/test-data/'
    # Fetch every file of the dataset; skip the test when any download fails.
    for filename in filelist:
        if not gdaltest.download_file(base_url + filename, filename):
            return 'skip'
    # Double-check the cached copies actually exist on disk.
    try:
        for filename in filelist:
            os.stat('tmp/cache/' + filename)
    except OSError:
        return 'skip'
    ds = ogr.Open('tmp/cache/E000AB01.THF')
    if ds.GetLayerCount() != 24:
        print(ds.GetLayerCount())
        return 'fail'
    # (layer name, expected geometry type, expected feature count)
    layers = [('BATIMENT_id', ogr.wkbPolygon, 107),
              ('BORNE_id', ogr.wkbPoint, 5),
              ('COMMUNE_id', ogr.wkbPolygon, 1),
              ('LIEUDIT_id', ogr.wkbPolygon, 3),
              ('NUMVOIE_id', ogr.wkbPoint, 43),
              ('PARCELLE_id', ogr.wkbPolygon, 155),
              ('SECTION_id', ogr.wkbPolygon, 1),
              ('SUBDFISC_id', ogr.wkbPolygon, 1),
              ('SUBDSECT_id', ogr.wkbPolygon, 1),
              ('SYMBLIM_id', ogr.wkbPoint, 29),
              ('TLINE_id', ogr.wkbLineString, 134),
              ('TPOINT_id', ogr.wkbPoint, 1),
              ('TRONFLUV_id', ogr.wkbPolygon, 3),
              ('TRONROUTE_id', ogr.wkbPolygon, 1),
              ('TSURF_id', ogr.wkbPolygon, 3),
              ('ZONCOMMUNI_id', ogr.wkbLineString, 15),
              ('ID_S_OBJ_Z_1_2_2', ogr.wkbPoint, 248),
              ]
    for l in layers:
        lyr = ds.GetLayerByName(l[0])
        if lyr.GetLayerDefn().GetGeomType() != l[1]:
            return 'fail'
        if lyr.GetFeatureCount() != l[2]:
            print(lyr.GetFeatureCount())
            return 'fail'
        # All spatial layers must carry a Lambert Conformal Conic 1SP SRS.
        if l[1] != ogr.wkbNone:
            if lyr.GetSpatialRef().ExportToWkt().find('Lambert_Conformal_Conic_1SP') == -1:
                print(lyr.GetSpatialRef().ExportToWkt())
                return 'fail'
    # Spot-check one geometry of each basic type against known WKT.
    lyr = ds.GetLayerByName('BORNE_id')
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry(feat, 'POINT (877171.28 72489.22)'):
        feat.DumpReadable()
        return 'fail'
    lyr = ds.GetLayerByName('BATIMENT_id')
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry(feat, 'POLYGON ((877206.16 71888.82,877193.14 71865.51,877202.95 71860.07,877215.83 71883.5,877206.16 71888.82))'):
        feat.DumpReadable()
        return 'fail'
    lyr = ds.GetLayerByName('ZONCOMMUNI_id')
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry(feat, 'LINESTRING (877929.8 71656.39,877922.38 71663.72,877911.48 71669.51,877884.23 71675.64,877783.07 71694.04,877716.31 71706.98,877707.45 71709.71,877702.0 71713.79,877696.89 71719.58,877671.69 71761.82,877607.99 71865.03,877545.32 71959.04,877499.22 72026.82)'):
        feat.DumpReadable()
        return 'fail'
    ds.Destroy()
    return 'success'
# Test callables consumed by the gdaltest runner.
gdaltest_list = [
    ogr_edigeo_1]
if __name__ == '__main__':
    # Allow running this module's tests standalone.
    gdaltest.setup_run('ogr_edigeo')
    gdaltest.run_tests(gdaltest_list)
    gdaltest.summarize()
| autotest/ogr/ogr_edigeo.py | 5,249 | !/usr/bin/env python -*- coding: utf-8 -*- $Id$ Project: GDAL/OGR Test Suite Purpose: Test read functionality for OGR EDIGEO driver. Author: Even Rouault <even dot rouault at mines dash paris dot org> Copyright (c) 2011, Even Rouault <even dot rouault at mines-paris dot org> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. base_url = 'http://svn.geotools.org/trunk/modules/unsupported/edigeo/src/test/resources/org/geotools/data/edigeo/test-data/' | 1,425 | en | 0.803166 |
from datetime import datetime
import itertools
import os
import random
import string
from _signal import SIGINT
from contextlib import contextmanager
from functools import partial
from itertools import permutations, combinations
from shutil import copyfile
from sys import executable
from time import sleep, perf_counter
from typing import Tuple, Iterable, Dict, Optional, List, Any, Sequence, Union, Callable
import base58
import pytest
from indy.pool import set_protocol_version
from common.serializers.serialization import invalid_index_serializer
from crypto.bls.bls_factory import BlsFactoryCrypto
from plenum.common.event_bus import ExternalBus, InternalBus
from plenum.common.member.member import Member
from plenum.common.member.steward import Steward
from plenum.common.signer_did import DidSigner
from plenum.common.signer_simple import SimpleSigner
from plenum.common.timer import QueueTimer, TimerService
from plenum.config import Max3PCBatchWait
from psutil import Popen
import json
import asyncio
from indy.ledger import sign_and_submit_request, sign_request, submit_request, build_node_request, \
multi_sign_request
from indy.error import ErrorCode, IndyError
from ledger.genesis_txn.genesis_txn_file_util import genesis_txn_file
from plenum.common.constants import DOMAIN_LEDGER_ID, OP_FIELD_NAME, REPLY, REQNACK, REJECT, \
CURRENT_PROTOCOL_VERSION, STEWARD, VALIDATOR, TRUSTEE, DATA, BLS_KEY, BLS_KEY_PROOF
from plenum.common.exceptions import RequestNackedException, RequestRejectedException, CommonSdkIOException, \
PoolLedgerTimeoutException
from plenum.common.messages.node_messages import Reply, PrePrepare, Prepare, Commit
from plenum.common.txn_util import get_req_id, get_from, get_payload_data
from plenum.common.types import f, OPERATION
from plenum.common.util import getNoInstances, get_utc_epoch
from plenum.common.config_helper import PNodeConfigHelper
from plenum.common.request import Request
from plenum.server.consensus.ordering_service import OrderingService
from plenum.server.node import Node
from plenum.test import waits
from plenum.test.constants import BUY
from plenum.test.msgs import randomMsg
from plenum.test.spy_helpers import getLastClientReqReceivedForNode, getAllArgs, getAllReturnVals, \
getAllMsgReceivedForNode
from plenum.test.test_node import TestNode, TestReplica, \
getPrimaryReplica, getNonPrimaryReplicas
from stp_core.common.log import getlogger
from stp_core.loop.eventually import eventuallyAll, eventually
from stp_core.loop.looper import Looper
from stp_core.network.util import checkPortAvailable
logger = getlogger()
# noinspection PyUnresolvedReferences
def ordinal(n):
    """Return the ordinal string for integer *n* (1 -> '1st', 2 -> '2nd').

    Uses integer division (``//``); the previous true division (``/``)
    produced float comparisons that mis-suffixed the teens (e.g. '11st').
    """
    # "tsnrhtdd"[k::4] yields 'th', 'st', 'nd' or 'rd' for k in 0..3.
    return "%d%s" % (
        n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])
def random_string(length: int) -> str:
    """Return a random alphanumeric string of exactly *length* characters."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
def send_reqs_batches_and_get_suff_replies(
        looper: Looper,
        txnPoolNodeSet,
        sdk_pool_handle,
        sdk_wallet_client,
        num_reqs: int,
        num_batches=1,
        **kwargs):
    """Send *num_reqs* random requests in *num_batches* batches and collect replies.

    This method assumes that ``num_reqs <= num_batches * MaxbatchSize``.
    Mirrors ``sdk_send_batches_of_random_and_check``: the first
    ``num_batches - 1`` batches carry ``num_reqs // num_batches`` requests
    and the last batch carries the remainder, so exactly *num_reqs* requests
    are sent.  (The previous last-batch computation sent only
    ``num_reqs % num_batches`` when it was non-zero, silently dropping a
    batch worth of requests.)
    """
    if num_batches == 1:
        return sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_client, num_reqs)
    reqs_in_batch = num_reqs // num_batches
    requests = []
    for _ in range(num_batches - 1):
        requests.extend(
            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, reqs_in_batch))
    # Final batch: whatever is left, so the totals add up to num_reqs.
    last_batch = num_reqs - reqs_in_batch * (num_batches - 1)
    requests.extend(
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, last_batch))
    return requests
# noinspection PyIncorrectDocstring
def checkResponseCorrectnessFromNodes(receivedMsgs: Iterable, reqId: int,
                                      fValue: int) -> bool:
    """
    the client must get at least :math:`f+1` responses
    """
    # Group matching replies by (reqId, identifier); the most common pair
    # must have been reported by at least f+1 nodes.
    counts = {}
    for reply in getRepliesFromClientInbox(receivedMsgs, reqId):
        result = reply[f.RESULT.nm]
        pair = (result[f.REQ_ID.nm], result[f.IDENTIFIER.nm])
        counts[pair] = counts.get(pair, 0) + 1
    assert max(counts.values()) >= fValue + 1
def getRepliesFromClientInbox(inbox, reqId) -> list:
    """Return REPLY messages for *reqId* from a client inbox, one per sender.

    Inbox entries are ``(msg, frm)`` pairs; for each sender only the most
    recent matching reply is kept (same de-duplication the original dict
    comprehension performed by keying on the sender).
    """
    latest_by_sender = {}
    for msg, frm in inbox:
        if msg[OP_FIELD_NAME] == REPLY and \
                msg[f.RESULT.nm][f.REQ_ID.nm] == reqId:
            latest_by_sender[frm] = msg
    return list(latest_by_sender.values())
def checkLastClientReqForNode(node: TestNode, expectedRequest: Request):
    """Assert the last client request *node* received equals *expectedRequest*."""
    received = getLastClientReqReceivedForNode(node)
    assert received
    assert received.as_dict == expectedRequest.as_dict
# noinspection PyIncorrectDocstring
def assertLength(collection: Iterable[Any], expectedLength: int):
    """Assert that *collection* contains exactly *expectedLength* items."""
    actual = len(collection)
    assert actual == expectedLength, \
        "Observed length was {} but expected length was {}".format(
            actual, expectedLength)
def assertEquality(observed: Any, expected: Any, details=None):
    """Assert ``observed == expected``, reporting both values and *details*."""
    message = "Observed value was {} but expected value " \
              "was {}, details: {}".format(observed, expected, details)
    assert observed == expected, message
def randomOperation():
    """Return a random BUY operation payload for test requests."""
    return {"type": BUY,
            "amount": random.randint(10, 100000)}
def random_requests(count):
    """Return a list of *count* random operations."""
    return [randomOperation() for _ in range(count)]
def random_request_objects(count, protocol_version):
    """Build *count* Request objects wrapping random operations."""
    return [Request(operation=operation, protocolVersion=protocol_version)
            for operation in random_requests(count)]
def buildCompletedTxnFromReply(request, reply: Reply) -> Dict:
    """Merge *reply* fields into the request's operation and return it.

    NOTE: mutates ``request.operation`` in place.
    """
    txn = request.operation
    txn.update(reply)
    return txn
async def msgAll(nodes):
    """Send a message between every ordered pair of nodes and check delivery."""
    # TODO split send and check so that the messages can be sent concurrently
    for sender, receiver in permutations(nodes, 2):
        await sendMessageAndCheckDelivery(sender, receiver)
def sendMessage(sender: Node,
                reciever: Node,
                msg: Optional[Tuple] = None):
    """
    Sends message from one node to another

    :param sender: sender
    :param reciever: recepient
    :param msg: optional message - by default random one generated
    :return:
    """
    logger.debug("Sending msg from {} to {}".format(sender.name, reciever.name))
    payload = msg or randomMsg()
    remote_id = sender.nodestack.getRemote(reciever.name).uid
    sender.nodestack.send(payload, remote_id)
async def sendMessageAndCheckDelivery(sender: Node,
                                      reciever: Node,
                                      msg: Optional[Tuple] = None,
                                      method=None,
                                      customTimeout=None):
    """
    Sends message from one node to another and checks that it was delivered

    :param sender: sender
    :param reciever: recepient
    :param msg: optional message - by default random one generated
    :param customTimeout: delivery timeout; defaults to the expected
        node-to-node delivery time
    :return:
    """
    logger.debug("Sending msg from {} to {}".format(sender.name, reciever.name))
    payload = msg or randomMsg()
    remote_id = sender.nodestack.getRemote(reciever.name).uid
    sender.nodestack.send(payload, remote_id)
    delivery_timeout = customTimeout or waits.expectedNodeToNodeMessageDeliveryTime()
    await eventually(checkMessageReceived, payload, reciever, method,
                     retryWait=.1,
                     timeout=delivery_timeout,
                     ratchetSteps=10)
def sendMessageToAll(nodes,
                     sender: Node,
                     msg: Optional[Tuple] = None):
    """
    Sends message from one node to all others

    :param nodes: all nodes in the pool
    :param sender: sender
    :param msg: optional message - by default random one generated
    :return:
    """
    for receiver in nodes:
        if receiver == sender:
            continue
        sendMessage(sender, receiver, msg)
async def sendMessageAndCheckDeliveryToAll(nodes,
                                           sender: Node,
                                           msg: Optional[Tuple] = None,
                                           method=None,
                                           customTimeout=None):
    """
    Sends message from one node to all other and checks that it was delivered

    :param nodes: all nodes in the pool
    :param sender: sender
    :param msg: optional message - by default random one generated
    :param customTimeout: per-delivery timeout; defaults to the expected
        node-to-all-nodes delivery time for a pool of this size
    :return:
    """
    customTimeout = customTimeout or waits.expectedNodeToAllNodesMessageDeliveryTime(
        len(nodes))
    for node in nodes:
        if node != sender:
            await sendMessageAndCheckDelivery(sender, node, msg, method, customTimeout)
            # NOTE(review): this `break` means the message is only sent to and
            # checked on the FIRST node other than `sender`, despite the
            # function name saying "ToAll" — confirm whether this is a
            # deliberate shortcut or a leftover bug before relying on it.
            break
def checkMessageReceived(msg, receiver, method: str = None):
    """Assert that *receiver* has seen *msg* (optionally via *method* only)."""
    received = getAllMsgReceivedForNode(receiver, method)
    assert msg in received
def addNodeBack(node_set,
                looper: Looper,
                node: Node,
                tconf,
                tdir) -> TestNode:
    """Recreate a stopped node with its previous HA/config and add it to the looper."""
    helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
    restarted = TestNode(node.name,
                         config_helper=helper,
                         config=tconf,
                         ha=node.nodestack.ha,
                         cliha=node.clientstack.ha)
    node_set.append(restarted)
    looper.add(restarted)
    return restarted
def checkPropagateReqCountOfNode(node: TestNode, digest: str):
    """Assert the node collected a propagate quorum for the request digest."""
    assert digest in node.requests
    propagate_count = len(node.requests[digest].propagates)
    assert node.quorums.propagate.is_reached(propagate_count)
def requestReturnedToNode(node: TestNode, key: str,
                          instId: int):
    """Return True if the node ordered request *key* on instance *instId*."""
    params = getAllArgs(node, node.processOrdered)
    # Only the instance id and first request key matter; view no and
    # timestamps of each ordered request are ignored.
    ordered = [(p['ordered'].instId, p['ordered'].valid_reqIdr[0])
               for p in params]
    return (instId, key) in ordered
def checkRequestReturnedToNode(node: TestNode, key: str,
                               instId: int):
    """Assert that the node ordered request *key* on instance *instId*."""
    assert requestReturnedToNode(node, key, instId)
def checkRequestNotReturnedToNode(node: TestNode, key: str,
                                  instId: int):
    """Assert that the node did NOT order request *key* on instance *instId*."""
    assert not requestReturnedToNode(node, key, instId)
def check_request_is_not_returned_to_nodes(txnPoolNodeSet, request):
    """Assert no node ordered *request* on any protocol instance."""
    num_instances = getNoInstances(len(txnPoolNodeSet))
    for node in txnPoolNodeSet:
        for inst_id in range(num_instances):
            checkRequestNotReturnedToNode(node, request.key, inst_id)
def checkPrePrepareReqSent(replica: TestReplica, req: Request):
    """Assert the replica sent a PRE-PREPARE containing exactly this request."""
    sent = getAllArgs(replica._ordering_service,
                      replica._ordering_service.send_pre_prepare)
    sent_req_idrs = [p["ppReq"].reqIdr for p in sent]
    assert (req.digest,) in sent_req_idrs
def checkPrePrepareReqRecvd(replicas: Iterable[TestReplica],
                            expectedRequest: PrePrepare):
    """Assert every replica processed a PRE-PREPARE with the expected reqIdr."""
    for replica in replicas:
        seen = getAllArgs(replica._ordering_service,
                          replica._ordering_service._can_process_pre_prepare)
        assert expectedRequest.reqIdr in [p['pre_prepare'].reqIdr for p in seen]
def checkPrepareReqSent(replica: TestReplica, key: str,
                        view_no: int):
    """Assert the replica decided to PREPARE request *key* in *view_no*."""
    calls = getAllArgs(replica._ordering_service,
                       replica._ordering_service._can_prepare)
    results = getAllReturnVals(replica._ordering_service,
                               replica._ordering_service._can_prepare)
    keys_in_view = [p["ppReq"].reqIdr for p in calls
                    if p["ppReq"].viewNo == view_no]
    assert (key,) in keys_in_view
    # The corresponding _can_prepare call must also have returned truthy.
    assert results[keys_in_view.index((key,))]
def checkSufficientPrepareReqRecvd(replica: TestReplica, viewNo: int,
                                   ppSeqNo: int):
    """Assert the replica collected a quorum of PREPAREs for (viewNo, ppSeqNo)."""
    batch_key = (viewNo, ppSeqNo)
    assert batch_key in replica._ordering_service.prepares
    prepare_count = len(replica._ordering_service.prepares[batch_key][1])
    assert prepare_count >= replica.quorums.prepare.value
def checkSufficientCommitReqRecvd(replicas: Iterable[TestReplica], viewNo: int,
                                  ppSeqNo: int):
    """Assert each replica collected a commit quorum for (viewNo, ppSeqNo).

    Uses ``>=`` against the commit quorum value, consistent with
    ``checkSufficientPrepareReqRecvd``: a quorum is reached when the count
    equals the quorum value, not only when it exceeds it (the previous
    strict ``>`` demanded one commit more than the quorum).
    """
    for replica in replicas:
        key = (viewNo, ppSeqNo)
        assert key in replica._ordering_service.commits
        received = len(replica._ordering_service.commits[key][1])
        minimum = replica.quorums.commit.value
        assert received >= minimum
def checkViewNoForNodes(nodes: Iterable[TestNode], expectedViewNo: int = None):
    """
    Checks if all the given nodes have the expected view no

    :param nodes: The nodes to check for
    :param expectedViewNo: the view no that the nodes are expected to have
    :return: the common view number
    """
    view_nos = set()
    for node in nodes:
        logger.debug("{}'s view no is {}".format(node, node.master_replica.viewNo))
        view_nos.add(node.master_replica.viewNo)
    assert len(view_nos) == 1, 'Expected 1, but got {}. ' \
                               'ViewNos: {}'.format(len(view_nos), [(n.name, n.master_replica.viewNo) for n in nodes])
    current_view_no = view_nos.pop()
    if expectedViewNo is not None:
        assert current_view_no >= expectedViewNo, \
            'Expected at least {}, but got {}'.format(expectedViewNo, current_view_no)
    return current_view_no
def waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=None,
                      customTimeout=None):
    """
    Waits for nodes to come to same view.
    Raises exception when time is out
    """
    check_timeout = customTimeout or waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
    return looper.run(eventually(checkViewNoForNodes,
                                 txnPoolNodeSet,
                                 expectedViewNo,
                                 timeout=check_timeout))
def getNodeSuspicions(node: TestNode, code: int = None):
    """Return reportSuspiciousNode call args, optionally filtered by *code*."""
    params = getAllArgs(node, TestNode.reportSuspiciousNode)
    if code is None or not params:
        return params
    return [param for param in params
            if 'code' in param and param['code'] == code]
def checkDiscardMsg(processors, discardedMsg,
                    reasonRegexp, *exclude):
    """Assert each non-excluded processor last discarded *discardedMsg* for the reason."""
    for processor in filterNodeSet(processors, exclude or []):
        last = processor.spylog.getLastParams(processor.discard, required=False)
        assert last
        assert last['msg'] == discardedMsg
        assert reasonRegexp in last['reason']
def checkMasterReplicaDiscardMsg(processors, discardedMsg,
                                 reasonRegexp, *exclude):
    """Like checkDiscardMsg, but inspects each node's master replica stasher."""
    for processor in filterNodeSet(processors, exclude or []):
        stasher = processor.master_replica.stasher
        last = stasher.spylog.getLastParams(stasher.discard, required=False)
        assert last
        assert last['msg'] == discardedMsg
        assert reasonRegexp in last['reason']
def countDiscarded(processor, reasonPat):
    """Count discard() calls on *processor* whose reason mentions *reasonPat*.

    The previous condition and-ed a two-element tuple, which is always
    truthy, so every discard that had any reason was counted regardless of
    *reasonPat*.  Now the pattern is actually matched against the reason's
    string form.
    """
    count = 0
    for entry in processor.spylog.getAll(processor.discard):
        if 'reason' in entry.params and \
                reasonPat in str(entry.params['reason']):
            count += 1
    return count
def filterNodeSet(nodeSet, exclude: List[Union[str, Node]]):
    """
    Return a set of nodes with the nodes in exclude removed.

    :param nodeSet: the set of nodes
    :param exclude: the list of nodes or node names to exclude
    :return: the filtered nodeSet
    """
    # Resolve names to node objects once instead of per element.
    excluded = [nodeSet[x] if isinstance(x, str) else x for x in exclude]
    return [node for node in nodeSet if node not in excluded]
def whitelistNode(toWhitelist: str, frm: Sequence[TestNode], *codes):
    """Whitelist node *toWhitelist* for the given codes on every node in *frm*."""
    for witness in frm:
        witness.whitelistNode(toWhitelist, *codes)
def whitelistClient(toWhitelist: str, frm: Sequence[TestNode], *codes):
    """Whitelist client *toWhitelist* for the given codes on every node in *frm*."""
    for witness in frm:
        witness.whitelistClient(toWhitelist, *codes)
def assertExp(condition):
    """Assert that *condition* is truthy; handy as an `eventually` callback."""
    assert condition
def assert_eq(actual, expected):
    """Assert that *actual* equals *expected*."""
    assert actual == expected
def assert_in(value, collection):
    """Assert that *collection* contains *value*."""
    assert value in collection
def assertFunc(func):
    """Call *func* and assert that its result is truthy."""
    assert func()
def checkLedgerEquality(ledger1, ledger2):
    """Assert two ledgers agree on size and both root hashes."""
    assertLength(ledger1, ledger2.size)
    assertEquality(ledger1.root_hash, ledger2.root_hash)
    assertEquality(ledger1.uncommitted_root_hash, ledger2.uncommitted_root_hash)
def checkAllLedgersEqual(*ledgers):
    """Assert every pair of the given ledgers is equal."""
    for first, second in combinations(ledgers, 2):
        checkLedgerEquality(first, second)
def checkStateEquality(state1, state2):
    """Assert two states match; if state1 is None, return whether state2 is too."""
    if state1 is None:
        return state2 is None
    for attr in ('as_dict', 'committedHeadHash', 'committedHead'):
        assertEquality(getattr(state1, attr), getattr(state2, attr))
def check_seqno_db_equality(db1, db2):
    """Assert two seqno DBs hold identical contents (returns False if closed)."""
    if db1._keyValueStorage._db is None or db2._keyValueStorage._db is None:
        return False
    assert db1.size == db2.size, \
        "{} != {}".format(db1.size, db2.size)
    contents1 = {bytes(k): bytes(v) for k, v in db1._keyValueStorage.iterator()}
    contents2 = {bytes(k): bytes(v) for k, v in db2._keyValueStorage.iterator()}
    assert contents1 == contents2
def check_primaries_equality(node1, node2):
    """Assert both nodes report the same primaries list."""
    primaries1, primaries2 = node1.primaries, node2.primaries
    assert primaries1 == primaries2, \
        "{} != {}, Node1: {}; Node2: {}".format(primaries1, primaries2,
                                                node1, node2)
def check_last_ordered_3pc(node1, node2):
    """Assert both master replicas ordered the same 3PC batch; return it."""
    lo1 = node1.master_replica.last_ordered_3pc
    lo2 = node2.master_replica.last_ordered_3pc
    assert lo1 == lo2, \
        "{} != {} Node1: {}, Node2: {}".format(lo1, lo2, node1, node2)
    return lo1
def check_last_ordered_3pc_backup(node1, node2):
    """Assert each backup replica pair agrees on last_ordered_3pc (master is skipped)."""
    assert len(node1.replicas) == len(node2.replicas)
    for idx in range(1, len(node1.replicas)):
        replica1 = node1.replicas[idx]
        replica2 = node2.replicas[idx]
        assert replica1.last_ordered_3pc == replica2.last_ordered_3pc, \
            "{}: {} != {}: {}".format(replica1, replica1.last_ordered_3pc,
                                      replica2, replica2.last_ordered_3pc)
def check_view_no(node1, node2):
    """Assert both nodes' master replicas are in the same view."""
    view1 = node1.master_replica.viewNo
    view2 = node2.master_replica.viewNo
    assert view1 == view2, "{} != {}".format(view1, view2)
def check_last_ordered_3pc_on_all_replicas(nodes, last_ordered_3pc):
    """Assert every replica of every node has the given last_ordered_3pc."""
    for node in nodes:
        for replica in node.replicas.values():
            assert replica.last_ordered_3pc == last_ordered_3pc, \
                "{} != {}, Replica: {}".format(replica.last_ordered_3pc,
                                               last_ordered_3pc, replica)
def check_last_ordered_3pc_on_master(nodes, last_ordered_3pc):
    """Assert every node's master replica has the given last_ordered_3pc."""
    for node in nodes:
        observed = node.master_replica.last_ordered_3pc
        assert observed == last_ordered_3pc, \
            "{} != {}".format(observed, last_ordered_3pc)
def check_last_ordered_3pc_on_backup(nodes, last_ordered_3pc):
    """Assert every backup replica has the given last_ordered_3pc (master skipped)."""
    for node in nodes:
        for inst_id, replica in node.replicas.items():
            if inst_id == 0:
                continue
            assert replica.last_ordered_3pc == last_ordered_3pc, \
                "{} != {}".format(replica.last_ordered_3pc,
                                  last_ordered_3pc)
def randomText(size):
    """Return a random string of *size* ASCII letters."""
    return ''.join(random.choices(string.ascii_letters, k=size))
def mockGetInstalledDistributions(packages):
    """Return stub distribution objects exposing a ``key`` attribute per package."""
    stubs = []
    for name in packages:
        stub = type('', (), {})()
        stub.key = name
        stubs.append(stub)
    return stubs
def mockImportModule(moduleName):
    """Return a stub 'module' object with a no-op ``send_message`` attribute."""
    stub = type(moduleName, (), {})()
    stub.send_message = lambda *args: None
    return stub
def initDirWithGenesisTxns(
        dirName,
        tconf,
        tdirWithPoolTxns=None,
        tdirWithDomainTxns=None,
        new_pool_txn_file=None,
        new_domain_txn_file=None):
    """Copy pool/domain genesis txn files from their source dirs into *dirName*.

    Target file names default to the names configured in *tconf* unless the
    ``new_*_txn_file`` overrides are given.
    """
    os.makedirs(dirName, exist_ok=True)
    if tdirWithPoolTxns:
        target_name = new_pool_txn_file or tconf.poolTransactionsFile
        src = os.path.join(tdirWithPoolTxns,
                           genesis_txn_file(tconf.poolTransactionsFile))
        dst = os.path.join(dirName, genesis_txn_file(target_name))
        copyfile(src, dst)
    if tdirWithDomainTxns:
        target_name = new_domain_txn_file or tconf.domainTransactionsFile
        src = os.path.join(tdirWithDomainTxns,
                           genesis_txn_file(tconf.domainTransactionsFile))
        dst = os.path.join(dirName, genesis_txn_file(target_name))
        copyfile(src, dst)
def stopNodes(nodes: List[TestNode], looper=None, ensurePortsFreedUp=True):
    """Stop the given nodes; optionally wait until their ports are free again."""
    if ensurePortsFreedUp:
        assert looper, 'Need a looper to make sure ports are freed up'
    for node in nodes:
        node.stop()
    if ensurePortsFreedUp:
        ports = [[node.nodestack.ha[1], node.clientstack.ha[1]]
                 for node in nodes]
        waitUntilPortIsAvailable(looper, ports)
def waitUntilPortIsAvailable(looper, ports, timeout=5):
    """Wait until every port in the nested *ports* collection is bindable.

    The flattened port list is materialized once: the previous code kept a
    one-shot ``itertools.chain`` iterator, which `eventually`'s retries
    would find exhausted (or partially consumed), silently skipping ports.
    """
    flat_ports = list(itertools.chain(*ports))

    def chk():
        for port in flat_ports:
            checkPortAvailable(("", port))

    looper.run(eventually(chk, retryWait=.5, timeout=timeout))
def run_script(script, *args):
    """Run ``scripts/<script>`` with *args*, SIGINT it after 4s, expect exit 0.

    The previous version built ``command`` with the extra args but then
    launched ``Popen([executable, s])``, silently dropping them; the full
    command line is now passed to Popen.
    """
    script_path = os.path.join(os.path.dirname(__file__), '../../scripts/' + script)
    command = [executable, script_path]
    command.extend(args)
    with Popen(command) as p:
        sleep(4)
        p.send_signal(SIGINT)
        p.wait(timeout=1)
        assert p.poll() == 0, 'script failed'
def viewNoForNodes(nodes):
    """Assert all nodes share one view number and return it."""
    distinct_views = {node.viewNo for node in nodes}
    assert len(distinct_views) == 1
    return distinct_views.pop()
def primaryNodeNameForInstance(nodes, instanceId):
    """Return the primary node's name for *instanceId*, asserting all nodes agree.

    Replica names look like 'Alpha:0'; the trailing ':<inst>' is stripped.
    """
    names = {node.replicas[instanceId].primaryName for node in nodes}
    assert len(names) == 1
    replica_name = names.pop()
    return replica_name[:-2]
def nodeByName(nodes, name):
    """Return the node called *name*, raising if it is absent."""
    for candidate in nodes:
        if candidate.name == name:
            return candidate
    raise Exception("Node with the name '{}' has not been found.".format(name))
def send_pre_prepare(view_no, pp_seq_no, nodes,
                     state_root=None, txn_root=None):
    """Broadcast a synthetic master-instance PRE-PREPARE from every node."""
    default_root = '0' * 44
    pre_prepare = PrePrepare(
        0,
        view_no,
        pp_seq_no,
        get_utc_epoch(),
        ["requests digest"],
        0,
        "random digest",
        DOMAIN_LEDGER_ID,
        state_root or default_root,
        txn_root or default_root,
        0,
        True
    )
    primary_node = getPrimaryReplica(nodes).node
    # Send from the primary first, then from every non-primary node.
    sendMessageToAll(nodes, primary_node, pre_prepare)
    for node in set(nodes) - {primary_node}:
        sendMessageToAll(nodes, node, pre_prepare)
def send_prepare(view_no, pp_seq_no, nodes, state_root=None, txn_root=None):
    """Broadcast a synthetic master-instance PREPARE from the primary node."""
    default_root = '0' * 44
    prepare = Prepare(
        0,
        view_no,
        pp_seq_no,
        get_utc_epoch(),
        "random digest",
        state_root or default_root,
        txn_root or default_root
    )
    sendMessageToAll(nodes, getPrimaryReplica(nodes).node, prepare)
def send_commit(view_no, pp_seq_no, nodes):
    """Broadcast a synthetic master-instance COMMIT from the primary node."""
    commit = Commit(0, view_no, pp_seq_no)
    sendMessageToAll(nodes, getPrimaryReplica(nodes).node, commit)
def get_key_from_req(req: dict):
    """Rebuild a Request from its dict form and return its ordering key."""
    request = Request(identifier=req[f.IDENTIFIER.nm],
                      reqId=req[f.REQ_ID.nm],
                      operation=req[OPERATION],
                      protocolVersion=req[f.PROTOCOL_VERSION.nm],
                      signature=req.get(f.SIG.nm),
                      taaAcceptance=req.get(f.TAA_ACCEPTANCE))
    return request.key
def chk_all_funcs(looper, funcs, acceptable_fails=0, retry_wait=None,
                  timeout=None, override_eventually_timeout=False):
    """Retry every callable in *funcs* until at most *acceptable_fails* of
    them still raise, or the overall timeout expires.

    :param looper: looper used to drive `eventually`
    :param funcs: callables that assert some condition
    :param acceptable_fails: max number of funcs allowed to keep failing
    :param retry_wait: seconds between retries (forwarded to `eventually`)
    :param timeout: overall timeout (forwarded to `eventually`)
    :param override_eventually_timeout: lift `eventually`'s timeout cap
    """
    # TODO: Move this logic to eventuallyAll
    def chk():
        fails = 0
        last_ex = None
        for func in funcs:
            try:
                func()
            except Exception as ex:
                fails += 1
                # Only log once the failure count reaches the acceptable limit.
                if fails >= acceptable_fails:
                    logger.debug('Too many fails, the last one: {}'.format(repr(ex)))
                last_ex = ex
        assert fails <= acceptable_fails, '{} out of {} failed. Last exception:' \
                                          ' {}'.format(fails, len(funcs), last_ex)
    # Only pass kwargs the caller actually supplied so `eventually`'s own
    # defaults apply otherwise.
    kwargs = {}
    if retry_wait:
        kwargs['retryWait'] = retry_wait
    if timeout:
        kwargs['timeout'] = timeout
    if override_eventually_timeout:
        kwargs['override_timeout_limit'] = override_eventually_timeout
    looper.run(eventually(chk, **kwargs))
def check_request_ordered(node, request: Request):
    """Return True if *request* was ordered into the node's domain ledger.

    Raises ValueError when the request is not found.  It's ok to iterate
    through all txns since this is a test.
    """
    for _, txn in node.domainLedger.getAllTxn():
        req_id, frm = get_req_id(txn), get_from(txn)
        if req_id is None or frm is None:
            continue
        if req_id == request.reqId and frm == request.identifier:
            return True
    raise ValueError('{} request not ordered by node {}'.format(request, node.name))
def wait_for_requests_ordered(looper, nodes, requests):
    """Wait until every node has ordered every request."""
    timeout_per_request = waits.expectedTransactionExecutionTime(len(nodes))
    total_timeout = (1 + len(requests) / 10) * timeout_per_request
    checks = [partial(check_request_ordered, node, request)
              for node, request in itertools.product(nodes, requests)]
    looper.run(eventuallyAll(*checks, retryWait=1, totalTimeout=total_timeout))
def create_new_test_node(test_node_class, node_config_helper_class, name, conf,
                         tdir, plugin_paths, bootstrap_cls=None,
                         node_ha=None, client_ha=None):
    """Instantiate a test node of *test_node_class* with a fresh config helper."""
    helper = node_config_helper_class(name, conf, chroot=tdir)
    return test_node_class(name,
                           config_helper=helper,
                           config=conf,
                           pluginPaths=plugin_paths,
                           ha=node_ha,
                           cliha=client_ha,
                           bootstrap_cls=bootstrap_cls)
# ####### SDK
def sdk_gen_request(operation, protocol_version=CURRENT_PROTOCOL_VERSION,
                    identifier=None, **kwargs):
    """Build a plenum Request with a random reqId.

    Despite the name it does not use the indy-sdk.
    """
    return Request(operation=operation,
                   reqId=random.randint(10, 1000000000),
                   protocolVersion=protocol_version,
                   identifier=identifier,
                   **kwargs)
def sdk_gen_pool_request(looper, sdk_wallet_new_steward, node_alias, node_did):
    """Build a NODE txn Request with random IPs, authored by the new steward."""
    _, steward_did = sdk_wallet_new_steward
    node_ip = '.'.join(str(random.randint(1, 240)) for _ in range(4))
    data = {
        'alias': node_alias,
        'client_port': 50001,
        'node_port': 50002,
        'node_ip': node_ip,
        'client_ip': node_ip,
        'services': []
    }
    req_json = looper.loop.run_until_complete(
        build_node_request(steward_did, node_did, json.dumps(data)))
    return Request(**json.loads(req_json))
def sdk_random_request_objects(count, protocol_version, identifier=None,
                               **kwargs):
    """Build *count* Request objects wrapping random operations."""
    return [sdk_gen_request(op, protocol_version=protocol_version,
                            identifier=identifier, **kwargs)
            for op in random_requests(count)]
def sdk_sign_request_objects(looper, sdk_wallet, reqs: Sequence):
    """Sign each Request object with the wallet DID; return signed json strings."""
    wallet_handle, did = sdk_wallet
    signed = []
    for req in reqs:
        req_json = json.dumps(req.as_dict)
        signed.append(looper.loop.run_until_complete(
            sign_request(wallet_handle, did, req_json)))
    return signed
def sdk_multi_sign_request_objects(looper, sdk_wallets, reqs: Sequence):
    """Multi-sign each Request object with every wallet in turn."""
    signed = [json.dumps(req.as_dict) for req in reqs]
    for wallet_handle, did in sdk_wallets:
        signed = [looper.loop.run_until_complete(
            multi_sign_request(wallet_handle, did, req))
            for req in signed]
    return signed
def sdk_sign_request_strings(looper, sdk_wallet, reqs: Sequence):
    """Sign each request dict with the wallet DID; return signed json strings."""
    wallet_handle, did = sdk_wallet
    return [looper.loop.run_until_complete(
        sign_request(wallet_handle, did, json.dumps(req)))
        for req in reqs]
def sdk_multisign_request_object(looper, sdk_wallet, req):
    """Multi-sign an already-serialized request with the wallet DID."""
    wallet_handle, did = sdk_wallet
    return looper.loop.run_until_complete(
        multi_sign_request(wallet_handle, did, req))
def sdk_multisign_request_from_dict(looper, sdk_wallet, op, reqId=None, taa_acceptance=None, endorser=None):
    """Wrap *op* in a Request and multi-sign it; return the signed dict."""
    wallet_handle, did = sdk_wallet
    request = Request(operation=op,
                      reqId=reqId or random.randint(10, 100000),
                      protocolVersion=CURRENT_PROTOCOL_VERSION,
                      identifier=did,
                      taaAcceptance=taa_acceptance,
                      endorser=endorser)
    signed = looper.loop.run_until_complete(
        multi_sign_request(wallet_handle, did, json.dumps(request.as_dict)))
    return json.loads(signed)
def sdk_signed_random_requests(looper, sdk_wallet, count):
    """Generate *count* random requests signed by *sdk_wallet*."""
    _, did = sdk_wallet
    req_objs = sdk_random_request_objects(
        count, identifier=did, protocol_version=CURRENT_PROTOCOL_VERSION)
    return sdk_sign_request_objects(looper, sdk_wallet, req_objs)
def sdk_send_signed_requests(pool_h, signed_reqs: Sequence):
    """Submit signed requests; return (request dict, response future) pairs."""
    submitted = []
    for signed in signed_reqs:
        future = asyncio.ensure_future(submit_request(pool_h, signed))
        submitted.append((json.loads(signed), future))
    return submitted
def sdk_send_random_requests(looper, pool_h, sdk_wallet, count: int):
    """Sign and submit *count* random requests."""
    signed = sdk_signed_random_requests(looper, sdk_wallet, count)
    return sdk_send_signed_requests(pool_h, signed)
def sdk_send_random_request(looper, pool_h, sdk_wallet):
    """Sign and submit a single random request."""
    return sdk_send_random_requests(looper, pool_h, sdk_wallet, 1)[0]
def sdk_send_random_pool_requests(looper, pool_h, sdk_wallet_new_steward, count: int):
    """Sign and submit *count* NODE txn requests for one freshly named node."""
    alias = random_string(7)
    did = SimpleSigner(seed=random_string(32).encode()).identifier
    return [sdk_sign_and_submit_req_obj(
        looper, pool_h, sdk_wallet_new_steward,
        sdk_gen_pool_request(looper, sdk_wallet_new_steward, alias, did))
        for _ in range(count)]
def sdk_send_random_pool_and_domain_requests(looper, pool_h, sdk_wallet_new_steward, count: int):
    """Submit *count* requests, alternating domain and pool (NODE) txns."""
    alias = random_string(7)
    did = SimpleSigner(seed=random_string(32).encode()).identifier
    generators = [
        lambda: sdk_gen_request(random_requests(1)[0], identifier=sdk_wallet_new_steward[1]),
        lambda: sdk_gen_pool_request(looper, sdk_wallet_new_steward, alias, did),
    ]
    responses = []
    for i in range(count):
        req = generators[i % len(generators)]()
        responses.append(sdk_sign_and_submit_req_obj(looper, pool_h, sdk_wallet_new_steward, req))
        # Give nodes some time to start ordering, so that requests are really alternating
        looper.runFor(0.1)
    return responses
def sdk_sign_and_submit_req(pool_handle, sdk_wallet, req):
    """Sign a serialized request with the wallet and submit it; return (dict, future)."""
    wallet_handle, sender_did = sdk_wallet
    future = asyncio.ensure_future(
        sign_and_submit_request(pool_handle, wallet_handle, sender_did, req))
    return json.loads(req), future
def sdk_sign_and_submit_req_obj(looper, pool_handle, sdk_wallet, req_obj):
    """Sign a Request object and submit it; return (dict, future)."""
    signed = sdk_sign_request_objects(looper, sdk_wallet, [req_obj])[0]
    return sdk_send_signed_requests(pool_handle, [signed])[0]
def sdk_sign_and_submit_op(looper, pool_handle, sdk_wallet, op):
    """Wrap *op* into a Request, sign and submit it; return (dict, future)."""
    _, did = sdk_wallet
    req_obj = sdk_gen_request(op, protocol_version=CURRENT_PROTOCOL_VERSION,
                              identifier=did)
    signed = sdk_sign_request_objects(looper, sdk_wallet, [req_obj])[0]
    return sdk_send_signed_requests(pool_handle, [signed])[0]
def sdk_get_reply(looper, sdk_req_resp, timeout=None):
    """Await the response future of one (request, future) pair.

    Returns (request dict, response dict) on success; on failure the second
    element is the indy ErrorCode (PoolLedgerTimeout on timeout).
    """
    req_json, resp_task = sdk_req_resp
    # TODO: change timeout evaluating logic, when sdk will can tuning timeout from outside
    if timeout is None:
        timeout = waits.expectedTransactionExecutionTime(7)
    try:
        resp = looper.run(asyncio.wait_for(resp_task, timeout=timeout))
        resp = json.loads(resp)
    except IndyError as e:
        resp = e.error_code
    except asyncio.TimeoutError:
        # `asyncio.wait_for` raises asyncio.TimeoutError, which is NOT the
        # builtin TimeoutError on Python < 3.11, so the previous
        # `except TimeoutError` clause never fired.
        resp = ErrorCode.PoolLedgerTimeout
    return req_json, resp
# TODO: Check places where sdk_get_replies used without sdk_check_reply
# We need to be sure that test behaviour don't need to check response
# validity
def sdk_get_replies(looper, sdk_req_resp: Sequence, timeout=None):
    """Await all response futures; return [(request dict, response-or-error)].

    A finished task yields its parsed reply dict (or the IndyError's
    error_code); a task still pending when the wait times out is cancelled
    and mapped to ErrorCode.PoolLedgerTimeout.
    """
    resp_tasks = [resp for _, resp in sdk_req_resp]
    # TODO: change timeout evaluating logic, when sdk will can tuning timeout from outside
    if timeout is None:
        timeout = waits.expectedTransactionExecutionTime(7)
    def get_res(task, done_list):
        if task in done_list:
            try:
                resp = json.loads(task.result())
            except IndyError as e:
                resp = e.error_code
        else:
            # Task did not complete within the wait => pool timeout.
            resp = ErrorCode.PoolLedgerTimeout
        return resp
    done, pending = looper.run(asyncio.wait(resp_tasks, timeout=timeout))
    if pending:
        for task in pending:
            task.cancel()
    ret = [(req, get_res(resp, done)) for req, resp in sdk_req_resp]
    return ret
def sdk_check_reply(req_res):
    """Raise a descriptive exception unless *req_res* holds a successful reply."""
    req, res = req_res
    if isinstance(res, ErrorCode):
        if res == ErrorCode.PoolLedgerTimeout:
            raise PoolLedgerTimeoutException('Got PoolLedgerTimeout for request {}'
                                             .format(req))
        raise CommonSdkIOException('Got an error with code {} for request {}'
                                   .format(res, req))
    if not isinstance(res, dict):
        raise CommonSdkIOException("Unexpected response format {}".format(res))

    def _parse_op(res_dict):
        if res_dict['op'] == REQNACK:
            raise RequestNackedException('ReqNack of id {}. Reason: {}'
                                         .format(req['reqId'], res_dict['reason']))
        if res_dict['op'] == REJECT:
            raise RequestRejectedException('Reject of id {}. Reason: {}'
                                           .format(req['reqId'], res_dict['reason']))

    if 'op' in res:
        _parse_op(res)
        return
    for resps in res.values():
        if isinstance(resps, str):
            _parse_op(json.loads(resps))
        elif isinstance(resps, dict):
            _parse_op(resps)
        else:
            raise CommonSdkIOException("Unexpected response format {}".format(res))
def sdk_get_and_check_replies(looper, sdk_req_resp: Sequence, timeout=None):
    """Collect replies for all requests and validate each one."""
    checked = []
    for req_res in sdk_get_replies(looper, sdk_req_resp, timeout):
        sdk_check_reply(req_res)
        checked.append(req_res)
    return checked
def sdk_eval_timeout(req_count: int, node_count: int,
                     customTimeoutPerReq: float = None, add_delay_to_timeout: float = 0):
    """Estimate a total timeout for executing *req_count* requests.

    A flat ``timeout_per_request * req_count`` would be far too large (for a
    5s per-request timeout and 10 requests, 50s), so the estimate grows by
    only a tenth of the per-request timeout per request, keeping the total
    between one per-request timeout and req_count of them.
    """
    per_request = customTimeoutPerReq or waits.expectedTransactionExecutionTime(node_count)
    per_request += add_delay_to_timeout
    return (1 + req_count / 10) * per_request
def sdk_send_and_check(signed_reqs, looper, txnPoolNodeSet, pool_h, timeout=None):
    """Submit pre-signed requests and check every reply."""
    if not timeout:
        timeout = sdk_eval_timeout(len(signed_reqs), len(txnPoolNodeSet))
    sent = sdk_send_signed_requests(pool_h, signed_reqs)
    replies = sdk_get_replies(looper, sent, timeout=timeout)
    for reply in replies:
        sdk_check_reply(reply)
    return replies
def sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, count,
                              customTimeoutPerReq: float = None, add_delay_to_timeout: float = 0,
                              override_timeout_limit=False, total_timeout=None):
    """Send *count* random requests and verify every reply."""
    reqs = sdk_send_random_requests(looper, sdk_pool, sdk_wallet, count)
    if not total_timeout:
        total_timeout = sdk_eval_timeout(len(reqs), len(txnPoolNodeSet),
                                         customTimeoutPerReq=customTimeoutPerReq,
                                         add_delay_to_timeout=add_delay_to_timeout)
    replies = sdk_get_replies(looper, reqs, timeout=total_timeout)
    for reply in replies:
        sdk_check_reply(reply)
    return replies
def sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet,
                                         num_reqs, num_batches=1, **kwargs):
    """Send *num_reqs* requests split into *num_batches* batches, checking replies.

    This method assumes that `num_reqs` <= num_batches*MaxbatchSize.
    """
    if num_reqs < num_batches:
        raise BaseException(
            'sdk_send_batches_of_random_and_check method assumes that `num_reqs` <= num_batches*MaxbatchSize')
    if num_batches == 1:
        return sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, num_reqs, **kwargs)
    per_batch = num_reqs // num_batches
    # The last batch absorbs the remainder so totals add up to num_reqs.
    last_batch = per_batch + num_reqs % num_batches
    replies = []
    for _ in range(num_batches - 1):
        replies.extend(sdk_send_random_and_check(looper, txnPoolNodeSet,
                                                 sdk_pool, sdk_wallet,
                                                 per_batch, **kwargs))
    replies.extend(sdk_send_random_and_check(looper, txnPoolNodeSet,
                                             sdk_pool, sdk_wallet,
                                             last_batch, **kwargs))
    return replies
def sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool, sdk_wallet,
                               num_reqs, num_batches=1, timeout=Max3PCBatchWait):
    """Send requests in batches without checking replies, pausing after each batch."""
    if num_reqs < num_batches:
        raise BaseException(
            'sdk_send_batches_of_random_and_check method assumes that `num_reqs` <= num_batches*MaxbatchSize')
    if num_batches == 1:
        reqs = sdk_send_random_requests(looper, sdk_pool, sdk_wallet, num_reqs)
        looper.runFor(timeout)
        return reqs
    per_batch = num_reqs // num_batches
    # The last batch absorbs the remainder so totals add up to num_reqs.
    last_batch = per_batch + num_reqs % num_batches
    reqs = []
    for _ in range(num_batches - 1):
        reqs.extend(sdk_send_random_requests(looper, sdk_pool, sdk_wallet, per_batch))
        looper.runFor(timeout)
    reqs.extend(sdk_send_random_requests(looper, sdk_pool, sdk_wallet, last_batch))
    looper.runFor(timeout)
    return reqs
def sdk_sign_request_from_dict(looper, sdk_wallet, op, reqId=None, taa_acceptance=None, endorser=None):
    """Wrap *op* into a Request and sign it with the wallet DID; return the signed dict."""
    wallet_handle, did = sdk_wallet
    request = Request(operation=op,
                      reqId=reqId or random.randint(10, 100000),
                      protocolVersion=CURRENT_PROTOCOL_VERSION,
                      identifier=did,
                      taaAcceptance=taa_acceptance,
                      endorser=endorser)
    signed = looper.loop.run_until_complete(
        sign_request(wallet_handle, did, json.dumps(request.as_dict)))
    return json.loads(signed)
def sdk_check_request_is_not_returned_to_nodes(looper, nodeSet, request):
    """Verify that no replica of any node ordered *request*.

    *request* is the json dict of a signed request; it is converted to a
    Request object so its ordering key can be compared with what the nodes
    ordered.  The previous version passed ``identifier=``/``reqId=`` kwargs
    to ``checkRequestNotReturnedToNode``, whose signature is
    ``(node, key, instId)``, so every check raised TypeError.
    """
    instances = range(getNoInstances(len(nodeSet)))
    key = sdk_json_to_request_object(request).key
    coros = []
    for node, inst_id in itertools.product(nodeSet, instances):
        coros.append(partial(checkRequestNotReturnedToNode,
                             node=node,
                             key=key,
                             instId=inst_id))
    timeout = waits.expectedTransactionExecutionTime(len(nodeSet))
    looper.run(eventuallyAll(*coros, retryWait=1, totalTimeout=timeout))
def sdk_json_to_request_object(json_req):
    """Rebuild a plenum Request from its json dict representation."""
    return Request(identifier=json_req.get('identifier', None),
                   reqId=json_req['reqId'],
                   operation=json_req['operation'],
                   signature=json_req.get('signature'),
                   protocolVersion=json_req.get('protocolVersion'),
                   taaAcceptance=json_req.get('taaAcceptance', None))
def sdk_json_couples_to_request_list(json_couples):
    """Convert (request json, response) couples into Request objects."""
    return [sdk_json_to_request_object(couple[0]) for couple in json_couples]
def sdk_get_bad_response(looper, reqs, exception, message):
    """Expect reply handling to raise `exception` whose message contains `message`.

    Uses pytest's public `ExceptionInfo.value` instead of the private
    `_excinfo` tuple, which is an implementation detail and may change.
    """
    with pytest.raises(exception) as excinfo:
        sdk_get_and_check_replies(looper, reqs)
    assert message in excinfo.value.args[0]
def sdk_set_protocol_version(looper, version=CURRENT_PROTOCOL_VERSION):
    """Configure the client-side protocol version used by the indy-sdk."""
    coro = set_protocol_version(version)
    looper.loop.run_until_complete(coro)
# Context managers to be used with tconf fixture
@contextmanager
def perf_monitor_disabled(tconf):
    """Temporarily add "disable_view_change" to tconf.unsafe.

    The original `unsafe` set is restored even if the body raises.
    NOTE(review): despite the name, this disables view changes rather than
    a performance monitor - confirm the intended semantics.
    """
    old_unsafe = tconf.unsafe.copy()
    tconf.unsafe.add("disable_view_change")
    try:
        yield tconf
    finally:
        tconf.unsafe = old_unsafe
@contextmanager
def view_change_timeout(tconf, vc_timeout, propose_timeout=None):
    """Temporarily override view-change timeouts and propagate delay.

    `propose_timeout` defaults to `vc_timeout`.  Original values are restored
    even if the body raises.
    """
    old_view_change_timeout = tconf.NEW_VIEW_TIMEOUT
    old_propose_timeout = tconf.INITIAL_PROPOSE_VIEW_CHANGE_TIMEOUT
    old_propagate_request_delay = tconf.PROPAGATE_REQUEST_DELAY
    tconf.NEW_VIEW_TIMEOUT = vc_timeout
    tconf.INITIAL_PROPOSE_VIEW_CHANGE_TIMEOUT = vc_timeout if propose_timeout is None else propose_timeout
    tconf.PROPAGATE_REQUEST_DELAY = 0
    try:
        yield tconf
    finally:
        tconf.NEW_VIEW_TIMEOUT = old_view_change_timeout
        tconf.INITIAL_PROPOSE_VIEW_CHANGE_TIMEOUT = old_propose_timeout
        tconf.PROPAGATE_REQUEST_DELAY = old_propagate_request_delay
@contextmanager
def max_3pc_batch_limits(tconf, size, wait=10000):
    """Temporarily override the 3PC batch size and batch wait.

    Original values are restored even if the body raises.
    """
    old_size = tconf.Max3PCBatchSize
    old_wait = tconf.Max3PCBatchWait
    tconf.Max3PCBatchSize = size
    tconf.Max3PCBatchWait = wait
    try:
        yield tconf
    finally:
        tconf.Max3PCBatchSize = old_size
        tconf.Max3PCBatchWait = old_wait
@contextmanager
def freshness(tconf, enabled, timeout):
    """Temporarily override state-freshness updating and its interval.

    Original values are restored even if the body raises.
    """
    old_update_state = tconf.UPDATE_STATE_FRESHNESS
    old_timeout = tconf.STATE_FRESHNESS_UPDATE_INTERVAL
    tconf.UPDATE_STATE_FRESHNESS = enabled
    tconf.STATE_FRESHNESS_UPDATE_INTERVAL = timeout
    try:
        yield tconf
    finally:
        tconf.UPDATE_STATE_FRESHNESS = old_update_state
        tconf.STATE_FRESHNESS_UPDATE_INTERVAL = old_timeout
@contextmanager
def primary_disconnection_time(tconf, value):
    """Temporarily override how long a primary disconnection is tolerated.

    Original value is restored even if the body raises.
    """
    old_tolerate_disconnection = tconf.ToleratePrimaryDisconnection
    tconf.ToleratePrimaryDisconnection = value
    try:
        yield tconf
    finally:
        tconf.ToleratePrimaryDisconnection = old_tolerate_disconnection
@contextmanager
def acc_monitor(tconf, acc_monitor_enabled=True, acc_monitor_timeout=3, acc_monitor_delta=0):
    """Temporarily override accumulating-monitor settings.

    Original values are restored even if the body raises.
    """
    old_timeout = tconf.ACC_MONITOR_TIMEOUT
    old_delta = tconf.ACC_MONITOR_TXN_DELTA_K
    old_acc_monitor_enabled = tconf.ACC_MONITOR_ENABLED
    tconf.ACC_MONITOR_TIMEOUT = acc_monitor_timeout
    tconf.ACC_MONITOR_TXN_DELTA_K = acc_monitor_delta
    tconf.ACC_MONITOR_ENABLED = acc_monitor_enabled
    try:
        yield tconf
    finally:
        tconf.ACC_MONITOR_TIMEOUT = old_timeout
        tconf.ACC_MONITOR_TXN_DELTA_K = old_delta
        tconf.ACC_MONITOR_ENABLED = old_acc_monitor_enabled
def create_pre_prepare_params(state_root,
                              ledger_id=DOMAIN_LEDGER_ID,
                              txn_root=None,
                              timestamp=None,
                              bls_multi_sig=None,
                              view_no=0,
                              pool_state_root=None,
                              pp_seq_no=0,
                              inst_id=0,
                              audit_txn_root=None,
                              reqs=None,
                              bls_multi_sigs=None):
    """Build the positional argument list for a PrePrepare message.

    The order of the entries matches the PrePrepare field order, so it must
    not be changed.  Returns a list suitable for ``PrePrepare(*params)``.
    """
    if timestamp is None:
        timestamp = get_utc_epoch()
    # Request keys of this batch; a single random id when no reqs are given.
    req_idrs = [req.key for req in reqs] if reqs is not None else [random_string(32)]
    digest = OrderingService.generate_pp_digest(req_idrs, view_no, timestamp)
    params = [inst_id,
              view_no,
              pp_seq_no,
              timestamp,
              req_idrs,
              init_discarded(0),
              digest,
              ledger_id,
              state_root,
              txn_root or '1' * 32,
              0,
              True,
              pool_state_root or generate_state_root(),
              audit_txn_root or generate_state_root()]
    if bls_multi_sig:
        # Pass None for backward compatibility
        params.append(None)
        params.append([bls_multi_sig.as_list()])
    elif bls_multi_sigs is not None:
        # Pass None for backward compatibility
        params.append(None)
        params.append([sig.as_list() for sig in bls_multi_sigs])
    return params
def create_pre_prepare_no_bls(state_root, view_no=0, pool_state_root=None, pp_seq_no=0, inst_id=0, audit_txn_root=None):
    """Create a PrePrepare message with no BLS multi-signature attached."""
    pp_params = create_pre_prepare_params(state_root=state_root,
                                          view_no=view_no,
                                          pool_state_root=pool_state_root,
                                          pp_seq_no=pp_seq_no,
                                          inst_id=inst_id,
                                          audit_txn_root=audit_txn_root)
    return PrePrepare(*pp_params)
def create_commit_params(view_no, pp_seq_no, inst_id=0):
    """Positional argument list for a Commit message: [instId, viewNo, ppSeqNo]."""
    params = [inst_id]
    params.append(view_no)
    params.append(pp_seq_no)
    return params
def create_commit_no_bls_sig(req_key, inst_id=0):
    """Create a Commit for the (view_no, pp_seq_no) key without a BLS signature."""
    view_no, pp_seq_no = req_key
    return Commit(*create_commit_params(view_no, pp_seq_no, inst_id=inst_id))
def create_commit_with_bls_sig(req_key, bls_sig):
    """Create a Commit carrying a BLS signature for the domain ledger."""
    view_no, pp_seq_no = req_key
    params = create_commit_params(view_no, pp_seq_no)
    # ' ' keeps the legacy BLS_SIG field populated: it is optional but not Nullable.
    params.extend([' ', {DOMAIN_LEDGER_ID: bls_sig}])
    return Commit(*params)
def create_commit_with_bls_sigs(req_key, bls_sig, lid):
    """Create a Commit carrying a BLS signature for the given ledger id."""
    view_no, pp_seq_no = req_key
    params = create_commit_params(view_no, pp_seq_no)
    # ' ' keeps the legacy BLS_SIG field populated: it is optional but not Nullable.
    params.extend([' ', {str(lid): bls_sig}])
    return Commit(*params)
def create_commit_bls_sig(bls_bft, req_key, pre_prepare):
    """Create a Commit whose BLS fields are filled in by the given bls_bft."""
    view_no, pp_seq_no = req_key
    base_params = create_commit_params(view_no, pp_seq_no)
    return Commit(*bls_bft.update_commit(base_params, pre_prepare))
def create_prepare_params(view_no, pp_seq_no, state_root, inst_id=0):
    """Positional argument list for a Prepare with placeholder digest/txn root."""
    placeholder_txn_root = '1' * 32
    return [
        inst_id,
        view_no,
        pp_seq_no,
        get_utc_epoch(),
        "random digest",
        state_root,
        placeholder_txn_root,
    ]
def create_prepare_from_pre_prepare(pre_prepare):
    """Build the Prepare matching a given PrePrepare."""
    pp = pre_prepare
    return Prepare(pp.instId, pp.viewNo, pp.ppSeqNo, pp.ppTime,
                   pp.digest, pp.stateRootHash, pp.txnRootHash,
                   pp.auditTxnRootHash)
def create_commit_from_pre_prepare(pre_prepare):
    """Build the Commit matching a given PrePrepare."""
    return Commit(pre_prepare.instId, pre_prepare.viewNo, pre_prepare.ppSeqNo)
def create_prepare(req_key, state_root, inst_id=0):
    """Create a Prepare message for the (view_no, pp_seq_no) request key."""
    view_no, pp_seq_no = req_key
    return Prepare(*create_prepare_params(view_no, pp_seq_no, state_root, inst_id=inst_id))
def generate_state_root():
    """Return a random 32-byte value encoded as a base58 string."""
    random_bytes = os.urandom(32)
    return base58.b58encode(random_bytes).decode("utf-8")
def init_discarded(value=None):
    """Init the discarded field with value and return its serialized form."""
    # Falsy values (None, 0, '') yield an empty discarded list.
    discarded = [value] if value else []
    return invalid_index_serializer.serialize(discarded, toBytes=False)
def incoming_3pc_msgs_count(nodes_count: int = 4) -> int:
    """Number of 3PC messages one node receives during a single ordering round."""
    from_primary = 1                      # PRE-PREPARE from the primary
    from_non_primaries = nodes_count - 2  # PREPAREs from everyone except primary and self
    from_others = nodes_count - 1         # COMMITs from everyone except self
    # The primary receives the same total: no PRE-PREPARE, but one extra PREPARE.
    return from_primary + from_non_primaries + from_others
def check_missing_pre_prepares(nodes, count):
    """Assert every non-primary replica of instance 0 has >= count pending PPs."""
    for replica in getNonPrimaryReplicas(nodes, instId=0):
        assert count <= len(replica._ordering_service.prePreparesPendingPrevPP)
class MockTimestamp:
    """Callable that returns a preset time value; stands in for a clock in tests."""

    def __init__(self, value=None):
        # A signature default of `datetime.utcnow()` would be evaluated once at
        # class-definition time and silently shared by every later instance;
        # resolve "now" per instance instead.
        self.value = datetime.utcnow() if value is None else value

    def __call__(self):
        return self.value
class MockTimer(QueueTimer):
    """QueueTimer driven by a MockTimestamp so tests can control virtual time."""
    def __init__(self, start_time: int = 0):
        self._ts = MockTimestamp(start_time)
        QueueTimer.__init__(self, self._ts)
    def set_time(self, value):
        """
        Update time and run scheduled callbacks afterwards
        """
        self._ts.value = value
        self._log_time()
        self.service()
    def sleep(self, seconds):
        """
        Simulate sleeping for given amount of seconds, and run scheduled callbacks afterwards
        """
        self.set_time(self._ts.value + seconds)
    def advance(self):
        """
        Advance time to next scheduled callback and run that callback
        """
        if not self._events:
            return
        # _events / _pop_event / _next_timestamp come from QueueTimer internals.
        event = self._pop_event()
        self._ts.value = event.timestamp
        self._log_time()
        event.callback()
    def advance_until(self, value):
        """
        Advance time in steps until required value running scheduled callbacks in process
        """
        while self._events and self._next_timestamp() <= value:
            self.advance()
        self._ts.value = value
    def run_for(self, seconds):
        """
        Simulate running for given amount of seconds, running scheduled callbacks at required timestamps
        """
        self.advance_until(self._ts.value + seconds)
    def wait_for(self, condition: Callable[[], bool], timeout: Optional = None, max_iterations: int = 10000):
        """
        Advance time in steps until condition is reached, running scheduled callbacks in process
        Throws TimeoutError if fail to reach condition (under required timeout if defined)
        """
        counter = 0
        # Deadline is virtual time; falsy timeout (None or 0) means no deadline.
        deadline = self._ts.value + timeout if timeout else None
        while self._events and not condition() and counter < max_iterations:
            if deadline and self._next_timestamp() > deadline:
                raise TimeoutError("Failed to reach condition in required time, {} iterations passed".format(counter))
            self.advance()
            counter += 1
        if not condition():
            if not self._events:
                raise TimeoutError("Condition will be never reached, {} iterations passed".format(counter))
            else:
                raise TimeoutError("Failed to reach condition in {} iterations".format(max_iterations))
    def run_to_completion(self, max_iterations: int = 10000):
        """
        Advance time in steps until nothing is scheduled
        """
        counter = 0
        while self._events and counter < max_iterations:
            self.advance()
            counter += 1
        if self._events:
            raise TimeoutError("Failed to complete in {} iterations".format(max_iterations))
    def _log_time(self):
        # TODO: Probably better solution would be to replace real time in logs with virtual?
        logger.info("Virtual time: {}".format(self._ts.value))
class TestStopwatch:
    """Measures elapsed time against either a TimerService or the real clock."""

    def __init__(self, timer: Optional[TimerService] = None):
        # Fall back to perf_counter when no timer service is supplied.
        self._get_current_time = timer.get_current_time if timer else perf_counter
        self._start_time = self._get_current_time()

    def start(self):
        """Restart the measurement from the current time."""
        self._start_time = self._get_current_time()

    def has_elapsed(self, expected_delay: float, tolerance: float = 0.1) -> bool:
        """True if elapsed time is within `tolerance` fraction of `expected_delay`."""
        elapsed = self._get_current_time() - self._start_time
        return abs(expected_delay - elapsed) <= expected_delay * tolerance
class TestInternalBus(InternalBus):
    """InternalBus that records every message sent through it, for assertions."""
    def __init__(self):
        super().__init__()
        # Messages in the order they were sent.
        self.sent_messages = []
    def send(self, message: Any, *args):
        # Record, then delegate to the real bus.
        self.sent_messages.append(message)
        super().send(message, *args)
class MockNetwork(ExternalBus):
    """ExternalBus stub that records outgoing messages instead of delivering them."""

    def __init__(self):
        super().__init__(self._send_message)
        self.sent_messages = []

    def _send_message(self, msg: Any, dst: ExternalBus.Destination):
        # Capture (message, destination) pairs for later assertions.
        self.sent_messages.append((msg, dst))

    def connect(self, name: str):
        """Mark `name` as connected."""
        self.update_connecteds(self.connecteds | {name})

    def disconnect(self, name: str):
        """Mark `name` as disconnected."""
        self.update_connecteds(self.connecteds - {name})
def get_handler_by_type_wm(write_manager, h_type):
    """Return the first registered request handler of type `h_type`, or None."""
    for handlers in write_manager.request_handlers.values():
        for handler in handlers:
            if isinstance(handler, h_type):
                return handler
    return None
def create_pool_txn_data(node_names: List[str],
                         crypto_factory: BlsFactoryCrypto,
                         get_free_port: Callable[[], int],
                         nodes_with_bls: Optional[int] = None):
    """Generate genesis data for a test pool.

    Builds steward NYM txns, NODE txns (with BLS keys for the first
    `nodes_with_bls` nodes, or all when None), 4 trustee NYMs and a few
    extra user NYMs.  Returns a dict with keys 'txns', 'seeds' and
    'nodesWithBls'.
    """
    nodeCount = len(node_names)
    data = {'txns': [], 'seeds': {}, 'nodesWithBls': {}}
    for i, node_name in zip(range(1, nodeCount + 1), node_names):
        # Deterministic 32-char seeds padded with zeros from the names.
        data['seeds'][node_name] = node_name + '0' * (32 - len(node_name))
        steward_name = 'Steward' + str(i)
        data['seeds'][steward_name] = steward_name + '0' * (32 - len(steward_name))
        n_idr = SimpleSigner(seed=data['seeds'][node_name].encode()).identifier
        s_idr = DidSigner(seed=data['seeds'][steward_name].encode())
        data['txns'].append(
            Member.nym_txn(nym=s_idr.identifier,
                           verkey=s_idr.verkey,
                           role=STEWARD,
                           name=steward_name,
                           seq_no=i)
        )
        node_txn = Steward.node_txn(steward_nym=s_idr.identifier,
                                    node_name=node_name,
                                    nym=n_idr,
                                    ip='127.0.0.1',
                                    node_port=get_free_port(),
                                    client_port=get_free_port(),
                                    client_ip='127.0.0.1',
                                    services=[VALIDATOR],
                                    seq_no=i)
        if nodes_with_bls is None or i <= nodes_with_bls:
            # Attach BLS keys to this node's txn payload.
            _, bls_key, bls_key_proof = crypto_factory.generate_bls_keys(
                seed=data['seeds'][node_name])
            get_payload_data(node_txn)[DATA][BLS_KEY] = bls_key
            get_payload_data(node_txn)[DATA][BLS_KEY_PROOF] = bls_key_proof
            data['nodesWithBls'][node_name] = True
        data['txns'].append(node_txn)
    # Add 4 Trustees
    for i in range(4):
        trustee_name = 'Trs' + str(i)
        data['seeds'][trustee_name] = trustee_name + '0' * (
            32 - len(trustee_name))
        t_sgnr = DidSigner(seed=data['seeds'][trustee_name].encode())
        data['txns'].append(
            Member.nym_txn(nym=t_sgnr.identifier,
                           verkey=t_sgnr.verkey,
                           role=TRUSTEE,
                           name=trustee_name)
        )
    # A few fixed extra users with hard-coded seeds.
    more_data_seeds = \
        {
            "Alice": "99999999999999999999999999999999",
            "Jason": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
            "John": "dddddddddddddddddddddddddddddddd",
            "Les": "ffffffffffffffffffffffffffffffff"
        }
    more_data_users = []
    for more_name, more_seed in more_data_seeds.items():
        signer = DidSigner(seed=more_seed.encode())
        more_data_users.append(
            Member.nym_txn(nym=signer.identifier,
                           verkey=signer.verkey,
                           name=more_name,
                           creator="5rArie7XKukPCaEwq5XGQJnM9Fc5aZE3M9HAPVfMU2xC")
        )
    data['txns'].extend(more_data_users)
    data['seeds'].update(more_data_seeds)
    return data
def get_pp_seq_no(nodes: list, inst_id=0) -> int:
    """Return the common last-ordered ppSeqNo of the given replica instance.

    Asserts that every node agrees on the value.
    """
    seq_nos = {n.replicas._replicas[inst_id].last_ordered_3pc[1] for n in nodes}
    assert len(seq_nos) == 1
    return seq_nos.pop()
| plenum/test/helper.py | 57,035 | Advance time to next scheduled callback and run that callback
Advance time in steps until required value running scheduled callbacks in process
the client must get at least :math:`f+1` responses
Checks if all the given nodes have the expected view no
:param nodes: The nodes to check for
:param expectedViewNo: the view no that the nodes are expected to have
:return:
Return a set of nodes with the nodes in exclude removed.
:param nodeSet: the set of nodes
:param exclude: the list of nodes or node names to exclude
:return: the filtered nodeSet
init discarded field with value and return message like representation
Simulate running for given amount of seconds, running scheduled callbacks at required timestamps
Advance time in steps until nothing is scheduled
Sends message from one node to another
:param nodes:
:param sender: sender
:param reciever: recipient
:param msg: optional message - by default random one generated
:return:
Sends message from one node to all others
:param nodes:
:param sender: sender
:param msg: optional message - by default random one generated
:return:
Update time and run scheduled callbacks afterwards
Simulate sleeping for given amount of seconds, and run scheduled callbacks afterwards
Waits for nodes to come to same view.
Raises exception when time is out
Advance time in steps until condition is reached, running scheduled callbacks in process
Throws TimeoutError if fail to reach condition (under required timeout if defined)
noinspection PyUnresolvedReferences This method assumes that `num_reqs` <= num_batches*MaxbatchSize noinspection PyIncorrectDocstring noinspection PyIncorrectDocstring test sending messages from every node to every other node TODO split send and check so that the messages can be sent concurrently Skipping the view no and time from each ordered request TODO: Move this logic to eventuallyAll it's ok to iterate through all txns since this is a test SDK Question: Why this method is called sdk_gen_request? It does not use the indy-sdk Give nodes some time to start ordering, so that requests are really alternating TODO: change timeout evaluating logic, when sdk will can tuning timeout from outside TODO: Check places where sdk_get_replies used without sdk_check_reply We need to be sure that test behaviour don't need to check response validity TODO: change timeout evaluating logic, when sdk will can tuning timeout from outside here we try to take into account what timeout for execution N request - total_timeout should be in timeout_per_request < total_timeout < timeout_per_request * N we cannot just take (timeout_per_request * N) because it is so huge. (for timeout_per_request=5 and N=10, total_timeout=50sec) lets start with some simple formula: This method assumes that `num_reqs` <= num_batches*MaxbatchSize Context managers to be used with tconf fixture Pass None for backward compatibility Pass None for backward compatibility Use ' ' as BLS_SIG for backward-compatibility as BLS_SIG in COMMIT is optional but not Nullable Use ' ' as BLS_SIG for backward-compatibility as BLS_SIG in COMMIT is optional but not Nullable Message from Primary Messages from all nodes exclude primary and self node Messages from all nodes exclude self node The primary node receives the same number of messages. Doesn't get pre-prepare, but gets one more prepare TODO: Probably better solution would be to replace real time in logs with virtual? Add 4 Trustees | 3,434 | en | 0.820913 |
# !/usr/bin/env python2
from math import pi, cos, sin, atan2, acos, sqrt, pow, radians, asin
from math_calc import *
from service_router import readPos
class LegConsts(object):
    """Geometric constants describing a single leg of the hexapod."""
    def __init__(self, x_off, y_off, z_off, ang_off, leg_nr):
        # Offsets from the body origin to the first (coxa) servo.
        self.x_off = x_off              # mm along X
        self.y_off = y_off              # mm along Y
        self.z_off = z_off              # mm along Z
        self.ang_off = ang_off          # angular offset (rad)
        # Fixed angular offsets of the femur and tibia links (rad).
        self.f_ang_off = radians(13.33)
        self.t_ang_off = radians(-25.90)
        # Link lengths (mm).
        self.c_len = 66.50              # coxa
        self.f_len = 144.40             # femur
        self.t_len = 287                # tibia
        self.leg_nr = leg_nr            # human-readable leg name
class Kinematics(object):
    ''' Class object to compute various types of kinematics data for AntBot.
        Python 2 code (print statements, xrange).
    '''
    # Origin to coxa: x_off, y_off, z_off, ang_off, name
    leg1 = LegConsts(70.5, 122.225, -14.9, - pi / 3, "Leg 1")
    leg2 = LegConsts(-70.5, 122.225, -14.9, -2 * pi / 3, "Leg 2")
    leg3 = LegConsts(141.33, 0, -14.9, 0, "Leg 3")
    leg4 = LegConsts(-141.33, 0, -14.9, pi, "Leg 4")
    leg5 = LegConsts(70.5, -122.225, -14.9, pi / 3, "Leg 5")
    leg6 = LegConsts(-70.5, -122.225, -14.9, 2 * pi / 3, "Leg 6")
    leg_list = [leg1, leg2, leg3, leg4, leg5, leg6]
    ################
    # Public methods
    ################
    def doFkine(self, all_positions):
        ''' Function: computes forward kinematics
            Parameter: all_positions: list with 18 values of servo positions in steps from ID1 to ID18
            Return: ee_xyz: list of x,y,z coordinates for all 6 legs
                    servoPos: servo positions in radians
        '''
        servoPos = self.step_to_rad(all_positions)
        ee_xyz = []
        j = 0
        # Three servo angles per leg -> one (x, y, z) end-effector triple.
        for i in xrange(0, 16, 3):
            ee_xyz.extend(self.calc_fkine(servoPos[i:i + 3], self.leg_list[j]))
            j += 1
        return ee_xyz, servoPos
    def doIkine(self, all_positions, x, y, z, body_orient=None, leg=None, auto=None):
        ''' Function: computes inverse kinematics
            Parameters: all_positions: list with 18 values of servo positions in steps from ID1 to ID18;
                        x,y,z: desired change in x,y,z coordinates (same for all legs)
                        body_orient: list of 3 integers meaning alpha,beta,gamma rotation in degrees
                        leg: list with integers meaning leg numbers to compute inverse for them only
            Return: list of 18 integers with servo steps
        '''
        ee_xyz, servoPos = self.doFkine(all_positions)
        thetas = []
        j = 0
        if isinstance(leg, int):
            leg = [leg]
        elif isinstance(leg, tuple):
            leg = list(leg)
        elif isinstance(body_orient, tuple):
            body_orient = list(body_orient)
        if body_orient:
            # Optional parameter. Compute inverse with body orientation
            body_orient = [radians(d) for d in body_orient]
            alpha_rad, beta_rad, gama_rad = body_orient[0], body_orient[1], body_orient[2]
            x = (cos(gama_rad) * sin(beta_rad) * z + sin(gama_rad) * sin(beta_rad) * y + x * cos(beta_rad)) \
                * cos(alpha_rad) - sin(alpha_rad) * (cos(gama_rad) * y - sin(gama_rad) * z)
            y = (cos(gama_rad) * sin(beta_rad) * z + sin(gama_rad) * sin(beta_rad) * y + x * cos(beta_rad)) \
                * sin(alpha_rad) + cos(alpha_rad) * (cos(gama_rad) * y - sin(gama_rad) * z)
            z = -sin(beta_rad) * x + cos(beta_rad) * sin(gama_rad) * y + cos(beta_rad) * cos(gama_rad) * z
        if leg:
            # Optional parameter. Compute inverse for a specific leg/s.
            for i in range(len(leg)):
                j = leg[i] - 1
                thetas.extend(self.calc_ikine(x, y, z, ee_xyz[j * 3:j * 3 + 3], self.leg_list[j]))
        else:
            # Compute inverse for all legs if not leg specified.
            for i in xrange(0, 16, 3):
                thetas.extend(self.calc_ikine(x, y, z, ee_xyz[i:i + 3], self.leg_list[j]))
                j += 1
        result = [int(each_theta) for each_theta in self.rad_to_step(thetas)]
        return result
    def doIkineRotationEuler(self, all_positions, alpha_rad, beta_rad, gama_rad, dist_x, dist_y, dist_z):
        ''' Function: computes inverse kinematics and body rotation (Parallel kinematics)
            Parameters: all_positions: list with 18 values of servo positions in steps from ID1 to ID18;
                        alpha_rad, beta_rad, gama_rad: desired body rotation about x, y, z (radians)
                        dist_x, dist_y, dist_z: desired body translation (mm)
            Return: list of 18 integers with servo steps
            NOTE(review): the original docstring was corrupted by pasted
            commented-out code; parameter meanings above inferred from usage -
            confirm against callers.
        '''
        final_eexyz, ee_xyz = self.calc_rot_matrix(all_positions, alpha_rad, beta_rad, gama_rad)
        thetas = []
        j = 0
        for i in xrange(0, 16, 3):
            thetas.extend(self.calc_ikine(final_eexyz[i] - dist_x, final_eexyz[i + 1] - dist_y, final_eexyz[i + 2] - dist_z, ee_xyz[i:i + 3], self.leg_list[j]))
            j += 1
        result = [int(each_theta) for each_theta in self.rad_to_step(thetas)]
        return result
    def printForward(self, all_positions):
        ''' Function: Prints x,y,z coordinates of each leg
            Parameters: all_positions: list with 18 values of servo positions in steps from ID1 to ID18;
        '''
        ee_list, theta_list = self.doFkine(all_positions)
        RoundedCoords = ['%.4f' % elem for elem in ee_list]
        print ""
        print "X,Y,Z coordinates of Leg end-points: "
        print "       " + str(["X       ", "  Y    ", "  Z    "])
        print "Leg 1: " + str(RoundedCoords[0:3])
        print "Leg 2: " + str(RoundedCoords[3:6])
        print "Leg 3: " + str(RoundedCoords[6:9])
        print "Leg 4: " + str(RoundedCoords[9:12])
        print "Leg 5: " + str(RoundedCoords[12:15])
        print "Leg 6: " + str(RoundedCoords[15:18])
        print ""
    def printInverse(self, all_positions, x, y, z):
        ''' Function: Prints servo positions, in radians, needed to reach the position
            Parameters: theta_list: 18 servo positions in radians.
        '''
        theta_list = self.doIkine(all_positions, x, y, z)
        RoundedThetas = ['%.4f' % elem for elem in theta_list]
        print ""
        print "Theta angles of each servo:"
        print "       " + str(["Coxa    ", "Femur   ", "Tibia"])
        print "Leg 1: " + str(RoundedThetas[0:3])
        print "Leg 2: " + str(RoundedThetas[3:6])
        print "Leg 3: " + str(RoundedThetas[6:9])
        print "Leg 4: " + str(RoundedThetas[9:12])
        print "Leg 5: " + str(RoundedThetas[12:15])
        print "Leg 6: " + str(RoundedThetas[15:18])
        print ""
    def printKinematics(self, all_positions, x, y, z):
        # Convenience wrapper: print both forward and inverse results.
        self.printForward(all_positions)
        self.printInverse(all_positions, x, y, z)
    #################
    # Private methods
    #################
    def calc_fkine(self, servoPos, leg):
        # Forward kinematics of one leg: 3 joint angles -> (x, y, z) in mm.
        theta1 = servoPos[0] - leg.ang_off
        theta2 = servoPos[1] + leg.f_ang_off
        theta3 = servoPos[2] + leg.t_ang_off
        ee_z = leg.f_len * sin(theta2) + leg.t_len * sin(theta3 + theta2) + leg.z_off
        ee_x = leg.x_off + cos(theta1) * (leg.c_len + leg.f_len * cos(theta2) + leg.t_len * cos(theta3 + theta2))
        ee_y = leg.y_off + sin(theta1) * (leg.c_len + leg.f_len * cos(theta2) + leg.t_len * cos(theta3 + theta2))
        return [ee_x, ee_y, ee_z]
    def calc_ikine(self, x, y, z, ee_xyz, leg, auto=None):
        # Inverse kinematics of one leg: desired delta + current end point ->
        # 3 joint angles.  With auto set, returns -1 instead of clamping when
        # the target is unreachable or joint limits are exceeded.
        init_X = ee_xyz[0]
        init_Y = ee_xyz[1]
        init_Z = ee_xyz[2]
        X = init_X + (x) - leg.x_off
        Y = init_Y + (y) - leg.y_off
        Z = init_Z + (z) - leg.z_off
        theta1 = atan2(Y, X) + leg.ang_off
        # Normalize theta1 into (-pi, pi].
        if theta1 < -pi:
            theta1 += 2 * pi
        if theta1 > pi:
            theta1 -= 2 * pi
        new_x = cos(leg.ang_off) * X - sin(leg.ang_off) * Y
        new_y = sin(leg.ang_off) * X + cos(leg.ang_off) * Y
        final_x = cos(theta1) * new_x + sin(theta1) * new_y - leg.c_len
        s = sqrt(pow(final_x, 2) + pow(Z, 2))
        try:
            # Law of cosines for the knee angle; out-of-range argument means
            # the point is unreachable.
            t3_term = (-pow(s, 2) + pow(leg.f_len, 2) + pow(leg.t_len, 2)) / (2 * leg.f_len * leg.t_len)
            t3 = pi - acos(t3_term)
        except ValueError:
            print "Cannot compute acos(", t3_term, ") for ", leg.leg_nr
            if auto is None:
                # Clamp to an almost-straight / almost-folded knee.
                if t3_term < 0:
                    t3 = pi - acos(-0.99)
                else:
                    t3 = pi - acos(0.99)
            else:
                return -1
        theta3 = -t3 - leg.t_ang_off
        theta2 = -(-atan2(Z, final_x) - atan2(leg.t_len * sin(t3), leg.f_len + leg.t_len * cos(t3)) + leg.f_ang_off)
        if auto is not None:
            # Reject solutions outside the servo joint limits.
            if (theta2 > 1.8 or theta2 < -1.8) or (theta3 < -2.2 or theta3 > 2.2):
                return -1
        return [theta1, theta2, theta3]
    def calc_rot_displacement(self, alpha_rad, beta_rad, gama_rad, ee_xyz):
        # Displacement of one end point under a body rotation (Euler angles).
        pre_x = ee_xyz[0]
        pre_y = ee_xyz[1]
        pre_z = ee_xyz[2]
        r_term1 = (cos(gama_rad) * sin(beta_rad) * pre_z + sin(gama_rad) * sin(beta_rad) * pre_y + pre_x * cos(beta_rad))
        r_term2 = (cos(gama_rad) * pre_y - sin(gama_rad) * pre_z)
        r_x = r_term1 * cos(alpha_rad) - r_term2 * sin(alpha_rad) - pre_x
        r_y = r_term1 * sin(alpha_rad) + r_term2 * cos(alpha_rad) - pre_y
        r_z = - sin(beta_rad) * pre_x + cos(beta_rad) * sin(gama_rad) * pre_y + cos(beta_rad) * cos(gama_rad) * pre_z - pre_z
        return [r_x, r_y, r_z]
    def calc_rot_matrix(self, all_positions, alpha_rad, beta_rad, gama_rad):
        # Rotation displacements for all 6 end points plus the current points.
        ee_xyz, servoPos = self.doFkine(all_positions)
        rot_val_list = []
        for i in xrange(0, 16, 3):
            rot_val_list.extend(self.calc_rot_displacement(alpha_rad, beta_rad, gama_rad, ee_xyz[i:i + 3]))
        return rot_val_list, ee_xyz
    def rad_to_step(self, pos_rads):
        # Radians -> Dynamixel steps (2048 steps = pi, centered at 2048).
        return [i / pi * 2048 + 2048 for i in pos_rads]
    def step_to_rad(self, pos_steps):
        # Dynamixel steps -> radians (inverse of rad_to_step).
        return [(((x / 2047.5) - 1) * pi) for x in pos_steps]
    def make_poligonCorners(self, all_positions, leg_list):
        # NOTE(review): `leg_list is int` is an identity check against the type
        # object and is always False; `isinstance(leg_list, int)` was probably
        # intended - confirm.
        if leg_list is int:
            leg_list = [leg_list]
        xyz_polygon = []
        ee_xyz, servoPos = self.doFkine(all_positions)
        # Reorder legs so consecutive entries form the support polygon outline.
        newEe_xyz = [ee_xyz[0], ee_xyz[1], ee_xyz[2], ee_xyz[3], ee_xyz[4], ee_xyz[5],
                     ee_xyz[9], ee_xyz[10], ee_xyz[11], ee_xyz[15], ee_xyz[16], ee_xyz[17],
                     ee_xyz[12], ee_xyz[13], ee_xyz[14], ee_xyz[6], ee_xyz[7], ee_xyz[8]]
        for i in range(len(leg_list)):
            j = leg_list[i] - 1
            xyz_polygon.extend((newEe_xyz[j * 3:j * 3 + 3]))
        return xyz_polygon
    def make_polygonLines(self, leg_list, ee_xyz):
        # NOTE(review): this method is broken as written: `len(ee_xyz / 3)`
        # divides a list by an int (TypeError) and `line.extend = [...]`
        # assigns to the list's `extend` attribute instead of calling it.
        # It likely never ran; confirm intent before fixing.
        print("leglistLins", leg_list)
        line = []
        for i in range(len(ee_xyz / 3)):
            j = i - 1
            line.extend = [ee_xyz[3 * j + 3] - ee_xyz[3 * j],
                           ee_xyz[3 * j + 4] - ee_xyz[3 * j + 1],
                           ee_xyz[3 * j + 5] - ee_xyz[3 * j + 2]]
        return line
    def check_stabilty(self, t_poly=None):
        # NOTE(review): name keeps original (misspelled) spelling to preserve
        # the public interface.
        ee_xyz, servoPos = self.doFkine(readPos())
        tac = [False, True, False, True, True, False]
        leg_list = []
        for i in range(len(tac)):
            if tac[i] is True:
                leg_list.extend([i + 1])
        # NOTE(review): make_polygonLines returns a single list but is
        # unpacked into two names here - this raises at runtime; confirm the
        # intended API.
        poly_lines, poly_points = self.make_polygonLines(leg_list, ee_xyz)
        print("lines", poly_lines)
        if tac[1] is True and tac[2] is True and tac[5]is True:
            # gamma, beta = 10,20 #self.get_orientation(tac)
            # n = [0,-sin(beta),cos(beta)]
            print("im not here")
            P1 = [ee_xyz[3], ee_xyz[4], 1]
            P2 = [ee_xyz[6], ee_xyz[7], 1]
            P3 = [ee_xyz[15], ee_xyz[16], 1]
            print(P1, P2, P3)
        elif tac[0] is True and tac[3] is True and tac[4] is True:
            print("im here")
            P1 = [ee_xyz[0], ee_xyz[1], 1]
            P3 = [ee_xyz[9], ee_xyz[10], 1]
            P2 = [ee_xyz[12], ee_xyz[13], 1]
            print(P1, P2, P3)
        k = 1  # dotProduct(n,P1)
        x = 0
        y = 1
        z = 2
        # Barycentric-style coordinates of the projected CoG w.r.t. P1,P2,P3.
        lambda_1 = ((P2[x] * P3[y] - P2[y] * P3[x]) * k) / (P1[x] * P2[y] * P3[z] - P1[x] * P2[z] * P3[y] - P1[y] * P2[x] * P3[z] + P1[y] * P2[z] * P3[x] + P1[z] * P2[x] * P3[y] - P1[z] * P2[y] * P3[x])
        lambda_2 = -((P1[x] * P3[y] - P1[y] * P3[x]) * k) / (P1[x] * P2[y] * P3[z] - P1[x] * P2[z] * P3[y] - P1[y] * P2[x] * P3[z] + P1[y] * P2[z] * P3[x] + P1[z] * P2[x] * P3[y] - P1[z] * P2[y] * P3[x])
        lambda_3 = ((P1[x] * P2[y] - P1[y] * P2[x]) * k) / (P1[x] * P2[y] * P3[z] - P1[x] * P2[z] * P3[y] - P1[y] * P2[x] * P3[z] + P1[y] * P2[z] * P3[x] + P1[z] * P2[x] * P3[y] - P1[z] * P2[y] * P3[x])
        # NOTE(review): lambda_3 is tested twice below (likely a typo), and
        # `inside` is only bound when all three nested conditions hold -
        # otherwise the return raises NameError; confirm intended behaviour.
        if lambda_1 > 0.1 and lambda_2 > 0.1 and lambda_3 > 0.1 and lambda_3 > 0.1:
            if lambda_1 < 0.9 and lambda_2 < 0.9 and lambda_3 < 0.9:
                if lambda_1 + lambda_2 + lambda_3 == 1:
                    inside = True
        side1 = subtract(P1, P2)
        side2 = subtract(P3, P2)
        side3 = subtract(P1, P3)
        G = [0, 0, 1]
        P2_G = subtract(G, P2)
        P3_G = subtract(G, P3)
        margin_s1 = sqrt(pow(dotProduct(P2_G, unit_vec(side1)), 2) + dotProduct(P2_G, P2_G))
        margin_s2 = sqrt(pow(dotProduct(P2_G, unit_vec(side2)), 2) + dotProduct(P2_G, P2_G))
        margin_s3 = sqrt(pow(dotProduct(P3_G, unit_vec(side3)), 2) + dotProduct(P3_G, P3_G))
        stability_margin = min(margin_s1, margin_s2, margin_s3)
        print(stability_margin, inside)
        return stability_margin, inside
    def get_orientation(self, leg_list):
        # Body orientation (gamma, beta in degrees) from three leg end points.
        ee_xyz, servoPos = self.doFkine(readPos())
        p1 = ee_xyz[3 * (leg_list[0] - 1):3 * (leg_list[0] - 1) + 3]
        p2 = ee_xyz[3 * (leg_list[1] - 1):3 * (leg_list[1] - 1) + 3]
        p3 = ee_xyz[3 * (leg_list[2] - 1):3 * (leg_list[2] - 1) + 3]
        p21 = subtract(p2, p1)
        p23 = subtract(p2, p3)
        normz = crossProduct(p21, p23)
        beta = atan2(normz[0], normz[2]) * 180 / pi
        gamma = -atan2(normz[1], normz[2]) * 180 / pi
        return gamma, beta
    def calc_translationStairs(self, riser, climbed_stairs_front, climbed_stairs_rear):
        # gamma, beta = self.get_orientation([1,5,6])
        ee_xyz, servopos = self.doFkine(readPos())
        dist_y = abs(ee_xyz[1] - ee_xyz[13])
        # Height difference between front and rear contact points.
        riser_diff = (climbed_stairs_front - climbed_stairs_rear) * riser
        omega = asin(riser_diff / dist_y) * 180 / pi
        AB = -ee_xyz[14] + 30
        AC = AB / cos(omega * pi / 180)
        BC = AC * sin(omega * pi / 180)
        BE = sqrt(pow(ee_xyz[12], 2) + pow(ee_xyz[11], 2)) - 141.33
        CE = BE - BC
        CD = BC * CE / AC
        if AC + CD <= riser_diff:
            trans_z_g = riser_diff - AC - CD + 10
            translation_z = trans_z_g * cos(omega * pi / 180)
            translation_y = trans_z_g * sin(omega * pi / 180)
        else:
            translation_z = 0
            translation_y = 0
        return [translation_z, translation_y]
| dns_main/src/kinematics.py | 15,667 | !/usr/bin/env python2 X offset from body origin to first servo (mm) Y offset from body origin to first servo (mm) Z offset from body origin to first servo (mm) Angular offset from body origin to first servo (mm) Angular offset of Femur Angular offset of Tibia Link length of Coxa (mm) Link length of Femur (mm) Link length of Tibia (mm) Leg Number Origin to coxa: x_off, y_off, z_off, ang_off, name Public methods Optional parameter. Compute inverse with body orientation Optional parameter. Compute inverse for a specific leg/s. Compute inverse for all legs if not leg specified. Private methods gamma, beta = 10,20 self.get_orientation(tac) n = [0,-sin(beta),cos(beta)] dotProduct(n,P1) gamma, beta = self.get_orientation([1,5,6]) | 733 | en | 0.597333 |
'''
Module containing python objects matching the ESGF database tables.
'''
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey
from sqlalchemy.orm import relationship
Base = declarative_base()
# Role names as stored in the esgf_security schema.
ROLE_USER = 'user'
ROLE_PUBLISHER = 'publisher'
ROLE_ADMIN = 'admin'
ROLE_SUPERUSER = 'super'
class ESGFUser(Base):
    """ Class that represents the 'esgf_security.user' table in the ESGF database."""

    __tablename__ = 'user'
    #__table_args__ = { 'autoload':True, 'schema':'esgf_security' }
    __table_args__ = { 'schema':'esgf_security' }

    id = Column(Integer, primary_key=True)
    # Personal details
    firstname = Column(String)
    middlename = Column(String)
    lastname = Column(String)
    email = Column(String)
    # Account identity and credentials
    username = Column(String)
    password = Column(String)
    dn = Column(String)
    openid = Column(String)
    # Affiliation and location
    organization = Column(String)
    organization_type = Column(String)
    city = Column(String)
    state = Column(String)
    country = Column(String)
    # NOTE(review): the meaning of status_code / notification_code values is
    # defined by the ESGF security schema, not visible here - confirm there.
    status_code = Column(Integer)
    verification_token = Column(String)
    notification_code = Column(Integer)
class ESGFGroup(Base):
    """ Class that represents the 'esgf_security.group' table in the ESGF database."""

    __tablename__ = 'group'
    __table_args__ = { 'schema':'esgf_security' }

    id = Column(Integer, primary_key=True)
    name = Column(String)
    description = Column(String)
    # Whether the group is listed publicly and whether joins are auto-approved.
    visible = Column(Boolean)
    automatic_approval = Column(Boolean)
class ESGFRole(Base):
    """ Class that represents the 'esgf_security.role' table in the ESGF database."""

    __tablename__ = 'role'
    __table_args__ = { 'schema':'esgf_security' }

    id = Column(Integer, primary_key=True)
    name = Column(String)
    description = Column(String)
class ESGFPermission(Base):
    """ Class that represents the 'esgf_security.permission' table in the ESGF database."""

    __tablename__ = 'permission'
    __table_args__ = { 'schema':'esgf_security' }

    # Association table: composite primary key over (user, group, role).
    user_id = Column(Integer, ForeignKey('esgf_security.user.id'), primary_key=True)
    group_id = Column(Integer, ForeignKey('esgf_security.group.id'), primary_key=True)
    role_id = Column(Integer, ForeignKey('esgf_security.role.id'), primary_key=True)
    approved = Column(Boolean)

    user = relationship("ESGFUser")
    group = relationship("ESGFGroup")
    role = relationship("ESGFRole")
| cog/plugins/esgf/objects.py | 2,461 | Class that represents the 'esgf_secitity.group' table in the ESGF database.
Class that represents the 'esgf_security.permission' table in the ESGF database.
Class that represents the 'esgf_security.role' table in the ESGF database.
Class that represents the 'esgf_security.user' table in the ESGF database.
Module containing python objects matching the ESGF database tables.
__table_args__ = { 'autoload':True, 'schema':'esgf_security' } | 438 | en | 0.456772 |
import logging
from typing import Iterable, Mapping, Optional, Union
import gym
import numpy as np
import torch as th
from stable_baselines3.common import on_policy_algorithm, vec_env
from imitation.data import types
from imitation.rewards import discrim_nets
from imitation.algorithms.adversarial import AdversarialTrainer
from .cnn_discriminator import ActObsCNN
class CNNGAIL(AdversarialTrainer):
    """GAIL trainer whose default discriminator uses a CNN body
    (``ActObsCNN``), so it can handle image observations."""

    def __init__(
        self,
        venv: vec_env.VecEnv,
        expert_data: Union[Iterable[Mapping], types.Transitions],
        expert_batch_size: int,
        gen_algo: on_policy_algorithm.OnPolicyAlgorithm,
        discrim=None,
        *,
        discrim_kwargs: Optional[Mapping] = None,
        **kwargs,
    ):
        """Generative Adversarial Imitation Learning that accepts Image Obs

        Most parameters are described in and passed to `AdversarialTrainer.__init__`.
        Additional parameters that `CNNGAIL` adds on top of its superclass initializer are
        as follows:

        Args:
            discrim: Optional pre-built discriminator network. When None, a
                `DiscrimNetGAIL` with a CNN body (`ActObsCNN`) is constructed.
            discrim_kwargs: Optional keyword arguments to use while constructing the
                DiscrimNetGAIL.
        """
        discrim_kwargs = discrim_kwargs or {}
        # Identity comparison (`is None`), not `== None`: avoids surprises from
        # custom __eq__ implementations and follows PEP 8.
        if discrim is None:
            discrim = discrim_nets.DiscrimNetGAIL(
                venv.observation_space,
                venv.action_space,
                discrim_net=ActObsCNN,
                **discrim_kwargs,
            )
        logging.info("using CNN GAIL")
        super().__init__(
            venv, gen_algo, discrim, expert_data, expert_batch_size, **kwargs
        )
| cnn_modules/cnn_gail.py | 1,593 | Generative Adversarial Imitation Learning that accepts Image Obs
Most parameters are described in and passed to `AdversarialTrainer.__init__`.
Additional parameters that `CNNGAIL` adds on top of its superclass initializer are
as follows:
Args:
discrim_kwargs: Optional keyword arguments to use while constructing the
DiscrimNetGAIL. | 346 | en | 0.688433 |
# -*- coding: utf-8 -*-
#
# Review Heatmap Add-on for Anki
# Copyright (C) 2016-2019 Glutanimate <https://glutanimate.com>
#
# This file was automatically generated by Anki Add-on Builder v0.1.4
# It is subject to the same licensing terms as the rest of the program
# (see the LICENSE file which accompanies this program).
#
# WARNING! All changes made in this file will be lost!
"""
Initializes generated Qt forms/resources
"""
__all__ = [
"options",
"contrib"
]
from . import options
from . import contrib
| review_heatmap/gui/forms/anki21/__init__.py | 520 | Initializes generated Qt forms/resources
-*- coding: utf-8 -*- Review Heatmap Add-on for Anki Copyright (C) 2016-2019 Glutanimate <https://glutanimate.com> This file was automatically generated by Anki Add-on Builder v0.1.4 It is subject to the same licensing terms as the rest of the program (see the LICENSE file which accompanies this program). WARNING! All changes made in this file will be lost! | 403 | en | 0.877376 |
""" Orlov Module : workspace module fixture. """
import os
import logging
import pytest
from orlov.libs.workspace import Workspace
logger = logging.getLogger(__name__)
@pytest.fixture(scope='session')
def workspace(request) -> Workspace:
    """ Workspace Factory Fixture.

    Uses the ``--workspace`` command-line option as the result directory
    when provided; otherwise falls back to ``<cwd>/result``, creating the
    folder if it does not already exist.

    Yields:
        directory(Workspace): Workspace Created.
    """
    logger.debug('Setup of test structure.')
    # create screenshot directory
    result_dir = request.config.getoption('workspace')
    if not result_dir:
        result_dir = os.path.join(os.getcwd(), 'result')
        # makedirs(exist_ok=True) avoids the check-then-create race of the
        # previous os.path.exists() + os.mkdir() sequence.
        os.makedirs(result_dir, exist_ok=True)
        logger.debug('Using results folder %s', result_dir)
    yield Workspace(result_dir)
| orlov/libs/workspace/fixture.py | 822 | Workspace Factory Fixture.
Yields:
directory(Workspace): Workspace Created.
Orlov Module : workspace module fixture.
create screenshot directory | 152 | en | 0.596795 |
import dask
import dask.array as da
import numpy as np
import numpy.testing as npt
import pytest
import sklearn
import sklearn.linear_model
import sklearn.metrics
from dask.array.utils import assert_eq
import dask_ml.metrics
import dask_ml.wrappers
def test_pairwise_distances(X_blobs):
    """dask_ml.metrics.pairwise_distances matches scikit-learn on blob data."""
    # Every 100th row serves as the set of "centers" to measure against.
    centers = X_blobs[::100].compute()
    result = dask_ml.metrics.pairwise_distances(X_blobs, centers)
    expected = sklearn.metrics.pairwise_distances(X_blobs.compute(), centers)
    assert_eq(result, expected, atol=1e-4)
def test_pairwise_distances_argmin_min(X_blobs):
    """argmin/min pairwise distances agree between dask_ml and scikit-learn."""
    centers = X_blobs[::100].compute()

    # X_blobs has 500 rows per block.
    # Ensure 500 rows in the scikit-learn version too.
    # (sklearn chunks its computation by working_memory; 80 bytes/row * 500
    # rows, expressed in MiB, forces the same 500-row chunking.)
    working_memory = float(80 * 500) / 2 ** 20
    ctx = sklearn.config_context(working_memory=working_memory)

    with ctx:
        a_, b_ = sklearn.metrics.pairwise_distances_argmin_min(
            X_blobs.compute(), centers
        )
    a, b = dask_ml.metrics.pairwise_distances_argmin_min(X_blobs, centers)
    a, b = dask.compute(a, b)
    npt.assert_array_equal(a, a_)
    npt.assert_array_equal(b, b_)
def test_euclidean_distances():
    """euclidean_distances matches sklearn, with and without precomputed norms."""
    X = da.random.uniform(size=(100, 4), chunks=50)
    Y = da.random.uniform(size=(100, 4), chunks=50)
    a = dask_ml.metrics.euclidean_distances(X, Y)
    b = sklearn.metrics.euclidean_distances(X, Y)
    assert_eq(a, b)

    # Precomputed row norms: X_norm_squared is a column vector (n, 1).
    x_norm_squared = (X ** 2).sum(axis=1).compute()[:, np.newaxis]
    a = dask_ml.metrics.euclidean_distances(X, Y, X_norm_squared=x_norm_squared)
    b = sklearn.metrics.euclidean_distances(X, Y, X_norm_squared=x_norm_squared)
    assert_eq(a, b)

    # Y_norm_squared is a row vector (1, n).
    y_norm_squared = (Y ** 2).sum(axis=1).compute()[np.newaxis, :]
    a = dask_ml.metrics.euclidean_distances(X, Y, Y_norm_squared=y_norm_squared)
    b = sklearn.metrics.euclidean_distances(X, Y, Y_norm_squared=y_norm_squared)
    assert_eq(a, b)
def test_euclidean_distances_same():
    """euclidean_distances of an array with itself matches sklearn.

    Covers the explicit two-argument form, the one-argument form, and the
    precomputed-norm path.
    """
    X = da.random.uniform(size=(100, 4), chunks=50)
    a = dask_ml.metrics.euclidean_distances(X, X)
    b = sklearn.metrics.euclidean_distances(X, X)
    assert_eq(a, b, atol=1e-4)

    a = dask_ml.metrics.euclidean_distances(X)
    b = sklearn.metrics.euclidean_distances(X)
    assert_eq(a, b, atol=1e-4)

    # Bug fix: the original line was `assert_eq(X, X, Y_norm_squared=..., atol=...)`,
    # which compared X with itself (vacuously true) and silently passed the
    # norm as an unused kwarg of assert_eq. Actually exercise the
    # precomputed-norm code path instead.
    x_norm_squared = (X ** 2).sum(axis=1).compute()[:, np.newaxis]
    a = dask_ml.metrics.euclidean_distances(X, X, X_norm_squared=x_norm_squared)
    b = sklearn.metrics.euclidean_distances(X, X, X_norm_squared=x_norm_squared)
    assert_eq(a, b, atol=1e-4)
@pytest.mark.parametrize("kernel", ["linear", "polynomial", "rbf", "sigmoid"])
def test_pairwise_kernels(kernel):
    """Each registered pairwise kernel matches its scikit-learn counterpart."""
    X = da.random.uniform(size=(100, 4), chunks=(50, 4))
    a = dask_ml.metrics.pairwise.PAIRWISE_KERNEL_FUNCTIONS[kernel]
    b = sklearn.metrics.pairwise.PAIRWISE_KERNEL_FUNCTIONS[kernel]

    r1 = a(X)
    r2 = b(X.compute())
    # NOTE(review): this asserts the *input* is a dask array, which is true by
    # construction; the intent was likely `isinstance(r1, da.Array)` — confirm.
    assert isinstance(X, da.Array)
    assert_eq(r1, r2)
@pytest.mark.parametrize("sample_weight", [True, False])
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("labels", [[0, 1], [0, 1, 3], [1, 0]])
@pytest.mark.parametrize("daskify", [True, False])
def test_log_loss(labels, normalize, sample_weight, daskify):
    """dask_ml.metrics.log_loss matches sklearn across label sets,
    normalization, optional sample weights, and dask vs. numpy inputs."""
    n = 100
    c = 25  # chunk size for the dask arrays
    y_true = np.random.choice(labels, size=n)
    # Random probabilities, normalized so each row sums to 1.
    y_pred = np.random.uniform(size=(n, len(labels)))
    y_pred /= y_pred.sum(1, keepdims=True)

    if sample_weight:
        sample_weight = np.random.uniform(size=n)
        sample_weight /= sample_weight.sum()
        dsample_weight = da.from_array(sample_weight, chunks=c)
    else:
        sample_weight = None
        dsample_weight = None

    if daskify:
        dy_true = da.from_array(y_true, chunks=c)
        dy_pred = da.from_array(y_pred, chunks=c)
    else:
        dy_true = y_true
        dy_pred = y_pred
        # Materialize the weight too, so all inputs are concrete.
        (dsample_weight,) = dask.compute(dsample_weight)

    a = sklearn.metrics.log_loss(
        y_true, y_pred, normalize=normalize, sample_weight=sample_weight
    )
    b = dask_ml.metrics.log_loss(
        dy_true,
        dy_pred,
        labels=labels,
        normalize=normalize,
        sample_weight=dsample_weight,
    )
    assert_eq(a, b)
@pytest.mark.parametrize(
    "yhat",
    [
        da.from_array(np.array([0.25, 0.25, 0.75, 0.75]), chunks=2),
        da.from_array(np.array([0, 0, 1, 1]), chunks=2),
        da.from_array(
            np.array([[0.75, 0.25], [0.75, 0.25], [0.25, 0.75], [0.25, 0.75]]), chunks=2
        ),
    ],
)
def test_log_loss_shape(yhat):
    """log_loss handles 1-D probability, 1-D hard-label, and 2-D inputs."""
    y = da.from_array(np.array([0, 0, 1, 1]), chunks=2)
    labels = [0, 1]
    a = sklearn.metrics.log_loss(y, yhat)
    b = dask_ml.metrics.log_loss(y, yhat, labels=labels)
    assert_eq(a, b)
@pytest.mark.parametrize("y", [[0, 1, 1, 0], [0, 1, 2, 0]])
def test_log_loss_scoring(y):
    """log_loss used as a scorer gives the same score for dask and sklearn."""
    # a_scorer = sklearn.metrics.get_scorer('neg_log_loss')
    # b_scorer = dask_ml.metrics.get_scorer('neg_log_loss')
    X = da.random.uniform(size=(4, 2), chunks=2)
    labels = np.unique(y)
    y = da.from_array(np.array(y), chunks=2)

    # Equivalent scorers built by hand so labels can be pinned explicitly.
    a_scorer = sklearn.metrics.make_scorer(
        sklearn.metrics.log_loss,
        greater_is_better=False,
        needs_proba=True,
        labels=labels,
    )
    b_scorer = sklearn.metrics.make_scorer(
        dask_ml.metrics.log_loss,
        greater_is_better=False,
        needs_proba=True,
        labels=labels,
    )

    clf = dask_ml.wrappers.ParallelPostFit(
        sklearn.linear_model.LogisticRegression(
            n_jobs=1, solver="lbfgs", multi_class="auto"
        )
    )
    # Fit on concrete arrays; score with dask (b) vs. concrete (a) inputs.
    clf.fit(*dask.compute(X, y))

    result = b_scorer(clf, X, y)
    expected = a_scorer(clf, *dask.compute(X, y))
    assert_eq(result, expected)
| tests/metrics/test_metrics.py | 5,512 | X_blobs has 500 rows per block. Ensure 500 rows in the scikit-learn version too. a_scorer = sklearn.metrics.get_scorer('neg_log_loss') b_scorer = dask_ml.metrics.get_scorer('neg_log_loss') | 188 | en | 0.610655 |
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import collections
import logging
import tensorflow as tf
from tensorflow.contrib.rnn import MultiRNNCell, LSTMStateTuple
from tensorflow.python.framework import dtypes, tensor_shape
from tensorflow.python.framework import ops
from tensorflow.python.util import nest
from ludwig.models.modules.fully_connected_modules import fc_layer
from ludwig.models.modules.initializer_modules import get_initializer
from ludwig.models.modules.reduction_modules import reduce_sequence
from ludwig.utils.tf_utils import sequence_length_3D, sequence_length_2D
def get_cell_fun(cell_type):
    """Map a cell-type name to the corresponding TF RNN cell class.

    Unrecognized names fall back to ``tf.nn.rnn_cell.BasicRNNCell``.
    """
    if cell_type == 'rnn':
        cell_fn = tf.nn.rnn_cell.BasicRNNCell
    elif cell_type == 'lstm':
        # allows for optional peephole connections and cell clipping
        cell_fn = tf.nn.rnn_cell.LSTMCell
    elif cell_type == 'lstm_block':
        # Faster version of basic LSTM
        cell_fn = tf.contrib.rnn.LSTMBlockCell
    elif cell_type == 'lstm_ln':
        # LSTM with layer normalization
        cell_fn = tf.contrib.rnn.LayerNormBasicLSTMCell
    elif cell_type == 'lstm_cudnn':
        cell_fn = tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell
    elif cell_type == 'gru':
        cell_fn = tf.nn.rnn_cell.GRUCell
    elif cell_type == 'gru_block':
        # Faster version of GRU (25% faster in my tests)
        cell_fn = tf.contrib.rnn.GRUBlockCell
    elif cell_type == 'gru_cudnn':
        # Faster version of GRU (25% faster in my tests)
        cell_fn = tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell
    else:
        cell_fn = tf.nn.rnn_cell.BasicRNNCell
    return cell_fn
class Projection(tf.layers.Layer):
    """Applies a fixed affine projection to the last dimension of its input.

    The weight/bias variables are created by the caller and passed in, which
    allows the projection to share (tie) weights with the target embeddings.
    """
    def __init__(self, projection_weights, projection_biases, name=None,
                 **kwargs):
        super(Projection, self).__init__(name=name, **kwargs)
        self.projection_weights = projection_weights
        self.projection_biases = projection_biases

    def call(self, inputs, **kwargs):
        # Flatten all leading dims, matmul against the weights, then restore
        # the original leading shape with the projected last dimension.
        inputs_shape = inputs.shape.as_list()
        weights_shape = self.projection_weights.shape.as_list()
        assert inputs_shape[-1] == weights_shape[0]

        inputs = tf.reshape(inputs, [-1, inputs_shape[-1]])

        outputs = tf.matmul(inputs, self.projection_weights)
        if self.projection_biases is not None:
            outputs = tf.nn.bias_add(outputs, self.projection_biases)

        outputs_shape = inputs_shape
        outputs_shape[0] = -1  # batch_size
        outputs_shape[-1] = weights_shape[1]
        outputs = tf.reshape(outputs, outputs_shape)
        return outputs

    def compute_output_shape(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape).as_list()
        output_shape = input_shape
        output_shape[-1] = self.projection_biases.shape.as_list()[0]
        # output_shape = [input_shape[0], self.projection_biases.shape.as_list()[0]]
        return tensor_shape.TensorShape(output_shape)
class BasicDecoderOutput(
        collections.namedtuple('BasicDecoderOutput',
                               ('rnn_output', 'sample_id', 'projection_input'))):
    # Extends the standard (rnn_output, sample_id) decoder output with the
    # pre-projection cell output, needed for sampled-softmax style losses.
    pass
class BasicDecoder(tf.contrib.seq2seq.BasicDecoder):
    """BasicDecoder that also exposes the pre-projection cell output
    (``projection_input``) in its step output."""

    def _projection_input_size(self):
        return self._cell.output_size

    @property
    def output_size(self):
        return BasicDecoderOutput(
            rnn_output=self._rnn_output_size(),
            sample_id=self._helper.sample_ids_shape,
            projection_input=self._projection_input_size())

    @property
    def output_dtype(self):
        # Same dtype as the initial state for both rnn_output and
        # projection_input; sample ids keep the helper's dtype.
        dtype = nest.flatten(self._initial_state)[0].dtype
        return BasicDecoderOutput(
            nest.map_structure(lambda _: dtype, self._rnn_output_size()),
            self._helper.sample_ids_dtype,
            nest.map_structure(lambda _: dtype, self._projection_input_size()))

    def step(self, time, inputs, state, name=None):
        with ops.name_scope(name, 'BasicDecoderStep', (time, inputs, state)):
            cell_outputs, cell_state = self._cell(inputs, state)
            projection_inputs = cell_outputs  # get projection_inputs to compute sampled_softmax_cross_entropy_loss
            if self._output_layer is not None:
                cell_outputs = self._output_layer(cell_outputs)
            sample_ids = self._helper.sample(
                time=time, outputs=cell_outputs, state=cell_state)
            (finished, next_inputs, next_state) = self._helper.next_inputs(
                time=time,
                outputs=cell_outputs,
                state=cell_state,
                sample_ids=sample_ids)
        outputs = BasicDecoderOutput(cell_outputs, sample_ids,
                                     projection_inputs)
        return (outputs, next_state, next_inputs, finished)
class TimeseriesTrainingHelper(tf.contrib.seq2seq.TrainingHelper):
    """TrainingHelper for real-valued (timeseries) decoding: sampling returns
    dummy int32 zeros, since there are no token ids to sample."""
    def sample(self, time, outputs, name=None, **unused_kwargs):
        with ops.name_scope(name, 'TrainingHelperSample', [time, outputs]):
            return tf.zeros(tf.shape(outputs)[:-1], dtype=dtypes.int32)
class RecurrentStack:
    """Stack of (optionally bidirectional) RNN layers followed by a sequence
    reduction and optional dropout."""

    def __init__(
            self,
            state_size=256,
            cell_type='rnn',
            num_layers=1,
            bidirectional=False,
            dropout=False,
            regularize=True,
            reduce_output='last',
            **kwargs
    ):
        self.state_size = state_size
        self.cell_type = cell_type
        self.num_layers = num_layers
        self.bidirectional = bidirectional
        self.dropout = dropout
        self.regularize = regularize
        self.reduce_output = reduce_output

    def __call__(
            self,
            input_sequence,
            regularizer,
            dropout_rate,
            is_training=True
    ):
        """Run the stack over a 3-D input sequence.

        Returns a tuple of (reduced output tensor, its last-dim size).
        """
        if not self.regularize:
            regularizer = None

        # Calculate the length of input_sequence and the batch size
        sequence_length = sequence_length_3D(input_sequence)

        # RNN cell
        cell_fn = get_cell_fun(self.cell_type)

        # initial state
        # init_state = tf.get_variable(
        #   'init_state',
        #   [1, state_size],
        #   initializer=tf.constant_initializer(0.0),
        # )
        # init_state = tf.tile(init_state, [batch_size, 1])

        # main RNN operation
        with tf.variable_scope('rnn_stack', reuse=tf.AUTO_REUSE,
                               regularizer=regularizer) as vs:
            if self.bidirectional:
                # forward direction cell
                fw_cell = lambda state_size: cell_fn(state_size)
                bw_cell = lambda state_size: cell_fn(state_size)

                fw_cells = [fw_cell(self.state_size) for _ in
                            range(self.num_layers)]
                bw_cells = [bw_cell(self.state_size) for _ in
                            range(self.num_layers)]
                rnn_outputs, final_state_fw, final_state_bw = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
                    cells_fw=fw_cells,
                    cells_bw=bw_cells,
                    dtype=tf.float32,
                    sequence_length=sequence_length,
                    inputs=input_sequence
                )

            else:
                cell = lambda state_size: cell_fn(state_size)
                cells = MultiRNNCell(
                    [cell(self.state_size) for _ in range(self.num_layers)],
                    state_is_tuple=True)
                rnn_outputs, final_state = tf.nn.dynamic_rnn(
                    cells,
                    input_sequence,
                    sequence_length=sequence_length,
                    dtype=tf.float32)
                # initial_state=init_state)

            for v in tf.global_variables():
                if v.name.startswith(vs.name):
                    logging.debug('  {}: {}'.format(v.name, v))

            logging.debug('  rnn_outputs: {0}'.format(rnn_outputs))
            rnn_output = reduce_sequence(rnn_outputs, self.reduce_output)
            logging.debug('  reduced_rnn_output: {0}'.format(rnn_output))

            # dropout
            if self.dropout and dropout_rate is not None:
                rnn_output = tf.layers.dropout(
                    rnn_output,
                    rate=dropout_rate,
                    training=is_training
                )
                logging.debug('  dropout_rnn: {0}'.format(rnn_output))

        return rnn_output, rnn_output.shape.as_list()[-1]
def recurrent_decoder(encoder_outputs, targets, max_sequence_length, vocab_size,
                      cell_type='rnn', state_size=256, embedding_size=50,
                      num_layers=1,
                      attention_mechanism=None, beam_width=1, projection=True,
                      tied_target_embeddings=True, embeddings=None,
                      initializer=None, regularizer=None,
                      is_timeseries=False):
    """Build an RNN sequence decoder graph with train and predict paths.

    The encoder outputs are (optionally) projected to ``state_size`` and used
    as the initial decoder state. For token targets the decoder embeds the
    target sequence (optionally tying target embeddings to the output
    projection); for timeseries targets raw values are fed directly. At
    prediction time, greedy decoding or beam search (``beam_width > 1``,
    tokens only) is used.

    Returns a tuple of:
    (predictions_sequence, predictions_sequence_scores,
     predictions_sequence_length_with_eos, targets_sequence_length_with_eos,
     eval_logits, train_logits, class_weights, class_biases).
    """
    with tf.variable_scope('rnn_decoder', reuse=tf.AUTO_REUSE,
                           regularizer=regularizer):

        # ================ Setup ================
        if beam_width > 1 and is_timeseries:
            raise ValueError('Invalid beam_width: {}'.format(beam_width))

        # GO symbol is one past the vocabulary; EOS is id 0.
        GO_SYMBOL = vocab_size
        END_SYMBOL = 0
        batch_size = tf.shape(encoder_outputs)[0]

        # ================ Projection ================
        # Project the encoder outputs to the size of the decoder state
        encoder_outputs_size = encoder_outputs.shape[-1]
        if projection and encoder_outputs_size != state_size:
            with tf.variable_scope('projection'):
                encoder_output_rank = len(encoder_outputs.shape)
                if encoder_output_rank > 2:
                    # 3-D input: flatten to 2-D, project, then restore.
                    sequence_length = tf.shape(encoder_outputs)[1]
                    encoder_outputs = tf.reshape(encoder_outputs,
                                                 [-1, encoder_outputs_size])
                    encoder_outputs = fc_layer(encoder_outputs,
                                               encoder_outputs.shape[-1],
                                               state_size,
                                               activation=None,
                                               initializer=initializer)
                    encoder_outputs = tf.reshape(encoder_outputs,
                                                 [-1, sequence_length,
                                                  state_size])
                else:
                    encoder_outputs = fc_layer(encoder_outputs,
                                               encoder_outputs.shape[-1],
                                               state_size,
                                               activation=None,
                                               initializer=initializer)

        # ================ Targets sequence ================
        # Calculate the length of inputs and the batch size
        with tf.variable_scope('sequence'):
            targets_sequence_length = sequence_length_2D(targets)
            start_tokens = tf.tile([GO_SYMBOL], [batch_size])
            end_tokens = tf.tile([END_SYMBOL], [batch_size])
            if is_timeseries:
                start_tokens = tf.cast(start_tokens, tf.float32)
                end_tokens = tf.cast(end_tokens, tf.float32)
            targets_with_go = tf.concat([
                tf.expand_dims(start_tokens, 1),
                targets,
                tf.expand_dims(end_tokens, 1)], 1)
            logging.debug('  targets_with_go: {0}'.format(targets_with_go))
            targets_sequence_length_with_eos = targets_sequence_length + 1  # the EOS symbol is 0 so it's not increasing the real length of the sequence

        # ================ Embeddings ================
        if is_timeseries:
            # Timeseries values are fed directly; no embedding table.
            targets_embedded = tf.expand_dims(targets_with_go, -1)
            targets_embeddings = None
        else:
            with tf.variable_scope('embedding'):
                if embeddings is not None:
                    embedding_size = embeddings.shape.as_list()[-1]
                    if tied_target_embeddings:
                        state_size = embedding_size
                elif tied_target_embeddings:
                    embedding_size = state_size

                if embeddings is not None:
                    embedding_go = tf.get_variable('embedding_GO',
                                                   initializer=tf.random_uniform(
                                                       [1, embedding_size],
                                                       -1.0, 1.0))
                    targets_embeddings = tf.concat([embeddings, embedding_go],
                                                   axis=0)
                else:
                    initializer_obj = get_initializer(initializer)
                    targets_embeddings = tf.get_variable(
                        'embeddings',
                        initializer=initializer_obj(
                            [vocab_size + 1, embedding_size]),
                        regularizer=regularizer
                    )
                logging.debug(
                    '  targets_embeddings: {0}'.format(targets_embeddings))

                targets_embedded = tf.nn.embedding_lookup(targets_embeddings,
                                                          targets_with_go,
                                                          name='decoder_input_embeddings')
        logging.debug('  targets_embedded: {0}'.format(targets_embedded))

        # ================ Class prediction ================
        if tied_target_embeddings:
            # Output projection shares weights with the target embeddings.
            class_weights = tf.transpose(targets_embeddings)
        else:
            initializer_obj = get_initializer(initializer)
            class_weights = tf.get_variable(
                'class_weights',
                initializer=initializer_obj([state_size, vocab_size + 1]),
                regularizer=regularizer
            )
        logging.debug('  class_weights: {0}'.format(class_weights))
        class_biases = tf.get_variable('class_biases', [vocab_size + 1])
        logging.debug('  class_biases: {0}'.format(class_biases))
        projection_layer = Projection(class_weights, class_biases)

        # ================ RNN ================
        initial_state = encoder_outputs
        with tf.variable_scope('rnn_cells') as vs:
            # Cell
            cell_fun = get_cell_fun(cell_type)

            if num_layers == 1:
                cell = cell_fun(state_size)
                if cell_type.startswith('lstm'):
                    initial_state = LSTMStateTuple(c=initial_state,
                                                   h=initial_state)
            elif num_layers > 1:
                cell = MultiRNNCell(
                    [cell_fun(state_size) for _ in range(num_layers)],
                    state_is_tuple=True)
                if cell_type.startswith('lstm'):
                    initial_state = LSTMStateTuple(c=initial_state,
                                                   h=initial_state)
                initial_state = tuple([initial_state] * num_layers)
            else:
                raise ValueError('num_layers in recurrent decoser: {}. '
                                 'Number of layers in a recurrenct decoder cannot be <= 0'.format(
                    num_layers))

            # Attention
            if attention_mechanism is not None:
                if attention_mechanism == 'bahdanau':
                    attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
                        num_units=state_size, memory=encoder_outputs,
                        memory_sequence_length=sequence_length_3D(
                            encoder_outputs))
                elif attention_mechanism == 'luong':
                    attention_mechanism = tf.contrib.seq2seq.LuongAttention(
                        num_units=state_size, memory=encoder_outputs,
                        memory_sequence_length=sequence_length_3D(
                            encoder_outputs))
                else:
                    raise ValueError(
                        'Attention mechanism {} not supported'.format(
                            attention_mechanism))
                cell = tf.contrib.seq2seq.AttentionWrapper(
                    cell, attention_mechanism, attention_layer_size=state_size)
                # Attention wrapper manages its own state; start from zeros.
                initial_state = cell.zero_state(dtype=tf.float32,
                                                batch_size=batch_size)

            for v in tf.global_variables():
                if v.name.startswith(vs.name):
                    logging.debug('  {}: {}'.format(v.name, v))

        # ================ Decoding ================
        def decode(initial_state, cell, helper, beam_width=1,
                   projection_layer=None):
            # The decoder itself
            if beam_width > 1:
                # Tile inputs for beam search decoder
                beam_initial_state = tf.contrib.seq2seq.tile_batch(
                    initial_state, beam_width)
                decoder = tf.contrib.seq2seq.BeamSearchDecoder(
                    cell=cell,
                    embedding=targets_embeddings,
                    start_tokens=start_tokens,
                    end_token=END_SYMBOL,
                    initial_state=beam_initial_state,
                    beam_width=beam_width,
                    output_layer=projection_layer)
            else:
                decoder = BasicDecoder(
                    cell=cell, helper=helper,
                    initial_state=initial_state,
                    output_layer=projection_layer)

            # The decoding operation
            outputs = tf.contrib.seq2seq.dynamic_decode(
                decoder=decoder,
                output_time_major=False,
                impute_finished=False if beam_width > 1 else True,
                maximum_iterations=max_sequence_length
            )

            return outputs

        # ================ Decoding helpers ================
        if is_timeseries:
            train_helper = TimeseriesTrainingHelper(
                inputs=targets_embedded,
                sequence_length=targets_sequence_length_with_eos)

            final_outputs_pred, final_state_pred, final_sequence_lengths_pred = decode(
                initial_state,
                cell,
                train_helper,
                projection_layer=projection_layer)

            eval_logits = final_outputs_pred.rnn_output
            train_logits = final_outputs_pred.projection_input
            predictions_sequence = tf.reshape(eval_logits, [batch_size, -1])
            predictions_sequence_length_with_eos = final_sequence_lengths_pred

        else:
            train_helper = tf.contrib.seq2seq.TrainingHelper(
                inputs=targets_embedded,
                sequence_length=targets_sequence_length_with_eos)

            final_outputs_train, final_state_train, final_sequence_lengths_train, = decode(
                initial_state,
                cell,
                train_helper,
                projection_layer=projection_layer)

            eval_logits = final_outputs_train.rnn_output
            train_logits = final_outputs_train.projection_input
            # train_predictions = final_outputs_train.sample_id

            pred_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
                embedding=targets_embeddings,
                start_tokens=start_tokens,
                end_token=END_SYMBOL)

            final_outputs_pred, final_state_pred, final_sequence_lengths_pred = decode(
                initial_state,
                cell,
                pred_helper,
                beam_width,
                projection_layer=projection_layer)

            if beam_width > 1:
                predictions_sequence = final_outputs_pred.beam_search_decoder_output.predicted_ids[
                                       :, :, 0]
                # final_outputs_pred..predicted_ids[:,:,0] would work too, but it contains -1s for padding
                predictions_sequence_scores = final_outputs_pred.beam_search_decoder_output.scores[
                                              :, :, 0]
                predictions_sequence_length_with_eos = final_sequence_lengths_pred[
                                                       :, 0]
            else:
                predictions_sequence = final_outputs_pred.sample_id
                predictions_sequence_scores = final_outputs_pred.rnn_output
                predictions_sequence_length_with_eos = final_sequence_lengths_pred

    logging.debug('  train_logits: {0}'.format(train_logits))
    logging.debug('  eval_logits: {0}'.format(eval_logits))
    logging.debug('  predictions_sequence: {0}'.format(predictions_sequence))
    logging.debug('  predictions_sequence_scores: {0}'.format(
        predictions_sequence_scores))

    return predictions_sequence, predictions_sequence_scores, predictions_sequence_length_with_eos, \
           targets_sequence_length_with_eos, eval_logits, train_logits, class_weights, class_biases
| ludwig/models/modules/recurrent_modules.py | 22,124 | coding=utf-8 Copyright (c) 2019 Uber Technologies, Inc. Licensed under the Apache License, Version 2.0 (the 'License'); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== allows for optional peephole connections and cell clipping Faster version of basic LSTM Faster version of GRU (25% faster in my tests) Faster version of GRU (25% faster in my tests) batch_size output_shape = [input_shape[0], self.projection_biases.shape.as_list()[0]] get projection_inputs to compute sampled_softmax_cross_entropy_loss Calculate the length of input_sequence and the batch size RNN cell initial state init_state = tf.get_variable( 'init_state', [1, state_size], initializer=tf.constant_initializer(0.0), ) init_state = tf.tile(init_state, [batch_size, 1]) main RNN operation forward direction cell initial_state=init_state) dropout ================ Setup ================ ================ Projection ================ Project the encoder outputs to the size of the decoder state ================ Targets sequence ================ Calculate the length of inputs and the batch size the EOS symbol is 0 so it's not increasing the real length of the sequence ================ Embeddings ================ ================ Class prediction ================ ================ RNN ================ Cell Attention ================ Decoding ================ The decoder itself Tile inputs for beam search decoder The decoding operation ================ Decoding helpers ================ 
train_predictions = final_outputs_train.sample_id final_outputs_pred..predicted_ids[:,:,0] would work too, but it contains -1s for padding | 2,092 | en | 0.696033 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RCheckmate(RPackage):
    """Tests and assertions to perform frequent argument checks.
    A substantial part of the package was written in C to
    minimize any worries about execution time overhead."""
    # CRAN package locations (release and archive tarballs).
    homepage = "https://cloud.r-project.org/package=checkmate"
    url      = "https://cloud.r-project.org/src/contrib/checkmate_1.8.4.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/checkmate"
    version('1.9.4', sha256='faa25754b757fe483b876f5d07b73f76f69a1baa971420892fadec4af4bbad21')
    version('1.8.4', sha256='6f948883e5a885a1c409d997f0c782e754a549227ec3c8eb18318deceb38f8f6')
    depends_on('r@3.0.0:', type=('build', 'run'))
    depends_on('r-backports@1.1.0:', type=('build', 'run'))
| var/spack/repos/builtin/packages/r-checkmate/package.py | 953 | Tests and assertions to perform frequent argument checks.
A substantial part of the package was written in C to
minimize any worries about execution time overhead.
Copyright 2013-2020 Lawrence Livermore National Security, LLC and other Spack Project Developers. See the top-level COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) | 354 | en | 0.871747 |
import os
import torch
import os
import random
from torch.nn import(
Module,Linear,LayerNorm
)
import math
from .AutoEncoder import Encoder
class DeltaT(Module):
    """Regresses a non-negative scalar from the difference of two encoded
    feature vectors (shaped like the Encoder's output)."""

    def __init__(self):
        super().__init__()
        self.reset_seed()

        feature_dim = math.prod(Encoder().output_size)
        self.elem = feature_dim
        self.input_size = (1, feature_dim)
        self.output_size = (1, 1)

        ## Model layers: two normalized hidden layers, then a scalar head.
        self.dense1 = Linear(feature_dim, 512)
        self.norm1 = LayerNorm(512)
        self.dense2 = Linear(512, 256)
        self.norm2 = LayerNorm(256)
        self.dense3 = Linear(256, 1)

    def forward(self, x1, x2):
        """Map a pair of feature batches to one non-negative value per sample."""
        h = x1 - x2
        h = torch.relu(self.norm1(self.dense1(h)))
        h = h.view(h.size(0), -1)
        h = torch.relu(self.norm2(self.dense2(h)))
        return torch.relu(self.dense3(h))

    def reset_seed(self, seed=0):
        """Seed Python and torch RNGs for reproducible initialization."""
        os.environ['PYTHONHASHSEED'] = '0'
        random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
if __name__ == '__main__':
    # Quick parameter/shape summary when run directly (needs torchsummaryX).
    from torchsummaryX import summary
    model = DeltaT()
    dummy = torch.randn(model.input_size)
    print(summary(model,dummy,dummy))
# Time: O(k * log(min(n, m, k))), where n is the size of num1, and m is the size of num2.
# Space: O(min(n, m, k))
# You are given two integer arrays nums1
# and nums2 sorted in ascending order and an integer k.
#
# Define a pair (u,v) which consists of one element
# from the first array and one element from the second array.
#
# Find the k pairs (u1,v1),(u2,v2) ...(uk,vk) with the smallest sums.
#
# Example 1:
# Given nums1 = [1,7,11], nums2 = [2,4,6], k = 3
#
# Return: [1,2],[1,4],[1,6]
#
# The first 3 pairs are returned from the sequence:
# [1,2],[1,4],[1,6],[7,2],[7,4],[11,2],[7,6],[11,4],[11,6]
# Example 2:
# Given nums1 = [1,1,2], nums2 = [1,2,3], k = 2
#
# Return: [1,1],[1,1]
#
# The first 2 pairs are returned from the sequence:
# [1,1],[1,1],[1,2],[2,1],[1,2],[2,2],[1,3],[1,3],[2,3]
# Example 3:
# Given nums1 = [1,2], nums2 = [3], k = 3
#
# Return: [1,3],[2,3]
#
# All possible pairs are returned from the sequence:
# [1,3],[2,3]
from heapq import heappush, heappop
class Solution(object):
    def kSmallestPairs(self, nums1, nums2, k):
        """
        Return the k pairs [u, v] (u from nums1, v from nums2) with the
        smallest sums, using a min-heap frontier of at most min(n, m) entries.

        :type nums1: List[int]
        :type nums2: List[int]
        :type k: int
        :rtype: List[List[int]]
        """
        # Keep the shorter array first so the heap stays small; swap each
        # emitted pair back afterwards.
        if len(nums1) > len(nums2):
            return [[second, first]
                    for first, second in self.kSmallestPairs(nums2, nums1, k)]

        result, min_heap = [], []

        def try_push(i, j):
            # Only enqueue in-range index pairs.
            if i < len(nums1) and j < len(nums2):
                heappush(min_heap, [nums1[i] + nums2[j], i, j])

        try_push(0, 0)
        while min_heap and len(result) < k:
            _, i, j = heappop(min_heap)
            result.append([nums1[i], nums2[j]])
            try_push(i, j + 1)
            if j == 0:
                try_push(i + 1, 0)  # at most queue min(n, m) space
        return result
# time: O(mn * log k)
# space: O(k)
from heapq import nsmallest
from itertools import product
class Solution2(object):
    def kSmallestPairs(self, nums1, nums2, k):
        """
        Brute-force variant: enumerate every cross pair and keep the k with
        the smallest sums (heapq.nsmallest is documented as equivalent to
        sorted(iterable, key=key)[:n], spelled out here explicitly).

        :type nums1: List[int]
        :type nums2: List[int]
        :type k: int
        :rtype: List[List[int]]
        """
        all_pairs = product(nums1, nums2)
        return sorted(all_pairs, key=sum)[:k]
| Python/find-k-pairs-with-smallest-sums.py | 2,231 | :type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[List[int]]
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[List[int]]
Time: O(k * log(min(n, m, k))), where n is the size of num1, and m is the size of num2. Space: O(min(n, m, k)) You are given two integer arrays nums1 and nums2 sorted in ascending order and an integer k. Define a pair (u,v) which consists of one element from the first array and one element from the second array. Find the k pairs (u1,v1),(u2,v2) ...(uk,vk) with the smallest sums. Example 1: Given nums1 = [1,7,11], nums2 = [2,4,6], k = 3 Return: [1,2],[1,4],[1,6] The first 3 pairs are returned from the sequence: [1,2],[1,4],[1,6],[7,2],[7,4],[11,2],[7,6],[11,4],[11,6] Example 2: Given nums1 = [1,1,2], nums2 = [1,2,3], k = 2 Return: [1,1],[1,1] The first 2 pairs are returned from the sequence: [1,1],[1,1],[1,2],[2,1],[1,2],[2,2],[1,3],[1,3],[2,3] Example 3: Given nums1 = [1,2], nums2 = [3], k = 3 Return: [1,3],[2,3] All possible pairs are returned from the sequence: [1,3],[2,3] at most queue min(n, m) space time: O(mn * log k) space: O(k) | 1,121 | en | 0.579595 |
"""
Gets concordance for keywords and groups by word.
"""
from defoe import query_utils
from defoe.alto.query_utils import get_page_matches
def do_query(archives, config_file=None, logger=None, context=None):
    """
    Gets concordance for keywords and groups by word.

    config_file must be the path to a configuration file with a list
    of the keywords to search for, one per line.

    Both keywords and words in documents are normalized, by removing
    all non-'a-z|A-Z' characters.

    Returns result of form:

        {
            <WORD>:
            [
                {
                    "title": <TITLE>,
                    "place": <PLACE>,
                    "publisher": <PUBLISHER>,
                    "page_number": <PAGE_NUMBER>,
                    "content": <PAGE_CONTENT>,
                    "year": <YEAR>,
                    "document_id": <DOCUMENT_ID>,
                    "filename": <FILENAME>
                },
                ...
            ],
            <WORD>:
            ...
        }

    :param archives: RDD of defoe.alto.archive.Archive
    :type archives: pyspark.rdd.PipelinedRDD
    :param config_file: query configuration file
    :type config_file: str or unicode
    :param logger: logger (unused)
    :type logger: py4j.java_gateway.JavaObject
    :return: information on documents in which keywords occur grouped
    by word
    :rtype: dict
    """
    keywords = query_utils.get_normalized_keywords(config_file)

    def expand_archive(archive):
        # An archive iterates over its documents.
        return [document for document in list(archive)]

    def match_pages(document):
        # Yields (year, document, page, word) for every keyword hit.
        return get_page_matches(document, keywords)

    def to_concordance(match):
        # Reshape a match tuple into (word, metadata-dict).
        year, document, page, word = match
        return (
            word,
            {
                "title": document.title,
                "place": document.place,
                "publisher": document.publisher,
                "page_number": page.code,
                "content": page.content,
                "year": year,
                "document_id": document.code,
                "filename": document.archive.filename,
            },
        )

    matches = archives.flatMap(expand_archive).flatMap(match_pages)
    grouped = matches.map(to_concordance).groupByKey()
    # Materialise each grouped iterable so the driver receives plain lists.
    return grouped.map(lambda entry: (entry[0], list(entry[1]))).collect()
| defoe/alto/queries/keyword_concordance_by_word.py | 2,852 | Gets concordance for keywords and groups by word.
config_file must be the path to a configuration file with a list
of the keywords to search for, one per line.
Both keywords and words in documents are normalized, by removing
all non-'a-z|A-Z' characters.
Returns result of form:
{
<WORD>:
[
{
"title": <TITLE>,
"place": <PLACE>,
"publisher": <PUBLISHER>,
"page_number": <PAGE_NUMBER>,
"content": <PAGE_CONTENT>,
"year": <YEAR>,
"document_id": <DOCUMENT_ID>,
"filename": <FILENAME>
},
...
],
<WORD>:
...
}
:param archives: RDD of defoe.alto.archive.Archive
:type archives: pyspark.rdd.PipelinedRDD
:param config_file: query configuration file
:type config_file: str or unicode
:param logger: logger (unused)
:type logger: py4j.java_gateway.JavaObject
:return: information on documents in which keywords occur grouped
by word
:rtype: dict
Gets concordance for keywords and groups by word.
[document, ...] [(year, document, page, word), ...] [(year, document, page, word), ...] => [(word, {"title": title, ...}), ...] [(word, {"title": title, ...}), ...] => [(word, [{"title": title, ...], {...}), ...)] | 1,375 | en | 0.727846 |
"""
Demonstrate differences between __str__() and __reper__().
"""
class neither:
    # Defines neither __str__ nor __repr__: both str() and repr() fall back
    # to the default object representation.
    pass
class stronly:
    """Defines only __str__; repr() still uses the default representation."""
    def __str__(self):
        return "STR"
class repronly:
    """Defines only __repr__; str() falls back to __repr__ per the data model."""
    def __repr__(self):
        return "REPR"
class both(stronly, repronly):
    """Inherits __str__ from stronly and __repr__ from repronly."""
    pass
class Person:
    """A person with a name and an age.

    str() gives the informal form (just the name); repr() gives a
    constructor-style form such as Person('Alice', 30).
    """

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def __str__(self):
        # Informal, user-facing representation.
        return self.name

    def __repr__(self):
        # Formal, developer-facing representation mirroring the constructor.
        return f"Person({self.name!r}, {self.age!r})"
#!/usr/bin/env python3
"""
Caproto threading client for this host's "dio_controller" Channel Access
server.

Builds the CA prefix "<hostname>_dio_controller:" from the local hostname
and resolves one PV per name in ``pv_names`` (the DIO word plus, for bits
0-3, an indicator, a set channel, and an enable channel), caching the PV
handles in the module-level ``pvs`` dict keyed by suffix.
"""
import socket
# CA server prefix is derived from this machine's hostname.
device_ca_server_prefix = f'{socket.gethostname()}_dio_controller:'
from caproto.threading.client import Context
# One shared threading-client context performs all PV searches.
ctx = Context()
ca_name = device_ca_server_prefix
# PV name suffixes exposed by the controller server.
pv_names = ['dio',
            'bit0_indicator',
            'bit0',
            'bit0_enable',
            'bit1_indicator',
            'bit1',
            'bit1_enable',
            'bit2_indicator',
            'bit2',
            'bit2_enable',
            'bit3_indicator',
            'bit3',
            'bit3_enable']
pvs = {}
for item in pv_names:
    # get_pvs returns a sequence; the trailing comma unpacks the single PV.
    pvs[item], = ctx.get_pvs(f'{ca_name}{item}',)
if __name__ == '__main__':
    # Module is import-driven; nothing extra happens when run as a script.
    pass
| icarus_nmr/scripts/digital_controller_terminal_client.py | 734 | !/usr/bin/env python3 | 21 | fr | 0.448822 |
from flask import Flask, render_template, request, redirect
from flask import render_template
app = Flask(__name__)
@app.route('/hello/')
@app.route('/hello/<name>')
def hello(name=None):
    """Render hello.html, passing along *name* when one is given in the URL."""
    return render_template('hello.html', name=name)
from flask import Flask,request,render_template,redirect
# Bound to URL 127.0.0.1:5000/user
@app.route("/user", methods=['GET', 'POST'])
def login():
    """Show the login form; on POST, validate the demo credentials.

    Success redirects away; failure re-renders the form with a message.
    """
    if request.method != 'POST':
        # Plain GET: just show the empty login form.
        return render_template('login.html')
    submitted_user = request.form['username']
    submitted_pass = request.form['password']
    # NOTE(review): hard-coded plaintext credentials -- demo code only.
    if submitted_user == "user" and submitted_pass == "password":
        return redirect("http://www.baidu.com")
    return render_template('login.html', message="Failed Login")
if __name__ == '__main__':
    # Start Flask's built-in development server when run as a script.
    app.run()
| demo_flask.py | 836 | 绑定访问地址127.0.0.1:5000/user | 25 | en | 0.165828 |
#
# io_fits.py -- Module wrapper for loading FITS files.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
There are two possible choices for a python FITS file reading package
compatible with Ginga: astropy/pyfits and fitsio. Both are based on
the CFITSIO library, although it seems that astropy's version has
changed quite a bit from the original, while fitsio is still tracking
the current version.
To force the use of one, do:
from ginga.util import io_fits
io_fits.use('package')
(replace 'package' with one of {'astropy', 'fitsio'}) before you load
any images. Otherwise Ginga will try to pick one for you.
"""
import numpy

# Module state describing which FITS backend has been selected by use().
fits_configured = False   # declared global in use() but never assigned there
fitsLoaderClass = None    # handler class returned by get_fitsloader()
have_pyfits = False       # True once astropy.io.fits (or pyfits) imported OK
have_fitsio = False       # True once fitsio imported OK

class FITSError(Exception):
    """Raised for FITS loading/saving problems in this module."""
    pass
def use(fitspkg, raise_err=True):
    """Select the FITS backend ('astropy' or 'fitsio').

    Returns True when the requested backend imported successfully and was
    installed as the module loader, False otherwise.  When *raise_err* is
    true, a failed import propagates as ImportError instead of returning
    False.  An unrecognized *fitspkg* always returns False.
    """
    global fits_configured, fitsLoaderClass, \
           have_pyfits, pyfits, \
           have_fitsio, fitsio

    if fitspkg == 'astropy':
        try:
            from astropy.io import fits as pyfits
        except ImportError:
            # maybe they have a standalone version of pyfits?
            try:
                import pyfits
            except ImportError:
                if raise_err:
                    raise
                return False
        have_pyfits = True
        fitsLoaderClass = PyFitsFileHandler
        return True

    if fitspkg == 'fitsio':
        try:
            import fitsio
        except ImportError:
            if raise_err:
                raise
            return False
        have_fitsio = True
        fitsLoaderClass = FitsioFileHandler
        return True

    return False
class BaseFitsFileHandler(object):
pass
class PyFitsFileHandler(BaseFitsFileHandler):
    """FITS reading/writing backed by astropy.io.fits (or standalone pyfits)."""

    def __init__(self, logger):
        super(PyFitsFileHandler, self).__init__()
        if not have_pyfits:
            raise FITSError("Need astropy or pyfits module installed to use this file handler")
        self.logger = logger
        self.kind = 'pyfits'

    def fromHDU(self, hdu, ahdr):
        """Copy every header card of *hdu* (keyword, value, comment) into *ahdr*."""
        header = hdu.header
        if hasattr(header, 'cards'):
            # newer astropy.io.fits don't have ascardlist
            for card in header.cards:
                bnch = ahdr.__setitem__(card.key, card.value)
                bnch.comment = card.comment
        else:
            for card in header.ascardlist():
                bnch = ahdr.__setitem__(card.key, card.value)
                bnch.comment = card.comment

    def load_hdu(self, hdu, ahdr, fobj=None, naxispath=None):
        """Return (data, naxispath) for *hdu*, reducing >2D data to a 2D slice.

        1D data is expanded to a 1xN array; for >2D data, *naxispath* (or a
        default path of zeros) selects the leading indices down to 2D.
        """
        data = hdu.data
        if len(data.shape) < 2:
            # Expand 1D arrays into 1xN array
            data = data.reshape((1, data.shape[0]))
        else:
            # Drill down to 2D data slice
            if not naxispath:
                naxispath = ([0] * (len(data.shape) - 2))
            for idx in naxispath:
                data = data[idx]
        self.fromHDU(hdu, ahdr)
        return (data, naxispath)

    def load_file(self, filespec, ahdr, numhdu=None, naxispath=None):
        """Open *filespec* and load one HDU into (data, naxispath).

        When *numhdu* is None, the first HDU with ndarray pixel data is
        used; otherwise the requested HDU index.  Raises FITSError on
        verification failure or when no usable HDU exists.
        """
        filepath = get_path(filespec)
        self.logger.info("Loading file '%s' ..." % (filepath))
        fits_f = pyfits.open(filepath, 'readonly')

        # this seems to be necessary now for some fits files...
        try:
            fits_f.verify('fix')
        # BUG FIX: Python-3-compatible 'as' syntax; the message previously
        # referenced the undefined name 'fitspath' (NameError on this path).
        except Exception as e:
            raise FITSError("Error loading fits file '%s': %s" % (
                filepath, str(e)))

        if numhdu is None:
            found_valid_hdu = False
            for i in range(len(fits_f)):
                hdu = fits_f[i]
                # BUG FIX: was 'hdu.data == None'; '==' on an ndarray is
                # elementwise and ambiguous in boolean context.
                if hdu.data is None:
                    # compressed FITS file or non-pixel data hdu?
                    continue
                if not isinstance(hdu.data, numpy.ndarray):
                    # We need to open a numpy array
                    continue
                # Looks good, let's try it
                found_valid_hdu = True
                break
            if not found_valid_hdu:
                raise FITSError("No data HDU found that Ginga can open in '%s'" % (
                    filepath))
        else:
            hdu = fits_f[numhdu]

        data, naxispath = self.load_hdu(hdu, ahdr, fobj=fits_f,
                                        naxispath=naxispath)
        fits_f.close()
        return (data, naxispath)

    def create_fits(self, data, header):
        """Build a new single-HDU HDUList from *data* and a Ginga *header*."""
        fits_f = pyfits.HDUList()
        hdu = pyfits.PrimaryHDU()
        hdu.data = data

        for kwd in header.keys():
            card = header.get_card(kwd)
            hdu.header.update(card.key, card.value, comment=card.comment)

        fits_f.append(hdu)
        return fits_f

    def write_fits(self, path, data, header, **kwdargs):
        """Write *data*/*header* to *path*; kwdargs are passed to writeto()."""
        fits_f = self.create_fits(data, header)
        fits_f.writeto(path, **kwdargs)
        fits_f.close()

    def save_as_file(self, path, data, header, **kwdargs):
        """Save *data*/*header* to *path*.

        BUG FIX: previously forwarded the undefined name 'filepath'
        (NameError) instead of the 'path' parameter.
        """
        self.write_fits(path, data, header, **kwdargs)
class FitsioFileHandler(BaseFitsFileHandler):
    """FITS reading backed by the fitsio (CFITSIO) package."""

    def __init__(self, logger):
        super(FitsioFileHandler, self).__init__()
        if not have_fitsio:
            raise FITSError("Need fitsio module installed to use this file handler")
        self.logger = logger
        self.kind = 'fitsio'

    def fromHDU(self, hdu, ahdr):
        """Copy every header record of *hdu* (name, value, comment) into *ahdr*."""
        header = hdu.read_header()
        for d in header.records():
            bnch = ahdr.__setitem__(d['name'], d['value'])
            bnch.comment = d['comment']

    def load_hdu(self, hdu, ahdr, fobj=None, naxispath=None):
        """Return (data, naxispath) for *hdu*, reducing >2D data to a 2D slice."""
        data = hdu.read()
        if len(data.shape) < 2:
            # Expand 1D arrays into 1xN array
            data = data.reshape((1, data.shape[0]))
        else:
            # Drill down to 2D data slice
            if not naxispath:
                naxispath = ([0] * (len(data.shape) - 2))
            for idx in naxispath:
                data = data[idx]
        self.fromHDU(hdu, ahdr)
        return (data, naxispath)

    def load_file(self, filespec, ahdr, numhdu=None, naxispath=None):
        """Open *filespec* and load one HDU into (data, naxispath).

        When *numhdu* is None, the first HDU whose info reports a nonzero
        'ndims' is used; otherwise the requested HDU index.
        """
        filepath = get_path(filespec)
        self.logger.info("Loading file '%s' ..." % (filepath))
        fits_f = fitsio.FITS(filepath)

        # BUG FIX: was 'numhdu == None'; identity comparison is the correct
        # test for None.
        if numhdu is None:
            found_valid_hdu = False
            for i in range(len(fits_f)):
                hdu = fits_f[i]
                info = hdu.get_info()
                if not ('ndims' in info) or (info['ndims'] == 0):
                    # compressed FITS file or non-pixel data hdu?
                    continue
                # Looks good, let's try it
                found_valid_hdu = True
                break
            if not found_valid_hdu:
                raise FITSError("No data HDU found that Ginga can open in '%s'" % (
                    filepath))
        else:
            hdu = fits_f[numhdu]

        data, naxispath = self.load_hdu(hdu, ahdr, fobj=fits_f,
                                        naxispath=naxispath)
        fits_f.close()
        return (data, naxispath)

    def create_fits(self, data, header):
        """Build a single-HDU FITS object from *data* and a Ginga *header*.

        NOTE(review): this uses the pyfits/astropy API even though this is
        the fitsio backend (apparently copy-pasted from PyFitsFileHandler);
        it only works when astropy/pyfits is also importable.
        """
        fits_f = pyfits.HDUList()
        hdu = pyfits.PrimaryHDU()
        hdu.data = data

        for kwd in header.keys():
            card = header.get_card(kwd)
            hdu.header.update(card.key, card.value, comment=card.comment)

        fits_f.append(hdu)
        return fits_f

    def write_fits(self, path, data, header):
        # NOTE(review): the fitsio.FITS handle opened here is immediately
        # rebound (and leaked) by the next assignment, and the actual write
        # goes through the pyfits-style HDUList from create_fits().
        # Preserved as-is pending a real fitsio write implementation.
        fits_f = fitsio.FITS(path, 'rw')
        fits_f = self.create_fits(data, header)
        fits_f.writeto(path, output_verify='fix')
        fits_f.close()

    def save_as_file(self, path, data, header, **kwdargs):
        """Save *data*/*header* to *path*.

        BUG FIX: previously forwarded the undefined name 'filepath'
        (NameError) and passed **kwdargs to write_fits(), which accepts
        no keyword options.
        """
        self.write_fits(path, data, header)
def get_path(fileSpec):
    """Strip a leading 'file://' scheme from *fileSpec*, if present.

    Anything else (plain paths, other URI schemes) passes through unchanged.
    """
    # TODO: handle web references by fetching the file
    prefix = 'file://'
    if fileSpec.startswith(prefix):
        return fileSpec[len(prefix):]
    return fileSpec
# default
fitsLoaderClass = PyFitsFileHandler

# Try the backends in order of preference (astropy is faster).
# BUG FIX: raise_err=True meant a missing astropy raised ImportError at
# module import time instead of falling back to fitsio, defeating the loop.
# Probe each backend quietly and only fail when none is available.
for name in ('astropy', 'fitsio'):
    if use(name, raise_err=False):
        break
else:
    raise ImportError("No FITS backend available: install astropy or fitsio")
def get_fitsloader(kind=None, logger=None):
    """Return a new FITS loader instance using the selected backend class.

    :param kind: unused here; the backend is chosen at import time
        (NOTE(review): confirm callers' expectation for this parameter)
    :param logger: logger handed to the loader's constructor
    """
    return fitsLoaderClass(logger)

#END
| ginga/util/io_fits.py | 8,569 | io_fits.py -- Module wrapper for loading FITS files. Eric Jeschke (eric@naoj.org) Copyright (c) Eric R. Jeschke. All rights reserved. This is open-source software licensed under a BSD license. Please see the file LICENSE.txt for details. maybe they have a standalone version of pyfits?newer astropy.io.fits don't have ascardlist Expand 1D arrays into 1xN array Drill down to 2D data slice this seems to be necessary now for some fits files... compressed FITS file or non-pixel data hdu? We need to open a numpy arrayprint "data type is %s" % hdu.data.dtype.kind Looks good, let's try it Expand 1D arrays into 1xN array Drill down to 2D data slice compressed FITS file or non-pixel data hdu?print "data type is %s" % hdu.data.dtype.kind Looks good, let's try it TODO: handle web references by fetching the file default try to use them in this order astropy is fasterEND | 869 | en | 0.82313 |
#!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: onyx_qos
author: "Anas Badaha (@anasb)"
short_description: Configures QoS
description:
- This module provides declarative management of Onyx QoS configuration
on Mellanox ONYX network devices.
notes:
- Tested on ONYX 3.6.8130
options:
interfaces:
description:
- list of interfaces name.
required: true
trust:
description:
- trust type.
choices: ['L2', 'L3', 'both']
default: L2
rewrite_pcp:
description:
- rewrite with type pcp.
choices: ['enabled', 'disabled']
default: disabled
rewrite_dscp:
description:
- rewrite with type dscp.
choices: ['enabled', 'disabled']
default: disabled
'''
EXAMPLES = """
- name: Configure QoS
onyx_QoS:
interfaces:
- Mpo7
- Mpo7
trust: L3
rewrite_pcp: disabled
rewrite_dscp: enabled
- name: Configure QoS
onyx_QoS:
interfaces:
- Eth1/1
- Eth1/2
trust: both
rewrite_pcp: disabled
rewrite_dscp: enabled
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always
type: list
sample:
- interface ethernet 1/16 qos trust L3
- interface mlag-port-channel 7 qos trust L3
- interface port-channel 1 qos trust L3
- interface mlag-port-channel 7 qos trust L2
- interface mlag-port-channel 7 qos rewrite dscp
- interface ethernet 1/16 qos rewrite pcp
- interface ethernet 1/1 no qos rewrite pcp
"""
import re
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.mellanox.onyx.plugins.module_utils.network.onyx.onyx import show_cmd
from ansible_collections.mellanox.onyx.plugins.module_utils.network.onyx.onyx import BaseOnyxModule
class OnyxQosModule(BaseOnyxModule):
    """Declarative QoS (trust mode and pcp/dscp rewrite) for ONYX interfaces."""
    # CLI command templates; filled with (interface type, interface id[, trust]).
    TRUST_CMD = "interface {0} {1} qos trust {2}"
    NO_REWRITE_PCP_CMD = "interface {0} {1} no qos rewrite pcp"
    NO_REWRITE_DSCP_CMD = "interface {0} {1} no qos rewrite dscp"
    REWRITE_PCP_CMD = "interface {0} {1} qos rewrite pcp"
    REWRITE_DSCP_CMD = "interface {0} {1} qos rewrite dscp"
    REWRITE_PCP = "pcp"
    REWRITE_DSCP = "dscp"
    # Interface-name patterns: Eth<slot>/<port>, Po<n>, Mpo<n>.
    # NOTE(review): the second alternative 'Eth\d+\/\d+\d+' (with an embedded
    # literal 'Eth') looks suspect -- confirm the intended pattern.
    IF_ETH_REGEX = re.compile(r"^Eth(\d+\/\d+|Eth\d+\/\d+\d+)$")
    IF_PO_REGEX = re.compile(r"^Po(\d+)$")
    MLAG_NAME_REGEX = re.compile(r"^Mpo(\d+)$")
    IF_TYPE_ETH = "ethernet"
    PORT_CHANNEL = "port-channel"
    MLAG_PORT_CHANNEL = "mlag-port-channel"
    # Maps the CLI interface-type keyword to its name-matching regex.
    IF_TYPE_MAP = {
        IF_TYPE_ETH: IF_ETH_REGEX,
        PORT_CHANNEL: IF_PO_REGEX,
        MLAG_PORT_CHANNEL: MLAG_NAME_REGEX
    }
    def init_module(self):
        """ initialize module
        """
        element_spec = dict(
            interfaces=dict(type='list', required=True),
            trust=dict(choices=['L2', 'L3', 'both'], default='L2'),
            rewrite_pcp=dict(choices=['enabled', 'disabled'], default='disabled'),
            rewrite_dscp=dict(choices=['enabled', 'disabled'], default='disabled')
        )
        argument_spec = dict()
        argument_spec.update(element_spec)
        self._module = AnsibleModule(
            argument_spec=argument_spec,
            supports_check_mode=True)
    def get_required_config(self):
        """Copy the module parameters into _required_config and validate them."""
        module_params = self._module.params
        self._required_config = dict(module_params)
        self.validate_param_values(self._required_config)
    def _get_interface_type(self, if_name):
        """Return (cli type keyword, interface id) parsed from *if_name*.

        Returns (None, None) when no pattern in IF_TYPE_MAP matches.
        """
        if_type = None
        if_id = None
        for interface_type, interface_regex in iteritems(self.IF_TYPE_MAP):
            match = interface_regex.match(if_name)
            if match:
                if_type = interface_type
                if_id = match.group(1)
                break
        return if_type, if_id
    def _set_interface_qos_config(self, interface_qos_config, interface, if_type, if_id):
        """Record the device-reported QoS state for *interface* in _current_config."""
        interface_qos_config = interface_qos_config[0].get(interface)
        trust = interface_qos_config[0].get("Trust mode")
        rewrite_dscp = interface_qos_config[0].get("DSCP rewrite")
        rewrite_pcp = interface_qos_config[0].get("PCP,DEI rewrite")
        self._current_config[interface] = dict(trust=trust, rewrite_dscp=rewrite_dscp,
                                               rewrite_pcp=rewrite_pcp, if_type=if_type, if_id=if_id)
    def _show_interface_qos(self, if_type, interface):
        """Run 'show qos interface ...' on the device; returns parsed JSON or None."""
        cmd = "show qos interface {0} {1}".format(if_type, interface)
        return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)
    def load_current_config(self):
        """Populate _current_config for every requested interface.

        Fails the module for unparseable interface names or interfaces the
        switch does not report.
        """
        self._current_config = dict()
        for interface in self._required_config.get("interfaces"):
            if_type, if_id = self._get_interface_type(interface)
            if not if_id:
                self._module.fail_json(
                    msg='unsupported interface: {0}'.format(interface))
            interface_qos_config = self._show_interface_qos(if_type, if_id)
            if interface_qos_config is not None:
                self._set_interface_qos_config(interface_qos_config, interface, if_type, if_id)
            else:
                self._module.fail_json(
                    msg='Interface {0} does not exist on switch'.format(interface))
    def generate_commands(self):
        """Emit the trust and rewrite commands needed to reach the desired state."""
        trust = self._required_config.get("trust")
        rewrite_pcp = self._required_config.get("rewrite_pcp")
        rewrite_dscp = self._required_config.get("rewrite_dscp")
        for interface in self._required_config.get("interfaces"):
            ignored1, ignored2, current_trust, if_type, if_id = self._get_current_rewrite_config(interface)
            # Trust commands may update _current_config's rewrite state, which
            # _add_interface_rewrite_cmds re-reads afterwards.
            self._add_interface_trust_cmds(if_type, if_id, interface, trust, current_trust)
            self._add_interface_rewrite_cmds(if_type, if_id, interface,
                                             rewrite_pcp, rewrite_dscp)
    def _get_current_rewrite_config(self, interface):
        """Return (rewrite_pcp, rewrite_dscp, trust, if_type, if_id) for *interface*."""
        current_interface_qos_config = self._current_config.get(interface)
        current_rewrite_pcp = current_interface_qos_config.get('rewrite_pcp')
        current_rewrite_dscp = current_interface_qos_config.get('rewrite_dscp')
        if_type = current_interface_qos_config.get("if_type")
        if_id = current_interface_qos_config.get("if_id")
        current_trust = current_interface_qos_config.get('trust')
        return current_rewrite_pcp, current_rewrite_dscp, current_trust, if_type, if_id
    def _add_interface_trust_cmds(self, if_type, if_id, interface, trust, current_trust):
        """Queue a trust-mode change, first disabling rewrites that conflict with it."""
        current_rewrite_pcp, current_rewrite_dscp, ignored1, ignored2, ignored3 = self._get_current_rewrite_config(
            interface)
        if trust == "L3" and trust != current_trust:
            self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_DSCP, current_rewrite_dscp)
            self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust))
        elif trust == "L2" and trust != current_trust:
            self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_PCP, current_rewrite_pcp)
            self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust))
        elif trust == "both" and trust != current_trust:
            self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_DSCP, current_rewrite_dscp)
            self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_PCP, current_rewrite_pcp)
            self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust))
    def _add_interface_rewrite_cmds(self, if_type, if_id, interface, rewrite_pcp, rewrite_dscp):
        """Queue (no) rewrite commands where the desired state differs from current."""
        current_rewrite_pcp, current_rewrite_dscp, ignored1, ignored2, ignored3 = self._get_current_rewrite_config(
            interface)
        if rewrite_pcp == "enabled" and rewrite_pcp != current_rewrite_pcp:
            self._commands.append(self.REWRITE_PCP_CMD.format(if_type, if_id))
        elif rewrite_pcp == "disabled" and rewrite_pcp != current_rewrite_pcp:
            self._commands.append(self.NO_REWRITE_PCP_CMD.format(if_type, if_id))
        if rewrite_dscp == "enabled" and rewrite_dscp != current_rewrite_dscp:
            self._commands.append(self.REWRITE_DSCP_CMD.format(if_type, if_id))
        elif rewrite_dscp == "disabled" and rewrite_dscp != current_rewrite_dscp:
            self._commands.append(self.NO_REWRITE_DSCP_CMD.format(if_type, if_id))
    def _add_no_rewrite_cmd(self, if_type, if_id, interface, rewrite_type, current_rewrite):
        """Disable an enabled rewrite and mirror the change in _current_config."""
        if rewrite_type == self.REWRITE_PCP and current_rewrite == "enabled":
            self._commands.append(self.NO_REWRITE_PCP_CMD.format(if_type, if_id))
            self._current_config[interface]["rewrite_pcp"] = "disabled"
        elif rewrite_type == self.REWRITE_DSCP and current_rewrite == "enabled":
            self._commands.append(self.NO_REWRITE_DSCP_CMD.format(if_type, if_id))
            self._current_config[interface]["rewrite_dscp"] = "disabled"
def main():
    """Entry point when Ansible executes this module."""
    OnyxQosModule.main()
if __name__ == '__main__':
    main()
| venv/lib/python3.7/site-packages/ansible_collections/mellanox/onyx/plugins/modules/onyx_qos.py | 9,244 | initialize module
main entry point for module execution
!/usr/bin/python Copyright: Ansible Project GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) | 205 | en | 0.469944 |
from abstractclasses import solver, solver_model
"""
The Nash equilibrium solver takes a payoff matrix from game theory,
then it solves for a nash equilibrium, if one exists.
"""
# ————————————————————————————————————————————————
# NASH EQUILIBRIUM SOLVER CLASS
# ————————————————————————————————————————————————
class nash_equilibrium_solver(solver):
    """Interactive front end: collects a payoff matrix and both players'
    strategy sets, then hands them to the model via self.inputs."""
    def format_payoff_matrix(
        self,
        payoff_matrix: list,
        player_1_strategies: list,
        player_2_strategies: list,
    ) -> str:
        """
        This is a helper function that turns a payoff matrix and available
        strategies into ASCII art of a payoff matrix
        """
        # Header: Player 1's strategy labels across the top.
        ret = "\t Player 1\n"
        ret += "\t " + player_1_strategies[0] + " "
        for j in range(1, len(payoff_matrix[0])):
            ret += player_1_strategies[j] + " "
        ret += "\n"
        # Top border; one 12-character-wide cell per Player 1 strategy.
        ret += "\t +------------+"
        for j in range(1, len(payoff_matrix[0])):
            ret += "------------+"
        ret += "\n"
        # First data row carries the "Player 2" side label.  Each cell is
        # "(player 2 payoff, player 1 payoff)" -- cells are stored as
        # (p2, p1) tuples (see prompt_inputs).
        ret += "Player 2 " + str(player_2_strategies[0]) + " |"
        for j in range(len(payoff_matrix[0])):
            ret += (
                "{:>5g}, {:<5g}".format(
                    payoff_matrix[0][j][0], payoff_matrix[0][j][1]
                )
                + "|"
            )
        ret += "\n"
        # Remaining rows: separator line, then Player 2's label and payoffs.
        for i in range(1, len(payoff_matrix)):
            ret += "\t +------------+"
            for j in range(1, len(payoff_matrix[0])):
                ret += "------------+"
            ret += "\n"
            ret += (
                "\t "
                + player_2_strategies[i]
                + " |"
                + "{:>5g}, {:<5g}".format(
                    payoff_matrix[i][0][0], payoff_matrix[i][0][1]
                )
                + "|"
            )
            for j in range(1, len(payoff_matrix[i])):
                ret += (
                    "{:>5g}, {:<5g}".format(
                        payoff_matrix[i][j][0], payoff_matrix[i][j][1]
                    )
                    + "|"
                )
            ret += "\n"
        # Bottom border.
        ret += "\t +------------+"
        for j in range(1, len(payoff_matrix[0])):
            ret += "------------+"
        ret += "\n"
        return ret
    def prompt_inputs(self) -> None:
        """Prompt for matrix dimensions and every cell's payoffs, then store
        the matrix, strategy labels and the formatter in self.inputs."""
        # Candidate strategy labels; truncated below to the requested counts.
        player_1_strategies = [
            "A",
            "B",
            "C",
            "D",
            "E",
            "F",
            "G",
            "H",
            "I",
            "J",
            "K",
            "L",
            "M",
        ]
        player_2_strategies = [
            "N",
            "O",
            "P",
            "Q",
            "R",
            "S",
            "T",
            "U",
            "V",
            "W",
            "X",
            "Y",
            "Z",
        ]
        num_strategies_1 = self.prompt_integer(
            "Please enter the number of strategies for player 1 (2-13) > ",
            2,
            13,
        )
        num_strategies_2 = self.prompt_integer(
            "Please enter the number of strategies for player 2 (2-13) > ",
            2,
            13,
        )
        player_1_strategies = player_1_strategies[:num_strategies_1]
        player_2_strategies = player_2_strategies[:num_strategies_2]
        # Rows are Player 2 strategies; each cell holds a (p2, p1) tuple.
        payoff_matrix = [
            [(0, 0) for i in range(num_strategies_1)]
            for j in range(num_strategies_2)
        ]
        print(
            self.format_payoff_matrix(
                payoff_matrix, player_1_strategies, player_2_strategies
            )
        )
        for i in range(num_strategies_2):
            for j in range(num_strategies_1):
                player_1_payoff = self.prompt_float(
                    "Please enter the payoff value for Player "
                    + str(1)
                    + " in cell "
                    + str(player_1_strategies[j])
                    + ", "
                    + str(player_2_strategies[i])
                    + " of the payoff matrix > "
                )
                player_2_payoff = self.prompt_float(
                    "Please enter the payoff value for Player "
                    + str(2)
                    + " in cell "
                    + str(player_1_strategies[j])
                    + ", "
                    + str(player_2_strategies[i])
                    + " of the payoff matrix > "
                )
                payoff_matrix[i][j] = (player_2_payoff, player_1_payoff)
                # Re-display the matrix after each cell is filled in.
                print(
                    self.format_payoff_matrix(
                        payoff_matrix, player_1_strategies, player_2_strategies
                    )
                )
        # Set inputs
        self.inputs["payoff_matrix"] = payoff_matrix
        self.inputs["player_1_strategies"] = player_1_strategies
        self.inputs["player_2_strategies"] = player_2_strategies
        self.inputs["format_payoff_matrix"] = self.format_payoff_matrix
# ————————————————————————————————————————————————
# NASH EQUILIBRIUM MODEL CLASS
# ————————————————————————————————————————————————
class nash_equilibrium_model(solver_model):
    """Solves for a Nash equilibrium by iterated elimination of dominated
    strategies, recording each elimination step in a work log."""
    def __init__(self, **inputs) -> None:
        super().__init__(**inputs)
        # The solver passes its matrix formatter through inputs.
        self.format_payoff_matrix = self.inputs["format_payoff_matrix"]
    def solve(self) -> None:
        """Run nash() on the stored inputs; store (answer, work log) on self."""
        payoff_matrix = self.inputs["payoff_matrix"]
        player_1_strategies = self.inputs["player_1_strategies"]
        player_2_strategies = self.inputs["player_2_strategies"]
        self.ans, self.work = self.nash(
            payoff_matrix, player_1_strategies, player_2_strategies
        )
    def nash(
        self,
        payoff_matrix: list,
        player_1_strategies: list,
        player_2_strategies: list,
    ) -> tuple:
        """
        Takes a payoff matrix from game theory and the available strategies for
        both players. Solves for the Nash equilibrium

        Returns (ans, work): the final verdict text and the step-by-step log.
        NOTE: mutates the argument lists in place (rows/columns and strategy
        labels are pop()ed as strategies are eliminated).
        NOTE(review): the answer string contains the typo "dominanted" --
        preserved here since it is runtime output.
        """
        work = ""
        no_dominant_exists = False
        # Keep eliminating until neither player has a dominated strategy or
        # only one strategy each remains.
        while not no_dominant_exists and not (
            len(player_1_strategies) == 1 and len(player_2_strategies) == 1
        ):
            is_break = False
            # Pass 1: look for a Player 2 row i strictly dominated by row j
            # (compare the first tuple element, Player 2's payoff).
            for i in range(len(payoff_matrix)):
                for j in range(len(payoff_matrix)):
                    if (
                        i != j
                        and i < len(payoff_matrix)
                        and j < len(payoff_matrix)
                    ):
                        is_greater = False
                        for k in range(len(payoff_matrix[0])):
                            if float(payoff_matrix[i][k][0]) >= float(
                                payoff_matrix[j][k][0]
                            ):
                                is_greater = True
                            if is_greater:
                                break
                        if not is_greater:
                            # Row i never does at least as well as row j:
                            # eliminate it and log the step.
                            work += (
                                "Player 2's Strategy "
                                + str(player_2_strategies[j])
                                + " dominates strategy "
                                + str(player_2_strategies[i])
                                + "\n"
                            )
                            payoff_matrix.pop(i)
                            player_2_strategies.pop(i)
                            is_break = True
                            work += self.format_payoff_matrix(
                                payoff_matrix,
                                player_1_strategies,
                                player_2_strategies,
                            )
                            work += "\n"
                            break
                if is_break:
                    break
            if not is_break:
                no_dominant_exists = True
            else:
                no_dominant_exists = False
            is_break = False
            # Pass 2: look for a Player 1 column i strictly dominated by
            # column j (compare the second tuple element, Player 1's payoff).
            for i in range(len(payoff_matrix[0])):
                for j in range(len(payoff_matrix[0])):
                    if (
                        i != j
                        and i < len(payoff_matrix[0])
                        and j < len(payoff_matrix[0])
                    ):
                        is_greater = False
                        for k in range(len(payoff_matrix)):
                            if float(payoff_matrix[k][i][1]) >= float(
                                payoff_matrix[k][j][1]
                            ):
                                is_greater = True
                            if is_greater:
                                break
                        if not is_greater:
                            work += (
                                "Player 1's Strategy "
                                + str(player_1_strategies[j])
                                + " dominates strategy "
                                + str(player_1_strategies[i])
                                + "\n"
                            )
                            # Remove column i from every row.
                            for index in range(len(payoff_matrix)):
                                payoff_matrix[index].pop(i)
                            player_1_strategies.pop(i)
                            work += self.format_payoff_matrix(
                                payoff_matrix,
                                player_1_strategies,
                                player_2_strategies,
                            )
                            work += "\n"
                            is_break = True
                            break
                # NOTE(review): unlike pass 1, this bookkeeping sits inside
                # the outer for-i loop rather than after it -- confirm that
                # this asymmetry is intentional.
                if not is_break:
                    no_dominant_exists = True
                else:
                    no_dominant_exists = False
                if is_break:
                    no_dominant_exists = False
        # Verdict: a unique surviving strategy pair is the equilibrium.
        if not (
            len(player_1_strategies) == 1 and len(player_2_strategies) == 1
        ):
            ans = (
                "There is no Nash equilibrium, since at least one player has"
                + " multiple viable strategies.\n"
            )
            work += ans
            work += self.format_payoff_matrix(
                payoff_matrix, player_1_strategies, player_2_strategies
            )
        else:
            ans = (
                "This is the Nash equilibrium of the entered payoff matrix,"
                + " calculated by eliminating dominanted strategies.\n"
            )
            ans += self.format_payoff_matrix(
                payoff_matrix, player_1_strategies, player_2_strategies
            )
            work += ans
        return ans, work
| modules/nashequilibrium.py | 10,956 | This is a helper function that turns a payoff matrix and available
strategies into ASCII art of a payoff matrix
Takes a payoff matrix from game theory and the available strategies for
both players. Solves for the Nash equilibrium
———————————————————————————————————————————————— NASH EQUILIBRIUM SOLVER CLASS ———————————————————————————————————————————————— Set inputs ———————————————————————————————————————————————— NASH EQUILIBRIUM MODEL CLASS ———————————————————————————————————————————————— | 497 | en | 0.376387 |
"""Describe overall framework configuration."""
import os
import pytest
from kubernetes.config.kube_config import KUBE_CONFIG_DEFAULT_LOCATION
from settings import (
DEFAULT_IMAGE,
DEFAULT_PULL_POLICY,
DEFAULT_IC_TYPE,
DEFAULT_SERVICE,
DEFAULT_DEPLOYMENT_TYPE,
NUM_REPLICAS,
BATCH_START,
BATCH_RESOURCES,
)
from suite.resources_utils import get_first_pod_name
def pytest_addoption(parser) -> None:
    """Get cli-arguments.

    Registers the custom command-line options this test framework consumes.

    :param parser: pytest parser
    :return:
    """
    # Cluster access options.
    parser.addoption(
        "--context",
        action="store",
        default="",
        help="The context to use in the kubeconfig file.",
    )
    # Ingress Controller image and deployment options.
    parser.addoption(
        "--image",
        action="store",
        default=DEFAULT_IMAGE,
        help="The Ingress Controller image.",
    )
    parser.addoption(
        "--image-pull-policy",
        action="store",
        default=DEFAULT_PULL_POLICY,
        help="The pull policy of the Ingress Controller image.",
    )
    parser.addoption(
        "--deployment-type",
        action="store",
        default=DEFAULT_DEPLOYMENT_TYPE,
        help="The type of the IC deployment: deployment or daemon-set.",
    )
    parser.addoption(
        "--ic-type",
        action="store",
        default=DEFAULT_IC_TYPE,
        help="The type of the Ingress Controller: nginx-ingress or nginx-ingress-plus.",
    )
    parser.addoption(
        "--service",
        action="store",
        default=DEFAULT_SERVICE,
        help="The type of the Ingress Controller service: nodeport or loadbalancer.",
    )
    parser.addoption(
        "--replicas",
        action="store",
        default=NUM_REPLICAS,
        help="Number of replica pods for type deployment",
    )
    parser.addoption(
        "--node-ip",
        action="store",
        help="The public IP of a cluster node. Not required if you use the loadbalancer service (see --service argument).",
    )
    parser.addoption(
        "--kubeconfig",
        action="store",
        default=os.path.expanduser(KUBE_CONFIG_DEFAULT_LOCATION),
        help="An absolute path to a kubeconfig file.",
    )
    # Diagnostics and batch-testing options.
    parser.addoption(
        "--show-ic-logs",
        action="store",
        default="no",
        help="Show IC logs in stdout on test failure",
    )
    parser.addoption(
        "--batch-start",
        action="store",
        default=BATCH_START,
        help="Run tests for pods restarts with multiple resources deployed (Ingress/VS): True/False",
    )
    parser.addoption(
        "--batch-resources",
        action="store",
        default=BATCH_RESOURCES,
        help="Number of VS/Ingress resources to deploy",
    )
# import fixtures into pytest global namespace
pytest_plugins = ["suite.fixtures"]
def pytest_collection_modifyitems(config, items) -> None:
    """
    Skip tests marked with '@pytest.mark.skip_for_nginx_oss' for Nginx OSS runs.
    Skip tests marked with '@pytest.mark.appprotect' for non AP images.

    :param config: pytest config
    :param items: pytest collected test-items
    :return:
    """
    # NOTE(review): pytest_addoption's --ic-type help text says
    # "nginx-ingress or nginx-ingress-plus", but this hook compares against
    # "nginx-plus-ingress" -- confirm which spelling the CLI actually uses.
    if config.getoption("--ic-type") == "nginx-ingress":
        skip_for_nginx_oss = pytest.mark.skip(reason="Skip a test for Nginx OSS")
        for item in items:
            if "skip_for_nginx_oss" in item.keywords:
                item.add_marker(skip_for_nginx_oss)
    if config.getoption("--ic-type") == "nginx-plus-ingress":
        skip_for_nginx_plus = pytest.mark.skip(reason="Skip a test for Nginx Plus")
        for item in items:
            if "skip_for_nginx_plus" in item.keywords:
                item.add_marker(skip_for_nginx_plus)
    # AppProtect tests only run against "-ap" images.
    if "-ap" not in config.getoption("--image"):
        appprotect = pytest.mark.skip(reason="Skip AppProtect test in non-AP image")
        for item in items:
            if "appprotect" in item.keywords:
                item.add_marker(appprotect)
    # Batch-restart tests are opt-in via --batch-start=True.
    if str(config.getoption("--batch-start")) != "True":
        batch_start = pytest.mark.skip(reason="Skipping pod restart test with multiple resources")
        for item in items:
            if "batch_start" in item.keywords:
                item.add_marker(batch_start)
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item) -> None:
    """
    Print out IC Pod logs on test failure.
    Only look at actual failing test calls, not setup/teardown.
    Only show the logs if commandline argument `--show-ic-logs` is set to 'yes'

    :param item: the pytest test item being reported on
    :return:
    """
    # execute all other hooks to obtain the report object
    outcome = yield
    rep = outcome.get_result()

    # we only look at actual failing test calls, not setup/teardown
    if (
        rep.when == "call"
        and rep.failed
        and item.config.getoption("--show-ic-logs") == "yes"
    ):
        # Fetch the first IC pod's logs via fixtures attached to the item.
        pod_namespace = item.funcargs["ingress_controller_prerequisites"].namespace
        pod_name = get_first_pod_name(item.funcargs["kube_apis"].v1, pod_namespace)
        print("\n===================== IC Logs Start =====================")
        print(
            item.funcargs["kube_apis"].v1.read_namespaced_pod_log(
                pod_name, pod_namespace
            )
        )
        print("\n===================== IC Logs End =====================")
| tests/conftest.py | 5,263 | Get cli-arguments.
:param parser: pytest parser
:return:
Skip tests marked with '@pytest.mark.skip_for_nginx_oss' for Nginx OSS runs.
Skip tests marked with '@pytest.mark.appprotect' for non AP images.
:param config: pytest config
:param items: pytest collected test-items
:return:
Print out IC Pod logs on test failure.
Only look at actual failing test calls, not setup/teardown.
Only show the logs if commandline argument `--show-ic-logs` is set to 'yes'
:param item:
:return:
Describe overall framework configuration.
import fixtures into pytest global namespace execute all other hooks to obtain the report object we only look at actual failing test calls, not setup/teardown | 685 | en | 0.649976 |
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from dailymed.models import Set, Spl, InactiveIngredient
from dailymed.serializers import SplSerializer
import json
from pathlib import Path
# Resolved router URLs for the public API endpoints exercised by the tests.
SPL_URL = reverse('spl-list')
PRODUCT_URL = reverse('product-list')
PACKAGE_URL = reverse('package-list')
class PublicApiTest(TestCase):
    """Test public daily med API"""

    def setUp(self):
        """Create an API client and load sample data from test.json."""
        self.client = APIClient()

        cwd = Path(__file__).parent.absolute()
        with open(f'{cwd}/test.json', 'r') as f:
            default = json.load(f)

        for data in default['results']:
            set_id = data.pop('set_id')
            products_data = data.pop('products')
            set_obj = Set.objects.create(id=set_id)
            spl_obj = set_obj.spls.create(**data)

            for product_data in products_data:
                # NOTE(review): 'name' is discarded before product creation —
                # presumably derived by the model/serializer; confirm.
                product_data.pop('name')
                packages_data = product_data.pop('packages')

                # FIX: initialise per product. Previously the list was only
                # created inside the 'inactive_ingredients' branch, so a
                # product without that key either crashed (first iteration)
                # or silently reused the previous product's ingredients.
                inactive_ingredients_list = []
                if 'inactive_ingredients' in product_data:
                    for ingredient_data in product_data.pop('inactive_ingredients'):
                        # get_or_create replaces the original broad
                        # try/except Exception around get()/create(),
                        # which could mask unrelated database errors.
                        ingredient, _ = InactiveIngredient.objects.get_or_create(
                            **ingredient_data
                        )
                        inactive_ingredients_list.append(ingredient)

                product_obj = spl_obj.products.create(**product_data)
                product_obj.inactive_ingredients.add(*inactive_ingredients_list)

                for package_data in packages_data:
                    product_obj.packages.create(**package_data)

    def test_retrieve_spls(self):
        """Test retrieving spls"""
        res = self.client.get(
            SPL_URL,
            format='json'
        )

        serializer = SplSerializer(Spl.objects.filter(), many=True)

        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(serializer.data, res.data['results'])

    def test_retrieve_spls_filter_by_set(self):
        """Test retrieving a spl by set filter"""
        set_id = Set.objects.first()

        res = self.client.get(
            SPL_URL,
            {'set_id': set_id.id},
            format='json')

        serializer = SplSerializer(
            Spl.objects.filter(set__id=set_id.id), many=True
        )

        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(serializer.data, res.data['results'])

    def test_retrieve_spls_filter_by_inactive_ing(self):
        """Test retrieving a spl by inactive ingredient filter"""
        inactive_ing = 'alcohol'

        res = self.client.get(
            SPL_URL,
            {'inactive_ingredient_name': inactive_ing},
            format='json')

        serializer = SplSerializer(
            Spl.objects.filter(
                products__inactive_ingredients__name__icontains=inactive_ing)
            .distinct(),
            many=True
        )

        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(serializer.data, res.data['results'])

    def test_retrieve_spls_filter_by_schedule(self):
        """Test retrieving spls by schedule filter"""
        schedule = 'CIV'

        res = self.client.get(
            SPL_URL,
            {'schedule': schedule},
            format='json')

        serializer = SplSerializer(Spl.objects.filter(
            products__schedule=schedule).distinct(),
            many=True
        )

        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(serializer.data, res.data['results'])

    def test_retrieve_spls_filter_by_drug_name(self):
        """Test retrieving spls by drug name filter"""
        name = 'Ciprofloxacin'

        res = self.client.get(
            SPL_URL,
            {'product_name': name},
            format='json')

        serializer = SplSerializer(Spl.objects.filter(
            products__name=name).distinct(),
            many=True
        )

        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(serializer.data, res.data['results'])

    def test_retrieve_spls_filter_by_complex(self):
        """Test retrieving spls filtered by set & inactive ingredient"""
        set_id = 'b88efb93-f1d1-4606-a669-6896f432a27f'
        inactive_ing = 'alcohol'

        res = self.client.get(
            SPL_URL,
            {'set_id': set_id,
             'inactive_ingredient_name': inactive_ing},
            format='json'
        )

        serializer = SplSerializer(
            Spl.objects.filter(
                products__inactive_ingredients__name__icontains=inactive_ing,
                set__id=set_id)
            .distinct(),
            many=True
        )

        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data['results']), 1)
        self.assertEqual(serializer.data, res.data['results'])
| api/dailymed/tests/test_api.py | 5,524 | Test public daily med API
Test retrieving spls
Test retrieving spls filtered by set & inactive ingredient
Test retrieving spls by drug name filter
Test retrieving a spl by inactive ingredient filter
Test retrieving spls by schedule filter
Test retrieving a spl by set filter | 274 | en | 0.761861 |
import os

# NOTE(review): changes the process-wide working directory at import time;
# every relative path resolved after this module is imported points into
# ./export — confirm this side effect is intentional.
os.chdir("./export")

from reader.csv_mod import CsvReader
from reader.sarif_mod import SarifReader
from reader.server_mod import RestfulReader
from export.export import Exporter
def generate(args):
    """Aggregate scan results and build a report for the project.

    :param args: parsed CLI namespace with ``name`` (project name) and
        optional ``sarif``/``json``/``csv`` lists of input sources.
    :return: the value returned by ``Exporter.build`` for the project.
    """
    project_name = args.name
    # Treat missing option values (None) as empty input lists instead of
    # comparing with ``== None``.
    sarif_list = args.sarif or []
    json_list = args.json or []
    csv_list = args.csv or []

    proj_data = []

    # Each reader accumulates entries across read() calls; collect the
    # combined 'data' payload from each source type.
    sarif_reader = SarifReader()
    for path in sarif_list:
        sarif_reader.read(path)
    proj_data.extend(sarif_reader.get_data()['data'])

    csv_reader = CsvReader()
    for path in csv_list:
        csv_reader.read(path)
    proj_data.extend(csv_reader.get_data()['data'])

    restful_reader = RestfulReader()
    for rid in json_list:
        restful_reader.read(rid)
    proj_data.extend(restful_reader.get_data()['data'])

    reporter = Exporter()
    reporter.setData(proj_data)
    return reporter.build(project_name)
#r = SarifReader()
#r.read('/home/heersin/blackhole/codeql/result.sarif')
#print(os.getcwd())
#project_name = "socat"
#pdf_factory = Exporter()
#pdf_factory.setData(r.get_data())
#pdf_factory.build(project_name) | codql-report/generator.py | 1,320 | r = SarifReader()r.read('/home/heersin/blackhole/codeql/result.sarif')print(os.getcwd())project_name = "socat"pdf_factory = Exporter()pdf_factory.setData(r.get_data())pdf_factory.build(project_name) | 198 | en | 0.4546 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define the extension functions
import numpy as np
from ...fluid.data_feeder import check_dtype
from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import Variable, in_dygraph_mode
from ...fluid.layers.tensor import assign
from ...fluid import core, dygraph_utils
from ...fluid.layers.layer_function_generator import templatedoc
from ...fluid.layers.sequence_lod import sequence_mask
def diag_embed(input, offset=0, dim1=-2, dim2=-1):
    """
    This OP creates a tensor whose diagonals of certain 2D planes (specified by dim1 and dim2)
    are filled by ``input``. By default, a 2D plane formed by the last two dimensions
    of the returned tensor will be selected.

    The argument ``offset`` determines which diagonal is generated:

    - If offset = 0, it is the main diagonal.
    - If offset > 0, it is above the main diagonal.
    - If offset < 0, it is below the main diagonal.

    Args:
        input(Tensor|numpy.ndarray): The input tensor. Must be at least 1-dimensional. The input data type should be float32, float64, int32, int64.
        offset(int, optional): Which diagonal to consider. Default: 0 (main diagonal).
        dim1(int, optional): The first dimension with respect to which to take diagonal. Default: -2.
        dim2(int, optional): The second dimension with respect to which to take diagonal. Default: -1.

    Returns:
        Tensor, the output data type is the same as input data type.

    Examples:
        .. code-block:: python

            import paddle.nn.functional as F
            import numpy as np

            diag_embed = np.random.randn(2, 3).astype('float32')
            # [[ 0.7545889 , -0.25074545,  0.5929117 ],
            #  [-0.6097662 , -0.01753256,  0.619769  ]]

            data1 = F.diag_embed(diag_embed)
            data1.numpy()
            # [[[ 0.7545889 ,  0.        ,  0.        ],
            #  [ 0.        , -0.25074545,  0.        ],
            #   [ 0.        ,  0.        ,  0.5929117 ]],

            # [[-0.6097662 ,  0.        ,  0.        ],
            #  [ 0.        , -0.01753256,  0.        ],
            #  [ 0.        ,  0.        ,  0.619769  ]]]

            data2 = F.diag_embed(diag_embed, offset=-1, dim1=0, dim2=2)
            data2.numpy()
            # [[[ 0.        ,  0.        ,  0.        ,  0.        ],
            #   [ 0.7545889 ,  0.        ,  0.        ,  0.        ],
            #   [ 0.        , -0.25074545,  0.        ,  0.        ],
            #   [ 0.        ,  0.        ,  0.5929117 ,  0.        ]],
            #
            #  [[ 0.        ,  0.        ,  0.        ,  0.        ],
            #   [-0.6097662 ,  0.        ,  0.        ,  0.        ],
            #   [ 0.        , -0.01753256,  0.        ,  0.        ],
            #   [ 0.        ,  0.        ,  0.619769  ,  0.        ]]]

            data3 = F.diag_embed(diag_embed, offset=1, dim1=0, dim2=2)
            data3.numpy()
            # [[[ 0.        ,  0.7545889 ,  0.        ,  0.        ],
            #   [ 0.        , -0.6097662 ,  0.        ,  0.        ]],
            #
            #  [[ 0.        ,  0.        , -0.25074545,  0.        ],
            #   [ 0.        ,  0.        , -0.01753256,  0.        ]],
            #
            #  [[ 0.        ,  0.        ,  0.        ,  0.5929117 ],
            #   [ 0.        ,  0.        ,  0.        ,  0.619769  ]],
            #
            #  [[ 0.        ,  0.        ,  0.        ,  0.        ],
            #   [ 0.        ,  0.        ,  0.        ,  0.        ]]]
    """
    # FIX: the original built local ``inputs``/``attrs`` dicts here that were
    # never used (the op below re-creates them inline) — dead code removed.
    if not isinstance(input, Variable):
        input = assign(input)

    def __check_input(input, offset, dim1, dim2):
        # Validate dtype and that dim1/dim2 address distinct, in-range axes
        # of the (rank+1)-dimensional output.
        check_dtype(input.dtype, 'Input',
                    ['int32', 'int64', 'float16', 'float32', 'float64'],
                    'diag_embed')

        input_shape = list(input.shape)
        assert len(input_shape) >= 1,                     \
                "Input must be at least 1-dimensional, "   \
                "But received Input's dimensional: %s.\n" %  \
                len(input_shape)

        assert np.abs(dim1) <= len(input_shape),    \
            "Dim1 is out of range (expected to be in range of [%d, %d], but got %d).\n"  \
            % (-(len(input_shape) + 1), len(input_shape), dim1)

        assert np.abs(dim2) <= len(input_shape),    \
            "Dim2 is out of range (expected to be in range of [%d, %d], but got %d).\n"  \
            % (-(len(input_shape) + 1), len(input_shape), dim2)

        dim1_ = dim1 if dim1 >= 0 else len(input_shape) + dim1 + 1
        dim2_ = dim2 if dim2 >= 0 else len(input_shape) + dim2 + 1
        assert dim1_ != dim2_,       \
               "dim1 and dim2 cannot be the same dimension." \
                "But received dim1 = %d, dim2 = %d\n"%(dim1, dim2)

    if not in_dygraph_mode():
        __check_input(input, offset, dim1, dim2)
    helper = LayerHelper("diag_embed", **locals())

    out = helper.create_variable_for_type_inference(dtype=input.dtype)

    helper.append_op(
        type='diag_embed',
        inputs={'Input': [input]},
        attrs={'offset': offset,
               'dim1': dim1,
               'dim2': dim2},
        outputs={'Out': [out]})
    # The op's output is not differentiable w.r.t. graph construction here.
    out.stop_gradient = True
    return out
| python/paddle/nn/functional/extension.py | 5,990 | This OP creates a tensor whose diagonals of certain 2D planes (specified by dim1 and dim2)
are filled by ``input``. By default, a 2D plane formed by the last two dimensions
of the returned tensor will be selected.
The argument ``offset`` determines which diagonal is generated:
- If offset = 0, it is the main diagonal.
- If offset > 0, it is above the main diagonal.
- If offset < 0, it is below the main diagonal.
Args:
input(Tensor|numpy.ndarray): The input tensor. Must be at least 1-dimensional. The input data type should be float32, float64, int32, int64.
offset(int, optional): Which diagonal to consider. Default: 0 (main diagonal).
dim1(int, optional): The first dimension with respect to which to take diagonal. Default: -2.
dim2(int, optional): The second dimension with respect to which to take diagonal. Default: -1.
Returns:
Tensor, the output data type is the same as input data type.
Examples:
.. code-block:: python
import paddle.nn.functional as F
import numpy as np
diag_embed = np.random.randn(2, 3).astype('float32')
# [[ 0.7545889 , -0.25074545, 0.5929117 ],
# [-0.6097662 , -0.01753256, 0.619769 ]]
data1 = F.diag_embed(diag_embed)
data1.numpy()
# [[[ 0.7545889 , 0. , 0. ],
# [ 0. , -0.25074545, 0. ],
# [ 0. , 0. , 0.5929117 ]],
# [[-0.6097662 , 0. , 0. ],
# [ 0. , -0.01753256, 0. ],
# [ 0. , 0. , 0.619769 ]]]
data2 = F.diag_embed(diag_embed, offset=-1, dim1=0, dim2=2)
data2.numpy()
# [[[ 0. , 0. , 0. , 0. ],
# [ 0.7545889 , 0. , 0. , 0. ],
# [ 0. , -0.25074545, 0. , 0. ],
# [ 0. , 0. , 0.5929117 , 0. ]],
#
# [[ 0. , 0. , 0. , 0. ],
# [-0.6097662 , 0. , 0. , 0. ],
# [ 0. , -0.01753256, 0. , 0. ],
# [ 0. , 0. , 0.619769 , 0. ]]]
data3 = F.diag_embed(diag_embed, offset=1, dim1=0, dim2=2)
data3.numpy()
# [[[ 0. , 0.7545889 , 0. , 0. ],
# [ 0. , -0.6097662 , 0. , 0. ]],
#
# [[ 0. , 0. , -0.25074545, 0. ],
# [ 0. , 0. , -0.01753256, 0. ]],
#
# [[ 0. , 0. , 0. , 0.5929117 ],
# [ 0. , 0. , 0. , 0.619769 ]],
#
# [[ 0. , 0. , 0. , 0. ],
# [ 0. , 0. , 0. , 0. ]]]
Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. TODO: define the extention functions | 3,496 | en | 0.694547 |
# -*- coding: utf-8 -*-
# @Time : 2021/6/10
# @Author : kaka
import argparse
import logging
import os
from config import Params
from datasets import load_dataset
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
import numpy as np
from SimCSE import SimCSE
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def parse_args():
    """Collect command-line hyper-parameters for unsupervised SimCSE training."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flag, type, default, help) — one row per supported option.
    options = (
        ("--num_proc", int, 1, "dataset process thread num"),
        ("--max_length", int, 64, "sentence max length"),
        ("--batch_size", int, 32, "batch size"),
        ("--epochs", int, 101, "epochs"),
        ("--lr", float, 1e-5, "learning rate"),
        ("--tao", float, 0.05, "temperature"),
        ("--device", str, "cuda", "device"),
        ("--display_interval", int, 500, "display interval"),
        ("--save_interval", int, 10, "save interval"),
        ("--pool_type", str, "pooler", "pool_type"),
        ("--dropout_rate", float, 0.3, "dropout_rate"),
    )
    for flag, value_type, default, help_text in options:
        parser.add_argument(flag, type=value_type, default=default, help=help_text)
    return parser.parse_args()
def read_data(args):
    """Read raw dialogue sentences and wrap them in a batched DataLoader."""
    with open(Params.dialogues_file, 'r') as source:
        sentences = source.readlines()
    return DataLoader(sentences, batch_size=args.batch_size)
def duplicate_batch(batch, tokenzier, args):
    """Tokenize a batch in which every sentence appears twice in a row.

    Adjacent duplicate rows are later treated as positive pairs by
    ``compute_loss``.
    """
    doubled = []
    for sentence in batch:
        doubled.extend((sentence, sentence))
    return tokenzier(doubled,
                     padding=True,
                     truncation=True,
                     max_length=args.max_length,
                     return_tensors='pt')
def compute_loss(y_pred, tao=0.05, device="cuda"):
    """Unsupervised SimCSE contrastive loss over a duplicated batch.

    Row 2k and row 2k+1 are the two views of the same sentence; each row's
    target is its partner row. Self-similarity on the diagonal is masked out
    with a large negative constant before the temperature scaling.
    """
    batch = y_pred.shape[0]
    row_ids = torch.arange(0, batch, device=device)
    # Partner index: even rows point to the next row, odd rows to the previous.
    targets = row_ids + 1 - row_ids % 2 * 2
    sim = F.cosine_similarity(y_pred.unsqueeze(1), y_pred.unsqueeze(0), dim=2)
    sim = (sim - torch.eye(batch, device=device) * 1e12) / tao
    return torch.mean(F.cross_entropy(sim, targets))
def train(args):
    """Train the SimCSE model with the unsupervised contrastive objective.

    Saves a checkpoint (epoch index, model state dict, loss) to
    ``Params.simcse_model_path`` whenever the average epoch loss improves.

    :param args: parsed CLI namespace (device, lr, tao, intervals, ...).
    """
    tokenizer = AutoTokenizer.from_pretrained(Params.pretrained_model_path)
    dl = read_data(args)
    model = SimCSE(Params.pretrained_model_path, args.pool_type, args.dropout_rate).to(args.device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
    model.train()
    batch_idx = 0
    # Sentinel "infinity": any real epoch loss is expected to be smaller.
    min_loss = 10000000
    for epoch_idx in range(args.epochs):
        epoch_losses = []
        for data in tqdm(dl):
            batch_idx += 1
            # Each sentence is duplicated so adjacent rows form positive pairs.
            new_batch_data = duplicate_batch(data, tokenizer, args)
            pred = model(input_ids=new_batch_data["input_ids"].to(args.device),
                         attention_mask=new_batch_data["attention_mask"].to(args.device),
                         token_type_ids=new_batch_data["token_type_ids"].to(args.device))
            loss = compute_loss(pred, args.tao, args.device)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss = loss.item()
            epoch_losses.append(loss)
            if batch_idx % args.display_interval == 0:
                logging.info(f"epoch: {epoch_idx}, batch_idx: {batch_idx}, loss: {loss:>10f}")
        # Checkpoint only when the epoch-average loss sets a new minimum.
        avg_epoch_loss = np.mean(epoch_losses)
        if avg_epoch_loss < min_loss:
            min_loss = avg_epoch_loss
            torch.save({
                'epoch': epoch_idx,
                'model_state_dict': model.state_dict(),
                'loss': avg_epoch_loss
            }, Params.simcse_model_path)
def main():
    """Script entry point: parse CLI options, then run training."""
    train(parse_args())
if __name__ == "__main__":
log_fmt = "%(asctime)s|%(name)s|%(levelname)s|%(message)s"
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
| simcse/train_unsup.py | 4,614 | 句子进行重复
-*- coding: utf-8 -*- @Time : 2021/6/10 @Author : kaka parser.add_argument("train_file", type=str, help="train text file") parser.add_argument("--pretrained", type=str, default="hfl/chinese-bert-wwm-ext", help="huggingface pretrained model") parser.add_argument("--model_out", type=str, default="./finder_model", help="model output path") | 351 | en | 0.111723 |
#---------------------------------------------------------------
# ALGORITHM DEMO : TOPLOGICAL SORT
#---------------------------------------------------------------
# Topological Sort is an algorithm that can find an "ordering" on an "order dependency" graph
# Concept
# https://blog.techbridge.cc/2020/05/10/leetcode-topological-sort/
# https://alrightchiu.github.io/SecondRound/graph-li-yong-dfsxun-zhao-dagde-topological-sorttuo-pu-pai-xu.html
# V0
# IDEA : implement topologicalSortUtil, topologicalSort, and addEdge methods
# step 1) maintain a stack, save "ordering" nodes in it (and return in final step)
# step 2) init visited as [False]*self.V (all nodes are NOT visited yet)
# step 3) iterate over all vertices in graph, if not visited, then run topologicalSortUtil
# step 4) return result (stack)
from collections import defaultdict
class Graph:
    """Directed graph (adjacency list) with a DFS-based topological sort
    that returns the ordering as a list."""

    def __init__(self, vertices):
        self.graph = defaultdict(list)  # vertex -> list of successors
        self.V = vertices               # number of vertices

    def addEdge(self, u, v):
        """Register the directed edge u -> v (used to build test graphs)."""
        self.graph[u].append(v)

    def topologicalSortUtil(self, v, visited, stack):
        """DFS from *v*; append *v* to *stack* in DFS finish order.

        Appending in finish order is equivalent to the classic
        insert-at-front-then-reverse formulation.
        """
        visited[v] = True
        for successor in self.graph[v]:
            if not visited[successor]:
                self.topologicalSortUtil(successor, visited, stack)
        stack.append(v)

    def topologicalSort(self):
        """Run DFS from every unvisited vertex and return the ordering."""
        visited = [False] * self.V
        stack = []
        for vertex in range(self.V):
            if not visited[vertex]:
                self.topologicalSortUtil(vertex, visited, stack)
        return stack
### TEST
# Vertex labelling used by the edges below. NOTE: the dict literal on the
# next line is a no-op expression statement, kept only as documentation.
{"A": 0, "B":1, "C":2, "D": 3}
v = 4
g = Graph(v)
g.addEdge(0, 1)
g.addEdge(0, 2)
g.addEdge(2, 3)
g.addEdge(3, 1)
print (g.graph)
# ans should be TableB, TableD, TableC, TableA.
r = g.topologicalSort()
print (r)
# V0'
from collections import defaultdict
class Graph:
    """Practice variant of the DFS topological sort: the visited check is an
    entry guard inside the helper instead of a pre-recursion test. Prints the
    front-inserted stack, then returns it reversed."""

    def __init__(self, v):
        self.graph = defaultdict(list)  # adjacency list
        self.v = v                      # number of vertices

    def addEdge(self, a, b):
        """Add the directed edge a -> b."""
        self.graph[a].append(b)

    def topologicalSortUtil(self, x, visited, stack):
        """DFS from *x*, bailing out immediately when *x* is already done."""
        if visited[x]:
            return
        for child in self.graph[x]:
            self.topologicalSortUtil(child, visited, stack)
        visited[x] = True
        stack.insert(0, x)

    def topologicalSort(self):
        """Print the accumulated stack and return it reversed."""
        visited = [False] * self.v
        stack = []
        for node in range(self.v):
            if not visited[node]:
                self.topologicalSortUtil(node, visited, stack)
        print("stack = " + str(stack))
        return stack[::-1]
# V0''
# IDEA : implement topologicalSortUtil, topologicalSort, and addEdge methods
from collections import defaultdict
class Graph:
    """DAG represented as an adjacency list; DFS-based topological sort
    whose result is printed rather than returned."""

    def __init__(self, vertices):
        self.graph = defaultdict(list)  # vertex -> successors
        self.V = vertices               # vertex count

    # test helper: build the graph edge by edge
    def addEdge(self, u, v):
        """Add the directed edge u -> v."""
        self.graph[u].append(v)

    def topologicalSortUtil(self, v, visited, stack):
        """DFS helper: prepend *v* to *stack* once its subtree is finished."""
        visited[v] = True
        for neighbor in self.graph[v]:
            if not visited[neighbor]:
                self.topologicalSortUtil(neighbor, visited, stack)
        stack.insert(0, v)

    def topologicalSort(self):
        """DFS from every unvisited vertex, then print the ordering."""
        visited = [False] * self.V
        stack = []
        for vertex in range(self.V):
            if not visited[vertex]:
                self.topologicalSortUtil(vertex, visited, stack)
        print(stack)
# V1
# https://www.geeksforgeeks.org/topological-sorting/
# Python program to print topological sorting of a DAG
from collections import defaultdict
class Graph:
    """Adjacency-list digraph; DFS topological sort printing the order
    reversed relative to the internal front-insert stack."""

    def __init__(self, vertices):
        self.graph = defaultdict(list)  # dictionary containing adjacency List
        self.V = vertices               # No. of vertices

    def addEdge(self, u, v):
        """Add the directed edge u -> v to the adjacency list."""
        self.graph[u].append(v)

    def topologicalSortUtil(self, v, visited, stack):
        """Recursive DFS helper used by topologicalSort.

        Marks *v* visited, recurses into unvisited successors, then pushes
        *v* to the front of *stack*.
        """
        visited[v] = True
        for nxt in self.graph[v]:
            if not visited[nxt]:
                self.topologicalSortUtil(nxt, visited, stack)
        stack.insert(0, v)

    def topologicalSort(self):
        """Run DFS over all vertices and print the reversed stack."""
        visited = [False] * self.V
        stack = []
        for v in range(self.V):
            if not visited[v]:
                self.topologicalSortUtil(v, visited, stack)
        print(stack[::-1])
# TEST
# Driver Code
# g = Graph(6)
# g.addEdge(5, 2)
# g.addEdge(5, 0)
# g.addEdge(4, 0)
# g.addEdge(4, 1)
# g.addEdge(2, 3)
# g.addEdge(3, 1)
#
# print ("Following is a Topological Sort of the given graph")
#
# # Function Call
# g.topologicalSort()
# V1
# https://github.com/TheAlgorithms/Python/blob/master/sorts/topological_sort.py
"""Topological Sort."""
# a
# / \
# b c
# / \
# d e
# edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
# vertices = ["a", "b", "c", "d", "e"]
class Graph:
    """Recursive topological sort over an ``edges``/``vertices`` description.

    Fixes over the original:
    - the recursive calls previously referenced the bare name
      ``topological_sort`` (a NameError inside a method); they now go
      through ``self``;
    - ``edges`` and ``vertices`` may be passed explicitly; when omitted they
      fall back to module-level globals of the same names, preserving the
      original calling convention.
    """

    def topological_sort(self, start, visited, sort, edges=None, vertices=None):
        """Perform topological sort on a directed acyclic graph.

        :param start: vertex to begin the DFS from
        :param visited: vertices already visited (mutated in place)
        :param sort: accumulated ordering (mutated and returned)
        :param edges: mapping vertex -> list of neighbours; defaults to the
            module-level ``edges``
        :param vertices: list of all vertices; defaults to the module-level
            ``vertices``
        :return: the vertices in topological order
        """
        if edges is None:
            edges = globals()["edges"]
        if vertices is None:
            vertices = globals()["vertices"]
        current = start
        # add current to visited
        visited.append(current)
        for neighbor in edges[current]:
            # if neighbor not in visited, visit
            if neighbor not in visited:
                sort = self.topological_sort(neighbor, visited, sort, edges, vertices)
        # if all neighbors visited add current to sort
        sort.append(current)
        # if all vertices haven't been visited select a new one to visit
        if len(visited) != len(vertices):
            for vertice in vertices:
                if vertice not in visited:
                    sort = self.topological_sort(vertice, visited, sort, edges, vertices)
        # return sort
        return sort
# TEST
# edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
# vertices = ["a", "b", "c", "d", "e"]
# sort = topological_sort("a", [], [])
# print(sort)
# V1'
# http://www.runoob.com/python3/python-topological-sorting.html
class Graph:
    """Adjacency-list digraph with a DFS topological sort that prints the
    front-inserted stack.

    Fix: the original declared ``from collections import defaultdict`` inside
    the class body; names bound in a class namespace are not visible from
    method bodies, so ``__init__`` only worked because of the module-level
    import earlier in this file. The misleading class-level import is removed
    (``defaultdict`` is resolved from the module scope).
    """

    def __init__(self, vertices):
        self.graph = defaultdict(list)  # vertex -> list of successors
        self.V = vertices               # number of vertices

    def addEdge(self, u, v):
        """Add the directed edge u -> v."""
        self.graph[u].append(v)

    def topologicalSortUtil(self, v, visited, stack):
        """DFS helper: prepend *v* to *stack* once its subtree is finished."""
        visited[v] = True
        for i in self.graph[v]:
            if not visited[i]:
                self.topologicalSortUtil(i, visited, stack)
        stack.insert(0, v)

    def topologicalSort(self):
        """DFS from every unvisited vertex, then print the ordering."""
        visited = [False] * self.V
        stack = []
        for i in range(self.V):
            if not visited[i]:
                self.topologicalSortUtil(i, visited, stack)
        print(stack)
# TEST
# g= Graph(6)
# g.addEdge(5, 2);
# g.addEdge(5, 0);
# g.addEdge(4, 0);
# g.addEdge(4, 1);
# g.addEdge(2, 3);
# g.addEdge(3, 1);
# print ("output of Topological Sort ")
# g.topologicalSort()
# [5, 4, 2, 3, 1, 0]
# V2
# https://zhuanlan.zhihu.com/p/69858335
def topoSort(graph):
    """Kahn's algorithm: return a topological ordering of *graph*.

    *graph* maps each vertex to an iterable of its successors. Returns the
    ordering as a list, or None when the graph contains a cycle (i.e. not
    every vertex could be emitted).
    """
    in_degrees = {node: 0 for node in graph}
    total = len(in_degrees)
    for node in graph:
        for succ in graph[node]:
            in_degrees[succ] += 1
    # Start from every vertex with no incoming edges.
    ready = [node for node in in_degrees if in_degrees[node] == 0]
    order = []
    while ready:
        node = ready.pop()
        order.append(node)
        # Removing `node` may free up its successors.
        for succ in graph[node]:
            in_degrees[succ] -= 1
            if in_degrees[succ] == 0:
                ready.append(succ)
    return order if len(order) == total else None
# TEST
# G = {
# 'a':'bf',
# 'b':'cdf',
# 'c':'d',
# 'd':'ef',
# 'e':'f',
# 'f':''
# }
# print(topoSort(G))
# ['a', 'b', 'c', 'd', 'e', 'f']
# V3
# https://www.educative.io/courses/grokking-the-coding-interview/m25rBmwLV00
from collections import deque
def topological_sort(vertices, edges):
    """Kahn's algorithm over vertices numbered 0 .. vertices-1.

    :param vertices: number of vertices
    :param edges: iterable of [parent, child] pairs
    :return: a topological ordering as a list, or [] when vertices <= 0 or
        the graph contains a cycle
    """
    order = []
    if vertices <= 0:
        return order

    # Build the adjacency list and the incoming-edge counts.
    in_degree = {v: 0 for v in range(vertices)}
    adjacency = {v: [] for v in range(vertices)}
    for parent, child in edges:
        adjacency[parent].append(child)
        in_degree[child] += 1

    # Seed the queue with every source (in-degree zero).
    queue = deque(v for v in in_degree if in_degree[v] == 0)

    # Emit sources; decrementing a child's in-degree to zero makes it a source.
    while queue:
        vertex = queue.popleft()
        order.append(vertex)
        for child in adjacency[vertex]:
            in_degree[child] -= 1
            if in_degree[child] == 0:
                queue.append(child)

    # A cycle leaves some vertices unemitted.
    return order if len(order) == vertices else []
# TEST
# def main():
# print("Topological sort: " +
# str(topological_sort(4, [[3, 2], [3, 0], [2, 0], [2, 1]])))
# print("Topological sort: " +
# str(topological_sort(5, [[4, 2], [4, 3], [2, 0], [2, 1], [3, 1]])))
# print("Topological sort: " +
# str(topological_sort(7, [[6, 4], [6, 2], [5, 3], [5, 4], [3, 0], [3, 1], [3, 2], [4, 1]])))
#main() | algorithm/python/topological_sort.py | 10,916 | Perform topological sort on a directed acyclic graph.
--------------------------------------------------------------- ALGORITHM DEMO : TOPLOGICAL SORT--------------------------------------------------------------- Topological Sort is a algorithm can find "ordering" on an "order dependency" graph Concept https://blog.techbridge.cc/2020/05/10/leetcode-topological-sort/ https://alrightchiu.github.io/SecondRound/graph-li-yong-dfsxun-zhao-dagde-topological-sorttuo-pu-pai-xu.html V0 IDEA : implement topologicalSortUtil, topologicalSort, and addEdge methods step 1) maintain a stack, save "ordering" nodes in it (and return in final step) step 2) init visited as [False]*self.V (all nodes are NOT visited yet) step 3) iterate over all vertices in graph, if not visited, then run topologicalSortUtil step 4) return result (stack) for build graph NOTE this !!! (self.graph[v]) stack.insert(0,v) instead of insert v to idx = 0, we can still append v to stack and reverse it and return (e.g. return stack[::-1]) NOTE this !!! (range(self.V)) call tologicalSortUtil only if visited[v] == False (the vertice is not visited yet) return the result in inverse order TEST ans should be TableB, TableD, TableC, TableA. V0' V1 V2 visited[v] = True NOTE this !!! (self.graph[v]) for k in self.graph[v]: if visited[k] == False: self.topologicalSortUtil(k, visited, stack) stack.insert(0,v) instead of insert v to idx = 0, we can still append v to stack and reverse it and return (e.g. return stack[::-1]) """ NOTE !! stack.append(v) is wrong, we SHOULD use stack.insert(0,v) """ stack.insert(0,v) V0'' IDEA : implement topologicalSortUtil, topologicalSort, and addEdge methods for testing (build graph) V1 https://www.geeksforgeeks.org/topological-sorting/ Python program to print topological sorting of a DAG dictionary containing adjacency List No. of vertices function to add an edge to graph A recursive function used by topologicalSort Mark the current node as visited. 
Recur for all the vertices adjacent to this vertex Push current vertex to stack which stores resultstack.append(v) The function to do Topological Sort. It uses recursive topologicalSortUtil() Mark all the vertices as not visited Call the recursive helper function to store Topological Sort starting from all vertices one by one Print contents of the stack return list in reverse order TEST Driver Code g = Graph(6) g.addEdge(5, 2) g.addEdge(5, 0) g.addEdge(4, 0) g.addEdge(4, 1) g.addEdge(2, 3) g.addEdge(3, 1) print ("Following is a Topological Sort of the given graph") Function Call g.topologicalSort() V1 https://github.com/TheAlgorithms/Python/blob/master/sorts/topological_sort.py a / \ b c / \ d e edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []} vertices = ["a", "b", "c", "d", "e"] add current to visited if neighbor not in visited, visit if all neighbors visited add current to sort if all vertices haven't been visited select a new one to visit return sort TEST edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []} vertices = ["a", "b", "c", "d", "e"] sort = topological_sort("a", [], []) print(sort) V1' http://www.runoob.com/python3/python-topological-sorting.html TEST g= Graph(6) g.addEdge(5, 2); g.addEdge(5, 0); g.addEdge(4, 0); g.addEdge(4, 1); g.addEdge(2, 3); g.addEdge(3, 1); print ("output of Topological Sort ") g.topologicalSort() [5, 4, 2, 3, 1, 0] V2 https://zhuanlan.zhihu.com/p/69858335 init (value with 0) TEST G = { 'a':'bf', 'b':'cdf', 'c':'d', 'd':'ef', 'e':'f', 'f':'' } print(topoSort(G)) ['a', 'b', 'c', 'd', 'e', 'f'] V3 https://www.educative.io/courses/grokking-the-coding-interview/m25rBmwLV00 a. Initialize the graph count of incoming edges adjacency list graph b. Build the graph put the child into it's parent's list increment child's inDegree c. Find all sources i.e., all vertices with 0 in-degrees d. 
For each source, add it to the sortedOrder and subtract one from all of its children's in-degrees if a child's in-degree becomes zero, add it to the sources queue get the node's children to decrement their in-degrees topological sort is not possible as the graph has a cycle TEST def main(): print("Topological sort: " + str(topological_sort(4, [[3, 2], [3, 0], [2, 0], [2, 1]]))) print("Topological sort: " + str(topological_sort(5, [[4, 2], [4, 3], [2, 0], [2, 1], [3, 1]]))) print("Topological sort: " + str(topological_sort(7, [[6, 4], [6, 2], [5, 3], [5, 4], [3, 0], [3, 1], [3, 2], [4, 1]])))main() | 4,549 | en | 0.716686 |
#
# CSS
#
# Map of CSS bundle name -> bundle definition for django-pipeline.
# Each bundle lists the input files ('source_filenames') and the single
# minified file they are compiled into ('output_filename').
PIPELINE_CSS = {
    'search': {
        'source_filenames': (
            'crashstats/css/lib/flatpickr.dark.min.css',
            'supersearch/css/search.less',
        ),
        'output_filename': 'css/search.min.css',
    },
    'select2': {
        'source_filenames': (
            'crashstats/js/lib/select2/select2.css',
        ),
        'output_filename': 'css/select2.min.css',
    },
    'jquery_ui': {
        'source_filenames': (
            'crashstats/css/lib/jquery-ui.css',
            'crashstats/css/lib/jquery-ui.structure.css',
            'crashstats/css/lib/jquery-ui.theme.css',
        ),
        'output_filename': 'css/jquery-ui.min.css',
    },
    'accordion': {
        'source_filenames': (
            'crashstats/css/accordion.less',
        ),
        'output_filename': 'css/accordion.min.css',
    },
    'metricsgraphics': {
        'source_filenames': (
            'crashstats/css/lib/metricsgraphics.css',
            'crashstats/css/metricsgraphics_custom.css',
        ),
        'output_filename': 'css/metricsgraphics.min.css',
    },
    'crashstats_base': {
        'source_filenames': (
            'crashstats/css/screen.less',
            'status/css/status.less',
        ),
        'output_filename': 'css/crashstats-base.min.css',
    },
    'api_documentation': {
        'source_filenames': (
            'api/css/documentation.css',
        ),
        'output_filename': 'css/api-documentation.min.css',
    },
    'crashes_per_day': {
        'source_filenames': (
            'crashstats/css/crashes_per_day.less',
        ),
        'output_filename': 'css/crashes-per-day.min.css',
    },
    'crontabber_state': {
        'source_filenames': (
            'crashstats/css/crontabber_state.css',
        ),
        'output_filename': 'css/crontabber-state.min.css',
    },
    'documentation': {
        'source_filenames': (
            'documentation/css/documentation.less',
            'documentation/css/jsonview.custom.less',
        ),
        'output_filename': 'css/documentation.min.css',
    },
    'report_index': {
        'source_filenames': (
            'crashstats/css/report_index.css',
        ),
        'output_filename': 'css/report-index.min.css',
    },
    'report_pending': {
        'source_filenames': (
            'crashstats/css/report_pending.less',
        ),
        'output_filename': 'css/report-pending.min.css',
    },
    'api_tokens': {
        'source_filenames': (
            'manage/css/api_tokens.css',
        ),
        'output_filename': 'css/api-tokens.min.css',
    },
    'manage:home': {
        'source_filenames': (
            'crashstats/css/lib/font-awesome/css/font-awesome.css',
            'crashstats/css/fonts.less',
            'manage/css/home.less',
        ),
        'output_filename': 'css/manage-home.min.css',
    },
    'manage:supersearch_fields': {
        'source_filenames': (
            'manage/css/supersearch_fields.less',
        ),
        'output_filename': 'css/manage-supersearch-fields.min.css',
    },
    'manage:status_message': {
        'source_filenames': (
            'manage/css/status_message.css',
        ),
        'output_filename': 'css/manage-status-message.min.css',
    },
    'profile': {
        'source_filenames': (
            'profile/css/profile.css',
        ),
        'output_filename': 'css/profile.min.css',
    },
    'signature_report': {
        'source_filenames': (
            'signature/css/signature_report.less',
        ),
        'output_filename': 'css/signature-report.min.css',
    },
    'symbols': {
        'source_filenames': (
            'symbols/css/home.css',
        ),
        'output_filename': 'css/symbols.min.css',
    },
    'tokens': {
        'source_filenames': (
            'tokens/css/home.css',
        ),
        'output_filename': 'css/tokens.min.css',
    },
    'topcrashers': {
        'source_filenames': (
            'topcrashers/css/topcrashers.less',
        ),
        'output_filename': 'css/topcrashers.min.css',
    },
    'tablesorter': {
        'source_filenames': (
            'tablesorter/css/theme.default.min.css',
        ),
        # Bug fix: this previously wrote to 'js/tablesorter.min.css'; CSS
        # bundles belong under the 'css/' prefix like every other entry.
        'output_filename': 'css/tablesorter.min.css',
    },
}
#
# JavaScript
#
# Map of JavaScript bundle name -> bundle definition for django-pipeline.
# Each bundle lists the input files ('source_filenames') and the single
# minified file they are compiled into ('output_filename').
PIPELINE_JS = {
    'pagination': {
        'source_filenames': (
            'manage/js/pagination_utils.js',
        ),
        'output_filename': 'js/pagination.min.js',
    },
    'date_filters': {
        'source_filenames': (
            'crashstats/js/lib/flatpickr.min.js',
            'supersearch/js/socorro/date_filters.js',
        ),
        'output_filename': 'js/date-filters.min.js',
    },
    'dynamic_form': {
        'source_filenames': (
            'supersearch/js/lib/dynamic_form.js',
        ),
        'output_filename': 'js/dynamic-form.min.js',
    },
    'bugzilla': {
        'source_filenames': (
            'crashstats/js/socorro/bugzilla.js',
        ),
        'output_filename': 'js/bugzilla.min.js',
    },
    'd3': {
        'source_filenames': (
            'crashstats/js/lib/d3.min.js',
        ),
        'output_filename': 'js/d3.min.js',
    },
    'jquery_ui': {
        'source_filenames': (
            'crashstats/js/jquery/plugins/jquery-ui.js',
        ),
        'output_filename': 'js/jquery-ui.min.js',
    },
    'accordion': {
        'source_filenames': (
            'crashstats/js/lib/accordions.js',
        ),
        'output_filename': 'js/accordion.min.js',
    },
    'correlation': {
        'source_filenames': (
            'crashstats/js/polyfill/fetch.js',
            'crashstats/js/polyfill/es6-promise.auto.min.js',
            'crashstats/js/lib/sha1.js',
            'crashstats/js/socorro/correlation.js',
        ),
        'output_filename': 'js/correlation.min.js',
    },
    'metricsgraphics': {
        'source_filenames': (
            'crashstats/js/lib/metricsgraphics.min.js',
        ),
        'output_filename': 'js/metricsgraphics.min.js',
    },
    'select2': {
        'source_filenames': (
            'crashstats/js/lib/select2/select2.js',
        ),
        'output_filename': 'js/select2.min.js',
    },
    'tablesorter': {
        'source_filenames': (
            'tablesorter/js/jquery.tablesorter.js',
        ),
        'output_filename': 'js/jquery-tablesorter.min.js',
    },
    'socorro_utils': {
        'source_filenames': (
            'crashstats/js/socorro/utils.js',
        ),
        'output_filename': 'js/socorro-utils.min.js',
    },
    'topcrashers': {
        'source_filenames': (
            'topcrashers/js/topcrashers.js',
        ),
        'output_filename': 'js/topcrashers.min.js',
    },
    'crashstats_base': {
        'source_filenames': (
            'crashstats/js/jquery/jquery-2.0.3.min.js',
            'crashstats/js/jquery/plugins/jquery.cookies.2.2.0.js',
            'crashstats/js/lib/qs.js',
            'crashstats/js/lib/moment.min.js',
            'crashstats/js/socorro/timeutils.js',
            'crashstats/js/socorro/oauth2.js',
            'crashstats/js/socorro/nav.js',
            'crashstats/js/socorro/analytics.js',
        ),
        'output_filename': 'js/crashstats-base.min.js',
    },
    'api_documentation': {
        'source_filenames': (
            'api/js/lib/filesize.min.js',
            # Consistency fix: trailing comma added (every other tuple in
            # this file has one; it also keeps future diffs one-line).
            'api/js/testdrive.js',
        ),
        'output_filename': 'js/api-documentation.min.js',
    },
    'crashes_per_day': {
        'source_filenames': (
            'crashstats/js/socorro/crashes_per_day.js',
        ),
        'output_filename': 'js/crashes-per-day.min.js',
    },
    'crontabber_state': {
        'source_filenames': (
            'crashstats/js/underscore-min.js',
            'crashstats/js/lib/sankey.js',
            'crashstats/js/socorro/crontabber_state.js',
        ),
        'output_filename': 'js/crontabber-state.min.js',
    },
    'documentation': {
        'source_filenames': (
            'documentation/js/lib/jquery.jsonview.js',
            'documentation/js/documentation.js',
        ),
        'output_filename': 'js/documentation.min.js',
    },
    'exploitability_report': {
        'source_filenames': (
            'crashstats/js/socorro/exploitability_report.js',
        ),
        'output_filename': 'js/exploitability-report.min.js',
    },
    'home': {
        'source_filenames': (
            'home/js/home.js',
        ),
        'output_filename': 'js/home.min.js',
    },
    'report_index': {
        'source_filenames': (
            'crashstats/js/socorro/report.js',
            'crashstats/js/socorro/reprocessing.js',
        ),
        'output_filename': 'js/report-index.min.js',
    },
    'report_pending': {
        'source_filenames': (
            'crashstats/js/socorro/pending.js',
        ),
        'output_filename': 'js/report-pending.min.js',
    },
    'api_tokens': {
        'source_filenames': (
            'manage/js/api_tokens.js',
        ),
        'output_filename': 'js/api-tokens.min.js',
    },
    'manage:events': {
        'source_filenames': (
            'manage/js/events.js',
        ),
        'output_filename': 'js/manage-events.min.js',
    },
    'manage:graphics_devices': {
        'source_filenames': (
            'manage/js/graphics_devices.js',
        ),
        'output_filename': 'js/manage-graphics-devices.min.js',
    },
    'manage:groups': {
        'source_filenames': (
            'manage/js/groups.js',
        ),
        'output_filename': 'js/manage-groups.min.js',
    },
    'manage:supersearch_field': {
        'source_filenames': (
            'manage/js/supersearch_field.js',
        ),
        'output_filename': 'js/manage-supersearch-field.min.js',
    },
    'manage:supersearch_fields': {
        'source_filenames': (
            'manage/js/supersearch_fields.js',
        ),
        'output_filename': 'js/manage-supersearch-fields.min.js',
    },
    'manage:symbols_uploads': {
        'source_filenames': (
            'manage/js/symbols-uploads.js',
        ),
        'output_filename': 'js/manage-symbols-uploads.min.js',
    },
    'manage:users': {
        'source_filenames': (
            'manage/js/users.js',
        ),
        'output_filename': 'js/manage-users.min.js',
    },
    'signature_report': {
        'source_filenames': (
            'signature/js/signature_report.js',
            'signature/js/signature_tab.js',
            'signature/js/signature_tab_summary.js',
            'signature/js/signature_tab_graphs.js',
            'signature/js/signature_tab_reports.js',
            'signature/js/signature_tab_aggregations.js',
            'signature/js/signature_tab_comments.js',
            'signature/js/signature_tab_correlations.js',
            'signature/js/signature_tab_bugzilla.js',
            'signature/js/signature_tab_graph.js',
            'signature/js/signature_panel.js',
        ),
        'output_filename': 'js/signature-report.min.js',
    },
    'search_custom': {
        'source_filenames': (
            'supersearch/js/lib/ace/ace.js',
            'supersearch/js/lib/ace/theme-monokai.js',
            'supersearch/js/lib/ace/mode-json.js',
            'supersearch/js/socorro/search_custom.js',
        ),
        'output_filename': 'js/search-custom.min.js',
    },
    'search': {
        'source_filenames': (
            'supersearch/js/socorro/search.js',
        ),
        'output_filename': 'js/search.min.js',
    },
    'tokens': {
        'source_filenames': (
            'tokens/js/home.js',
        ),
        'output_filename': 'js/tokens.min.js',
    },
    'error': {
        'source_filenames': (
            'js/error.js',
        ),
        'output_filename': 'js/error.min.js',
    },
    'google_analytics': {
        'source_filenames': (
            'crashstats/js/socorro/google_analytics.js',
        ),
        'output_filename': 'js/google-analytics.min.js',
    },
}
# These are sanity checks, primarily for developers. They check that
# you haven't accidentally made a string a tuple with an
# excess comma, that there are no underscores in the bundle name, and that the
# bundle file extension is either .js or .css.
# We also check, but only warn, if a file is re-used in a different bundle.
# That's because you might want to consider not including that file in the
# bundle and instead break it out so it can be re-used on its own.
# Tracks which bundle each asset file was first seen in (file path -> bundle key).
_used = {}
for config in PIPELINE_JS, PIPELINE_CSS:  # NOQA
    # Asset files that appear in more than one bundle of this config.
    _trouble = set()
    for k, v in config.items():
        assert isinstance(k, basestring), k
        out = v['output_filename']
        assert isinstance(v['source_filenames'], tuple), v
        assert isinstance(out, basestring), v
        # A leading dot in the last path component would mean the bundle
        # name part of the output filename is empty (e.g. 'css/.min.css').
        assert not out.split('/')[-1].startswith('.'), k
        assert '_' not in out
        assert out.endswith('.min.css') or out.endswith('.min.js')
        for asset_file in v['source_filenames']:
            if asset_file in _used:
                # Consider using warnings.warn here instead
                print '{:<52} in {:<20} already in {}'.format(
                    asset_file,
                    k,
                    _used[asset_file]
                )
                _trouble.add(asset_file)
            _used[asset_file] = k

    # For the first repeated file only (note the `break` below), print every
    # bundle it appears in and the files it always co-occurs with -- a hint
    # that those files could be extracted into a shared bundle.
    for asset_file in _trouble:
        print "REPEATED", asset_file
        found_in = []
        sets = []
        for k, v in config.items():
            if asset_file in v['source_filenames']:
                found_in.append(k)
                sets.append(set(list(v['source_filenames'])))
        print "FOUND IN", found_in
        print "ALWAYS TOGETHER WITH", set.intersection(*sets)
        break
| webapp-django/crashstats/settings/bundles.py | 13,620 | CSS JavaScript This is sanity checks, primarily for developers. It checks that you haven't haven't accidentally make a string a tuple with an excess comma, no underscores in the bundle name and that the bundle file extension is either .js or .css. We also check, but only warn, if a file is re-used in a different bundle. That's because you might want to consider not including that file in the bundle and instead break it out so it can be re-used on its own. NOQA Consider using warnings.warn here instead | 506 | en | 0.940092 |
# Copyright (C) 2021, Mindee.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
import pytest
import numpy as np
from scipy.optimize import linear_sum_assignment
from doctr.utils.metrics import box_iou
@pytest.mark.asyncio
async def test_text_detection(test_app_asyncio, mock_detection_image):
    """End-to-end check of the /detection route against known ground truth."""
    response = await test_app_asyncio.post("/detection", files={'file': mock_detection_image})
    assert response.status_code == 200
    payload = response.json()

    # Ground-truth boxes in absolute pixel coordinates, rescaled to the
    # relative [0, 1] coordinates returned by the route (page: 1654 x 2339).
    gt_boxes = np.array([[1240, 430, 1355, 470], [1360, 430, 1495, 470]], dtype=np.float32)
    gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] / 1654
    gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] / 2339

    # Check that IoU with GT if reasonable
    assert isinstance(payload, list) and len(payload) == gt_boxes.shape[0]
    pred_boxes = np.array([entry['box'] for entry in payload])
    iou_mat = box_iou(gt_boxes, pred_boxes)
    # Match predictions to ground truth via the Hungarian algorithm
    # (maximize IoU = minimize its negation), then require every match
    # to overlap by at least 0.8 IoU.
    gt_idxs, pred_idxs = linear_sum_assignment(-iou_mat)
    is_kept = iou_mat[gt_idxs, pred_idxs] >= 0.8
    assert gt_idxs[is_kept].shape[0] == gt_boxes.shape[0]
| api/tests/routes/test_detection.py | 1,184 | Copyright (C) 2021, Mindee. This program is licensed under the Apache License version 2. See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details. Check that IoU with GT if reasonable | 222 | en | 0.851742 |
from .eLABJournalObject import *
import json
import pandas as pd
import numbers
class SampleSerie(eLABJournalObject):

    def __init__(self, api, data):
        """
        Internal use only: initialize sample serie.

        :param api: the eLABJournal API object this serie belongs to
        :param data: dict describing the serie; must contain a "name" key
        :raises Exception: if data is not a dict containing a "name" key
        """
        # Bug fix: the original used bitwise "&", which evaluates all
        # operands eagerly -- passing data=None raised AttributeError on
        # data.keys() instead of the intended Exception below. "and"
        # short-circuits, so invalid input now reaches the else branch.
        if (data is not None) and (type(data) == dict) and ("name" in data.keys()):
            super().__init__(api, data, "seriesID", str(data["name"]))
        else:
            raise Exception("no (valid) sampleSerie data")

    def barcode(self):
        """
        Get the barcode, or None if the serie data has no "barcode" entry.
        """
        if "barcode" in self.data():
            barcode = self.data()["barcode"]
            return(barcode)
        return None

    def samples(self):
        """
        Get the samples for this sample serie.

        Sample entries may be stored either as dicts holding a "sampleID"
        key or directly as sample ids (int or str); the collected ids are
        passed on to the API's sample() lookup.
        """
        sample_list = []
        if "samples" in self.data():
            samplesData = self.data()["samples"]
            if isinstance(samplesData, list):
                for sampleItem in samplesData:
                    # Bug fix: short-circuiting "and"/"or" instead of the
                    # original bitwise "&"/"|" -- with "&", an int item
                    # raised TypeError on `"sampleID" in sampleItem`
                    # because both operands were always evaluated.
                    if isinstance(sampleItem, dict) and ("sampleID" in sampleItem):
                        sample_list.append(sampleItem["sampleID"])
                    elif isinstance(sampleItem, numbers.Integral) or isinstance(sampleItem, str):
                        sample_list.append(sampleItem)
        return(self._eLABJournalObject__api.sample(sample_list))
| elabjournal/elabjournal/SampleSerie.py | 1,480 | Internal use only: initialize sample serie
Get the barcode.
Get a dict with the samples for this sample serie.
The sampleID is used as a key, the value is a sample object. | 171 | en | 0.851228 |
#!/usr/bin/env python
"""
Axis camera video driver. Inspired by:
https://code.ros.org/svn/wg-ros-pkg/branches/trunk_cturtle/sandbox/axis_camera/axis.py
Communication with the camera is done using the Axis VAPIX API described at
http://www.axis.com/global/en/support/developer-support/vapix
.. note::
This is a major rewrite of the former ros-drivers/axis_camera node, so it contains a (deprecated) backwards
compatibility layer for the previous (non-released) API.
"""
import math
import re
import rospy
from sensor_msgs.msg import CompressedImage, CameraInfo
import camera_info_manager
import dynamic_reconfigure.server
from diagnostic_updater import Updater, DiagnosedPublisher, TimeStampStatusParam, FrequencyStatusParam, \
FunctionDiagnosticTask, DiagnosticStatusWrapper
from axis_camera.cfg import VideoStreamConfig
from axis_camera.srv import TakeSnapshot, TakeSnapshotResponse
from axis_camera.vapix import VAPIX
from axis_camera.video_streaming import ImageStreamingThread
from axis_camera.dynamic_reconfigure_tools import change_enum_items
# BACKWARDS COMPATIBILITY LAYER
StreamThread = ImageStreamingThread # deprecated
class Axis(rospy.SubscribeListener):
"""The ROS-VAPIX interface for video streaming."""
    def __init__(self, hostname, username, password, width, height, frame_id, camera_info_url, use_encrypted_password,
                 camera_id=1, auto_wakeup_camera=True, compression=0, fps=24, use_color=True,
                 use_square_pixels=False):
        """Create the ROS-VAPIX interface.
        :param hostname: Hostname of the camera (without http://, can be an IP address).
        :type hostname: basestring
        :param username: If login is needed, provide a username here.
        :type username: :py:obj:`basestring` | None
        :param password: If login is needed, provide a password here.
        :type password: :py:obj:`basestring` | None
        :param width: Width of the requested video stream in pixels (can be changed later). Must be one of the supported
                      resolutions. If `None`, the resolution will be chosen by height only. If also `height` is `None`,
                      then the default camera resolution will be used.
        :type width: int|None
        :param height: Height of the requested video stream in pixels (can be changed later). Must be one of the
                       supported resolutions. If `None`, the resolution will be chosen by width only. If also `width` is
                       `None`, then the default camera resolution will be used.
        :type height: int|None
        :param frame_id: The ROS TF frame assigned to the camera.
        :type frame_id: basestring
        :param camera_info_url: The URL pointing to the camera calibration, if available.
        :type camera_info_url: basestring
        :param use_encrypted_password: Whether to use Plain HTTP Auth (False) or Digest HTTP Auth (True).
        :type use_encrypted_password: bool
        :param camera_id: ID (number) of the camera. Can be 1 to 4.
        :type camera_id: int
        :param auto_wakeup_camera: If True, there will be a wakeup trial after first unsuccessful network command.
        :type auto_wakeup_camera: bool
        :param compression: Compression of the image (0 - no compression, 100 - max compression).
        :type compression: int
        :param fps: The desired frames per second.
        :type fps: int
        :param use_color: If True, send a color stream, otherwise send only grayscale image.
        :type use_color: bool
        :param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
                                  By default, the pixels have a ratio of 11:12.
        :type use_square_pixels: bool
        :raises: :py:exc:`ValueError` if the requested resolution (either the `resolution`, or `width`+`height`
                 is not supported.
        """
        # True every time the video parameters have changed and the URL has to be altered (set from other threads).
        self.video_params_changed = False

        # While True, reconfigure_video gives constructor arguments precedence
        # over dynamic-reconfigure requests; cleared at the end of __init__.
        self.__initializing = True

        self._hostname = hostname
        self._camera_id = camera_id

        self.diagnostic_updater = Updater()
        self.diagnostic_updater.setHardwareID(hostname)

        self._api = None
        # autodetect the VAPIX API and connect to it; try it forever
        while self._api is None and not rospy.is_shutdown():
            try:
                self._api = VAPIX.get_api_for_camera(hostname, username, password, camera_id, use_encrypted_password)
            except (IOError, ValueError):
                rospy.loginfo("Retrying connection to VAPIX on host %s, camera %d in 2 seconds." %
                              (hostname, camera_id))
                rospy.sleep(2)

        # The retry loop above only exits with _api unset when ROS is
        # shutting down, so there is nothing left to initialize.
        if rospy.is_shutdown():
            return

        self._allowed_resolutions = self._get_allowed_resolutions()

        rospy.loginfo("The following resolutions are available for camera %d:\n%s" %
                      (camera_id, "\n".join([str(res) for res in self._allowed_resolutions])))
        rospy.set_param("~allowed_resolutions", [res.get_vapix_representation() for res in self._allowed_resolutions])

        # Sometimes the camera falls into power saving mode and stops streaming.
        # This setting allows the script to try to wake up the camera.
        self._auto_wakeup_camera = auto_wakeup_camera

        # dynamic-reconfigurable properties - definitions
        self._width = None  # deprecated
        self._height = None  # deprecated
        self._resolution = None
        self._compression = None
        self._fps = None
        self._use_color = None
        self._use_square_pixels = None

        # treat empty strings as None in width and height params
        width = width if width != "" else None
        height = height if height != "" else None

        # dynamic-reconfigurable properties - defaults
        if width is None and height is None:
            # TODO change to perform default resolution detection from VAPIX
            self.set_resolution(self._allowed_resolutions[0])
        else:
            resolution = self.find_resolution_by_size(width, height)
            self.set_resolution(resolution.get_vapix_representation())

        self.set_compression(compression)
        self.set_fps(fps)
        self.set_use_color(use_color)
        self.set_use_square_pixels(use_square_pixels)

        # only advertise the supported resolutions on dynamic reconfigure
        change_enum_items(
            VideoStreamConfig,
            "resolution",
            [{
                'name': res.name if isinstance(res, CIFVideoResolution) else str(res),
                'value': res.get_vapix_representation(),
                'description': str(res)
            } for res in self._allowed_resolutions],
            self._resolution.get_vapix_representation()
        )

        # dynamic reconfigure server
        self._video_stream_param_change_server = dynamic_reconfigure.server.Server(VideoStreamConfig,
                                                                                   self.reconfigure_video)

        # camera info setup
        self._frame_id = frame_id
        self._camera_info_url = camera_info_url

        # generate a valid camera name based on the hostname
        self._camera_name = camera_info_manager.genCameraName(self._hostname)
        self._camera_info = camera_info_manager.CameraInfoManager(cname=self._camera_name, url=self._camera_info_url)
        self._camera_info.loadCameraInfo()  # required before getCameraInfo()

        # the thread used for streaming images (is instantiated when the first image subscriber subscribes)
        self._streaming_thread = None

        # the publishers are started/stopped lazily in peer_subscribe/peer_unsubscribe
        self._video_publisher_frequency_diagnostic = FrequencyStatusParam({'min': self._fps, 'max': self._fps})
        self._video_publisher = PausableDiagnosedPublisher(
            self,
            rospy.Publisher("image_raw/compressed", CompressedImage, self, queue_size=100),
            self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
        )
        self._camera_info_publisher = PausableDiagnosedPublisher(
            self,
            rospy.Publisher("camera_info", CameraInfo, self, queue_size=100),
            self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
        )

        self._snapshot_server = rospy.Service("take_snapshot", TakeSnapshot, self.take_snapshot)

        self.diagnostic_updater.add(FunctionDiagnosticTask("Camera parameters", self._camera_diagnostic_callback))

        # BACKWARDS COMPATIBILITY LAYER

        self.username = username  # deprecated
        self.password = password  # deprecated
        self.use_encrypted_password = use_encrypted_password  # deprecated
        self.st = None  # deprecated
        self.pub = self._video_publisher  # deprecated
        self.caminfo_pub = self._camera_info_publisher  # deprecated

        self.__initializing = False
def __str__(self):
(width, height) = self._resolution.get_resolution(self._use_square_pixels)
return 'Axis driver on host %s, camera %d (%dx%d px @ %d FPS)' % \
(self._hostname, self._api.camera_id, width, height, self._fps)
def peer_subscribe(self, topic_name, topic_publish, peer_publish):
"""Lazy-start the image-publisher."""
if self._streaming_thread is None:
self._streaming_thread = ImageStreamingThread(self)
self._streaming_thread.start()
else:
self._streaming_thread.resume()
def peer_unsubscribe(self, topic_name, num_peers):
"""Lazy-stop the image-publisher when nobody is interested"""
if num_peers == 0:
self._streaming_thread.pause()
def take_snapshot(self, request):
"""Retrieve a snapshot from the camera.
:param request: The service request.
:type request: :py:class:`axis_camera.srv.TakeSnapshotRequest`
:return: The response containing the image.
:rtype: :py:class:`axis_camera.srv.TakeSnapshotResponse`
:raises: :py:exc:`IOError`, :py:exc:`urllib2.URLError`
"""
image_data = self._api.take_snapshot()
image = CompressedImage()
image.header.stamp = rospy.Time.now()
image.header.frame_id = self._frame_id
image.format = "jpeg"
image.data = image_data
response = TakeSnapshotResponse()
response.image = image
return response
def reconfigure_video(self, config, level):
"""Dynamic reconfigure callback for video parameters.
:param config: The requested configuration.
:type config: dict
:param level: Unused here.
:type level: int
:return: The config corresponding to what was really achieved.
:rtype: dict
"""
if self.__initializing:
# in the initialization phase, we want to give precedence to the values given to the constructor
config.compression = self._compression
config.fps = self._fps
config.use_color = self._use_color
config.use_square_pixels = self._use_square_pixels
config.resolution = self._resolution.get_vapix_representation()
else:
self.__try_set_value_from_config(config, 'compression', self.set_compression)
self.__try_set_value_from_config(config, 'fps', self.set_fps)
self.__try_set_value_from_config(config, 'use_color', self.set_use_color)
self.__try_set_value_from_config(config, 'use_square_pixels', self.set_use_square_pixels)
try:
self.set_resolution(config.resolution)
except ValueError:
config.resolution = self._resolution.get_vapix_representation()
return config
    def __try_set_value_from_config(self, config, field, setter):
        """First, try to call `setter(config[field])`, and if this call doesn't succeed. set the field in config to
        its value stored in this class.
        :param config: The dynamic reconfigure config dictionary.
        :type config: dict
        :param field: The field name (both in :py:obj:`config` and in :py:obj:`self`).
        :type field: basestring
        :param setter: The setter to use to set the value.
        :type setter: lambda function
        """
        try:
            setter(config[field])
        except ValueError:
            # NOTE(review): the setters visible in this file store values in
            # underscore-prefixed attributes (e.g. self._compression), so
            # unless matching non-underscore properties are defined elsewhere
            # in this class, getattr(self, field) on this fallback path would
            # raise AttributeError -- verify against the full class.
            config[field] = getattr(self, field)
#################################
# DYNAMIC RECONFIGURE CALLBACKS #
#################################
def set_resolution(self, resolution_value):
"""Request a new resolution for the video stream.
:param resolution_value: The string of type `width`x`height` or a :py:class:`VideoResolution` object.
:type resolution_value: basestring|VideoResolution
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
resolution = None
if isinstance(resolution_value, VideoResolution):
resolution = resolution_value
elif isinstance(resolution_value, basestring):
resolution = self._get_resolution_from_param_value(resolution_value)
if resolution is None:
raise ValueError("Unsupported resolution type specified: %r" % resolution_value)
if self._resolution is None or resolution != self._resolution:
self._resolution = resolution
self.video_params_changed = True
# deprecated values
self._width = resolution.get_resolution(self._use_square_pixels)[0]
self._height = resolution.get_resolution(self._use_square_pixels)[1]
def _get_resolution_from_param_value(self, value):
"""Return a :py:class:`VideoResolution` object corresponding to the given video resolution param string.
:param value: Value of the resolution parameter to parse (of form `width`x`height`).
:type value: basestring
:return: The :py:class:`VideoResolution` corresponding to the given resolution param string.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
for resolution in self._allowed_resolutions:
if resolution.get_vapix_representation() == value:
return resolution
raise ValueError("%s is not a valid valid resolution." % value)
def find_resolution_by_size(self, width, height):
"""Return a :py:class:`VideoResolution` object with the given dimensions.
If there are more resolutions with the same size, any of them may be returned.
:param width: Image width in pixels. If `None`, resolutions will be matched only by height.
:type width: int|None
:param height: Image height in pixels. If `None`, resolutions will be matched only by width.
:type height: int|None
:return: The corresponding resolution object.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if no resolution with the given dimensions can be found.
:raises: :py:exc:`ValueError` if both `width` and `height` are None.
"""
if width is None and height is None:
raise ValueError("Either width or height of the desired resolution must be specified.")
for resolution in self._allowed_resolutions:
size = resolution.get_resolution(use_square_pixels=False)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
size = resolution.get_resolution(use_square_pixels=True)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
raise ValueError("Cannot find a supported resolution with dimensions %sx%s" % (width, height))
def _get_allowed_resolutions(self):
"""Return a list of resolutions supported both by the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
"""
camera_resolutions = self._get_resolutions_supported_by_camera()
return camera_resolutions
def _get_resolutions_supported_by_camera(self):
    """Return a list of resolutions supported by the camera.

    Queries the `Properties.Image.Resolution` VAPIX parameter; if the query
    or parsing fails, falls back to assuming only CIF is available.

    :return: The supported resolutions list.
    :rtype: list of :py:class:`VideoResolution`
    """
    try:
        names = self._api.parse_list_parameter_value(self._api.get_parameter("Properties.Image.Resolution"))
        return [VideoResolution.parse_from_vapix_param_value(name, self._api) for name in names]
    except (IOError, ValueError):
        # Best-effort fallback: CIF is the least common denominator.
        rospy.logwarn("Could not determine resolutions supported by the camera. Assuming only CIF.")
        return [CIFVideoResolution("CIF", 384, 288)]
def set_compression(self, compression):
    """Request the given compression level for the video stream.

    :param compression: Compression of the image (0 - no compression, 100 - max compression).
    :type compression: int
    :raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
    """
    if compression == self._compression:
        return  # no change requested, keep the current stream parameters
    self._compression = self.sanitize_compression(compression)
    # Signal the streaming machinery that the stream URL must be rebuilt.
    self.video_params_changed = True
@staticmethod
def sanitize_compression(compression):
    """Make sure the given value can be used as a compression level of the video stream.

    :param compression: Compression of the image (0 - no compression, 100 - max compression).
    :type compression: int
    :return: The given compression converted to an int.
    :rtype: int
    :raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
    """
    level = int(compression)
    if level < 0 or level > 100:
        raise ValueError("%s is not a valid value for compression." % str(level))
    return level
def set_fps(self, fps):
    """Request the given frame rate for the video stream.

    :param fps: The desired frames per second.
    :type fps: int
    :raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
    """
    if fps != self._fps:
        self._fps = self.sanitize_fps(fps)
        self.video_params_changed = True
        # Keep the publisher-frequency diagnostic bounds in sync with the new
        # rate; the diagnostic only exists once the video publisher is set up.
        if hasattr(self, "_video_publisher_frequency_diagnostic"):
            self._video_publisher_frequency_diagnostic.freq_bound['min'] = self._fps
            self._video_publisher_frequency_diagnostic.freq_bound['max'] = self._fps
@staticmethod
def sanitize_fps(fps):
    """Make sure the given value can be used as FPS of the video stream.

    :param fps: The desired frames per second.
    :type fps: int
    :return: The given FPS converted to an int.
    :rtype: int
    :raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
    """
    rate = int(fps)
    if rate < 1 or rate > 30:
        raise ValueError("%s is not a valid value for FPS." % str(rate))
    return rate
def set_use_color(self, use_color):
    """Request using/not using color in the video stream.

    :param use_color: If True, send a color stream, otherwise send only grayscale image.
    :type use_color: bool
    :raises: :py:exc:`ValueError` if the given argument is not a bool.
    """
    if use_color == self._use_color:
        return  # value unchanged, nothing to do
    self._use_color = self.sanitize_bool(use_color, "use_color")
    # Signal the streaming machinery that the stream URL must be rebuilt.
    self.video_params_changed = True
def set_use_square_pixels(self, use_square_pixels):
    """Request using/not using square pixels.

    :param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
                              By default, the pixels have a ratio of 11:12.
    :type use_square_pixels: bool
    :raises: :py:exc:`ValueError` if the given argument is not a bool.
    """
    if use_square_pixels == self._use_square_pixels:
        return  # value unchanged, nothing to do
    self._use_square_pixels = self.sanitize_bool(use_square_pixels, "use_square_pixels")
    # Signal the streaming machinery that the stream URL must be rebuilt.
    self.video_params_changed = True
@staticmethod
def sanitize_bool(value, field_name):
    """Convert the given value to a bool.

    :param value: Either True, False, "1", "0", 1 or 0.
    :type value: :py:class:`basestring` | :py:class:`bool` | :py:class:`int`
    :param field_name: Name of the field this value belongs to (just for debug messages).
    :type field_name: basestring
    :return: The bool value of the given value.
    :rtype: :py:class:`bool`
    :raises: :py:exc:`ValueError` if the given value is not supported in this conversion.
    """
    if value not in (True, False, "1", "0", 1, 0):
        raise ValueError("%s is not a valid value for %s." % (str(value), field_name))

    # bool("0") returns True because it is a nonempty string
    if value == "0":
        return False

    return bool(value)
def _camera_diagnostic_callback(self, diag_message):
    """Fill the diagnostics message with the current video parameters.

    :param diag_message: The diagnostics message to update (modified in place).
    :type diag_message: :py:class:`DiagnosticStatusWrapper`
    """
    assert isinstance(diag_message, DiagnosticStatusWrapper)

    diag_message.summary(DiagnosticStatusWrapper.OK, "Video parameters")
    # Report each parameter as a labelled key-value pair (order matters for
    # display, so the tuple keeps the original ordering).
    for label, value in (
            ("FPS", self._fps),
            ("Resolution", self._resolution),
            ("Compression", self._compression),
            ("Color image", self._use_color),
            ("Square pixels used", self._use_square_pixels)):
        diag_message.add(label, value)
class VideoResolution(object):
    """A class representing a video resolution."""

    def __init__(self, width, height):
        """Create a representation of the resolution.

        :param width: Width of the resolution in pixels.
        :type width: int
        :param height: Height of the resolution in pixels.
        :type height: int
        """
        super(VideoResolution, self).__init__()

        self.width = int(width)
        self.height = int(height)

        # Non-square pixels have an 11:12 aspect ratio, so widths get
        # stretched by 12/11 when square pixels are requested.
        self.square_pixel_conversion_ratio_width = 12.0 / 11.0
        self.square_pixel_conversion_ratio_height = 1

    def __str__(self):
        return "%dx%d" % (self.width, self.height)

    def __repr__(self):
        return "VideoResolution(width=%r,height=%r)" % (self.width, self.height)

    def __eq__(self, other):
        # Compare by attribute values. Returning NotImplemented for foreign
        # types (instead of crashing on objects without a __dict__, e.g.
        # strings) lets Python fall back to the other operand's comparison.
        if not isinstance(other, VideoResolution):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # Reuse the __eq__ definition, propagating NotImplemented so the
        # reflected comparison is still attempted.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def get_resolution(self, use_square_pixels=False):
        """Get the image dimensions corresponding to this resolution.

        :param use_square_pixels: Whether to stretch the resulting resolution to square pixels.
        :type use_square_pixels: bool
        :return: A tuple (width, height)
        :rtype: tuple
        """
        width = self.width
        height = self.height

        if use_square_pixels:
            width = int(math.ceil(self.square_pixel_conversion_ratio_width * self.width))
            height = int(math.ceil(self.square_pixel_conversion_ratio_height * self.height))

        return width, height

    def get_vapix_representation(self):
        """Return the `width`x`height` string form used by the VAPIX API."""
        return "%dx%d" % (self.width, self.height)

    @staticmethod
    def parse_from_vapix_param_value(value, api):
        """Parse a VAPIX resolution value (either `width`x`height` or a CIF name).

        :param value: The resolution string to parse.
        :type value: basestring
        :param api: The VAPIX API instance used to resolve CIF names.
        :type api: :py:class:`VAPIX`
        :return: The parsed resolution.
        :rtype: :py:class:`VideoResolution`
        """
        assert isinstance(value, basestring)
        assert isinstance(api, VAPIX)

        numeric_regexp = re.compile(r"(\d+)x(\d+)")
        match = numeric_regexp.match(value)

        if match is not None:
            return VideoResolution(int(match.group(1)), int(match.group(2)))
        else:  # resolution given by CIF name
            name = value
            width, height = api.resolve_video_resolution_name(name)
            return CIFVideoResolution(name, width, height)
class CIFVideoResolution(VideoResolution):
    """A class representing a CIF standard resolution."""

    def __init__(self, name, width, height):
        """Create a representation of a CIF resolution.

        :param name: CIF standard name of the resolution.
        :type name: basestring
        :param width: Width of the resolution in pixels.
        :type width: int
        :param height: Height of the resolution in pixels.
        :type height: int
        """
        super(CIFVideoResolution, self).__init__(width, height)

        # Human-readable CIF name (e.g. "CIF"), kept for display purposes.
        self.name = name

    def __str__(self):
        described = (self.name, self.width, self.height)
        return "%s (%dx%d)" % described

    def __repr__(self):
        described = (self.name, self.width, self.height)
        return "CIFVideoResolution(name=%r,width=%r,height=%r)" % described
def main():
    """Start the ROS driver and ROS node."""
    rospy.init_node("axis_driver")

    # Defaults for every driver parameter; actual values may come from the
    # parameter server (possibly defined in an outer namespace).
    arg_defaults = {
        'hostname': '192.168.0.90',  # default IP address
        'username': None,  # default login name
        'password': None,
        'width': 704,
        'height': 576,
        'frame_id': 'axis_camera',
        'camera_info_url': '',
        'use_encrypted_password': False,
        'camera_id': 1,
        'auto_wakeup_camera': True,
        'compression': 0,
        'fps': 24,
        'use_color': True,
        'use_square_pixels': False,
    }
    axis = Axis(**read_args_with_defaults(arg_defaults))

    # Spin at 1 Hz, refreshing diagnostics until the node is shut down.
    update_rate = rospy.Rate(1)
    while not rospy.is_shutdown():
        axis.diagnostic_updater.update()
        try:
            update_rate.sleep()
        except rospy.ROSTimeMovedBackwardsException:
            rospy.logwarn("Detected jump back in time.")
class PausableDiagnosedPublisher(DiagnosedPublisher):
    """A frequency-diagnosed publisher that reports OK while streaming is paused."""

    def __init__(self, axis, pub, diag, freq, stamp):
        DiagnosedPublisher.__init__(self, pub, diag, freq, stamp)
        # Keep a handle on the driver so we can inspect its streaming thread.
        self._axis = axis

    def run(self, stat):
        """Fill in the frequency diagnostics, or report OK when not streaming."""
        thread = self._axis._streaming_thread
        if thread is not None and not thread.is_paused():
            # Streaming is active: delegate to the normal frequency check.
            return DiagnosedPublisher.run(self, stat)
        stat.summary(DiagnosticStatusWrapper.OK, "Video not subscribed")
        return stat
def read_args_with_defaults(arg_defaults):
    """Look up parameters starting in the driver's private parameter space, but also searching outer namespaces.
    Defining them in a higher namespace allows the axis_ptz.py script to share parameters with the driver.

    :param arg_defaults: Mapping from parameter name to its default value.
    :type arg_defaults: dict
    :return: The resolved parameter values (same keys as :py:obj:`arg_defaults`).
    :rtype: dict
    """
    args = {}
    for name, val in arg_defaults.iteritems():
        full_name = rospy.search_param(name)
        if full_name is None:
            args[name] = val
        else:
            args[name] = rospy.get_param(full_name, val)

    # resolve frame_id with tf_prefix (unless already absolute)
    # startswith() also copes with an empty frame_id, which would have
    # crashed the former args['frame_id'][0] indexing.
    if not args['frame_id'].startswith('/'):  # not absolute?
        tf_prefix = rospy.search_param('tf_prefix')
        prefix_val = ''
        if tf_prefix is not None:  # prefix defined?
            prefix_val = rospy.get_param(tf_prefix)
            # Guard against an empty prefix before inspecting its first char.
            if prefix_val and not prefix_val.startswith('/'):  # prefix not absolute?
                prefix_val = '/' + prefix_val
        args['frame_id'] = prefix_val + '/' + args['frame_id']
    return args
if __name__ == "__main__":
    # Run the driver only when executed as a script (not on import).
    main()
| nodes/axis.py | 27,430 | The ROS-VAPIX interface for video streaming.
A class representing a CIF standard resolution.
A class representing a video resolution.
Create the ROS-VAPIX interface.
:param hostname: Hostname of the camera (without http://, can be an IP address).
:type hostname: basestring
:param username: If login is needed, provide a username here.
:type username: :py:obj:`basestring` | None
:param password: If login is needed, provide a password here.
:type password: :py:obj:`basestring` | None
:param width: Width of the requested video stream in pixels (can be changed later). Must be one of the supported
resolutions. If `None`, the resolution will be chosen by height only. If also `height` is `None`,
then the default camera resolution will be used.
:type width: int|None
:param height: Height of the requested video stream in pixels (can be changed later). Must be one of the
supported resolutions. If `None`, the resolution will be chosen by width only. If also `width` is
`None`, then the default camera resolution will be used.
:type height: int|None
:param frame_id: The ROS TF frame assigned to the camera.
:type frame_id: basestring
:param camera_info_url: The URL pointing to the camera calibration, if available.
:type camera_info_url: basestring
:param use_encrypted_password: Whether to use Plain HTTP Auth (False) or Digest HTTP Auth (True).
:type use_encrypted_password: bool
:param camera_id: ID (number) of the camera. Can be 1 to 4.
:type camera_id: int
:param auto_wakeup_camera: If True, there will be a wakeup trial after first unsuccessful network command.
:type auto_wakeup_camera: bool
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:param fps: The desired frames per second.
:type fps: int
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
:raises: :py:exc:`ValueError` if the requested resolution (either the `resolution`, or `width`+`height`
is not supported.
Create a representation of the resolution.
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
Create a representation of a CIF resolution.
:param name: CIF standard name of the resolution.
:type name: basestring
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
First, try to call `setter(config[field])`, and if this call doesn't succeed. set the field in config to
its value stored in this class.
:param config: The dynamic reconfigure config dictionary.
:type config: dict
:param field: The field name (both in :py:obj:`config` and in :py:obj:`self`).
:type field: basestring
:param setter: The setter to use to set the value.
:type setter: lambda function
Return a list of resolutions supported both by the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
Return a :py:class:`VideoResolution` object corresponding to the given video resolution param string.
:param value: Value of the resolution parameter to parse (of form `width`x`height`).
:type value: basestring
:return: The :py:class:`VideoResolution` corresponding to the given resolution param string.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
Return a list of resolutions supported by the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
Return a :py:class:`VideoResolution` object with the given dimensions.
If there are more resolutions with the same size, any of them may be returned.
:param width: Image width in pixels. If `None`, resolutions will be matched only by height.
:type width: int|None
:param height: Image height in pixels. If `None`, resolutions will be matched only by width.
:type height: int|None
:return: The corresponding resolution object.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if no resolution with the given dimensions can be found.
:raises: :py:exc:`ValueError` if both `width` and `height` are None.
Get the image dimensions corresponding to this resolution.
:param use_square_pixels: Whether to stretch the resulting resolution to square pixels.
:type use_square_pixels: bool
:return: A tuple (width, height)
:rtype: tuple
Start the ROS driver and ROS node.
Lazy-start the image-publisher.
Lazy-stop the image-publisher when nobody is interested
Look up parameters starting in the driver's private parameter space, but also searching outer namespaces.
Defining them in a higher namespace allows the axis_ptz.py script to share parameters with the driver.
Dynamic reconfigure callback for video parameters.
:param config: The requested configuration.
:type config: dict
:param level: Unused here.
:type level: int
:return: The config corresponding to what was really achieved.
:rtype: dict
Convert the given value to a bool.
:param value: Either True, False, "1", "0", 1 or 0.
:type value: :py:class:`basestring` | :py:class:`bool` | :py:class:`int`
:param field_name: Name of the field this value belongs to (just for debug messages).
:type field_name: basestring
:return: The bool value of the given value.
:rtype: :py:class:`bool`
:raises: :py:exc:`ValueError` if the given value is not supported in this conversion.
Make sure the given value can be used as a compression level of the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:return: The given compression converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
Make sure the given value can be used as FPS of the video stream.
:param fps: The desired frames per second.
:type fps: int
:return: The given FPS converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
Request the given compression level for the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
Request the given compression level for the video stream.
:param fps: The desired frames per second.
:type fps: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
Request a new resolution for the video stream.
:param resolution_value: The string of type `width`x`height` or a :py:class:`VideoResolution` object.
:type resolution_value: basestring|VideoResolution
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
Request using/not using color in the video stream.
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
Request using/not using square pixels.
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
Retrieve a snapshot from the camera.
:param request: The service request.
:type request: :py:class:`axis_camera.srv.TakeSnapshotRequest`
:return: The response containing the image.
:rtype: :py:class:`axis_camera.srv.TakeSnapshotResponse`
:raises: :py:exc:`IOError`, :py:exc:`urllib2.URLError`
Axis camera video driver. Inspired by:
https://code.ros.org/svn/wg-ros-pkg/branches/trunk_cturtle/sandbox/axis_camera/axis.py
Communication with the camera is done using the Axis VAPIX API described at
http://www.axis.com/global/en/support/developer-support/vapix
.. note::
This is a major rewrite of the former ros-drivers/axis_camera node, so it contains a (deprecated) backwards
compatibility layer for the previous (non-released) API.
!/usr/bin/env python BACKWARDS COMPATIBILITY LAYER deprecated True every time the video parameters have changed and the URL has to be altered (set from other threads). autodetect the VAPIX API and connect to it; try it forever Sometimes the camera falls into power saving mode and stops streaming. This setting allows the script to try to wake up the camera. dynamic-reconfigurable properties - definitions deprecated deprecated treat empty strings as None in width and height params dynamic-reconfigurable properties - defaults TODO change to perform default resolution detection from VAPIX only advertise the supported resolutions on dynamic reconfigure dynamic reconfigure server camera info setup generate a valid camera name based on the hostname required before getCameraInfo() the thread used for streaming images (is instantiated when the first image subscriber subscribes) the publishers are started/stopped lazily in peer_subscribe/peer_unsubscribe BACKWARDS COMPATIBILITY LAYER deprecated deprecated deprecated deprecated deprecated deprecated in the initialization phase, we want to give precedence to the values given to the constructor DYNAMIC RECONFIGURE CALLBACKS deprecated values bool("0") returns True because it is a nonempty string compare by attribute values reuse the former __eq__ definition resolution given by CIF name default IP address default login name resolve frame_id with tf_prefix (unless already absolute) not absolute? prefix defined? prefix not absolute? | 9,734 | en | 0.698402 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add the ``created``/``updated`` timestamp columns to ``CakeGallery``."""

    def forwards(self, orm):
        """Apply the migration: add the two timestamp columns."""
        # Adding field 'CakeGallery.created'
        # The fixed 2013-11-18 default only back-fills pre-existing rows;
        # auto_now_add takes over for newly created objects (keep_default=False
        # drops the default from the column afterwards).
        db.add_column(u'cakegallery_cakegallery', 'created',
                      self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2013, 11, 18, 0, 0), blank=True),
                      keep_default=False)

        # Adding field 'CakeGallery.updated'
        db.add_column(u'cakegallery_cakegallery', 'updated',
                      self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2013, 11, 18, 0, 0), blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the two timestamp columns."""
        # Deleting field 'CakeGallery.created'
        db.delete_column(u'cakegallery_cakegallery', 'created')

        # Deleting field 'CakeGallery.updated'
        db.delete_column(u'cakegallery_cakegallery', 'updated')

    # Frozen ORM snapshot used by South while this migration runs; it mirrors
    # the model definitions at the time the migration was generated.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'cakegallery.cakecategory': {
            'Meta': {'object_name': 'CakeCategory'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'cakegallery.cakegallery': {
            'Meta': {'object_name': 'CakeGallery'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'category'", 'to': u"orm['cakegallery.CakeCategory']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
            'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subcategory'", 'to': u"orm['cakegallery.CakeSubCategory']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'visits_num': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        u'cakegallery.cakeimage': {
            'Meta': {'object_name': 'CakeImage'},
            'add_watermark': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'blank': "'True'", 'related_name': "'images'", 'null': 'True', 'to': u"orm['cakegallery.CakeCategory']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'for_registered': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'gallery': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'gallery'", 'null': 'True', 'to': u"orm['cakegallery.CakeGallery']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
            'image_alt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'image_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'ip_addr': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'blank': "'True'", 'related_name': "'images'", 'null': 'True', 'to': u"orm['cakegallery.CakeSubCategory']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'visits_num': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        u'cakegallery.cakesubcategory': {
            'Meta': {'object_name': 'CakeSubCategory'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subcategories'", 'to': u"orm['cakegallery.CakeCategory']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['cakegallery']
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-24 05:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the ``River`` model and repoint ``Channel.river`` at it."""

    dependencies = [
        ('utils', '0011_auto_20160822_1127'),
    ]

    operations = [
        migrations.CreateModel(
            name='River',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(unique=True)),
                ('added', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        # NOTE(review): related_name='rivers' on a FK *to* River gives
        # River instances a reverse accessor named "rivers" that actually
        # yields channels — presumably meant to be 'channels'; left
        # unchanged because altering it would change the generated schema
        # history. Confirm against the current models module.
        migrations.AlterField(
            model_name='channel',
            name='river',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rivers', to='utils.River'),
        ),
    ]
| apps/utils/migrations/0012_auto_20160824_0543.py | 921 | -*- coding: utf-8 -*- Generated by Django 1.9.7 on 2016-08-24 05:43 | 67 | en | 0.658825 |
import time
import pytest
from tools import utils, constants
# Extra CLI options passed to every sandbox node: raise the connection cap.
PARAMS = ['--connections', '500']
# TODO parameterize test
@pytest.mark.baker
@pytest.mark.multinode
@pytest.mark.slow
@pytest.mark.incremental
class TestManyBakers:
    """Launch ten nodes with five bakers, let them run and inspect the logs."""

    def test_init(self, sandbox):
        """Spin up the nodes, activate the protocol and attach the bakers."""
        for node_id in range(10):
            sandbox.add_node(node_id, params=PARAMS)
        utils.activate_alpha(sandbox.client(0))
        for i in range(5):
            sandbox.add_baker(i, f'bootstrap{i + 1}',
                              proto=constants.ALPHA_DEAMON)

    def test_wait(self):
        """Give the network a moment to bake and propagate blocks."""
        time.sleep(5)

    def test_check_logs(self, sandbox):
        """Scan the node logs for cancellations or crashes."""
        if not sandbox.log_dir:
            pytest.skip()
        assert sandbox.logs
        # The run is healthy only if no log line matches this pattern.
        assert utils.check_logs(sandbox.logs, r"canceled|crashed")
| tests_python/tests/test_many_bakers.py | 869 | Run 5 bakers and num nodes, wait and check logs
TODO parameterize test | 72 | en | 0.514597 |
"""
Telnet server.
Example usage::
class MyTelnetApplication(TelnetApplication):
def client_connected(self, telnet_connection):
# Set CLI with simple prompt.
telnet_connection.set_application(
telnet_connection.create_prompt_application(...))
def handle_command(self, telnet_connection, document):
# When the client enters a command, just reply.
telnet_connection.send('You said: %r\n\n' % document.text)
...
a = MyTelnetApplication()
TelnetServer(application=a, host='127.0.0.1', port=23).run()
"""
from __future__ import unicode_literals
import socket
import select
import threading
import os
import fcntl
from six import int2byte, text_type, binary_type
from codecs import getincrementaldecoder
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.eventloop.base import EventLoop
from prompt_toolkit.interface import CommandLineInterface, Application
from prompt_toolkit.layout.screen import Size
from prompt_toolkit.shortcuts import create_prompt_application
from prompt_toolkit.terminal.vt100_input import InputStream
from prompt_toolkit.terminal.vt100_output import Vt100_Output
from .log import logger
from .protocol import IAC, DO, LINEMODE, SB, MODE, SE, WILL, ECHO, NAWS, SUPPRESS_GO_AHEAD
from .protocol import TelnetProtocolParser
from .application import TelnetApplication
__all__ = (
'TelnetServer',
)
def _initialize_telnet(connection):
    """Send the initial telnet option negotiation over ``connection``.

    The sequence puts the client in character mode, enables server-side echo
    and asks the client to report its window size.
    """
    logger.info('Initializing telnet connection')

    negotiation = (
        # Iac Do Linemode
        IAC + DO + LINEMODE,
        # Suppress Go Ahead. (This seems important for Putty to do correct echoing.)
        # This will allow bi-directional operation.
        IAC + WILL + SUPPRESS_GO_AHEAD,
        # Iac sb
        IAC + SB + LINEMODE + MODE + int2byte(0) + IAC + SE,
        # IAC Will Echo
        IAC + WILL + ECHO,
        # Negotiate window size
        IAC + DO + NAWS,
    )
    for command in negotiation:
        connection.send(command)
class _ConnectionStdout(object):
    """
    Wrapper around socket which provides `write` and `flush` methods for the
    Vt100_Output output.
    """
    def __init__(self, connection, encoding):
        self._encoding = encoding
        self._connection = connection
        self._buffer = []

    def write(self, data):
        """Buffer a unicode string and immediately push it to the socket."""
        assert isinstance(data, text_type)
        self._buffer.append(data.encode(self._encoding))
        self.flush()

    def flush(self):
        """Send all buffered bytes; the buffer is cleared even on failure."""
        pending = b''.join(self._buffer)
        try:
            self._connection.send(pending)
        except socket.error as e:
            # Best-effort output: a dead client must not crash the server.
            logger.error("Couldn't send data over socket: %s" % e)

        self._buffer = []
class TelnetConnection(object):
"""
Class that represents one Telnet connection.
"""
def __init__(self, conn, addr, application, server, encoding):
    """Set up the server side of one telnet client connection.

    :param conn: The accepted socket object for this client.
    :param addr: (address, port) tuple of the remote peer.
    :param application: :py:class:`TelnetApplication` receiving the callbacks.
    :param server: The owning :py:class:`TelnetServer`.
    :param encoding: Name of the text encoding used on the wire, e.g. 'utf-8'.
    """
    assert isinstance(addr, tuple)  # (addr, port) tuple
    assert isinstance(application, TelnetApplication)
    assert isinstance(server, TelnetServer)
    assert isinstance(encoding, text_type)  # e.g. 'utf-8'

    self.conn = conn
    self.addr = addr
    self.application = application
    self.closed = False
    self.handling_command = True
    self.server = server
    self.encoding = encoding
    self.callback = None  # Function that handles the CLI result.

    # Create "Output" object. Initial size is a guess; it is replaced once
    # the client answers the NAWS (window size) negotiation.
    self.size = Size(rows=40, columns=79)

    # Initialize: send the telnet option negotiation sequence.
    _initialize_telnet(conn)

    # Create output.
    def get_size():
        # Closure so Vt100_Output always sees the latest reported size.
        return self.size
    self.stdout = _ConnectionStdout(conn, encoding=encoding)
    self.vt100_output = Vt100_Output(self.stdout, get_size, write_binary=False)

    # Create an eventloop (adaptor) for the CommandLineInterface.
    self.eventloop = _TelnetEventLoopInterface(server)

    # Set default CommandLineInterface (a plain prompt) until the
    # application installs its own via set_application().
    self.set_application(create_prompt_application())

    # Call client_connected
    application.client_connected(self)

    # Draw for the first time.
    self.handling_command = False
    self.cli._redraw()
def set_application(self, app, callback=None):
"""
Set ``CommandLineInterface`` instance for this connection.
(This can be replaced any time.)
:param cli: CommandLineInterface instance.
:param callback: Callable that takes the result of the CLI.
"""
assert isinstance(app, Application)
assert callback is None or callable(callback)
self.cli = CommandLineInterface(
application=app,
eventloop=self.eventloop,
output=self.vt100_output)
self.callback = callback
# Create a parser, and parser callbacks.
cb = self.cli.create_eventloop_callbacks()
inputstream = InputStream(cb.feed_key)
# Input decoder for stdin. (Required when working with multibyte
# characters, like chinese input.)
stdin_decoder_cls = getincrementaldecoder(self.encoding)
stdin_decoder = [stdin_decoder_cls()] # nonlocal
# Tell the CLI that it's running. We don't start it through the run()
# call, but will still want _redraw() to work.
self.cli._is_running = True
def data_received(data):
""" TelnetProtocolParser 'data_received' callback """
assert isinstance(data, binary_type)
try:
result = stdin_decoder[0].decode(data)
inputstream.feed(result)
except UnicodeDecodeError:
stdin_decoder[0] = stdin_decoder_cls()
return ''
def size_received(rows, columns):
""" TelnetProtocolParser 'size_received' callback """
self.size = Size(rows=rows, columns=columns)
cb.terminal_size_changed()
self.parser = TelnetProtocolParser(data_received, size_received)
def feed(self, data):
"""
Handler for incoming data. (Called by TelnetServer.)
"""
assert isinstance(data, binary_type)
self.parser.feed(data)
# Render again.
self.cli._redraw()
# When a return value has been set (enter was pressed), handle command.
if self.cli.is_returning:
try:
return_value = self.cli.return_value()
except (EOFError, KeyboardInterrupt) as e:
# Control-D or Control-C was pressed.
logger.info('%s, closing connection.', type(e).__name__)
self.close()
return
# Handle CLI command
self._handle_command(return_value)
def _handle_command(self, command):
"""
Handle command. This will run in a separate thread, in order not
to block the event loop.
"""
logger.info('Handle command %r', command)
def in_executor():
self.handling_command = True
try:
if self.callback is not None:
self.callback(self, command)
finally:
self.server.call_from_executor(done)
def done():
self.handling_command = False
# Reset state and draw again. (If the connection is still open --
# the application could have called TelnetConnection.close()
if not self.closed:
self.cli.reset()
self.cli.buffers[DEFAULT_BUFFER].reset()
self.cli.renderer.request_absolute_cursor_position()
self.vt100_output.flush()
self.cli._redraw()
self.server.run_in_executor(in_executor)
def erase_screen(self):
"""
Erase output screen.
"""
self.vt100_output.erase_screen()
self.vt100_output.cursor_goto(0, 0)
self.vt100_output.flush()
def send(self, data):
"""
Send text to the client.
"""
assert isinstance(data, text_type)
# When data is send back to the client, we should replace the line
# endings. (We didn't allocate a real pseudo terminal, and the telnet
# connection is raw, so we are responsible for inserting \r.)
self.stdout.write(data.replace('\n', '\r\n'))
self.stdout.flush()
def close(self):
"""
Close the connection.
"""
self.application.client_leaving(self)
self.conn.close()
self.closed = True
class _TelnetEventLoopInterface(EventLoop):
    """
    Eventloop object to be assigned to `CommandLineInterface`.

    Thin adaptor: executor-related calls are delegated to the owning
    ``TelnetServer`` (which runs one shared select() loop for all
    connections); lifecycle calls are no-ops and fd registration is
    unsupported.
    """
    def __init__(self, server):
        self._server = server
    def close(self):
        " Ignore. "
    def stop(self):
        " Ignore. "
    def run_in_executor(self, callback):
        # Delegate to the server's thread-spawning executor.
        self._server.run_in_executor(callback)
    def call_from_executor(self, callback, _max_postpone_until=None):
        # _max_postpone_until is accepted for interface compatibility
        # but ignored here.
        self._server.call_from_executor(callback)
    def add_reader(self, fd, callback):
        raise NotImplementedError
    def remove_reader(self, fd):
        raise NotImplementedError
class TelnetServer(object):
    """
    Telnet server implementation.

    Accepts TCP connections on ``host:port`` and multiplexes all client
    sockets plus an internal wake-up pipe in a single select() loop
    (see :meth:`run`).
    """
    def __init__(self, host='127.0.0.1', port=23, application=None, encoding='utf-8'):
        assert isinstance(host, text_type)
        assert isinstance(port, int)
        assert isinstance(application, TelnetApplication)
        assert isinstance(encoding, text_type)
        self.host = host
        self.port = port
        self.application = application
        self.encoding = encoding
        self.connections = set()
        # Callables queued by worker threads; drained by _process_callbacks.
        self._calls_from_executor = []
        # Create a pipe for inter thread communication.  Writing a byte to
        # it wakes up the select() loop so queued callbacks get processed.
        self._schedule_pipe = os.pipe()
        fcntl.fcntl(self._schedule_pipe[0], fcntl.F_SETFL, os.O_NONBLOCK)
    @classmethod
    def create_socket(cls, host, port):
        """ Create, bind and start listening on the server socket. """
        # Create and bind socket
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((host, port))
        s.listen(4)  # backlog of 4 pending connections
        return s
    def run_in_executor(self, callback):
        """ Run ``callback`` on a freshly spawned background thread. """
        threading.Thread(target=callback).start()
    def call_from_executor(self, callback):
        """
        Schedule ``callback`` to run on the event loop thread.
        Safe to call from worker threads.
        """
        self._calls_from_executor.append(callback)
        if self._schedule_pipe:
            # Wake up the select() loop in run().
            os.write(self._schedule_pipe[1], b'x')
    def _process_callbacks(self):
        """
        Process callbacks from `call_from_executor` in eventloop.
        """
        # Flush all the pipe content.  (Reads up to 1024 wake-up bytes;
        # the pipe's read end is non-blocking.)
        os.read(self._schedule_pipe[0], 1024)
        # Process calls from executor.  Swap the list first so callbacks
        # scheduled while we run are kept for the next round.
        calls_from_executor, self._calls_from_executor = self._calls_from_executor, []
        for c in calls_from_executor:
            c()
    def run(self):
        """
        Run the eventloop for the telnet server.
        """
        listen_socket = self.create_socket(self.host, self.port)
        logger.info('Listening for telnet connections on %s port %r', self.host, self.port)
        try:
            while True:
                # Removed closed connections.
                self.connections = set([c for c in self.connections if not c.closed])
                # Ignore connections handling commands.
                connections = set([c for c in self.connections if not c.handling_command])
                # Wait for next event.
                read_list = (
                    [listen_socket, self._schedule_pipe[0]] +
                    [c.conn for c in connections])
                read, _, _ = select.select(read_list, [], [])
                for s in read:
                    # When the socket itself is ready, accept a new connection.
                    if s == listen_socket:
                        self._accept(listen_socket)
                    # If we receive something on our "call_from_executor" pipe, process
                    # these callbacks in a thread safe way.
                    elif s == self._schedule_pipe[0]:
                        self._process_callbacks()
                    # Handle incoming data on socket.
                    else:
                        self._handle_incoming_data(s)
        finally:
            listen_socket.close()
    def _accept(self, listen_socket):
        """
        Accept new incoming connection.
        """
        conn, addr = listen_socket.accept()
        connection = TelnetConnection(conn, addr, self.application, self, encoding=self.encoding)
        self.connections.add(connection)
        logger.info('New connection %r %r', *addr)
    def _handle_incoming_data(self, conn):
        """
        Handle incoming data on socket.
        """
        connection = [c for c in self.connections if c.conn == conn][0]
        data = conn.recv(1024)
        if data:
            connection.feed(data)
        else:
            # recv() returning b'' means the peer closed the socket.
            self.connections.remove(connection)
| oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/server.py | 13,300 | Class that represents one Telnet connection.
Telnet server implementation.
Wrapper around socket which provides `write` and `flush` methods for the
Vt100_Output output.
Eventloop object to be assigned to `CommandLineInterface`.
Accept new incoming connection.
Handle command. This will run in a separate thread, in order not
to block the event loop.
Handle incoming data on socket.
Process callbacks from `call_from_executor` in eventloop.
Close the connection.
Ignore.
TelnetProtocolParser 'data_received' callback
Erase output screen.
Handler for incoming data. (Called by TelnetServer.)
Run the eventloop for the telnet server.
Send text to the client.
Set ``CommandLineInterface`` instance for this connection.
(This can be replaced any time.)
:param cli: CommandLineInterface instance.
:param callback: Callable that takes the result of the CLI.
TelnetProtocolParser 'size_received' callback
Ignore.
Telnet server.
Example usage::
class MyTelnetApplication(TelnetApplication):
def client_connected(self, telnet_connection):
# Set CLI with simple prompt.
telnet_connection.set_application(
telnet_connection.create_prompt_application(...))
def handle_command(self, telnet_connection, document):
# When the client enters a command, just reply.
telnet_connection.send('You said: %r
' % document.text)
...
a = MyTelnetApplication()
TelnetServer(application=a, host='127.0.0.1', port=23).run()
Iac Do Linemode Suppress Go Ahead. (This seems important for Putty to do correct echoing.) This will allow bi-directional operation. Iac sb IAC Will Echo Negotiate window size (addr, port) tuple e.g. 'utf-8' Function that handles the CLI result. Create "Output" object. Initialize. Create output. Create an eventloop (adaptor) for the CommandLineInterface. Set default CommandLineInterface. Call client_connected Draw for the first time. Create a parser, and parser callbacks. Input decoder for stdin. (Required when working with multibyte characters, like chinese input.) nonlocal Tell the CLI that it's running. We don't start it through the run() call, but will still want _redraw() to work. Render again. When a return value has been set (enter was pressed), handle command. Control-D or Control-C was pressed. Handle CLI command Reset state and draw again. (If the connection is still open -- the application could have called TelnetConnection.close() When data is send back to the client, we should replace the line endings. (We didn't allocate a real pseudo terminal, and the telnet connection is raw, so we are responsible for inserting \r.) Create a pipe for inter thread communication. Create and bind socket Flush all the pipe content. Process calls from executor. Removed closed connections. Ignore connections handling commands. Wait for next event. When the socket itself is ready, accept a new connection. If we receive something on our "call_from_executor" pipe, process these callbacks in a thread safe way. Handle incoming data on socket. | 3,063 | en | 0.812384 |
# model settings
model = dict(
    type='CenterNet',
    pretrained='modelzoo://resnet18',  # ImageNet-pretrained backbone weights
    backbone=dict(
        type='ResNet',
        depth=18,  # ResNet-18
        num_stages=4,
        out_indices=(0, 1, 2, 3),  # export the feature map of every stage
        frozen_stages=1,  # freeze stem + stage 1 during training
        norm_eval=False,
        add_summay_every_n_step=200,
        style='pytorch'),
    neck=dict(type='None'),  # no neck: the head consumes backbone features directly
    bbox_head=dict(
        type='CXTHead',
        inplanes=(64, 128, 256, 512),  # channel widths of the four backbone stages
        head_conv=128,
        wh_conv=64,
        use_deconv=False,
        norm_after_upsample=False,
        hm_head_conv_num=2,  # number of convs in the heatmap branch
        wh_head_conv_num=1,  # ... in the width/height branch
        ct_head_conv_num=1,  # ... in the centerness branch
        fovea_hm=False,
        num_classes=81,  # presumably 80 COCO classes + background -- TODO confirm
        use_exp_wh=False,
        wh_offset_base=16,
        shortcut_cfg=(1, 2, 3),
        shortcut_attention=(False, False, False),
        norm_cfg=dict(type='BN'),
        norm_wh=False,
        avg_wh_weightv3=True,
        hm_init_value=None,
        # Relative weights balancing the individual loss terms.
        giou_weight=5.,
        merge_weight=1.,
        hm_weight=1.,
        ct_weight=1.))
# Input size is fixed (512x512), so cuDNN autotuning is beneficial.
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
    vis_every_n_iters=100,
    debug=False)
test_cfg = dict(
    score_thr=0.05,   # minimum detection score to keep
    max_per_img=100)  # cap on detections per image
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Per-channel normalization (ImageNet RGB mean/std).
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True),
    # Color jitter augmentation.
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    # Paste the image onto a larger mean-filled canvas (zoom-out effect).
    dict(
        type='Expand',
        mean=img_norm_cfg['mean'],
        to_rgb=img_norm_cfg['to_rgb'],
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    # keep_ratio=False: stretch to exactly 512x512.
    dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        flip=False,  # single scale, no flip at test time
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# Dataloaders: val and test both use COCO val2017 with the test pipeline.
data = dict(
    imgs_per_gpu=16,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0004,
                 paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 5,
    step=[18, 22])  # LR decay milestones (epochs)
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=18)
bbox_head_hist_config = dict(
    model_type=['ConvModule', 'DeformConvPack'],
    sub_modules=['bbox_head'],
    save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 24  # the '2x' schedule referenced by the work_dir name
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'paper_cxt18_Ro16_3lr_wd4e4_hm2wh1_s123_nos_2x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| configs/centernext/paper_cxt18_Ro16_3lr_wd4e4_hm2wh1_s123_nos_2x.py | 4,037 | model settings training and testing settings dataset settings optimizer learning policy yapf:disable yapf:enable runtime settings | 129 | en | 0.778863 |
#!/usr/bin/env python
"""
This now uses the imshow command instead of pcolor which *is much
faster*
"""
from __future__ import division, print_function
import numpy as np
from matplotlib.pyplot import *
from matplotlib.collections import LineCollection
import matplotlib.cbook as cbook
# I use if 1 to break up the different regions of code visually
if 1:  # load the data
    # data are 256x256 16 bit integers
    dfile = cbook.get_sample_data('s1045.ima.gz')
    # np.frombuffer replaces np.fromstring, which is deprecated and removed
    # from modern NumPy.  astype(float) copies, so the result is writable.
    im = np.frombuffer(dfile.read(), np.uint16).astype(float)
    dfile.close()  # don't leak the sample-data file handle
    im.shape = 256, 256
if 1:  # plot the MRI in pcolor
    subplot(221)
    imshow(im, cmap=cm.gray)
    axis('off')
if 1:  # plot the histogram of MRI intensity
    subplot(222)
    im = np.ravel(im)
    im = im[np.nonzero(im)]  # ignore the background
    im = im/(2.0**15)  # normalize
    hist(im, 100)
    xticks([-1, -.5, 0, .5, 1])
    yticks([])
    xlabel('intensity')
    ylabel('MRI density')
if 1:  # plot the EEG
    # load the data
    numSamples, numRows = 800, 4
    eegfile = cbook.get_sample_data('eeg.dat', asfileobj=False)
    print('loading eeg %s' % eegfile)
    # Context manager closes the file deterministically (the original leaked
    # the handle); copy() because frombuffer returns a read-only view.
    with open(eegfile, 'rb') as fid:
        data = np.frombuffer(fid.read(), float).copy()
    data.shape = numSamples, numRows
    t = 10.0 * np.arange(numSamples, dtype=float)/numSamples
    ticklocs = []
    ax = subplot(212)
    xlim(0, 10)
    xticks(np.arange(10))
    dmin = data.min()
    dmax = data.max()
    dr = (dmax - dmin)*0.7  # Crowd them a bit.
    y0 = dmin
    y1 = (numRows - 1) * dr + dmax
    ylim(y0, y1)
    segs = []
    for i in range(numRows):
        segs.append(np.hstack((t[:, np.newaxis], data[:, i, np.newaxis])))
        ticklocs.append(i*dr)
    offsets = np.zeros((numRows, 2), dtype=float)
    offsets[:, 1] = ticklocs
    # One polyline per EEG channel, vertically offset so they don't overlap.
    lines = LineCollection(segs, offsets=offsets,
                           transOffset=None,
                           )
    ax.add_collection(lines)
    # set the yticks to use axes coords on the y axis
    ax.set_yticks(ticklocs)
    ax.set_yticklabels(['PG3', 'PG5', 'PG7', 'PG9'])
    xlabel('time (s)')
show()
| examples/pylab_examples/mri_with_eeg.py | 2,057 | This now uses the imshow command instead of pcolor which *is much
faster*
!/usr/bin/env python I use if 1 to break up the different regions of code visually load the data data are 256x256 16 bit integers plot the MRI in pcolor plot the histogram of MRI intensity ignore the background normalize plot the EEG load the data Crowd them a bit. set the yticks to use axes coords on the y axis | 388 | en | 0.69962 |
import pandas as pd
# Map from the first letter of a kraken2 rank code to the full rank name.
kraken_rank_dictionary = {
    'P': 'phylum',
    'C': 'class',
    'O': 'order',
    'F': 'family',
    'G': 'genus',
    'S': 'species'
}
# Map from greengenes-style clade prefixes (e.g. 'g__') to rank names.
greengenes_rank_dict = {
    'k__': 'kingdom',
    'p__': 'phylum',
    'c__': 'class',
    'o__': 'order',
    'f__': 'family',
    'g__': 'genus',
    's__': 'species'
}
# Column names assigned to a raw kraken2 report.
kraken_columns = ['PERCENTAGE', 'lca_read_count', 'read_count', 'rank',
                  '@@TAXID', 'TAXNAME']
def kraken2_transformer(all_rank_summary, output_rank_summaries, ranks):
    """Convert a kraken2 all-ranks report into per-rank profiles.

    Splits the single kraken2 summary into one CAMI-SIM style table per
    requested rank, keeping taxid, rank, taxon name and relative abundance.

    Parameters
    ----------
    all_rank_summary : str
        Path to the kraken2 report (tab-separated; columns interpreted as
        ``kraken_columns``).
    output_rank_summaries : iterable of str
        Output file paths, parallel to ``ranks``.
    ranks : iterable of str
        Rank names to extract, e.g. ``['phylum', 'species']``.

    Raises
    ------
    ValueError
        If a requested rank is not one of the recognized rank names.
    """
    # Fail fast on typos instead of silently writing empty tables.
    valid_ranks = set(kraken_rank_dictionary.values())
    for rank in ranks:
        if rank not in valid_ranks:
            raise ValueError('Unrecognized rank: %r' % (rank,))
    all_ranks = pd.read_csv(all_rank_summary, sep='\t')
    all_ranks.columns = kraken_columns
    # kraken sub-ranks look like 'S1', 'G2', ...; the leading letter is the
    # major rank.
    all_ranks['rank'] = all_ranks['rank'].str[0]
    # Drop rows whose rank code is not mapped (e.g. unclassified/root rows).
    all_ranks = all_ranks.loc[all_ranks['rank'].isin(kraken_rank_dictionary)]
    all_ranks['RANK'] = all_ranks['rank'].map(kraken_rank_dictionary)
    keep_cols = ['@@TAXID', 'RANK', 'TAXNAME', 'PERCENTAGE']
    for output_, rank in zip(output_rank_summaries, ranks):
        sub_df = all_ranks.loc[all_ranks['RANK'] == rank]
        sub_df[keep_cols].to_csv(output_, sep='\t', index=False)
def metaphlan2_transformer(all_rank_summary, output_rank_summaries, ranks):
    """Split a MetaPhlAn2 merged profile into per-rank CAMI-style tables."""
    profile = pd.read_csv(all_rank_summary, sep='\t', skiprows=3)
    def tail(lineage):
        # Last '|'-separated component of a clade/taxid lineage string.
        return lineage.split('|')[-1]
    profile['last_clade'] = profile['#clade_name'].map(tail)
    profile['@@TAXID'] = profile['NCBI_tax_id'].map(tail)
    # The 3-char prefix ('g__', 's__', ...) encodes the rank; the remainder
    # is the taxon name.
    profile['RANK'] = profile['last_clade'].map(
        lambda clade: greengenes_rank_dict[clade[:3]])
    profile['TAXNAME'] = profile['last_clade'].map(lambda clade: clade[3:])
    profile['PERCENTAGE'] = profile['relative_abundance']
    wanted = ['@@TAXID', 'RANK', 'TAXNAME', 'PERCENTAGE']
    for out_path, rank in zip(output_rank_summaries, ranks):
        profile.loc[profile['RANK'] == rank, wanted].to_csv(
            out_path, sep='\t', index=False)
| benchutils/transformers.py | 2,472 | Converts a summary of all ranks from kraken into rank-wise profiles
similar to the CAMI-SIM output
Parameters
----------
all_rank_summary
output_rank_summaries
ranks
Returns
-------
TODO finsih docs TODO COULD be split into two format functions: one to reformat, and one to split on rank TODO give error for invalid rank value TODO for kraken is it okay to just take the first part (drop the number) | 404 | en | 0.669179 |
"""empty message
Revision ID: f6d196dc5629
Revises: fd5076041bff
Create Date: 2019-04-06 22:25:32.133764
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f6d196dc5629'
down_revision = 'fd5076041bff'  # this migration applies on top of fd5076041bff
branch_labels = None
depends_on = None
def upgrade():
    """Add a NOT NULL ``admin`` flag to ``users``; existing rows get False."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Add as nullable first so the column can be created on a populated
    # table, backfill, then tighten the constraint to NOT NULL.
    op.add_column('users', sa.Column('admin', sa.Boolean(), nullable=True))
    op.execute('UPDATE users SET admin=False')
    op.alter_column('users', 'admin', nullable=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``admin`` column added by :func:`upgrade`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('users', 'admin')
    # ### end Alembic commands ###
| services/backend/migrations/versions/f6d196dc5629_.py | 749 | empty message
Revision ID: f6d196dc5629
Revises: fd5076041bff
Create Date: 2019-04-06 22:25:32.133764
revision identifiers, used by Alembic. commands auto generated by Alembic - please adjust! end Alembic commands commands auto generated by Alembic - please adjust! end Alembic commands | 296 | en | 0.579339 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
TODO:
* needs to check if required modules are installed (or preferably developed)
* needs to be able to ignore plugins that the user doesn't care about
Super Setup
PREREQ:
git config --global push.default current
export CODE_DIR=~/code
mkdir $CODE_DIR
cd $CODE_DIR
git clone https://github.com/WildbookOrg/ibeis.git
cd ibeis
python super_setup.py --bootstrap
OR (if in virtual environment)
python super_setup.py --bootstrap --nosudo
OR
./_scripts/bootstrap.py
THEN
./_scripts/__install_prereqs__.sh
THEN
./super_setup.py --build --develop
./super_setup.py --build --develop
./super_setup.py --status
# If on current branch copy so super setup isn't overwritten as we go
python -c "import utool as ut; ut.copy('super_setup.py', '_ibeis_setup.py')"
# Status
python _ibeis_setup.py -y --gg "git status"
python _ibeis_setup.py -y --gg "git branch"
# Setup Next
#python _ibeis_setup.py -y --gg "git pull"
#python _ibeis_setup.py -y --gg "git checkout master"
#python _ibeis_setup.py -y --gg "git pull"
#python _ibeis_setup.py -y --gg "git checkout -b next"
#python _ibeis_setup.py -y --gg "git checkout next"
#python _ibeis_setup.py -y --gg "git push -u origin next"
#python _ibeis_setup.py -y --gg "git push remote origin/next"
####python _ibeis_setup.py -y --gg "git merge master"
#python _ibeis_setup.py -y --gg "git checkout ^HEAD"
#python _ibeis_setup.py -y --gg "git checkout master"
#python _ibeis_setup.py -y --gg "git checkout next"
# -- MERGE topic -> next
##python _ibeis_setup.py -y --gg "git checkout topic"
##python _ibeis_setup.py -y --gg "git checkout next"
##python _ibeis_setup.py -y --gg "git merge topic"
# -- MERGE next -> master
python _ibeis_setup.py -y --gg "git checkout master"
python _ibeis_setup.py -y --gg "git merge next"
# -- SAFER MERGE topic -> next
python super_setup.py --checkout next
python super_setup.py --newlocalbranch merge_next_joncrall_dev_branch
python super_setup.py --merge joncrall_dev_branch
./run_tests.py
python super_setup.py --checkout next
python super_setup.py --merge merge_next_joncrall_dev_branch
# Push
python _ibeis_setup.py -y --gg "git push"
#python _ibeis_setup.py -y --gg "git checkout master"
#python _ibeis_setup.py -y --gg "git checkout next"
# MAKE A NEW BRANCH
python super_setup.py --newbranch joncrall_dev_branch
python super_setup.py --checkout joncrall_dev_branch
python super_setup.py --checkout next
python super_setup.py --newbranch jdb
python super_setup.py --checkout jdb
GitReferences:
http://git-scm.com/book/en/v2/Git-Branching-Basic-Branching-and-Merging
FIXME:
graph-viz
pydot
ibeis_cnn
Theano
Lasange
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from os.path import dirname, realpath
import platform
import sys
import os
#-----------------
# SYSTEM ENTRY POINT, NO UTOOL, BARE PYTHON
#-----------------
# Developer-facing usage text for this script.  NOTE(review): where this is
# printed is not visible in this chunk -- presumably on --help / bare runs.
USAGE = ('''
--- USAGE ---
IBEIS (IMAGE ANALYSIS) SUPER SETUP
This script is meant to help setup, install, and update the developer
enviroment for IBEIS Image Analysis.
****
# Step 1 Initial Development Prereqs:
The first thing is to ensure you have a baseline development enviroment (gcc,
fortran, cmake, blas, git, pip, etc...). This should work well for apt-get,
yum, and macports package managers. It is possible to use Windows MinGW, but
it is not well supported.
The following command outputs the commands to install these prereq packages.
python super_setup.py --bootstrap
****
# Step 2 - utool
Just running the script will download and install utool --- a utility library
used in all aspects of the system.
python super_setup.py
****
# Step 3 - Download / Update Image Analysis Packages
Running the script again once utool is installed will ensure the rest of the
repositories are cloned and on your machine in the directory above this one, or
in a custom location set by your $CODE_DIR environment variable. Running with
the pull command will update the packages as well.
python super_setup.py pull
Note: if you have wildme credientials you can run this to setup git
python super_setup.py pull --move-wildme-ssh
****
# Step 3.5 - Grab and Build Extern libraries with scripts
python super_setup.py --opencv
python super_setup.py --hesaff
python super_setup.py --flann
python super_setup.py --dcnn
python super_setup.py --pydarknet
python super_setup.py --pyqt
python super_setup.py --pyrf
****
# Step 4 - Build C++ components.
Some submodles require C++ libraries. Build them using the following Command.
python super_setup.py build
****
# Step 5 - Install the system.
Register these packages with the python enviroment.
# Install external modules
python super_setup.py --develop
# Install the ibeis module
pip install -e .
--- /USAGE ---
''')
def define_argparse():
    """Build and parse the super-setup command line.

    Returns an ``argparse.Namespace`` with one boolean attribute per flag
    (``bootstrap``, ``ensure``, ``build``, ``develop``, ``dcnn``, ``pull``,
    ``no_qt``, ``no_gui``, ``ignore_opencv``, ``move_wildme``), all
    defaulting to False.

    NOTE: currently not wired up (the call near the module entry point is
    commented out); the rest of the module still parses sys.argv by hand.
    """
    import argparse
    parser = argparse.ArgumentParser(description='IBEIS super setup')
    def add_flag(group, name, help=None):
        # BUG FIX: the original stripped the dashes and registered a
        # *positional* argument with action='store_true', which argparse
        # cannot parse as a flag.  Normalize to an optional '--name' flag.
        group.add_argument('--' + name.replace('--', ''), action='store_true',
                           default=False, help=help)
    g1 = parser.add_argument_group('setup')
    add_flag(g1, 'bootstrap', help='outputs commands to install prereqs')
    add_flag(g1, 'ensure', help='ensures that all repos are checked out')
    add_flag(g1, 'build', help='builds python packages')
    add_flag(g1, 'develop', help='installs packages in developer mode')
    add_flag(g1, 'dcnn', help='setup dcnn packages')
    g4 = parser.add_argument_group('maintenance')
    add_flag(g4, 'pull', help='pulls all IBEIS repos')
    g3 = parser.add_argument_group('extern')
    add_flag(g3, 'no_qt')
    add_flag(g3, 'no_gui')
    add_flag(g3, 'ignore_opencv')
    g2 = parser.add_argument_group('utils')
    add_flag(g2, 'move_wildme',
             help='changes to the wildme repos')
    args = parser.parse_args()
    return args
# args = define_argparse()
# print('args = %r' % (args,))
# sys.exit(1)
def get_plat_specifier():
    """
    Standard platform specifier used by distutils,
    e.g. ``'.linux-x86_64-3.10'`` (with ``-pydebug`` on debug builds).

    Returns
    -------
    str
    """
    import setuptools  # NOQA  (ensures distutils is importable/patched)
    import distutils.util  # explicit submodule import
    plat_name = distutils.util.get_platform()
    # BUG FIX: sys.version[0:3] yields '3.1' on Python >= 3.10; derive the
    # 'major.minor' string from sys.version_info instead.
    py_version = '%d.%d' % sys.version_info[:2]
    plat_specifier = ".%s-%s" % (plat_name, py_version)
    if hasattr(sys, 'gettotalrefcount'):
        plat_specifier += '-pydebug'
    return plat_specifier
def import_module_from_fpath(module_fpath):
    """ Import and return the module stored at ``module_fpath``.

    Works on Python 2.7 (via ``imp``) and Python 3 (via importlib).

    Parameters
    ----------
    module_fpath : str
        Path to a ``.py`` file.

    Returns
    -------
    module
        The executed module object (also registered in ``sys.modules``).
    """
    import platform
    from os.path import basename, splitext
    python_version = platform.python_version()
    modname = splitext(basename(module_fpath))[0]
    if python_version.startswith('2.7'):
        import imp
        module = imp.load_source(modname, module_fpath)
    elif python_version.startswith('3'):
        # BUG FIX: loader.load_module() was deprecated in 3.4 and removed in
        # 3.12; use the spec / exec_module API instead.
        import sys
        import importlib.util
        spec = importlib.util.spec_from_file_location(modname, module_fpath)
        module = importlib.util.module_from_spec(spec)
        # Register before exec so self-referencing imports work, matching
        # the old load_module()/load_source() behavior.
        sys.modules[modname] = module
        spec.loader.exec_module(module)
    else:
        raise AssertionError('invalid python version')
    return module
def bootstrap(WIN32):
    """ Run the platform-appropriate bootstrap script, then exit. """
    if WIN32:
        # Windows uses the win32-specific bootstrapper (preinstalls parse).
        script_fpath = os.path.abspath('_scripts/win32bootstrap.py')
    else:
        script_fpath = os.path.abspath('_scripts/bootstrap.py')
    script_module = import_module_from_fpath(script_fpath)
    script_module.bootstrap_sysreq()
    sys.exit(0)
#################
# ENSURING UTOOL
#################
def syscmd(cmdstr):
    """ Echo ``cmdstr``, then execute it through the system shell. """
    print('RUN> %s' % cmdstr)
    os.system(cmdstr)
def in_virtual_env():
    """ Return True when running inside a virtualenv/venv interpreter. """
    for attr in ('real_prefix', 'base_prefix', 'prefix'):
        print('sys.%s=%r' % (attr, getattr(sys, attr, None)))
    if hasattr(sys, 'real_prefix'):
        # Legacy virtualenv exposes real_prefix.
        return True
    if hasattr(sys, 'base_prefix'):
        # Stdlib venv: the prefixes differ inside an environment.
        return sys.base_prefix != sys.prefix
    return False
def ensure_utool(CODE_DIR, pythoncmd):
    """
    Interactively clone/update the utool repository under ``CODE_DIR`` and
    pip-install it in development mode, then exit asking the user to rerun
    this script (so the freshly installed utool can be imported).
    """
    WIN32 = sys.platform.startswith('win32')
    #UTOOL_BRANCH = ' -b <branch> <remote_repo>'
    UTOOL_BRANCH = 'next'
    UTOOL_REPO = 'https://github.com/WildbookOrg/utool.git'
    print('WARNING: utool is not found')
    print('Attempting to get utool. Enter (y) to continue')
    if '-y' in sys.argv:
        # Non-interactive mode: '-y' on the command line skips the prompt.
        ans = 'y'
    else:
        try:
            ans = input('Enter y to continue. Anything else to exit...\n')
        except:
            # Python 2 fallback (input() would eval its result there).
            ans = raw_input('Enter y to continue. Anything else to exit...\n') # NOQA
    if ans != 'y':
        print('Please install utool to continue')
        sys.exit(0)
    cwdpath = os.path.realpath(os.getcwd())
    usr_code_dir = os.path.expanduser(CODE_DIR)
    os.chdir(usr_code_dir)
    print("user code dir = %r" % usr_code_dir)
    print('cloning utool')
    if not os.path.exists('utool'):
        syscmd('git clone ' + UTOOL_REPO + ' -b ' + UTOOL_BRANCH)
    os.chdir('utool')
    print('pulling utool')
    syscmd('git pull')
    print('installing utool for development')
    cmdstr = '{pythoncmd} -m pip install -e .'.format(pythoncmd=pythoncmd)
    # TODO: use pip instead
    # cmdstr = '{pythoncmd} -m pip install .'.format(pythoncmd=pythoncmd)
    if not WIN32 and not in_virtual_env():
        # System-wide install outside a virtualenv needs root.
        cmdstr = 'sudo ' + cmdstr
    syscmd(cmdstr)
    os.chdir(cwdpath)
    # sys.path.append(usr_code_dir)
    print('Please rerun super_setup.py')
    print(' '.join(sys.argv))
    sys.exit(1)
#-----------------
# UTOOL PYTHON
#-----------------
def initialize_repo_managers(CODE_DIR, pythoncmd, PY2, PY3):
    """
    Build the utool ``RepoManager`` objects describing every repository
    super setup manages.

    Parameters
    ----------
    CODE_DIR : str
        Directory where repositories are cloned.
    pythoncmd : str
        Python executable used for install/build commands.
    PY2, PY3 : bool
        Active major Python version flags; forwarded to
        ``define_custom_scripts``.

    Returns
    -------
    (tpl_rman, ibeis_rman) : tuple
        Third-party and IBEIS-project repo managers.
    """
    import utool as ut
    WITH_CNN = True
    #WITH_TPL = True
    WITH_QT = not ut.get_argflag('--no-qt')
    WITH_GUI = not ut.get_argflag('--no-gui')
    WITH_CUSTOM_TPL = True
    WITH_PLUGINS = True
    #-----------
    # IBEIS project repos
    #-----------
    # if True:
    #     jon_repo_base = 'https://github.com/WildbookOrg'
    #     jason_repo_base = 'https://github.com/WildbookOrg'
    # else:
    #     jon_repo_base = 'https://github.com/wildme'
    #     jason_repo_base = 'https://github.com/wildme'
    ibeis_rman = ut.RepoManager([
        'https://github.com/WildbookOrg/utool.git',
        # 'https://github.com/WildbookOrg/sandbox_utools.git',
        'https://github.com/WildbookOrg/vtool.git',
        'https://github.com/WildbookOrg/dtool.git',
        'https://github.com/Erotemic/ubelt.git',
        'https://github.com/WildbookOrg/detecttools.git',
    ], CODE_DIR, label='core', pythoncmd=pythoncmd)
    tpl_rman = ut.RepoManager([], CODE_DIR, label='tpl', pythoncmd=pythoncmd)
    # NOTE(review): GET_ARGFLAG is presumably an alias of ut.get_argflag
    # defined elsewhere in this file -- confirm.
    if not GET_ARGFLAG('--ignore-opencv'):
        cv_repo = ut.Repo('https://github.com/Itseez/opencv.git', CODE_DIR, modname='cv2')
        tpl_rman.add_repo(cv_repo)
    if WITH_GUI:
        ibeis_rman.add_repos([
            'https://github.com/WildbookOrg/plottool.git',
        ])
        if WITH_QT:
            ibeis_rman.add_repos([
                'https://github.com/WildbookOrg/guitool.git',
            ])
            tpl_rman.add_repo(ut.Repo(modname=('PyQt4', 'PyQt5', 'PyQt')))
    if WITH_CUSTOM_TPL:
        flann_repo = ut.Repo('https://github.com/WildbookOrg/flann.git', CODE_DIR, modname='pyflann')
        ibeis_rman.add_repo(flann_repo)
        ibeis_rman.add_repos([
            'https://github.com/WildbookOrg/hesaff.git',
        ])
    if WITH_CNN:
        ibeis_rman.add_repos([
            'https://github.com/WildbookOrg/ibeis_cnn.git',
            'https://github.com/WildbookOrg/pydarknet.git',
            'https://gitlab.com/bluemellophone/lightnet.git',
            'https://gitlab.com/bluemellophone/brambox.git',
        ])
        # NEW CNN Dependencies
        tpl_rman.add_repos([
            'https://github.com/pytorch/pytorch.git',
        ])
        # if GET_ARGFLAG('--libgpuarray'):
        tpl_rman.add_repos([
            'https://github.com/Theano/libgpuarray.git',
        ])
        # CNN Dependencies
        tpl_rman.add_repos([
            'https://github.com/Theano/Theano.git',
            # 'https://github.com/lisa-lab/pylearn2.git',
            'https://github.com/Lasagne/Lasagne.git',
        ])
    if WITH_PLUGINS:
        ibeis_rman.add_repos([
            'https://github.com/WildbookOrg/ibeis-flukematch-module.git',
            'https://github.com/WildbookOrg/ibeis-curvrank-module.git',
            'https://github.com/WildbookOrg/ibeis-deepsense-module.git',
            'https://github.com/WildbookOrg/ibeis-finfindr-module.git',
            'https://github.com/WildbookOrg/ibeis-kaggle7-module.git',
            'https://github.com/WildbookOrg/pyrf.git',
        ])
    if False:
        # Depricated
        ibeis_rman.add_repos([
            #'https://github.com/WildbookOrg/pybing.git',
            #'https://github.com/aweinstock314/cyth.git',
            #'https://github.com/hjweide/pygist',
        ])
    # Add main repo (Must be checked last due to dependency issues)
    ibeis_rman.add_repos([
        'https://github.com/WildbookOrg/ibeis.git',
    ])
    #-----------
    # Custom third party build/install scripts
    #-----------
    define_custom_scripts(tpl_rman, ibeis_rman, PY2, PY3)
    return tpl_rman, ibeis_rman
def define_custom_scripts(tpl_rman, ibeis_rman, PY2, PY3):
    """
    Attach custom build / install shell scripts to the managed repositories.

    The scripts are registered (as formatted ``ut.codeblock`` strings) on the
    repo objects held by the two managers so that ``execute_commands`` can
    later dump them (``--dump-scripts``) or execute them (``--opencv``,
    ``--flann``, ``--hesaff``, ...).

    Args:
        tpl_rman: repo manager for third-party libraries (cv2, libgpuarray, PyQt)
        ibeis_rman: repo manager for IBEIS projects (pyflann, hesaff,
            pydarknet, pyrf, ...)
        PY2 (bool): True when running under Python 2.7
        PY3 (bool): True when running under Python 3.x

    Notes (handy debugging one-liners):
        export THEANO_FLAGS="device=cpu,print_active_device=True,enable_initial_driver_test=True"
        set THEANO_FLAGS=device=cpu,print_active_device=True,enable_initial_driver_test=True,print_test_value=True

        python -c "import pydot; print(pydot.__file__)"
        python -c "import pydot; print(pydot.__version__)"
        python -c "import pydot; print(pydot.find_graphviz())"
        DEVICE="cuda" python -c "import pygpu;pygpu.test()"
        python -c "import theano; print(theano.__file__)"
        # python -c "import pylearn2; print(pylearn2.__file__)"
        python -c "import lasagne; print(lasagne.__file__)"
        python -c "import ibeis_cnn; print(ibeis_cnn.__file__)"
        python -c "import detecttools; print(detecttools.__file__)"

        # http://stackoverflow.com/questions/18042919/how-to-install-pyqt5-on-a-new-virtualenv-and-work-on-an-idle
        pip install vext.pyqt5
        sudo apt-get install pyqt5-dev
        sudo apt-get install python3-pyqt5
        python
        python -c "import sip; print('[test] Python can import sip')"
        python -c "import sip; print('sip.__file__=%r' % (sip.__file__,))"
        python -c "import sip; print('sip.SIP_VERSION=%r' % (sip.SIP_VERSION,))"
        python -c "import sip; print('sip.SIP_VERSION_STR=%r' % (sip.SIP_VERSION_STR,))"

        ln -s /usr/lib/python3/dist-packages/PyQt5/ /home/joncrall/venv3/lib/python3.4/site-packages/PyQt5
        ln -s /usr/lib/python3/dist-packages/sip*.so /home/joncrall/venv3/lib/python3.4/site-packages/
        ln -s /usr/lib/python3/dist-packages/sip*.py /home/joncrall/venv3/lib/python3.4/site-packages/
    """
    import utool as ut

    major = str(sys.version_info.major)
    minor = str(sys.version_info.minor)
    majorminor = [major, minor]
    # The "off" python is the major version we are NOT running under.
    pyoff = '2' if sys.version_info.major == 3 else '3'
    pyon = majorminor[0]
    plat_spec = get_plat_specifier()
    # build_dname = 'build' + ''.join(majorminor)
    build_dname = 'cmake_builds/build' + plat_spec

    # Values substituted into every generated script via str.format below.
    script_fmtdict = {
        'pyexe'         : sys.executable,
        'pyversion'     : 'python' + '.'.join(majorminor),
        'pypkg_var'     : 'PYTHON' + pyon + '_PACKAGES_PATH',
        'build_dname'   : build_dname,
        'pyoff'         : pyoff,
        'pyon'          : pyon,
        'cv_pyon_var'   : 'BUILD_opencv_python' + pyon,
        'cv_pyoff_var'  : 'BUILD_opencv_python' + pyoff,
        'plat_spec'     : plat_spec,
        'source_dpath'  : '../..',
        'libext'        : ut.get_lib_ext(),
    }

    # Choose the install prefix: the active virtualenv if there is one,
    # otherwise a platform-appropriate system prefix.
    if os.environ.get('VIRTUAL_ENV', '') == '':
        if sys.platform.startswith('darwin'):
            local_prefix = '/opt/local'
        else:
            local_prefix = '/usr/local'
    else:
        local_prefix = os.environ['VIRTUAL_ENV']

    # BUGFIX: this was os.path.join(local_prefix, '/share/OpenCV'); a second
    # absolute component makes os.path.join discard local_prefix entirely,
    # so the check always looked at '/share/OpenCV'.
    opencv_dir = os.path.join(local_prefix, 'share', 'OpenCV')
    if not os.path.exists(opencv_dir):
        if not ut.get_argflag('--opencv'):
            # BUGFIX: report the expected location *before* clearing the
            # variable (previously an empty string was printed).
            print('OpenCV is not installed in the expected location: {}'.format(opencv_dir))
            print('Running this script with --opencv will build and install it there')
            opencv_dir = ''

    # define bash variables for different combinations of python distros and
    # virtual environments
    python_bash_setup = ut.codeblock(
        r'''
        # STARTBLOCK bash
        if [[ "$VIRTUAL_ENV" == "" ]]; then
            # The case where we are installying system-wide
            # It is recommended that a virtual enviornment is used instead
            export PYTHON_EXECUTABLE=$(which {pyversion})
            if [[ '$OSTYPE' == 'darwin'* ]]; then
                # Mac system info
                export LOCAL_PREFIX=/opt/local
                export {pypkg_var}=$($PYTHON_EXECUTABLE -c "import site; print(site.getsitepackages()[0])")
                export PYTHON_PACKAGES_PATH=${pypkg_var}
                export _SUDO="sudo"
            else
                # Linux system info
                export LOCAL_PREFIX=/usr/local
                export {pypkg_var}=$LOCAL_PREFIX/lib/{pyversion}/dist-packages
                export PYTHON_PACKAGES_PATH=${pypkg_var}
                export _SUDO="sudo"
            fi
            # No windows support here
        else
            # The prefered case where we are in a virtual environment
            export PYTHON_EXECUTABLE=$(which python)
            # export LOCAL_PREFIX=$VIRTUAL_ENV/local
            export LOCAL_PREFIX=$VIRTUAL_ENV
            export {pypkg_var}=$LOCAL_PREFIX/lib/{pyversion}/site-packages
            export PYTHON_PACKAGES_PATH=${pypkg_var}
            export _SUDO=""
        fi
        echo "LOCAL_PREFIX = $LOCAL_PREFIX"
        echo "{pypkg_var} = ${pypkg_var}"
        # ENDBLOCK bash
        '''
    ).format(**script_fmtdict)
    # Make the common preamble available to every subsequent script.
    script_fmtdict['python_bash_setup'] = python_bash_setup

    #===================
    # PYFLANN SETUP SCRIPTS
    #===================

    ibeis_rman['pyflann'].add_script('build', ut.codeblock(
        r'''
        # STARTBLOCK bash
        {python_bash_setup}
        cd {repo_dir}
        mkdir -p {build_dname}
        cd {build_dname}
        cmake -G "Unix Makefiles" \
            -DCMAKE_BUILD_TYPE="Release" \
            -DPYTHON_EXECUTABLE=$PYTHON_EXECUTABLE \
            -DBUILD_EXAMPLES=Off \
            -DBUILD_TESTS=Off \
            -DBUILD_PYTHON_BINDINGS=On \
            -DBUILD_MATLAB_BINDINGS=Off \
            -DBUILD_CUDA_LIB=Off\
            -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX\
            {source_dpath}
        export NCPUS=$(grep -c ^processor /proc/cpuinfo)
        make -j$NCPUS
        # ENDBLOCK bash
        ''').format(repo_dir=ibeis_rman['pyflann'].dpath, **script_fmtdict)
    )

    ibeis_rman['pyflann'].add_script('install', ut.codeblock(
        r'''
        # STARTBLOCK bash
        # The pyflann source lives here
        cd {repo_dir}/src/python
        # Need to run build to move the libs to the build directory
        python setup.py build
        # Use pip to editable install
        pip install -e {repo_dir}/src/python
        # Old way of doing it
        # But the setup script is generated during build
        # python {repo_dir}/build/src/python/setup.py develop
        python -c "import pyflann; print(pyflann.__file__)" --verb-flann
        python -c "import pyflann; print(pyflann)" --verb-flann
        # ENDBLOCK bash
        ''').format(repo_dir=ibeis_rman['pyflann'].dpath)
    )

    #===================
    # HESAFF
    #===================

    ibeis_rman['hesaff'].add_script('build', ut.codeblock(
        r'''
        # STARTBLOCK bash
        {python_bash_setup}
        cd $CODE_DIR/hesaff
        mkdir -p {build_dname}
        cd {build_dname}
        # only specify an explicit opencv directory if we know one exists
        if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then
            OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"
        else
            OPENCV_ARGS=""
        fi
        echo 'Configuring with cmake'
        if [[ '$OSTYPE' == 'darwin'* ]]; then
            cmake -G "Unix Makefiles" \
                -DCMAKE_OSX_ARCHITECTURES=x86_64 \
                -DCMAKE_C_COMPILER=clang2 \
                -DCMAKE_CXX_COMPILER=clang2++ \
                -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \
                $OPENCV_ARGS \
                {source_dpath}
        else
            cmake -G "Unix Makefiles" \
                -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \
                $OPENCV_ARGS \
                {source_dpath}
        fi
        export NCPUS=$(grep -c ^processor /proc/cpuinfo)
        make -j$NCPUS
        export MAKE_EXITCODE=$?
        echo "MAKE_EXITCODE=$MAKE_EXITCODE"
        # Move the compiled library into the source folder
        if [[ $MAKE_EXITCODE == 0 ]]; then
            #make VERBOSE=1
            cp -v libhesaff{libext} {source_dpath}/pyhesaff/libhesaff{plat_spec}{libext}
        fi
        # ENDBLOCK
        ''').format(**script_fmtdict))

    #===================
    # PYDARKNET
    #===================

    ibeis_rman['pydarknet'].add_script('build', ut.codeblock(
        r'''
        # STARTBLOCK bash
        {python_bash_setup}
        cd $CODE_DIR/pydarknet
        mkdir -p {build_dname}
        cd {build_dname}
        if [[ "$(which nvcc)" == "" ]]; then
            export CMAKE_CUDA=Off
        else
            export CMAKE_CUDA=On
        fi
        # only specify an explicit opencv directory if we know one exists
        if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then
            OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"
        else
            OPENCV_ARGS=""
        fi
        echo 'Configuring with cmake'
        if [[ '$OSTYPE' == 'darwin'* ]]; then
            export CONFIG="-DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_C_COMPILER=clang2 -DCMAKE_CXX_COMPILER=clang2++ -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
        else
            export CONFIG="-DCMAKE_BUILD_TYPE='Release' -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
        fi
        export CONFIG="$CONFIG -DCUDA=$CMAKE_CUDA"
        echo "CONFIG = $CONFIG"
        cmake $CONFIG -G 'Unix Makefiles' {source_dpath}
        #################################
        echo 'Building with make'
        export NCPUS=$(grep -c ^processor /proc/cpuinfo)
        make -j$NCPUS -w
        #################################
        export MAKE_EXITCODE=$?
        echo "MAKE_EXITCODE=$MAKE_EXITCODE"
        # Move the compiled library into the source folder
        if [[ $MAKE_EXITCODE == 0 ]]; then
            echo 'Moving the shared library'
            # cp -v lib* ../pydarknet
            cp -v lib*{libext} {source_dpath}/pydarknet
            # cp -v libdarknet{libext} {source_dpath}/pydarknet/libdarknet{plat_spec}{libext}
        fi
        # ENDBLOCK
        ''').format(**script_fmtdict))

    #===================
    # PYRF
    #===================

    ibeis_rman['pyrf'].add_script('build', ut.codeblock(
        r'''
        # STARTBLOCK bash
        {python_bash_setup}
        cd $CODE_DIR/pyrf
        mkdir -p {build_dname}
        cd {build_dname}
        # only specify an explicit opencv directory if we know one exists
        if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then
            OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"
        else
            OPENCV_ARGS=""
        fi
        echo 'Configuring with cmake'
        if [[ '$OSTYPE' == 'darwin'* ]]; then
            export CONFIG="-DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_C_COMPILER=clang2 -DCMAKE_CXX_COMPILER=clang2++ -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
        else
            export CONFIG="-DCMAKE_BUILD_TYPE='Release' -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
        fi
        echo "CONFIG = $CONFIG"
        cmake $CONFIG -G 'Unix Makefiles' {source_dpath}
        #################################
        echo 'Building with make'
        export NCPUS=$(grep -c ^processor /proc/cpuinfo)
        make -j$NCPUS -w
        #################################
        export MAKE_EXITCODE=$?
        echo "MAKE_EXITCODE=$MAKE_EXITCODE"
        # Move the compiled library into the source folder
        if [[ $MAKE_EXITCODE == 0 ]]; then
            echo 'Moving the shared library'
            # cp -v lib* ../pyrf
            cp -v lib*{libext} {source_dpath}/pyrf
            # cp -v libpyrf{libext} {source_dpath}/pyrf/libpyrf{plat_spec}{libext}
        fi
        # ENDBLOCK
        ''').format(**script_fmtdict))

    #===================
    # OPENCV SETUP SCRIPTS
    #===================
    """
    ./super_setup.py --dump-scripts
    """

    tpl_rman['cv2'].add_script('build', ut.codeblock(
        r'''
        # STARTBLOCK bash
        {python_bash_setup}
        # Checkout opencv core
        cd $CODE_DIR
        # export REPO_DIR=$CODE_DIR/opencv
        export REPO_DIR={repo_dpath}
        # git clone https://github.com/Itseez/opencv.git
        cd $REPO_DIR
        # Checkout opencv extras
        git clone https://github.com/Itseez/opencv_contrib.git
        # cd opencv_contrib
        # git pull
        # cd ..
        # git pull
        mkdir -p $REPO_DIR/{build_dname}
        cd $REPO_DIR/{build_dname}
        cmake -G "Unix Makefiles" \
            -D WITH_OPENMP=ON \
            -D CMAKE_BUILD_TYPE=RELEASE \
            -D {cv_pyoff_var}=Off \
            -D {cv_pyon_var}=On \
            -D PYTHON_DEFAULT_EXECUTABLE="{pyexe}" \
            -D {pypkg_var}=${pypkg_var} \
            -D CMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \
            -D OPENCV_EXTRA_MODULES_PATH=$REPO_DIR/opencv_contrib/modules \
            -D WITH_CUDA=Off \
            -D BUILD_opencv_dnn=Off \
            -D BUILD_opencv_dnn_modern=Off \
            -D WITH_VTK=Off \
            -D WITH_CUDA=Off \
            -D WITH_MATLAB=Off \
            $REPO_DIR
            # -D WITH_OPENCL=Off \
            # -D BUILD_opencv_face=Off \
            # -D BUILD_opencv_objdetect=Off \
            # -D BUILD_opencv_video=Off \
            # -D BUILD_opencv_videoio=Off \
            # -D BUILD_opencv_videostab=Off \
            # -D BUILD_opencv_ximgproc=Off \
            # -D BUILD_opencv_xobjdetect=Off \
            # -D BUILD_opencv_xphoto=Off \
            # -D BUILD_opencv_datasets=Off \
            # -D CXX_FLAGS="-std=c++11" \ %TODO
        export NCPUS=$(grep -c ^processor /proc/cpuinfo)
        make -j$NCPUS
        # ENDBLOCK
        ''').format(repo_dpath=ut.unexpanduser(tpl_rman['cv2'].dpath),
                    **script_fmtdict))

    tpl_rman['cv2'].add_script('install', ut.codeblock(
        r'''
        # STARTBLOCK bash
        {python_bash_setup}
        cd $CODE_DIR/opencv/{build_dname}
        $_SUDO make install
        # Hack because cv2 does not want to be installed for some reason
        # cp lib/cv2.so $PYTHON_PACKAGES_PATH
        # Seems to work now that local is removed from prefix
        # cp -v lib/cv2.so $PYTHON_PACKAGES_PATH
        # Test makesure things working
        python -c "import numpy; print(numpy.__file__)"
        python -c "import numpy; print(numpy.__version__)"
        python -c "import cv2; print(cv2.__version__)"
        python -c "import cv2; print(cv2.__file__)"
        #python -c "import vtool"
        # Check if we have contrib modules
        python -c "import cv2; print(cv2.xfeatures2d)"
        # ENDBLOCK
        ''').format(**script_fmtdict))

    # if GET_ARGFLAG('--libgpuarray'):
    tpl_rman['libgpuarray'].add_script('build', ut.codeblock(
        r'''
        # STARTBLOCK bash
        # Ensure the repo was checked out
        if [ ! -d {repo_dpath} ]; then
            git clone https://github.com/Theano/libgpuarray.git {repo_dpath}
        fi
        {python_bash_setup}
        cd {repo_dpath}
        # need a specific version of libgpuarray
        git checkout tags/v0.6.2 -b v0.6.2
        mkdir -p {repo_dpath}/{build_dname}
        cd {repo_dpath}/{build_dname}
        # First build the C library
        cmake {repo_dpath} -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX
        export NCPUS=$(grep -c ^processor /proc/cpuinfo)
        make -j$NCPUS
        $_SUDO make install
        # Now build the python libarary
        cd {repo_dpath}
        python setup.py build_ext -L $LOCAL_PREFIX/lib -I $LOCAL_PREFIX/include
        python setup.py build
        # python setup.py install
        $_SUDO pip install -e {repo_dpath}
        # DEVICE="<test device>" python -c "import pygpu;pygpu.test()"
        # DEVICE="gpu0" python -c "import pygpu;pygpu.test()"
        cd ~
        $_SUDO pip install nose
        DEVICE="cuda" python -c "import pygpu;pygpu.test()"
        # pip uninstall pygpu
        # ENDBLOCK
        ''').format(repo_dpath=ut.unexpanduser(tpl_rman['libgpuarray'].dpath),
                    **script_fmtdict))

    #===================
    # PYQT SETUP SCRIPTS
    #===================

    if ut.in_virtual_env():
        try:
            fmtdict = {
                'sys_dist_packages': ut.get_global_dist_packages_dir(),
                'venv_site_packages': ut.get_site_packages_dir(),
                'pyqt' : 'PyQt4' if PY2 else 'PyQt5',
                # Need the PyQT5 SVG module for IPython to work properly
                'debian-python-qt' : (
                    'python-qt4' if PY2 else
                    'qt5-default python3-pyqt5 debian-python-qt-svg'),
                'pip-python-qt' : 'python-qt4' if PY2 else 'python-qt5'
            }
            # sys_dist_packages = ut.get_global_dist_packages_dir()
            # sys_pyqt_dir = sys_dist_packages + '/{pyqt}'
            # Allows us to use a system qt install in a virtual environment.
            system_to_venv = ut.codeblock(
                r'''
                # STARTBLOCK bash
                # Creates a symlink to the global PyQt in a virtual env
                export GLOBAL_DIST_PACKAGES="{sys_dist_packages}"
                export VENV_DIST_PACKAGES="{venv_site_packages}"
                if [ -d $GLOBAL_DIST_PACKAGES/{pyqt} ]; then
                    echo "have qt"
                    ls $GLOBAL_DIST_PACKAGES/{pyqt}
                    ls $VENV_DIST_PACKAGES/{pyqt}
                else
                    # Ensure PyQt is installed first (FIXME make this work for non-debian systems)
                    sudo apt-get install {debian-python-qt}
                    # pip install {pip-python-qt}
                fi
                if [ -d $GLOBAL_DIST_PACKAGES/{pyqt} ]; then
                    # Install system pyqt packages to virtual envirment via symlink
                    ln -s $GLOBAL_DIST_PACKAGES/{pyqt}/ $VENV_DIST_PACKAGES/{pyqt}
                    ln -s $GLOBAL_DIST_PACKAGES/sip*.so $VENV_DIST_PACKAGES/
                    ln -s $GLOBAL_DIST_PACKAGES/sip*.py $VENV_DIST_PACKAGES/
                else
                    echo "{pyqt} DOES NOT SEEM TO BE INSTALLED ON THE SYSTEM"
                fi
                echo "testing"
                python -c "import {pyqt}; print({pyqt})"
                # ENDBLOCK bash
                ''').format(**fmtdict)
            # TODO: add custom build alternative
            tpl_rman['PyQt'].add_script('system_to_venv', system_to_venv)
        except NotImplementedError:
            pass
#-----------
# Verify TPL Dependencies
#-----------
def GET_ARGFLAG(arg, *args, **kwargs):
    """Check whether a boolean command-line flag was supplied.

    The flag counts as set either when its dash-stripped form appears as a
    bare token in ``sys.argv`` or when utool's richer flag parsing finds it.
    """
    import utool as ut
    bare_name = arg.lstrip('--')
    return bare_name in sys.argv or ut.get_argflag(arg, *args, **kwargs)
def move_wildme(ibeis_rman, fmt):
    """
    Point each managed repo's ``origin`` remote at the WildbookOrg GitHub
    organization, preserving the previous origin under a remote named after
    its old user, and ensure a ``wildme`` remote exists.

    Args:
        ibeis_rman: repo manager whose repos will be migrated
        fmt (str): remote url protocol, 'ssh' or 'https'
    """
    wildme_user = 'WildbookOrg'
    wildme_remote = 'wildme'
    for repo in ibeis_rman.repos:
        try:
            gitrepo = repo.as_gitpython()
        except Exception:
            # Repo not checked out yet; just record the desired url format.
            repo.change_url_format(fmt)
            print('repo {!r} does not exist yet'.format(repo))
            continue

        wildme_url = repo._new_remote_url(host='github.com', user=wildme_user, fmt=fmt)
        remotes = repo.remotes
        message = 'Checking %s for move to wildme' % (repo,)
        print(message)

        incorrect_version = repo._ensure_remote_exists(wildme_remote, wildme_url)

        if 'origin' in remotes:
            try:
                origin = remotes['origin']
                origin_protocol = origin['url'].split(':')[0]
                origin_user = origin['username']
                if origin_user != wildme_user or origin_protocol != fmt or incorrect_version:
                    if origin_user not in remotes:
                        # first add a remote that is the original origin
                        origin_url = origin['url']
                        print(' * Create remote %r: %r' % (origin_user, origin_url,))
                        gitrepo.create_remote(origin_user, origin_url)
                    # change origin to use wildme url
                    gitorigin = gitrepo.remote('origin')
                    print(' * Change origin url to %r' % (wildme_url,))
                    gitorigin.set_url(wildme_url)
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # SystemExit / KeyboardInterrupt.
                print('\tWARNING: COULD NOT MIGRATE REPO = %r' % (repo, ))

        repo.change_url_format(fmt)
def execute_commands(tpl_rman, ibeis_rman):
    """
    Dispatch every command-line driven action of super_setup.

    Inspects ``sys.argv`` (via ``GET_ARGFLAG`` / ``ut.get_argval``) and issues
    the corresponding git / build / install commands on the repos held by
    ``tpl_rman`` (third-party libs) and ``ibeis_rman`` (IBEIS projects).
    """
    import utool as ut

    GET_ARGVAL = ut.get_argval

    ut.init_catch_ctrl_c()

    if 0:
        print('Version Check Source:')
        for repo in tpl_rman.repos:
            print('python -c "import {0}; print({0}.__file__)"'.format(repo.modname))
            print('python -c "import {0}; print({0}.__version__)"'.format(repo.modname))

    #-----------
    # Execute Commands on Core Repos
    #-----------

    CODE_DIR, pythoncmd, WIN32, PY2, PY3 = get_sysinfo()

    print('ibeis_rman = %r' % (ibeis_rman,))

    wildme_ssh_flags = GET_ARGFLAG('--move-wildme') or GET_ARGFLAG('--move-wildme-ssh')
    wildme_https_flags = GET_ARGFLAG('--move-wildme-https') or GET_ARGFLAG('--move-wildme-http')
    if wildme_ssh_flags or wildme_https_flags:
        fmt = 'ssh' if wildme_ssh_flags else 'https'
        move_wildme(ibeis_rman, fmt)

    # Commands on global git repos
    if GET_ARGFLAG('--status'):
        ibeis_rman.issue('git status')
        sys.exit(0)

    ibeis_rman.ensure()

    if GET_ARGFLAG('--dump') or GET_ARGFLAG('--dump-scripts'):
        dpath = '_super_scripts/' + 'scripts' + get_plat_specifier()
        ut.ensuredir(dpath)
        dumps = [
            (tpl_rman, 'cv2', 'build'),
            (tpl_rman, 'cv2', 'install'),
            (ibeis_rman, 'flann', 'build'),
            (ibeis_rman, 'flann', 'install'),
            (ibeis_rman, 'hesaff', 'build'),
            (tpl_rman, 'PyQt', 'system_to_venv'),
            (tpl_rman, 'libgpuarray', 'build'),
        ]
        for rman, mod, sname in dumps:
            from os.path import join
            # if mod not in rman:
            #     print('mod=%r not available in rman=%r' % (mod, rman))
            #     continue
            script = rman[mod].get_script(sname).text
            suffix = get_plat_specifier()
            sh_fpath = join(dpath, mod + '_' + sname + suffix + '.sh')
            ut.write_to(sh_fpath, script)

    if GET_ARGFLAG('--requirements'):
        ut.cmd('pip install -r requirements.txt')

    # HACKED IN SCRIPTS WHILE IM STILL FIGURING OUT TPL DEPS
    if GET_ARGFLAG('--opencv'):
        # There is now a pypi for opencv! Yay
        # ut.cmd('pip install opencv-python')
        # Bummer, but we need opencv source for pyhessaff
        # we should just make a wheel for pyhessaff
        cv_repo = tpl_rman['cv2']
        cv_repo.clone()
        script = cv_repo.get_script('build')
        script.exec_()
        cv_repo = tpl_rman['cv2']
        script = cv_repo.get_script('install')
        script.exec_()

    if GET_ARGFLAG('--flann'):
        # NOTE(review): the scripts were registered under the 'pyflann' key in
        # define_custom_scripts; confirm the manager resolves 'flann' to the
        # same repo before relying on this path.
        script = ibeis_rman['flann'].get_script('build')
        script.exec_()
        script = ibeis_rman['flann'].get_script('install')
        script.exec_()

    if GET_ARGFLAG('--pyqt'):
        script = tpl_rman['PyQt'].get_script('system_to_venv')
        script.exec_()

    if GET_ARGFLAG('--hesaff'):
        script = ibeis_rman['hesaff'].get_script('build')
        script.exec_()

    if GET_ARGFLAG('--pydarknet'):
        script = ibeis_rman['pydarknet'].get_script('build')
        script.exec_()

    if GET_ARGFLAG('--pyrf'):
        script = ibeis_rman['pyrf'].get_script('build')
        script.exec_()

    if GET_ARGFLAG('--torch'):
        # Theano and lasange code should be moved to pytorch
        tpl_rman['pytorch'].clone(recursive=True)
        tpl_rman['pytorch'].issue('git submodule update --init')
        # BUGFIX: was 'python setup install' (missing `.py`)
        tpl_rman['pytorch'].issue('python setup.py install')
        tpl_rman['pytorch'].issue('pip install torchvision')
        # tpl_rman['pytorch'].issue('NO_CUDNN=TRUE && python setup install')
        # tpl_rman['pytorch'].issue('pip install -e .')

    if GET_ARGFLAG('--libgpuarray') or GET_ARGFLAG('--dcnn'):
        tpl_rman['libgpuarray'].clone()
        script = tpl_rman['libgpuarray'].get_script('build')
        script.exec_()

    if GET_ARGFLAG('--dcnn'):
        tpl_rman['theano'].clone()
        # tpl_rman['pylearn2'].clone()
        tpl_rman['lasagne'].clone()
        tpl_rman['theano'].issue('pip install -e .')
        # tpl_rman['pylearn2'].issue('pip install -e .')
        tpl_rman['lasagne'].issue('pip install -e .')
        # tpl_rman['pylearn2'].python_develop()
        # tpl_rman['theano'].python_develop()
        # tpl_rman['lasagne'].python_develop()

    #_===

    if GET_ARGFLAG('--fix') or GET_ARGFLAG('--check'):
        missing_dynlib = tpl_rman.check_cpp_build()
        missing_dynlib += ibeis_rman.check_cpp_build()

        missing_install = tpl_rman.check_installed()
        missing_install += ibeis_rman.check_installed()

        problems = []
        problems += ibeis_rman.check_importable()
        problems += tpl_rman.check_importable()

        if GET_ARGFLAG('--fix'):
            print('Trying to fix problems')

            for repo in missing_dynlib:
                repo.custom_build()

            for repo, recommended_fix in problems:
                print('Trying to fix repo = %r' % (repo,))
                print(' * recommended_fix = %r' % (recommended_fix,))
                if recommended_fix == 'rebuild':
                    repo.custom_build()
                    print('Can currently only fix one module at a time. Please re-run')
                    sys.exit(1)
                else:
                    print('Not sure how to fix %r' % (repo,))

    if GET_ARGFLAG('--pull'):
        ibeis_rman.issue('git pull')

    if GET_ARGFLAG('--build'):
        # Build tpl repos
        # tpl_rman.custom_build()
        # ibeis_rman.custom_build()
        # Build only IBEIS repos with setup.py
        _rman = ibeis_rman.only_with_pysetup()
        _rman.issue('{pythoncmd} setup.py build'.format(pythoncmd=pythoncmd))

    # Like install, but better if you are developing
    if GET_ARGFLAG('--develop'):
        _rman = ibeis_rman.only_with_pysetup()
        # # _rman.issue('{pythoncmd} setup.py develop'.format(pythoncmd=pythoncmd),
        # #             sudo=not ut.in_virtual_env())
        _rman.issue('{pythoncmd} -m pip install -e .'.format(pythoncmd=pythoncmd),
                    sudo=not ut.in_virtual_env())

    if GET_ARGFLAG('--clean'):
        _rman = ibeis_rman.only_with_pysetup()
        _rman.issue('{pythoncmd} setup.py clean'.format(pythoncmd=pythoncmd))

    if GET_ARGFLAG('--install'):
        print('WARNING: Dont use install if you are a developer. Use develop instead.')
        _rman = ibeis_rman.only_with_pysetup()
        # BUGFIX: the command string previously lacked the {pythoncmd}
        # placeholder, so .format() was a no-op and plain 'python' was used.
        _rman.issue('{pythoncmd} setup.py install'.format(pythoncmd=pythoncmd))

    if GET_ARGFLAG('--push'):
        ibeis_rman.issue('git push')

    if GET_ARGFLAG('--branch'):
        ibeis_rman.issue('git branch')
        sys.exit(0)

    if GET_ARGFLAG('--tag-status'):
        ibeis_rman.issue('git tag')

    # Tag everything
    tag_name = GET_ARGVAL('--newtag', type_=str, default=None)
    if tag_name is not None:
        ibeis_rman.issue('git tag -a "{tag_name}" -m "super_setup autotag {tag_name}"'.format(**locals()))
        ibeis_rman.issue('git push --tags')

    if GET_ARGFLAG('--bext'):
        ibeis_rman.issue('{pythoncmd} setup.py build_ext --inplace'.format(pythoncmd=pythoncmd))

    commit_msg = GET_ARGVAL('--commit', type_=str, default=None)
    if commit_msg is not None:
        ibeis_rman.issue('git commit -am "{commit_msg}"'.format(**locals()))

    # Change Branch
    branch_name = GET_ARGVAL('--checkout', type_=str, default=None)
    if branch_name is not None:
        try:
            ibeis_rman.issue('git checkout "{branch_name}"'.format(**locals()))
        except Exception:
            print('ERROR: Could not checkout branch: %r' % (branch_name, ))

    # Creates new branches
    newbranch_name = GET_ARGVAL('--newbranch', type_=str, default=None)
    if newbranch_name is not None:
        # rman.issue('git stash"'.format(**locals()))
        ibeis_rman.issue('git checkout -b "{newbranch_name}"'.format(**locals()))
        ibeis_rman.issue('git push --set-upstream origin {newbranch_name}'.format(**locals()))
        # rman.issue('git stash pop"'.format(**locals()))

    # Creates new branches
    newlocalbranch_name = GET_ARGVAL('--newlocalbranch', type_=str, default=None)
    if newlocalbranch_name is not None:
        # rman.issue('git stash"'.format(**locals()))
        ibeis_rman.issue('git checkout -b "{newlocalbranch_name}"'.format(**locals()))
        # rman.issue('git push --set-upstream origin {newlocalbranch_name}'.format(**locals()))
        # rman.issue('git stash pop"'.format(**locals()))

    # Creates new branches
    mergebranch_name = GET_ARGVAL('--merge', type_=str, default=None)
    if mergebranch_name is not None:
        ibeis_rman.issue('git merge "{mergebranch_name}"'.format(**locals()))

    # Change ownership
    if GET_ARGFLAG('--serverchmod'):
        ibeis_rman.issue('chmod -R 755 *')

    if GET_ARGFLAG('--chown'):
        # Fixes problems where repos are checked out as root
        username = os.environ.get('USERNAME', ut.get_argval('--username'))
        if username is None:
            username = os.environ.get('USER', None)
        if username is None:
            raise AssertionError('cannot find username in commandline or environment vars')
        usergroup = username
        ibeis_rman.issue('chown -R {username}:{usergroup} *'.format(**locals()),
                         sudo=True)

    upstream_branch = GET_ARGVAL('--set-upstream', type_=str, default=None)
    if upstream_branch is not None:
        # git 2.0
        ibeis_rman.issue('git branch --set-upstream-to=origin/{upstream_branch} {upstream_branch}'.format(**locals()))

    upstream_push = GET_ARGVAL('--upstream-push', type_=str, default=None)
    if upstream_push is not None:
        ibeis_rman.issue('git push --set-upstream origin {upstream_push}'.format(**locals()))

    if GET_ARGFLAG('--test'):
        failures = []
        for repo_dpath in ibeis_rman.repo_dirs:
            # ut.getp_
            mod_dpaths = ut.get_submodules_from_dpath(repo_dpath, recursive=False,
                                                      only_packages=True)
            modname_list = ut.lmap(ut.get_modname_from_modpath, mod_dpaths)
            print('Checking modules = %r' % (modname_list,))

            for modname in modname_list:
                try:
                    ut.import_modname(modname)
                    print(modname + ' success')
                except ImportError:
                    failures += [modname]
                    print(modname + ' failure')

            print('failures = %s' % (ut.repr3(failures),))

    if False:
        try:
            from six.moves import input
        except ImportError:
            input = raw_input  # NOQA

    # General global git command
    gg_cmd = GET_ARGVAL('--gg', None)  # global command
    if gg_cmd is not None:
        ans = 'yes' if GET_ARGFLAG('-y') else input('Are you sure you want to run: %r on all directories? ' % (gg_cmd,))
        if ans == 'yes':
            ibeis_rman.issue(gg_cmd)
def is_running_as_root():
    """
    Report whether the process appears to be running as the root user.

    References:
        http://stackoverflow.com/questions/5721529/running-python-script-as-root
        http://stackoverflow.com/questions/2806897/checking-script-has-root
    """
    # Relies on the USER environment variable rather than os.geteuid so the
    # check also behaves sensibly in sudo-preserved environments.
    return os.environ.get('USER') == 'root'
def get_sysinfo(verbose=0):
    """
    Gather the basic platform information the setup commands need.

    Args:
        verbose (int): print diagnostic details when nonzero

    Returns:
        tuple: ``(CODE_DIR, pythoncmd, WIN32, PY2, PY3)`` where ``CODE_DIR``
        is the checkout root, ``pythoncmd`` the interpreter to invoke, and
        the remaining values are platform / python-version booleans.
    """
    if verbose:
        print('USER = %r' % os.getenv("USER"))

    if is_running_as_root():
        print('Do not run super_setup.py as root')
        sys.exit(1)

    WIN32 = sys.platform.startswith('win32')

    if verbose:
        print('[super_setup] __IBEIS_SUPER_SETUP__')

    if 'CODE_DIR' in os.environ:
        CODE_DIR = os.environ.get('CODE_DIR')
    else:
        # Home is where the .. is.  # '~/code'
        CODE_DIR = dirname(dirname(realpath(__file__)))

    if verbose:
        print('[super_setup] code_dir: %r' % CODE_DIR)

    # BUGFIX: platform.dist() was deprecated in Python 3.5 and removed in
    # Python 3.8; fall back to empty values on modern interpreters.
    try:
        (DISTRO, DISTRO_VERSION, DISTRO_TAG) = platform.dist()
    except AttributeError:
        (DISTRO, DISTRO_VERSION, DISTRO_TAG) = ('', '', '')

    python_version = platform.python_version()

    PY2 = python_version.startswith('2.7')
    PY3 = python_version.startswith('3')
    # '--py3' in sys.argv
    # assert PY3 or
    # 'IBEIS currently supports python 2.7,  Instead got python=%r. use --py3 to override' % python_version

    pythoncmd = sys.executable
    # if PY2:
    #     pythoncmd = 'python' if WIN32 else 'python2.7'
    # elif PY3:
    #     pythoncmd = 'python3'

    return CODE_DIR, pythoncmd, WIN32, PY2, PY3
def main():
    """Top-level entry point for super_setup.

    Prints the banner, reports (without failing) any missing optional
    dependencies, optionally bootstraps the environment, ensures utool is
    importable, then builds the repo managers and dispatches all
    command-line actions to ``execute_commands``.
    """
    print('''
IBEIS Image Analysis (IA)
 ____ _ _ ___ ____ ____ ____ ____ ___ _ _ ___
[__ | | |__] |___ |__/ [__ |___ | | | |__]
___] |__| | |___ | \ ___] |___ | |__| |
Use --help to show usage
''')

    show_usage = len(sys.argv) > 1 and sys.argv[1] in ['--help', '-h']
    if show_usage:
        print(USAGE)

    CODE_DIR, pythoncmd, WIN32, PY2, PY3 = get_sysinfo(verbose=1)

    # Probe each optional dependency; a failed import only prints the
    # super_setup flag that would install it.
    try:
        import cv2  # NOQA
    except ImportError:
        print('Need to install OpenCV')
        print('python super_setup.py --opencv')

    try:
        import pyflann  # NOQA
    except ImportError:
        print('Need to install FLANN')
        print('python super_setup.py --flann')

    try:
        import theano, lasagne  # NOQA
    except ImportError:
        print('Need to install Theano/Lasagne/Pylearn2')
        print('python super_setup.py --dcnn')
    except ValueError as ex:
        # theano can raise ValueError at import time; the message suggests
        # libgpuarray is the usual culprit.
        print(repr(ex))
        print('Probably need libgpu array')
        print('python super_setup.py --libgpuarray')

    # Either Qt binding satisfies the GUI requirement.
    try:
        try:
            import PyQt4  # NOQA
        except ImportError:
            import PyQt5  # NOQA
    except ImportError:
        print('Need to install PyQt')
        print('python super_setup.py --pyqt')

    if '--bootstrap' in sys.argv or 'bootstrap' in sys.argv:
        bootstrap(WIN32)

    try:
        # HACK IN A WAY TO ENSURE UTOOL
        print('Checking utool')
        import utool as ut  # NOQA
    except Exception:
        ensure_utool(CODE_DIR, pythoncmd)

    tpl_rman, ibeis_rman = initialize_repo_managers(CODE_DIR, pythoncmd, PY2, PY3)
    execute_commands(tpl_rman, ibeis_rman)
if __name__ == '__main__':
    # Script entry point: all behavior is driven by command-line flags.
    main()
| super_setup.py | 48,428 | todo, find a way to use this effectively
export THEANO_FLAGS="device=cpu,print_active_device=True,enable_initial_driver_test=True"
set THEANO_FLAGS=device=cpu,print_active_device=True,enable_initial_driver_test=True,print_test_value=True
python -c "import pydot; print(pydot.__file__)"
python -c "import pydot; print(pydot.__version__)"
python -c "import pydot; print(pydot.find_graphviz())"
DEVICE="cuda" python -c "import pygpu;pygpu.test()"
python -c "import theano; print(theano.__file__)"
# python -c "import pylearn2; print(pylearn2.__file__)"
python -c "import lasagne; print(lasagne.__file__)"
python -c "import ibeis_cnn; print(ibeis_cnn.__file__)"
python -c "import detecttools; print(detecttools.__file__)"
# http://stackoverflow.com/questions/18042919/how-to-install-pyqt5-on-a-new-virtualenv-and-work-on-an-idle
pip install vext.pyqt5
sudo apt-get install pyqt5-dev
sudo apt-get install python3-pyqt5
python
python -c "import sip; print('[test] Python can import sip')"
python -c "import sip; print('sip.__file__=%r' % (sip.__file__,))"
python -c "import sip; print('sip.SIP_VERSION=%r' % (sip.SIP_VERSION,))"
python -c "import sip; print('sip.SIP_VERSION_STR=%r' % (sip.SIP_VERSION_STR,))"
ln -s /usr/lib/python3/dist-packages/PyQt5/ /home/joncrall/venv3/lib/python3.4/site-packages/PyQt5
ln -s /usr/lib/python3/dist-packages/sip*.so /home/joncrall/venv3/lib/python3.4/site-packages/
ln -s /usr/lib/python3/dist-packages/sip*.py /home/joncrall/venv3/lib/python3.4/site-packages/
Standard platform specifier used by distutils
imports module from a file path
References:
http://stackoverflow.com/questions/5721529/running-python-script-as-root
http://stackoverflow.com/questions/2806897/checking-script-has-root
TODO:
* needs to check if required modules are installed (or prefereably developed)
* needs to be able to ignore plugins that the user doesnt care about
Super Setup
PREREQ:
git config --global push.default current
export CODE_DIR=~/code
mkdir $CODE_DIR
cd $CODE_DIR
git clone https://github.com/WildbookOrg/ibeis.git
cd ibeis
python super_setup.py --bootstrap
OR (if in virtual environment)
python super_setup.py --bootstrap --nosudo
OR
./_scripts/bootstrap.py
THEN
./_scripts/__install_prereqs__.sh
THEN
./super_setup.py --build --develop
./super_setup.py --build --develop
./super_setup.py --status
# If on current branch copy so super setup isn't overwriten as we go
python -c "import utool as ut; ut.copy('super_setup.py', '_ibeis_setup.py')"
# Status
python _ibeis_setup.py -y --gg "git status"
python _ibeis_setup.py -y --gg "git branch"
# Setup Next
#python _ibeis_setup.py -y --gg "git pull"
#python _ibeis_setup.py -y --gg "git checkout master"
#python _ibeis_setup.py -y --gg "git pull"
#python _ibeis_setup.py -y --gg "git checkout -b next"
#python _ibeis_setup.py -y --gg "git checkout next"
#python _ibeis_setup.py -y --gg "git push -u origin next"
#python _ibeis_setup.py -y --gg "git push remote origin/next"
####python _ibeis_setup.py -y --gg "git merge master"
#python _ibeis_setup.py -y --gg "git checkout ^HEAD"
#python _ibeis_setup.py -y --gg "git checkout master"
#python _ibeis_setup.py -y --gg "git checkout next"
# -- MERGE topic -> next
##python _ibeis_setup.py -y --gg "git checkout topic"
##python _ibeis_setup.py -y --gg "git checkout next"
##python _ibeis_setup.py -y --gg "git merge topic"
# -- MERGE next -> master
python _ibeis_setup.py -y --gg "git checkout master"
python _ibeis_setup.py -y --gg "git merge next"
# -- SAFER MERGE topic -> next
python super_setup.py --checkout next
python super_setup.py --newlocalbranch merge_next_joncrall_dev_branch
python super_setup.py --merge joncrall_dev_branch
./run_tests.py
python super_setup.py --checkout next
python super_setup.py --merge merge_next_joncrall_dev_branch
# Push
python _ibeis_setup.py -y --gg "git push"
#python _ibeis_setup.py -y --gg "git checkout master"
#python _ibeis_setup.py -y --gg "git checkout next"
# MAKE A NEW BRANCH
python super_setup.py --newbranch joncrall_dev_branch
python super_setup.py --checkout joncrall_dev_branch
python super_setup.py --checkout next
python super_setup.py --newbranch jdb
python super_setup.py --checkout jdb
GitReferences:
http://git-scm.com/book/en/v2/Git-Branching-Basic-Branching-and-Merging
FIXME:
graph-viz
pydot
ibeis_cnn
Theano
Lasange
!/usr/bin/env python -*- coding: utf-8 -*------------------ SYSTEM ENTRY POINT, NO UTOOL, BARE PYTHON----------------- parser.add_argument('command', help='command to run') subparsers = parser.add_subparsers() subparsers.add_parser('pull', help='pulls IBEIS repos') subparsers.add_parser('ensure', help='ensures checkouts of IBEIS repos') sub = subparsers.add_parser('move-wildme', help='changes to the wildme repos') sub.add_argument('--fmt', dest='fmt', action='store', choices=['ssh', 'https'], help='url type') Setup options for parser_a Add nargs="*" for zero or more other commands parser.add_argument('extra', nargs = "*", help = 'Other commands') parser.add_argument('command', action='store_true', default=False, help='outputs commands to install prereqs') args = define_argparse() print('args = %r' % (args,)) sys.exit(1) NOQA need to preinstall parseimport bootstrapsys.path.append(os.path.abspath('_scripts')) ENSURING UTOOL For virtualenv module For venv moduleUTOOL_BRANCH = ' -b <branch> <remote_repo>' NOQA TODO: use pip instead cmdstr = '{pythoncmd} -m pip install .'.format(pythoncmd=pythoncmd) sys.path.append(usr_code_dir)----------------- UTOOL PYTHON-----------------WITH_TPL = True----------- IBEIS project repos----------- if True: jon_repo_base = 'https://github.com/WildbookOrg' jason_repo_base = 'https://github.com/WildbookOrg' else: jon_repo_base = 'https://github.com/wildme' jason_repo_base = 'https://github.com/wildme' 'https://github.com/WildbookOrg/sandbox_utools.git', NEW CNN Dependencies if GET_ARGFLAG('--libgpuarray'): CNN Dependencies 'https://github.com/lisa-lab/pylearn2.git', Depricated'https://github.com/WildbookOrg/pybing.git','https://github.com/aweinstock314/cyth.git','https://github.com/hjweide/pygist', Add main repo (Must be checked last due to dependency issues)----------- Custom third party build/install scripts----------- build_dname = 'build' + ''.join(majorminor) define bash variables for different combinations of python distros and 
virtual environments=================== PYFLANN SETUP SCRIPTS====================================== HESAFF====================================== PYDARKNET====================================== PYRF====================================== OPENCV SETUP SCRIPTS=================== if GET_ARGFLAG('--libgpuarray'):=================== PYQT SETUP SCRIPTS=================== Need the PyQT5 SVG module for IPython to work properly sys_dist_packages = ut.get_global_dist_packages_dir() sys_pyqt_dir = sys_dist_packages + '/{pyqt}' Allows us to use a system qt install in a virtual environment. TODO: add custom build alternative----------- Verify TPL Dependencies----------- first add a remote that is the original origin change origin to use wildme url----------- Execute Commands on Core Repos----------- Commands on global git repos if mod not in rman: print('mod=%r not available in rman=%r' % (mod, rman)) continue HACKED IN SCRIPTS WHILE IM STILL FIGURING OUT TPL DEPS There is now a pypi for opencv! Yay ut.cmd('pip install opencv-python') Bummer, but we need opencv source for pyhessaff we should just make a wheel for pyhessaff Theano and lasange code should be moved to pytorch tpl_rman['pytorch'].issue('NO_CUDNN=TRUE && python setup install') tpl_rman['pytorch'].issue('pip install -e .') tpl_rman['pylearn2'].clone() tpl_rman['pylearn2'].issue('pip install -e .') tpl_rman['pylearn2'].python_develop() tpl_rman['theano'].python_develop() tpl_rman['lasagne'].python_develop()_=== Build tpl repos tpl_rman.custom_build() ibeis_rman.custom_build() Build only IBEIS repos with setup.py Like install, but better if you are developing _rman.issue('{pythoncmd} setup.py develop'.format(pythoncmd=pythoncmd), sudo=not ut.in_virtual_env()) Tag everything Change Branch Creates new branchesrman.issue('git stash"'.format(**locals()))rman.issue('git stash pop"'.format(**locals())) Creates new branchesrman.issue('git stash"'.format(**locals()))rman.issue('git push --set-upstream origin 
{newlocalbranch_name}'.format(**locals()))rman.issue('git stash pop"'.format(**locals())) Creates new branches Change ownership Fixes problems where repos are checked out as root git 2.0 ut.getp_ NOQA General global git command global command Home is where the .. is. '~/code' '--py3' in sys.argv assert PY3 or 'IBEIS currently supports python 2.7, Instead got python=%r. use --py3 to override' % python_version if PY2: pythoncmd = 'python' if WIN32 else 'python2.7' elif PY3: pythoncmd = 'python3' NOQA NOQA NOQA NOQA NOQA HACK IN A WAY TO ENSURE UTOOL NOQA | 8,999 | en | 0.35084 |
#!/usr/bin/env python3
# Software Name: ngsildclient
# SPDX-FileCopyrightText: Copyright (c) 2021 Orange
# SPDX-License-Identifier: Apache-2.0
#
# This software is distributed under the Apache 2.0 license;
# see the NOTICE file for more details.
#
# Author: Fabien BATTELLO <fabien.battello@orange.com> et al.
# SPDX-License-Identifier: Apache-2.0
import logging
from ngsildclient.api.client import Client, Vendor
from .common import mocked_connected
logger = logging.getLogger(__name__)
def test_api_is_connected(requests_mock):
    """is_connected() should report True when the entities endpoint answers 200."""
    entities_url = "http://localhost:1026/ngsi-ld/v1/entities"
    requests_mock.get(entities_url, status_code=200)
    assert Client().is_connected()
def test_api_guess_broker(mocked_connected, requests_mock):
    """guess_vendor() should identify an Orion-LD broker from its /version payload."""
    version_url = "http://localhost:1026/version"
    payload = {"orionld version": "post-v0.8.1"}
    requests_mock.get(version_url, status_code=200, json=payload)
    vendor, version = Client().guess_vendor()
    logger.info(f"{vendor=}")
    assert (vendor, version) == (Vendor.ORIONLD, "post-v0.8.1")
| tests/test_client.py | 1,040 | !/usr/bin/env python3 Software Name: ngsildclient SPDX-FileCopyrightText: Copyright (c) 2021 Orange SPDX-License-Identifier: Apache 2.0 This software is distributed under the Apache 2.0; see the NOTICE file for more details. Author: Fabien BATTELLO <fabien.battello@orange.com> et al. SPDX-License-Identifier: Apache-2.0 | 320 | en | 0.45636 |
from django import template
from django.db import models
register = template.Library()
try:
    # Python >= 2.4 has str.rsplit natively; probe for it and delegate.
    ''.rsplit

    def rsplit(s, delim, maxsplit):
        """Split *s* from the right using the builtin str.rsplit."""
        return s.rsplit(delim, maxsplit)
except AttributeError:
    def rsplit(s, delim, maxsplit):
        """
        Pure-Python fallback for str.rsplit on interpreters older than 2.4.

        Behaves like split() scanned from the end of the string: at most
        *maxsplit* splits occur, taken from the right, and whatever remains
        on the left is returned as the first element of the list (so the
        result has at most maxsplit+1 elements).

        >>> rsplit('foo.bar.baz', '.', 0)
        ['foo.bar.baz']
        >>> rsplit('foo.bar.baz', '.', 1)
        ['foo.bar', 'baz']
        >>> rsplit('foo.bar.baz', '.', 2)
        ['foo', 'bar', 'baz']
        >>> rsplit('foo.bar.baz', '.', 99)
        ['foo', 'bar', 'baz']
        """
        assert maxsplit >= 0
        if maxsplit == 0:
            return [s]
        # Split everywhere, then re-join the leftmost pieces so that only
        # the rightmost `maxsplit` separators take effect. Inefficient but
        # adequate for a compatibility shim.
        pieces = s.split(delim)
        if maxsplit < len(pieces):
            pieces[:-maxsplit] = [delim.join(pieces[:-maxsplit])]
        return pieces
class FilterAdminApplistNode(template.Node):
    """Template node that renders nothing but stores, under ``varname``, a
    copy of the app list found at ``listname`` with satchmo-installed apps
    filtered out."""

    def __init__(self, listname, varname):
        self.listname = listname
        self.varname = varname

    def render(self, context):
        # Map each installed app's short name to its dotted module path.
        installed = {}
        for app in models.get_apps():
            parts = rsplit(app.__name__, '.', 0)
            key = (len(parts) > 1 and parts[-2]) or app.__name__
            installed[key] = app.__name__
        # Keep only entries whose backing module does not belong to satchmo.
        kept = []
        for entry in context[self.listname]:
            module_name = installed.get(entry['name'].lower(), '')
            if not module_name.startswith('satchmo_'):
                kept.append(entry)
        context[self.varname] = kept
        return ''
def filter_admin_app_list(parser, token):
    """Filters the list of installed apps returned by
    django.contrib.admin.templatetags.adminapplist,
    excluding apps installed by satchmo.

    Usage: {% filter_admin_app_list app_list as filtered_list %}

    Raises:
        template.TemplateSyntaxError: if the tag arguments are malformed.
    """
    tokens = token.contents.split()
    # BUG FIX: the old ``raise E, "msg"`` statement form is Python-2-only
    # syntax (a SyntaxError on Python 3); the parenthesized call form below
    # behaves identically on both.
    if len(tokens) < 4:
        raise template.TemplateSyntaxError("'%s' tag requires two arguments" % tokens[0])
    if tokens[2] != 'as':
        raise template.TemplateSyntaxError("Second argument to '%s' tag must be 'as'" % tokens[0])
    return FilterAdminApplistNode(tokens[1], tokens[3])

register.tag('filter_admin_app_list', filter_admin_app_list)
| satchmo/apps/satchmo_store/shop/templatetags/satchmo_adminapplist.py | 2,769 | the following lines perform the function, but inefficiently. This may be adequate for compatibility purposes | 109 | en | 0.930966 |
# Copyright (c) 2010-2019 openpyxl
import pytest
from io import BytesIO
from zipfile import ZipFile
from openpyxl.packaging.manifest import Manifest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
from .test_fields import (
Index,
Number,
Text,
)
@pytest.fixture
def Record():
    """Provide the Record class under test."""
    from ..record import Record as _Record
    return _Record
class TestRecord:
    """Round-trip tests for a single pivot-cache record (<r> element)."""

    def test_ctor(self, Record, Number, Text, Index):
        """A record built from typed fields serialises its children in order."""
        n = [Number(v=1), Number(v=25)]
        s = [Text(v="2014-03-24")]
        x = [Index(), Index(), Index()]
        fields = n + s + x
        field = Record(_fields=fields)
        xml = tostring(field.to_tree())
        expected = """
        <r>
          <n v="1"/>
          <n v="25"/>
          <s v="2014-03-24"/>
          <x v="0"/>
          <x v="0"/>
          <x v="0"/>
        </r>
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff

    def test_from_xml(self, Record, Number, Text, Index):
        """Parsing preserves the interleaved document order of child fields."""
        src = """
        <r>
          <n v="1"/>
          <x v="0"/>
          <s v="2014-03-24"/>
          <x v="0"/>
          <n v="25"/>
          <x v="0"/>
        </r>
        """
        node = fromstring(src)
        # FIX: removed unused locals (n, s, x) that shadowed the expected
        # field list and served no purpose.
        fields = [
            Number(v=1),
            Index(),
            Text(v="2014-03-24"),
            Index(),
            Number(v=25),
            Index(),
        ]
        field = Record.from_tree(node)
        assert field == Record(_fields=fields)
@pytest.fixture
def RecordList():
    """Provide the RecordList class under test."""
    from ..record import RecordList as _RecordList
    return _RecordList
class TestRecordList:
    """Tests for the pivotCacheRecords part container."""

    def test_ctor(self, RecordList):
        """An empty record list serialises with a zero count."""
        cache = RecordList()
        xml = tostring(cache.to_tree())
        expected = """
        <pivotCacheRecords xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"
          count="0" />
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff

    def test_from_xml(self, RecordList):
        """Parsing an empty pivotCacheRecords element yields the default object."""
        src = """
        <pivotCacheRecords count="0" />
        """
        node = fromstring(src)
        cache = RecordList.from_tree(node)
        assert cache == RecordList()

    def test_write(self, RecordList):
        """_write() stores the part in the archive and registers its manifest entry."""
        out = BytesIO()
        archive = ZipFile(out, mode="w")
        manifest = Manifest()
        records = RecordList()
        # FIX: removed an unused local (`xml = tostring(...)`) left over
        # from an earlier version of this test.
        records._write(archive, manifest)
        manifest.append(records)
        assert archive.namelist() == [records.path[1:]]
        assert manifest.find(records.mime_type)
| openpyxl/pivot/tests/test_record.py | 2,691 | Copyright (c) 2010-2019 openpyxl | 32 | en | 0.506525 |
## Calculate feature importance, but focus on "meta-features" which are categorized by
## rules from different perspectives: orders, directions, powers.
## for "comprehensive methods"
from util_relaimpo import *
from util_ca import *
from util import loadNpy
def mainCA(x_name, y_name, divided_by="", feature_names=None):
    """Run commonality-analysis (CA) feature importance for one dataset.

    Parameters:
        x_name, y_name: file names of the X / Y .npy arrays under
            data/X and data/Y.
        divided_by: rule used to group raw features into meta-features
            ("order", "direction" or "power").
        feature_names: optional list of column names for the X dataframe;
            positional indices are used when omitted.

    Prints the bootstrapped CA result table to stdout.
    """
    X = loadNpy(['data', 'X', x_name])
    Y = loadNpy(['data', 'Y', y_name])
    # INFO
    print("Dataset", x_name, y_name)
    print("Method: ", "CA")
    print("Divided by", divided_by)
    # FIX: `feature_names=[]` was a mutable default argument; None is the
    # safe sentinel and is equally falsy here.
    if feature_names:
        xdf = pd.DataFrame(data=X, columns=feature_names)
    else:
        xdf = pd.DataFrame(data=X)
    # divide X into groups of meta-features
    x_list, feature_names = dvdX(xdf, divided_by=divided_by)
    # if power, only use the first four terms
    if divided_by == 'power':
        x_list, feature_names = x_list[0:4], feature_names[0:4]
    print("bootstrapping ...")
    coef_boot, comb_feature = bootstrappingCA(x_list, Y)
    result_df = caResultDf(coef_boot, comb_feature)
    printBootResultCA(result_df)
def mainDA(x_name, y_name, divided_by="", feature_names=None):
    """Run dominance-analysis (DA) feature importance for one dataset.

    Parameters:
        x_name, y_name: file names of the X / Y .npy arrays under
            data/X and data/Y.
        divided_by: rule used to group raw features into meta-features
            ("order", "direction" or "power").
        feature_names: optional list of column names for the X dataframe;
            positional indices are used when omitted.

    Prints the bootstrapped DA result table to stdout.
    """
    X = loadNpy(['data', 'X', x_name])
    Y = loadNpy(['data', 'Y', y_name])
    # INFO
    print("Dataset", x_name, y_name)
    print("Method: ", "DA")
    print("Divided by", divided_by)
    # FIX: `feature_names=[]` was a mutable default argument; None is the
    # safe sentinel and is equally falsy here.
    if feature_names:
        xdf = pd.DataFrame(data=X, columns=feature_names)
    else:
        xdf = pd.DataFrame(data=X)
    # divide X into groups of meta-features
    x_list, feature_names = dvdX(xdf, divided_by=divided_by)
    # if power, only use the first four terms
    if divided_by == 'power':
        x_list, feature_names = x_list[0:4], feature_names[0:4]
    print("bootstrapping ...")
    coef_boot, comb_feature, r2_mean, r2_ci, da_data, ave_data = bootstrappingDA(x_list, Y)
    da_df = daResultDf(da_data, ave_data, r2_mean, comb_feature, feature_name=feature_names)
    printBootResultCA(da_df)
if __name__ == '__main__':
    # Run both CA and DA over every combination of dataset prefix,
    # target metric and meta-feature grouping rule.
    x_prefix = ["HM", "MMA"]
    y_suffix = ["MPS95", "MPSCC95", "CSDM"]
    x_main = "{}_X_ang_vel.npy"
    y_main = "{}_{}.npy"
    divided_list = ["order", "direction", "power"]
    # BUG FIX: `feature_names` was referenced below without ever being
    # defined, raising NameError on the first call. Default to an empty
    # list so the dataframes fall back to positional column indices.
    feature_names = []
    for ys in y_suffix:
        for xp in x_prefix:
            for divide in divided_list:
                x_name = x_main.format(xp)
                y_name = y_main.format(xp, ys)
                mainCA(x_name, y_name, divide, feature_names)
                mainDA(x_name, y_name, divide, feature_names)
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
import argparse
from argcomplete.completers import FilesCompleter
from knack.arguments import CLIArgumentType
from azure.mgmt.containerregistry.v2018_09_01.models import (
PasswordName,
WebhookStatus,
WebhookAction,
PolicyStatus,
RunStatus,
TaskStatus,
BaseImageTriggerType
)
from azure.mgmt.containerregistry.v2018_02_01_preview.models import (
BuildTaskStatus,
OsType,
BuildStatus,
BaseImageTriggerType as BuildBaseImageTriggerType
)
from azure.cli.core.commands.parameters import (
resource_group_name_type,
get_location_type,
tags_type,
deployment_name_type,
get_resource_name_completion_list,
quotes,
get_three_state_flag,
get_enum_type
)
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from ._constants import (
STORAGE_RESOURCE_TYPE,
REGISTRY_RESOURCE_TYPE,
WEBHOOK_RESOURCE_TYPE,
REPLICATION_RESOURCE_TYPE,
BUILD_TASK_RESOURCE_TYPE,
BUILD_STEP_RESOURCE_TYPE,
TASK_RESOURCE_TYPE,
CLASSIC_REGISTRY_SKU,
MANAGED_REGISTRY_SKU,
)
from ._validators import (
validate_headers,
validate_build_arg,
validate_secret_build_arg,
validate_arg,
validate_secret_arg,
validate_set,
validate_set_secret
)
# Reusable CLI argument definition for image names given as 'name:tag'.
image_by_tag_type = CLIArgumentType(
    options_list=['--image', '-t'],
    help="The name of the image. May include a tag in the format 'name:tag'."
)

# Variant that additionally accepts a digest reference ('name@digest').
image_by_tag_or_digest_type = CLIArgumentType(
    options_list=['--image', '-t'],
    help="The name of the image. May include a tag in the format 'name:tag' or digest in the format 'name@digest'."
)
def load_arguments(self, _):  # pylint: disable=too-many-statements
    """Register CLI arguments for every `az acr ...` command group.

    Called by the Azure CLI loader; `self` is the command loader and the
    second (unused) parameter is the raw CLI arguments. Each
    `argument_context` block below scopes definitions to one command
    prefix, from the generic 'acr' root down to specific subcommands.
    """
    with self.argument_context('acr') as c:
        c.argument('resource_group_name', arg_type=resource_group_name_type)
        c.argument('location', arg_type=get_location_type(self.cli_ctx))
        c.argument('tags', arg_type=tags_type)
        c.argument('registry_name', options_list=['--name', '-n'], help='The name of the container registry. You can configure the default registry name using `az configure --defaults acr=<registry name>`', completer=get_resource_name_completion_list(REGISTRY_RESOURCE_TYPE), configured_default='acr')
        c.argument('storage_account_name', help='Provide the name of an existing storage account if you\'re recreating a container registry over a previous registry created storage account. Only applicable to Classic SKU.', completer=get_resource_name_completion_list(STORAGE_RESOURCE_TYPE))
        c.argument('sku', help='The SKU of the container registry', arg_type=get_enum_type(MANAGED_REGISTRY_SKU + CLASSIC_REGISTRY_SKU))
        c.argument('admin_enabled', help='Indicates whether the admin user is enabled', arg_type=get_three_state_flag())
        c.argument('password_name', help='The name of password to regenerate', arg_type=get_enum_type(PasswordName))
        c.argument('username', options_list=['--username', '-u'], help='The username used to log into a container registry')
        c.argument('password', options_list=['--password', '-p'], help='The password used to log into a container registry')
        c.argument('yes', options_list=['--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')
        c.argument('image_names', arg_type=image_by_tag_type, action='append')
        c.argument('timeout', type=int, help='The timeout in seconds.')
        # FIX: corrected doubled word ("the the") in the help string.
        c.argument('docker_file_path', options_list=['--file', '-f'], help="The relative path of the docker file to the source code root folder.")
        c.argument('no_logs', help="Do not show logs after successfully queuing the build.", action='store_true')
        c.argument('no_wait', help="Do not wait for the run to complete and return immediately after queuing the run.", action='store_true')
        c.argument('no_format', help="Indicates whether the logs should be displayed in raw format", action='store_true')
        c.argument('os_type', options_list=['--os'], help='The operating system type required for the build.', arg_type=get_enum_type(OsType))
    with self.argument_context('acr import') as c:
        c.argument('source', help="The source identifier in the format '[registry.azurecr.io/]repository[:tag]' or '[registry.azurecr.io/]repository@digest'.")
        c.argument('source_registry', options_list=['--registry', '-r'], help='The source container registry can be name, login server or resource ID of the source registry.')
        c.argument('target_tags', arg_type=image_by_tag_type, action='append')
        c.argument('repository', help='The repository name to do a manifest-only copy for images.', action='append')
        c.argument('force', help='Overwrite the existing tag of the image to be imported.', action='store_true')
    with self.argument_context('acr config content-trust') as c:
        c.argument('status', help="Indicates whether content-trust is enabled or disabled.", arg_type=get_enum_type(PolicyStatus))
    with self.argument_context('acr repository') as c:
        c.argument('repository', help="The name of the repository.")
        c.argument('image', arg_type=image_by_tag_or_digest_type)
        c.argument('top', type=int, help='Limit the number of items in the results.')
        c.argument('orderby', help='Order the items in the results. Default to alphabetical order of names.', arg_type=get_enum_type(['time_asc', 'time_desc']))
        c.argument('detail', help='Show detailed information.', action='store_true')
        c.argument('delete_enabled', help='Indicates whether delete operation is allowed.', arg_type=get_three_state_flag())
        c.argument('list_enabled', help='Indicates whether this item shows in list operation results.', arg_type=get_three_state_flag())
        c.argument('read_enabled', help='Indicates whether read operation is allowed.', arg_type=get_three_state_flag())
        c.argument('write_enabled', help='Indicates whether write or delete operation is allowed.', arg_type=get_three_state_flag())
    with self.argument_context('acr repository delete') as c:
        c.argument('manifest', nargs='?', required=False, const='', default=None, help=argparse.SUPPRESS)
        c.argument('tag', help=argparse.SUPPRESS)
    with self.argument_context('acr repository untag') as c:
        c.argument('image', arg_type=image_by_tag_type)
    with self.argument_context('acr create') as c:
        c.argument('registry_name', completer=None)
        c.argument('deployment_name', arg_type=deployment_name_type, validator=None)
        c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
    with self.argument_context('acr check-name') as c:
        c.argument('registry_name', completer=None)
    with self.argument_context('acr webhook') as c:
        c.argument('registry_name', options_list=['--registry', '-r'])
        c.argument('webhook_name', options_list=['--name', '-n'], help='The name of the webhook', completer=get_resource_name_completion_list(WEBHOOK_RESOURCE_TYPE))
        c.argument('uri', help='The service URI for the webhook to post notifications.')
        c.argument('headers', nargs='+', help="Space-separated custom headers in 'key[=value]' format that will be added to the webhook notifications. Use {} to clear existing headers.".format(quotes), validator=validate_headers)
        c.argument('actions', nargs='+', help='Space-separated list of actions that trigger the webhook to post notifications.', arg_type=get_enum_type(WebhookAction))
        c.argument('status', help='Indicates whether the webhook is enabled.', arg_type=get_enum_type(WebhookStatus))
        c.argument('scope', help="The scope of repositories where the event can be triggered. For example, 'foo:*' means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar' only. 'foo' is equivalent to 'foo:latest'. Empty means events for all repositories.")
    with self.argument_context('acr webhook create') as c:
        c.argument('webhook_name', completer=None)
    with self.argument_context('acr replication') as c:
        c.argument('registry_name', options_list=['--registry', '-r'])
        c.argument('replication_name', options_list=['--name', '-n'], help='The name of the replication.', completer=get_resource_name_completion_list(REPLICATION_RESOURCE_TYPE))
    with self.argument_context('acr replication create') as c:
        c.argument('replication_name', help='The name of the replication. Default to the location name.', completer=None)
    with self.argument_context('acr run') as c:
        c.argument('registry_name', options_list=['--registry', '-r'])
        c.positional('source_location', help="The local source code directory path (e.g., './src') or the URL to a git repository (e.g., 'https://github.com/Azure-Samples/acr-build-helloworld-node.git') or a remote tarball (e.g., 'http://server/context.tar.gz').", completer=FilesCompleter())
        c.argument('file', options_list=['--file', '-f'], help="The task template/definition file path relative to the source context.")
        c.argument('values', help="The task values file path relative to the source context.")
        c.argument('set_value', options_list=['--set'], help="Value in 'name[=value]' format.", action='append', validator=validate_set)
    with self.argument_context('acr build') as c:
        c.argument('registry_name', options_list=['--registry', '-r'])
        c.positional('source_location', help="The local source code directory path (e.g., './src') or the URL to a git repository (e.g., 'https://github.com/Azure-Samples/acr-build-helloworld-node.git') or a remote tarball (e.g., 'http://server/context.tar.gz').", completer=FilesCompleter())
        c.argument('no_push', help="Indicates whether the image built should be pushed to the registry.", action='store_true')
        c.argument('arg', options_list=['--build-arg'], help="Build argument in 'name[=value]' format.", action='append', validator=validate_arg)
        c.argument('secret_arg', options_list=['--secret-build-arg'], help="Secret build argument in 'name[=value]' format.", action='append', validator=validate_secret_arg)
    with self.argument_context('acr build-task') as c:
        c.argument('registry_name', options_list=['--registry', '-r'])
        # build task parameters
        c.argument('build_task_name', options_list=['--name', '-n'], help='The name of the build task.', completer=get_resource_name_completion_list(BUILD_TASK_RESOURCE_TYPE))
        c.argument('alias', help='The alternative name for build task. Default to the build task name.')
        c.argument('status', help='The current status of build task.', arg_type=get_enum_type(BuildTaskStatus))
        c.argument('cpu', type=int, help='The CPU configuration in terms of number of cores required for the build.')
        c.argument('repository_url', options_list=['--context', '-c'], help="The full URL to the source code repository.")
        c.argument('commit_trigger_enabled', help="Indicates whether the source control commit trigger is enabled.", arg_type=get_three_state_flag())
        c.argument('git_access_token', help="The access token used to access the source control provider.")
        c.argument('with_secure_properties', help="Indicates whether the secure properties of a build task should be returned.", action='store_true')
        # build step parameters
        c.argument('step_name', help='The name of the build step.', completer=get_resource_name_completion_list(BUILD_STEP_RESOURCE_TYPE))
        c.argument('branch', help="The source control branch name.")
        c.argument('no_push', help="Indicates whether the image built should be pushed to the registry.", arg_type=get_three_state_flag())
        c.argument('no_cache', help='Indicates whether the image cache is enabled.', arg_type=get_three_state_flag())
        c.argument('base_image_trigger', help="The type of the auto trigger for base image dependency updates.", arg_type=get_enum_type(BuildBaseImageTriggerType))
        # build parameters
        c.argument('top', help='Limit the number of latest builds in the results.')
        c.argument('build_id', help='The unique build identifier.')
        c.argument('build_status', help='The current status of build.', arg_type=get_enum_type(BuildStatus))
        c.argument('image', arg_type=image_by_tag_or_digest_type)
        c.argument('no_archive', help='Indicates whether the build should be archived.', arg_type=get_three_state_flag())
        c.argument('build_arg', help="Build argument in 'name[=value]' format.", action='append', validator=validate_build_arg)
        c.argument('secret_build_arg', help="Secret build argument in 'name[=value]' format.", action='append', validator=validate_secret_build_arg)
    with self.argument_context('acr task') as c:
        c.argument('registry_name', options_list=['--registry', '-r'])
        c.argument('task_name', options_list=['--name', '-n'], help='The name of the task.', completer=get_resource_name_completion_list(TASK_RESOURCE_TYPE))
        c.argument('status', help='The current status of task.', arg_type=get_enum_type(TaskStatus))
        c.argument('with_secure_properties', help="Indicates whether the secure properties of a task should be returned.", action='store_true')
        # DockerBuildStep, FileTaskStep parameters
        # FIX: corrected doubled word ("the the") in the help string.
        c.argument('file', options_list=['--file', '-f'], help="The relative path of the task/docker file to the source code root folder. Task files must be suffixed with '.yaml'.")
        c.argument('image', arg_type=image_by_tag_or_digest_type)
        c.argument('no_push', help="Indicates whether the image built should be pushed to the registry.", arg_type=get_three_state_flag())
        c.argument('no_cache', help='Indicates whether the image cache is enabled.', arg_type=get_three_state_flag())
        c.argument('values', help="The task values/parameters file path relative to the source context.")
        # common to DockerBuildStep, FileTaskStep and RunTaskStep
        c.argument('context_path', options_list=['--context', '-c'], help="The full URL to the source code repository (Requires '.git' suffix for a github repo).")
        c.argument('arg', help="Build argument in 'name[=value]' format.", action='append', validator=validate_arg)
        c.argument('secret_arg', help="Secret build argument in 'name[=value]' format.", action='append', validator=validate_secret_arg)
        c.argument('set_value', options_list=['--set'], help="Task value in 'name[=value]' format.", action='append', validator=validate_set)
        c.argument('set_secret', help="Secret task value in 'name[=value]' format.", action='append', validator=validate_set_secret)
        # Source Trigger parameters
        c.argument('source_trigger_name', help="The name of the source trigger.")
        c.argument('commit_trigger_enabled', help="Indicates whether the source control commit trigger is enabled.", arg_type=get_three_state_flag())
        c.argument('git_access_token', help="The access token used to access the source control provider.")
        c.argument('branch', help="The source control branch name.")
        c.argument('base_image_trigger_name', help="The name of the base image trigger.")
        c.argument('base_image_trigger_enabled', help="Indicates whether the base image trigger is enabled.", arg_type=get_three_state_flag())
        c.argument('base_image_trigger_type', help="The type of the auto trigger for base image dependency updates.", arg_type=get_enum_type(BaseImageTriggerType))
        # Run related parameters
        c.argument('top', help='Limit the number of latest runs in the results.')
        c.argument('run_id', help='The unique run identifier.')
        c.argument('run_status', help='The current status of run.', arg_type=get_enum_type(RunStatus))
        c.argument('no_archive', help='Indicates whether the run should be archived.', arg_type=get_three_state_flag())
        # Run agent parameters
        c.argument('cpu', type=int, help='The CPU configuration in terms of number of cores required for the run.')
    with self.argument_context('acr task create') as c:
        c.argument('task_name', completer=None)
    with self.argument_context('acr build-task create') as c:
        c.argument('build_task_name', completer=None)
    with self.argument_context('acr helm') as c:
        c.argument('resource_group_name', help=argparse.SUPPRESS)
        c.argument('repository', help=argparse.SUPPRESS)
        c.argument('version', help='The helm chart version.')
    with self.argument_context('acr helm show') as c:
        c.positional('chart', help='The helm chart name.')
    with self.argument_context('acr helm delete') as c:
        c.positional('chart', help='The helm chart name.')
        c.argument('prov', help='Only delete the provenance file.', action='store_true')
    with self.argument_context('acr helm push') as c:
        c.positional('chart_package', help="The helm chart package.", completer=FilesCompleter())
        c.argument('force', help='Overwrite the existing chart package.', action='store_true')
| src/command_modules/azure-cli-acr/azure/cli/command_modules/acr/_params.py | 17,588 | -------------------------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. -------------------------------------------------------------------------------------------- pylint: disable=line-too-long pylint: disable=too-many-statements build task parameters build step parameters build parameters DockerBuildStep, FileTaskStep parameters common to DockerBuildStep, FileTaskStep and RunTaskStep Source Trigger parameters Run related parameters Run agent parameters | 630 | en | 0.257626 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'sample'  # project name used in page titles and metadata
copyright = '2020, Sample Author'  # shown in the generated page footer
author = 'Sample Author'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -*- coding: utf-8 -*-
from django.conf.urls import url
from blueapps.account import views
# URL namespace, used for reversing, e.g. reverse('account:login_page').
app_name = 'account'
# Account endpoints: login landing/redirect pages and a verification-code
# send view (per the view names; confirm exact semantics in blueapps.account.views).
urlpatterns = [
    url(r'^login_success/$', views.login_success, name="login_success"),
    url(r'^login_page/$', views.login_page, name="login_page"),
    url(r'^send_code/$', views.send_code_view, name="send_code")
]
| blueapps/account/urls.py | 336 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from turbogears.decorator import weak_signature_decorator
import xhtml2pdf.pisa as pisa
from six import StringIO
import cherrypy
def to_pdf(filename=None, content_type="application/pdf"):
    """Decorator factory: render a controller's HTML output as a PDF response.

    The wrapped function's return value (HTML) is fed through pisa/xhtml2pdf.
    On success the CherryPy response headers are set to ``content_type`` (and
    a ``Content-Disposition`` attachment header when ``filename`` is given)
    and the PDF bytes are returned; on a pisa error the original HTML output
    is returned unchanged.
    """
    def entangle(func):
        def decorated(func, *args, **kw):
            output = func(*args, **kw)
            # BUG FIX: `StringIO` imported from six *is* the StringIO class
            # (io.StringIO), not the Python 2 module, so the original
            # `StringIO.StringIO()` spelling raised AttributeError at call time.
            dst = StringIO()
            result = pisa.CreatePDF(
                StringIO(output),
                dst
            )
            if not result.err:
                cherrypy.response.headers["Content-Type"] = content_type
                if filename:
                    cherrypy.response.headers["Content-Disposition"] = "attachment; filename=" + filename
                output = dst.getvalue()
            return output
        return decorated
    return weak_signature_decorator(entangle)
# Convenience alias kept for backward compatibility.
topdf = to_pdf
| xhtml2pdf/turbogears.py | 1,458 | -*- coding: utf-8 -*- Copyright 2010 Dirk Holtwick, holtwick.it Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 585 | en | 0.857206 |
#!/usr/bin/env python
# Copyright 2015 Coursera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Coursera's asynchronous grader command line SDK.
You may install it from source, or via pip.
"""
from courseraprogramming.commands import oauth2
import requests
import logging
import time
import sys
def check_auth(args):
    """
    Checks courseraprogramming's connectivity to the coursera.org API servers
    """
    oauth2_instance = oauth2.build_oauth2(args)
    auth = oauth2_instance.build_authorizer()
    my_profile_url = (
        'https://api.coursera.org/api/externalBasicProfiles.v1?'
        'q=me&fields=name'
    )
    r = requests.get(my_profile_url, auth=auth)
    if r.status_code != 200:
        logging.error('Received response code %s from the basic profile API.',
                      r.status_code)
        logging.debug('Response body:\n%s', r.text)
        sys.exit(1)
    # Parse the two fields independently so a failure in one still reports the
    # other. Catch only the errors a malformed payload can raise (bad JSON,
    # missing keys/elements) instead of the previous bare `except:`, which
    # also swallowed KeyboardInterrupt/SystemExit.
    try:
        external_id = r.json()['elements'][0]['id']
    except (ValueError, KeyError, IndexError, TypeError):
        logging.error(
            'Could not parse the external id out of the response body %s',
            r.text)
        external_id = None
    try:
        name = r.json()['elements'][0]['name']
    except (ValueError, KeyError, IndexError, TypeError):
        logging.error(
            'Could not parse the name out of the response body %s',
            r.text)
        name = None
    if not args.quiet or args.quiet == 0:
        print('Name: %s' % name)
        print('External ID: %s' % external_id)
    # Non-zero exit tells calling scripts that authentication is broken.
    if name is None or external_id is None:
        sys.exit(1)
def display_auth_cache(args):
    '''
    Writes to the screen the state of the authentication cache. (For debugging
    authentication issues.) BEWARE: DO NOT email the output of this command!!!
    You must keep the tokens secure. Treat them as passwords.
    '''
    oauth2_instance = oauth2.build_oauth2(args)
    # In quiet mode (any non-zero quiet level) print nothing at all.
    if args.quiet and args.quiet != 0:
        return
    cache = oauth2_instance.token_cache

    def shorten(secret):
        # Show only a prefix of each secret unless --no-truncate was passed.
        if not args.no_truncate and secret is not None:
            return secret[:10] + '...'
        return secret

    print("Auth token: %s" % shorten(cache['token']))
    # Remaining lifetime, rounded down to a tenth of a second.
    expires_in = int((cache['expires'] - time.time()) * 10) / 10.0
    print("Auth token expires in: %s seconds." % expires_in)
    if 'refresh' in cache:
        print("Refresh token: %s" % shorten(cache['refresh']))
    else:
        print("No refresh token found.")
def parser(subparsers):
    "Build an argparse argument parser to parse the command line."
    # Top-level `configure` subcommand (authentication / diagnostics).
    config_parser = subparsers.add_parser(
        'configure',
        help='Configure %(prog)s for operation!')
    nested = config_parser.add_subparsers()
    # `configure check-auth`: verify connectivity to the Coursera API.
    check_auth_parser = nested.add_parser(
        'check-auth',
        help=check_auth.__doc__)
    check_auth_parser.set_defaults(func=check_auth)
    # `configure display-auth-cache`: dump (truncated) cached tokens.
    cache_parser = nested.add_parser(
        'display-auth-cache',
        help=display_auth_cache.__doc__)
    cache_parser.set_defaults(func=display_auth_cache)
    cache_parser.add_argument(
        '--no-truncate',
        action='store_true',
        help='Do not truncate the keys [DANGER!!]')
    return config_parser
| courseraprogramming/commands/config.py | 3,985 | Checks courseraprogramming's connectivity to the coursera.org API servers
Writes to the screen the state of the authentication cache. (For debugging
authentication issues.) BEWARE: DO NOT email the output of this command!!!
You must keep the tokens secure. Treat them as passwords.
Build an argparse argument parser to parse the command line.
Coursera's asynchronous grader command line SDK.
You may install it from source, or via pip.
!/usr/bin/env python Copyright 2015 Coursera Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. create the parser for the configure subcommand. (authentication / etc.) Local subsubcommand of the grade subcommand | 1,120 | en | 0.818649 |
import torch
import torch.nn.functional as F
def spatial_argmax(logit):
    """Soft-argmax of a batch of 2-D score maps.

    :param logit: tensor of shape (B, H, W) with unnormalized scores.
    :return: tensor of shape (B, 2); each row is the softmax-weighted (x, y)
             expectation, with both axes normalized to [-1, 1].
    """
    batch = logit.size(0)
    probs = F.softmax(logit.view(batch, -1), dim=-1).view_as(logit)
    xs = torch.linspace(-1, 1, logit.size(2)).to(logit.device)
    ys = torch.linspace(-1, 1, logit.size(1)).to(logit.device)
    # Marginalize over rows/columns, then take the expectation along each axis.
    x_coord = (probs.sum(1) * xs[None]).sum(1)
    y_coord = (probs.sum(2) * ys[None]).sum(1)
    return torch.stack((x_coord, y_coord), 1)
class CNNClassifier(torch.nn.Module):
    """Residual CNN classifier: a stack of strided residual blocks followed by
    global average pooling and a linear head.
    """

    class Block(torch.nn.Module):
        """Residual block: three conv->batchnorm->relu stages plus a strided
        1x1-conv skip connection added before the final ReLU.
        """

        def __init__(self, n_input, n_output, kernel_size=3, stride=2):
            super().__init__()
            # Attribute names (c1..c3, b1..b3, skip) are kept stable so
            # existing state_dict checkpoints continue to load.
            self.c1 = torch.nn.Conv2d(n_input, n_output, kernel_size=kernel_size, padding=kernel_size // 2,
                                      stride=stride, bias=False)
            self.c2 = torch.nn.Conv2d(n_output, n_output, kernel_size=kernel_size, padding=kernel_size // 2, bias=False)
            self.c3 = torch.nn.Conv2d(n_output, n_output, kernel_size=kernel_size, padding=kernel_size // 2, bias=False)
            self.b1 = torch.nn.BatchNorm2d(n_output)
            self.b2 = torch.nn.BatchNorm2d(n_output)
            self.b3 = torch.nn.BatchNorm2d(n_output)
            self.skip = torch.nn.Conv2d(n_input, n_output, kernel_size=1, stride=stride)

        def forward(self, x):
            z = F.relu(self.b1(self.c1(x)))
            z = F.relu(self.b2(self.c2(z)))
            return F.relu(self.b3(self.c3(z)) + self.skip(x))

    def __init__(self, layers=(16, 32, 32, 32), n_output_channels=2, kernel_size=3):
        """
        :param layers: output channel count of each residual block. The default
            is a tuple rather than a list to avoid the shared-mutable-default
            pitfall; any iterable of ints is accepted, so this is backward
            compatible.
        :param n_output_channels: number of output logits (classes).
        :param kernel_size: convolution kernel size used in every block.
        """
        super().__init__()
        blocks = []
        c = 3  # RGB input
        for l in layers:
            blocks.append(self.Block(c, l, kernel_size, 2))
            c = l
        self.network = torch.nn.Sequential(*blocks)
        self.classifier = torch.nn.Linear(c, n_output_channels)

    def forward(self, x):
        z = self.network(x)
        # Global average pool over the spatial dimensions, then classify.
        return self.classifier(z.mean(dim=[2, 3]))
class Planner_reg(torch.nn.Module):
    """Aim-point planner: a strided conv stack produces a one-channel heatmap
    whose soft-argmax is the predicted aim point.
    """

    def __init__(self, channels=[16, 32, 32, 32]):
        super().__init__()
        stages = []
        in_ch = 3
        for out_ch in channels:
            # Each stage: normalize, downsample with a 5x5 stride-2 conv, ReLU.
            stages.append(torch.nn.BatchNorm2d(in_ch))
            stages.append(torch.nn.Conv2d(in_ch, out_ch, 5, 2, 2))
            stages.append(torch.nn.ReLU(True))
            in_ch = out_ch
        # Final 1x1 conv collapses the features to a single-channel heatmap.
        self._conv = torch.nn.Sequential(*stages, torch.nn.Conv2d(in_ch, 1, 1))

    def forward(self, img):
        """
        Predict the aim point in image coordinates for a SuperTuxKart frame.

        @img: (B,3,96,128)
        return (B,2) soft-argmax location of the heatmap, in [-1, 1] per axis.
        """
        heatmap = self._conv(img)
        return spatial_argmax(heatmap[:, 0])
class FCN(torch.nn.Module):
    """Fully-convolutional encoder/decoder for dense (per-pixel) prediction,
    with optional U-Net-style skip connections between matching resolutions.
    """
    class UpBlock(torch.nn.Module):
        """Upsampling block: a single transposed convolution followed by ReLU."""
        def __init__(self, n_input, n_output, kernel_size=3, stride=2):
            super().__init__()
            self.c1 = torch.nn.ConvTranspose2d(n_input, n_output, kernel_size=kernel_size, padding=kernel_size // 2,
                                               stride=stride, output_padding=1)
        def forward(self, x):
            return F.relu(self.c1(x))
    # NOTE(review): mutable list default is shared across calls; harmless here
    # since it is only read, never mutated.
    def __init__(self, layers=[16, 32, 64, 128], n_output_channels=5, kernel_size=3, use_skip=True):
        """
        :param layers: channel widths of the downsampling path; the upsampling
            path mirrors them in reverse.
        :param n_output_channels: number of per-pixel output channels.
        :param use_skip: concatenate encoder activations onto decoder inputs.
        """
        super().__init__()
        # Per-channel input normalization constants (presumably dataset
        # statistics computed offline -- not derivable from this file).
        self.input_mean = torch.Tensor([0.3521554, 0.30068502, 0.28527516])
        self.input_std = torch.Tensor([0.18182722, 0.18656468, 0.15938024])
        c = 3
        self.use_skip = use_skip
        self.n_conv = len(layers)
        # Channel count of each skip source: the raw input, then each encoder
        # stage's input.
        skip_layer_size = [3] + layers[:-1]
        for i, l in enumerate(layers):
            self.add_module('conv%d' % i, CNNClassifier.Block(c, l, kernel_size, 2))
            c = l
        for i, l in list(enumerate(layers))[::-1]:
            self.add_module('upconv%d' % i, self.UpBlock(c, l, kernel_size, 2))
            c = l
            # After concatenating a skip connection the next layer sees the
            # extra channels, so grow c before building it.
            if self.use_skip:
                c += skip_layer_size[i]
        self.classifier = torch.nn.Conv2d(c, n_output_channels, 1)
    def forward(self, x):
        """Return per-pixel logits with the same spatial size as the input."""
        # Normalize the input with the stored statistics.
        z = (x - self.input_mean[None, :, None, None].to(x.device)) / self.input_std[None, :, None, None].to(x.device)
        up_activation = []
        for i in range(self.n_conv):
            # Add all the information required for skip connections
            up_activation.append(z)
            z = self._modules['conv%d'%i](z)
        for i in reversed(range(self.n_conv)):
            z = self._modules['upconv%d'%i](z)
            # Fix the padding: crop to the matching encoder activation's size.
            z = z[:, :, :up_activation[i].size(2), :up_activation[i].size(3)]
            # Add the skip connection
            if self.use_skip:
                z = torch.cat([z, up_activation[i]], dim=1)
        return self.classifier(z)
# Registry mapping model names to classes; save_model/load_model use the key
# to derive the checkpoint filename ('<name>.th').
model_factory = {
    'cnn': CNNClassifier,
    'fcn': FCN,
    'planner_reg':Planner_reg
}
def save_model(model):
    """Save `model`'s state_dict next to this file as '<name>.th', where
    <name> is the model's key in model_factory.

    Raises ValueError if the model's type is not registered.
    """
    from torch import save
    from os import path
    here = path.dirname(path.abspath(__file__))
    for name, cls in model_factory.items():
        if isinstance(model, cls):
            return save(model.state_dict(), path.join(here, '%s.th' % name))
    raise ValueError("model type '%s' not supported!" % str(type(model)))
def load_model(model):
    """Instantiate the model registered under name `model` and load its
    weights from '<model>.th' next to this file (mapped onto the CPU).
    """
    from torch import load
    from os import path
    weights_path = path.join(path.dirname(path.abspath(__file__)), '%s.th' % model)
    net = model_factory[model]()
    net.load_state_dict(load(weights_path, map_location='cpu'))
    return net
| planner/regressor/models.py | 5,248 | Your code here
Predict the aim point in image coordinate, given the supertuxkart image
@img: (B,3,96,128)
return (B,2)
self.classifier = torch.nn.Linear(h, 2) self.classifier = torch.nn.Conv2d(h, 1, 1) Add all the information required for skip connections Fix the padding Add the skip connection | 297 | en | 0.520751 |
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
import chat.routing
# Root ASGI application: routes each incoming connection by protocol type.
# Websocket connections are authenticated via Django's session/auth middleware
# and then dispatched against the chat app's websocket URL patterns.
application = ProtocolTypeRouter({
    # Empty for now (http->django views is added by default)
    'websocket': AuthMiddlewareStack(
        URLRouter(
            chat.routing.websocket_urlpatterns
        )
    ),
})
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
from airflow.providers.amazon.aws.hooks.cloud_formation import AWSCloudFormationHook
try:
from moto import mock_cloudformation
except ImportError:
mock_cloudformation = None
@unittest.skipIf(mock_cloudformation is None, 'moto package not present')
class TestAWSCloudFormationHook(unittest.TestCase):
    """Tests for AWSCloudFormationHook against a moto-mocked CloudFormation."""
    def setUp(self):
        # A fresh hook per test; the connection id resolves inside the mock.
        self.hook = AWSCloudFormationHook(aws_conn_id='aws_default')
    def create_stack(self, stack_name):
        """Helper: create a minimal one-resource stack via the hook under test."""
        timeout = 15
        template_body = json.dumps(
            {'Resources': {"myResource": {"Type": "emr", "Properties": {"myProperty": "myPropertyValue"}}}}
        )
        self.hook.create_stack(
            stack_name=stack_name,
            params={
                'TimeoutInMinutes': timeout,
                'TemplateBody': template_body,
                'Parameters': [{'ParameterKey': 'myParam', 'ParameterValue': 'myParamValue'}],
            },
        )
    @mock_cloudformation
    def test_get_conn_returns_a_boto3_connection(self):
        """get_conn() should return a usable boto3 CloudFormation client."""
        self.assertIsNotNone(self.hook.get_conn().describe_stacks())
    @mock_cloudformation
    def test_get_stack_status(self):
        """get_stack_status() is None for a missing stack, then CREATE_COMPLETE."""
        stack_name = 'my_test_get_stack_status_stack'
        stack_status = self.hook.get_stack_status(stack_name=stack_name)
        self.assertIsNone(stack_status)
        self.create_stack(stack_name)
        stack_status = self.hook.get_stack_status(stack_name=stack_name)
        self.assertEqual(stack_status, 'CREATE_COMPLETE', 'Incorrect stack status returned.')
    @mock_cloudformation
    def test_create_stack(self):
        """create_stack() results in exactly one stack in CREATE_COMPLETE."""
        stack_name = 'my_test_create_stack_stack'
        self.create_stack(stack_name)
        stacks = self.hook.get_conn().describe_stacks()['Stacks']
        self.assertGreater(len(stacks), 0, 'CloudFormation should have stacks')
        matching_stacks = [x for x in stacks if x['StackName'] == stack_name]
        self.assertEqual(len(matching_stacks), 1, f'stack with name {stack_name} should exist')
        stack = matching_stacks[0]
        self.assertEqual(stack['StackStatus'], 'CREATE_COMPLETE', 'Stack should be in status CREATE_COMPLETE')
    @mock_cloudformation
    def test_delete_stack(self):
        """delete_stack() removes the stack from describe_stacks output."""
        stack_name = 'my_test_delete_stack_stack'
        self.create_stack(stack_name)
        self.hook.delete_stack(stack_name=stack_name)
        stacks = self.hook.get_conn().describe_stacks()['Stacks']
        matching_stacks = [x for x in stacks if x['StackName'] == stack_name]
        self.assertEqual(len(matching_stacks), 0, f'stack with name {stack_name} should not exist')
| tests/providers/amazon/aws/hooks/test_cloud_formation.py | 3,387 | Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 752 | en | 0.883564 |
"""sls.py
An implementation of the robust adaptive controller.
Both FIR SLS version with CVXPY and the common
Lyapunov relaxation.
"""
import logging
import math
from abc import ABC, abstractmethod

import cvxpy as cvx
import numpy as np
import scipy.linalg
import scipy.optimize  # fminbound is called in SLS_FIRStrategy's gamma bisection

import utils
from adaptive import AdaptiveMethod
class SLSInfeasibleException(Exception):
    """Raised when the robust SLS synthesis problem admits no feasible solution."""

    def __init__(self, msg=None):
        super().__init__(msg)
def make_state_space_controller(Phi_x, Phi_u, n, p):
    """
    Convert length-T FIR system responses (Phi_x, Phi_u) into a state-space
    realization (A, B, C, D) of the dynamic controller mapping states to inputs.

    See Theorem 2 of:
    https://nikolaimatni.github.io/papers/sls_state_space.pdf
    """
    assert Phi_x.ndim == 2
    assert Phi_u.ndim == 2
    assert Phi_x.shape[1] == n
    assert Phi_u.shape[1] == n
    nT = Phi_x.shape[0]
    pT = Phi_u.shape[0]
    assert nT % n == 0
    assert pT % p == 0
    T = nT // n
    assert T == pT // p
    # Block down-shift operator over the controller's (T-1)-tap memory.
    Z = np.diag(np.ones(n*(T-2)), k=-n)
    assert Z.shape == ((T-1)*n, (T-1)*n)
    # Injects the current state into the first memory slot.
    calI = np.zeros((n*(T-1), n))
    calI[:n, :] = np.eye(n)
    # FIR taps 2..T stacked horizontally.
    Rhat = np.hstack([Phi_x[n*k:n*(k+1), :] for k in range(1, T)])
    Mhat = np.hstack([Phi_u[p*k:p*(k+1), :] for k in range(1, T)])
    M1 = Phi_u[:p, :]
    A = Z - calI.dot(Rhat)
    B = -calI
    C = M1.dot(Rhat) - Mhat
    D = M1
    return (A, B, C, D)
def h2_squared_norm(A, B, Phi_x, Phi_u, Q, R, sigma_w):
    """
    Squared H2 norm (infinite-horizon LQR cost) of the true system (A, B) in
    feedback with the dynamic controller realized from (Phi_x, Phi_u).
    """
    n, p = B.shape
    A_k, B_k, C_k, D_k = make_state_space_controller(Phi_x, Phi_u, n, p)
    k = A_k.shape[0]  # controller state dimension
    # Closed-loop dynamics of the stacked (plant state, controller state).
    A_cl = np.block([
        [A + B.dot(D_k), B.dot(C_k)],
        [B_k, A_k]
    ])
    # Performance output stacks the weighted state and the weighted input.
    Q_sqrt = utils.psd_sqrt(Q)
    R_sqrt = utils.psd_sqrt(R)
    C_cl = np.block([
        [Q_sqrt, np.zeros((n, k))],
        [R_sqrt.dot(D_k), R_sqrt.dot(C_k)]
    ])
    # Process noise enters only through the plant state.
    B_cl = np.vstack((np.eye(n), np.zeros((k, n))))
    # Controllability Gramian of the closed loop, then the H2 trace formula.
    P = utils.solve_discrete_lyapunov(A_cl.T, B_cl.dot(B_cl.T))
    return (sigma_w ** 2) * np.trace(C_cl.dot(P).dot(C_cl.T))
def _assert_AB_consistent(A, B):
assert len(A.shape) == 2 and A.shape[0] == A.shape[1]
assert len(B.shape) == 2
assert A.shape[0] == B.shape[0]
def _assert_ABCD_consistent(A, B, C, D):
    # (A, B) consistency plus output-equation shape checks for (C, D).
    _assert_AB_consistent(A, B)
    assert C.ndim == 2
    assert D.ndim == 2
    assert C.shape[1] == A.shape[0]
    assert D.shape[0] == C.shape[0]
    assert D.shape[1] == B.shape[1]
def roll_forward(A, B, K, x0, psi0, sigma_w, horizon, rng=None):
    """Apply an LTI controller K = (A_k,B_k,C_k,D_k)
    Roll the true system (A, B) forward with the SS realization of the LTI
    controller given. horizon is the length of the trajectory, and
    sigma_w is the stddev of the Gaussian process noise.

    x0/psi0 are the initial plant/controller states (None means zero).
    Returns (states, inputs, controller_states) with shapes
    (horizon+1, n), (horizon, p), (horizon+1, k).
    """
    if rng is None:
        rng = np.random
    _assert_AB_consistent(A, B)
    A_k, B_k, C_k, D_k = K
    _assert_ABCD_consistent(A_k, B_k, C_k, D_k)
    state_dim, input_dim = B.shape
    psi_dim = A_k.shape[0]
    assert C_k.shape[0] == input_dim
    assert B_k.shape[1] == state_dim
    if x0 is None:
        x0 = np.zeros((state_dim,))
    if psi0 is None:
        psi0 = np.zeros((psi_dim,))
    assert x0.shape == (state_dim,)
    assert psi0.shape == (psi_dim,)
    # Draw all process noise up front so a seeded rng yields reproducible rolls.
    process = sigma_w*rng.normal(size=(horizon, state_dim))
    xt = np.array(x0)
    psit = np.array(psi0)
    states = np.zeros((horizon+1, state_dim))
    inputs = np.zeros((horizon, input_dim))
    controller_states = np.zeros((horizon+1, psi_dim))
    states[0, :] = x0
    controller_states[0, :] = psi0
    for t in range(horizon):
        # Controller: psi_{t+1} = A_k psi_t + B_k x_t, u_t = C_k psi_t + D_k x_t.
        psitp1 = A_k.dot(psit) + B_k.dot(xt)
        ut = C_k.dot(psit) + D_k.dot(xt)
        # Plant update with additive Gaussian process noise.
        xtp1 = A.dot(xt) + B.dot(ut) + process[t]
        inputs[t, :] = ut
        states[t+1, :] = xtp1
        controller_states[t+1, :] = psitp1
        xt = xtp1
        psit = psitp1
    return states, inputs, controller_states
def sls_synth(Q, R, Ahat, Bhat, eps_A, eps_B, T, gamma, alpha, logger=None):
    """
    Solves the SLS synthesis problem for length T FIR filters
    using CVXPY

    (Q, R): LQR cost matrices; (Ahat, Bhat): nominal dynamics;
    (eps_A, eps_B): operator-norm bounds on the model error;
    gamma in (0, 1): H-infinity margin; alpha in (0, 1): split between the
    A- and B-error channels.
    Returns (is_feasible, objective_value, Phi_x, Phi_u); the last three are
    None when the problem could not be solved.

    NOTE(review): written against the pre-1.0 CVXPY API
    (cvx.Variable(rows, cols), cvx.Semidef, `*` as matrix multiply).
    """
    assert len(Q.shape) == 2 and Q.shape[0] == Q.shape[1]
    assert len(R.shape) == 2 and R.shape[0] == R.shape[1]
    assert len(Ahat.shape) == 2 and Ahat.shape[0] == Ahat.shape[1]
    assert len(Bhat.shape) == 2 and Bhat.shape[0] == Ahat.shape[0]
    assert Q.shape[0] == Ahat.shape[0]
    assert R.shape[0] == Bhat.shape[1]
    assert eps_A >= 0
    assert eps_B >= 0
    assert T >= 1
    assert gamma > 0 and gamma < 1
    assert alpha > 0 and alpha < 1
    if logger is None:
        logger = logging.getLogger(__name__)
    n, p = Bhat.shape
    Q_sqrt = utils.psd_sqrt(Q)
    R_sqrt = utils.psd_sqrt(R)
    # Phi_x = \sum_{k=1}^{T} Phi_x[k] z^{-k}
    Phi_x = cvx.Variable(T*n, n, name="Phi_x")
    # Phi_u = \sum_{k=1}^{T} Phi_u[k] z^{-k}
    Phi_u = cvx.Variable(T*p, n, name="Phi_u")
    # htwo_cost
    htwo_cost = cvx.Variable(name="htwo_cost")
    # subspace constraint:
    # [zI - Ah, -Bh] * [Phi_x; Phi_u] = I
    #
    # Note that:
    # z Phi_x = \sum_{k=0}^{T-1} Phi_x[k+1] z^{-k}
    #
    # This means that:
    # 1) Phi_x[1] = I
    # 2) Phi_x[k+1] = Ah*Phi_x[k] + Bh*Phi_u[k] for k=1, ..., T-1
    # 3) Ah*Phi_x[T] + Bh*Phi_u[T] = 0
    constr = []
    constr.append(Phi_x[:n, :] == np.eye(n))
    for k in range(T-1):
        constr.append(Phi_x[n*(k+1):n*(k+1+1), :] == Ahat*Phi_x[n*k:n*(k+1), :] + Bhat*Phi_u[p*k:p*(k+1), :])
    constr.append(Ahat*Phi_x[n*(T-1):, :] + Bhat*Phi_u[p*(T-1):, :] == 0)
    # H2 constraint:
    # By Parseval's identity, this is equal (up to constants) to
    #
    # frobenius_norm(
    #   [ Q_sqrt*Phi_x[1] ;
    #     ...
    #     Q_sqrt*Phi_x[T] ;
    #     R_sqrt*Phi_u[1] ;
    #     ...
    #     R_sqrt*Phi_u[T]
    #   ]
    # ) <= htwo_cost
    # TODO: what is the best way to implement this in cvxpy?
    constr.append(
        cvx.norm(
            cvx.bmat(
                [[Q_sqrt*Phi_x[n*k:n*(k+1), :]] for k in range(T)] +
                [[R_sqrt*Phi_u[p*k:p*(k+1), :]] for k in range(T)]),
            'fro') <= htwo_cost)
    # H-infinity constraint
    #
    # We want to enforce ||H(z)||_inf <= gamma, where
    #
    # H(z) = \sum_{k=1}^{T} [ mult_x * Phi_x[k] ; mult_u * Phi_u[k] ] z^{-k}.
    #
    # Here, each of the FIR coefficients has size (n+p) x n. Since n+p>n, we enforce
    # the constraint on the transpose system H^T(z). The LMI constraint
    # for this comes from Theorem 5.8 of
    # Positive trigonometric polynomials and signal processing applications (2007) by
    # B. Dumitrescu.
    #
    # Here is a table to map the variable names in the text to this program
    #
    # Text         Program                   Comment
    # -------------------------------------------------------------
    # p            n                         Output dim
    # m            n+p                       Input dim
    # n            T                         FIR horizon
    # p(n+1)       n(T+1)                    SDP variable size
    # p(n+1) x m   n(T+1) x (n+p)
    mult_x = eps_A/np.sqrt(alpha)
    mult_u = eps_B/np.sqrt(1-alpha)
    # Hbar has size (T+1)*n x (n+p)
    Hbar = cvx.bmat(
        [[np.zeros((n, n)), np.zeros((n, p))]] +
        [[mult_x*Phi_x[n*k:n*(k+1), :].T, mult_u*Phi_u[p*k:p*(k+1), :].T] for k in range(T)])
    # NOTE(review): this rebinding shadows the Q cost parameter; Q_sqrt was
    # already computed above, so the cost matrix is no longer needed here.
    Q = cvx.Semidef(n*(T+1), name="Q")
    # Constraint (5.44)
    # Case k==0: the block diag of Q has to sum to gamma^2 * eye(n)
    gamma_sq = gamma ** 2
    constr.append(
        sum([Q[n*t:n*(t+1), n*t:n*(t+1)] for t in range(T+1)]) == gamma_sq*np.eye(n))
    # Case k>0: the block off-diag of Q has to sum to zero
    for k in range(1, T+1):
        constr.append(
            sum([Q[n*t:n*(t+1), n*(t+k):n*(t+1+k)] for t in range(T+1-k)]) == np.zeros((n, n)))
    # Constraint (5.45)
    constr.append(
        cvx.bmat([
            [Q, Hbar],
            [Hbar.T, np.eye(n+p)]]) == cvx.Semidef(n*(T+1) + (n+p)))
    prob = cvx.Problem(cvx.Minimize(htwo_cost), constr)
    prob.solve(solver=cvx.SCS)
    if prob.status == cvx.OPTIMAL:
        # NOTE(review): these debug messages go through the module-level
        # `logging` root, not the `logger` built above -- verify intent.
        logging.debug("successfully solved!")
        Phi_x = np.array(Phi_x.value)
        Phi_u = np.array(Phi_u.value)
        return (True, prob.value, Phi_x, Phi_u)
    else:
        logging.debug("could not solve: {}".format(prob.status))
        return (False, None, None, None)
def sls_common_lyapunov(A, B, Q, R, eps_A, eps_B, tau, logger=None):
    """
    Solves the common Lyapunov relaxation to the robust
    synthesis problem.
    Taken from
    lstd-lqr/blob/master/code/policy_iteration.ipynb
    learning-lqr/experiments/matlab/sls_synth_yalmip/common_lyap_synth_var2_alpha.m

    Returns (is_feasible, objective, P, K) where K is a static gain meant to
    be applied as A + B K; the last three are None on failure.

    NOTE(review): uses the pre-1.0 CVXPY API (cvx.Symmetric, cvx.Semidef,
    `*` as matrix multiply) and requires the MOSEK solver.
    """
    if logger is None:
        logger = logging.getLogger(__name__)
    d, p = B.shape
    X = cvx.Symmetric(d)  # inverse Lyapunov function
    Z = cvx.Variable(p, d)  # -K*X
    W_11 = cvx.Symmetric(d)
    W_12 = cvx.Variable(d, p)
    W_22 = cvx.Symmetric(p)
    alph = cvx.Variable()  # scalar for tuning the H_inf constraint
    constraints = []
    # H2 cost: trace(W)=H2 cost
    mat1 = cvx.bmat([
        [X, X, Z.T],
        [X, W_11, W_12],
        [Z, W_12.T, W_22]])
    constraints.append(mat1 == cvx.Semidef(2*d + p))
    # H_infinity constraint
    mat2 = cvx.bmat([
        [X-np.eye(d), (A*X+B*Z), np.zeros((d, d)), np.zeros((d, p))],
        [(X*A.T+Z.T*B.T), X, eps_A*X, eps_B*Z.T],
        [np.zeros((d, d)), eps_A*X, alph*(tau**2)*np.eye(d), np.zeros((d, p))],
        [np.zeros((p, d)), eps_B*Z, np.zeros((p, d)), (1-alph)*(tau**2)*np.eye(p)]])
    constraints.append(mat2 == cvx.Semidef(3*d + p))
    # constrain alpha to be in [0,1]:
    constraints.append(alph >= 0)
    constraints.append(alph <= 1)
    # Solve!
    objective = cvx.Minimize(cvx.trace(Q*W_11) + cvx.trace(R*W_22))
    prob = cvx.Problem(objective, constraints)
    try:
        obj = prob.solve(solver=cvx.MOSEK)
    except cvx.SolverError:
        logger.warn("SolverError encountered")
        return (False, None, None, None)
    if prob.status == cvx.OPTIMAL:
        logging.debug("common_lyapunov: found optimal solution")
        X_value = np.array(X.value)
        # Recover P = X^{-1} via a symmetric positive-definite solve.
        P_value = scipy.linalg.solve(X_value, np.eye(d), sym_pos=True)
        # NOTE: the K returned here is meant to be used
        # as A + BK **NOT** A - BK
        K_value = np.array(Z.value).dot(P_value)
        return (True, obj, P_value, K_value)
    else:
        logging.debug("common_lyapunov: could not solve (status={})".format(prob.status))
        return (False, None, None, None)
class SLS_Implementation(ABC):
    """Interface for SLS synthesis backends."""

    @abstractmethod
    def open(self):
        """Perform any one-time setup needed before synthesis calls."""

    @abstractmethod
    def synth(self, Q, R, Ahat, Bhat, eps_A, eps_B, truncation_length, gamma, alpha, logger):
        """Solve the robust FIR SLS synthesis problem; see sls_synth for the contract."""
class SLS_CVXPY(SLS_Implementation):
    """SLS backend that delegates directly to the CVXPY-based FIR synthesis."""

    def open(self):
        # No setup is required for the CVXPY backend.
        pass

    def synth(self, Q, R, Ahat, Bhat, eps_A, eps_B, truncation_length, gamma, alpha, logger):
        return sls_synth(Q, R, Ahat, Bhat, eps_A, eps_B, truncation_length,
                         gamma, alpha, logger)
class SLS_FIRStrategy(AdaptiveMethod):
    """Adaptive control based on FIR truncated SLS

    Each epoch: fit (A, B) by regularized least squares, bound the estimation
    error against the true system, and synthesize a robust dynamic controller
    via FIR SLS (optionally bisecting over the H-infinity margin gamma).
    """
    def __init__(self, Q, R, A_star, B_star, sigma_w, rls_lam,
                 sigma_explore, reg, epoch_multiplier,
                 truncation_length, actual_error_multiplier,
                 use_gamma=0.98, sls_impl=None):
        super().__init__(Q, R, A_star, B_star, sigma_w, rls_lam)
        self._sigma_explore = sigma_explore
        self._reg = reg
        self._epoch_multiplier = epoch_multiplier
        # TODO(stephentu):
        # the truncation length should grow with time, but for now
        # we keep it constant
        # Additionally, gamma should be searched over as an optimization
        # variable. For how, we fix the value.
        # Finally, the optimization problem should be modified
        # to involve the variable V as in https://arxiv.org/abs/1805.09388
        self._truncation_length = truncation_length
        self._actual_error_multiplier = actual_error_multiplier
        self._sls_impl = sls_impl if sls_impl is not None else SLS_CVXPY()
        self._logger = logging.getLogger(__name__)
        # use_gamma=None triggers a bisection over gamma in _design_controller.
        self._use_gamma = use_gamma
        self._controller_state = None
    def _get_logger(self):
        return self._logger
    def reset(self, rng):
        """Reset adaptive state and (re)open the synthesis backend."""
        super().reset(rng)
        self._sls_impl.open()
        self._midway_infeasible = 0
    def _design_controller(self, states, inputs, transitions, rng):
        """Fit nominal dynamics and synthesize a robust SLS controller.

        Returns (Anom, Bnom, Jnom) where Jnom is the infinite-horizon cost of
        the synthesized controller on the true system. Raises
        SLSInfeasibleException when synthesis fails and no fallback exists.
        """
        logger = self._get_logger()
        Anom, Bnom, _ = utils.solve_least_squares(states, inputs, transitions, reg=self._reg)
        # True estimation errors (available here because A_star/B_star are
        # known in simulation), inflated by a configurable multiplier.
        eps_A = np.linalg.norm(Anom - self._A_star, ord=2)
        eps_B = np.linalg.norm(Bnom - self._B_star, ord=2)
        effective_eps_A = self._actual_error_multiplier * eps_A
        effective_eps_B = self._actual_error_multiplier * eps_B
        epoch_id = self._epoch_idx + 1 if self._has_primed else 0
        logger.info("_design_controller(epoch={}): effective_eps_A={}, effective_eps_B={}".format(epoch_id, effective_eps_A, effective_eps_B))
        # if SLS is not feasible, we fallback to the current
        # control policy if it exists, otherwise we throw an SLSInfeasibleException
        if self._use_gamma is None:
            # bisect for gamma
            logger.info("_design_controller(epoch={}): bisecting for gamma".format(epoch_id))
            INF = 1e12
            def fn(gamma):
                # Objective of the scalar search: the robustly-inflated H2
                # cost, or a large constant when infeasible.
                is_feasible, obj, _, _ = self._sls_impl.synth(self._Q, self._R, Anom, Bnom,
                        effective_eps_A, effective_eps_B, self._truncation_length,
                        gamma=gamma, alpha=0.5, logger=logger)
                if not is_feasible:
                    return INF
                else:
                    return 1/(1-gamma) * obj
            disp_lvl = 3 if logger.isEnabledFor(logging.DEBUG) else 0
            # NOTE(review): requires `import scipy.optimize`; `import
            # scipy.linalg` alone does not make scipy.optimize available.
            gamma_star, _, error_flag, _ = scipy.optimize.fminbound(fn, 0, 1 - 1e-5, xtol=1e-2, maxfun=20, full_output=True, disp=disp_lvl)
            if error_flag:
                logger.warn("_design_controller(epoch={}): maxfun exceeded during bisection, gamma_star={}".format(epoch_id, gamma_star))
            logger.info("_design_controller(epoch={}): using gamma_star={}".format(epoch_id, gamma_star))
            is_feasible, _, Phi_x, Phi_u = self._sls_impl.synth(self._Q, self._R, Anom, Bnom,
                    effective_eps_A, effective_eps_B, self._truncation_length,
                    gamma=gamma_star, alpha=0.5, logger=logger)
        else:
            assert self._use_gamma > 0 and self._use_gamma < 1
            logger.info("_design_controller(epoch={}): using fixed gamma={}".format(epoch_id, self._use_gamma))
            is_feasible, _, Phi_x, Phi_u = self._sls_impl.synth(self._Q, self._R, Anom, Bnom,
                    effective_eps_A, effective_eps_B, self._truncation_length,
                    gamma=self._use_gamma, alpha=0.5, logger=logger)
        if not is_feasible:
            logger.info("_design_controller(epoch={}): SLS was not feasible...".format(epoch_id))
            try:
                self._current_K
                # keep current controller
                assert self._current_K is not None
                logger.warn("_design_controller(epoch={}): SLS not feasible: keeping current controller".format(epoch_id))
                self._midway_infeasible += 1
            except AttributeError:
                logger.warn("_design_controller(epoch={}): SLS not feasible: no existing controller to fallback on, effective_eps_A={}, effective_eps_B={}".format(epoch_id, effective_eps_A, effective_eps_B))
                raise SLSInfeasibleException()
        else:
            logger.info("_design_controller(epoch={}): SLS was feasible. updating controller".format(epoch_id))
            self._Phi_x = Phi_x
            self._Phi_u = Phi_u
            self._current_K = make_state_space_controller(Phi_x, Phi_u, self._n, self._p)
        # compute the infinite horizon cost of this controller
        Jnom = h2_squared_norm(self._A_star,
                               self._B_star,
                               self._Phi_x,
                               self._Phi_u,
                               self._Q,
                               self._R,
                               self._sigma_w)
        return Anom, Bnom, Jnom
    def _should_terminate_epoch(self):
        """End the epoch once its (growing) iteration budget is exhausted."""
        if (self._iteration_within_epoch_idx >=
                self._epoch_multiplier * (self._epoch_idx + 1)):
            logger = self._get_logger()
            logger.debug("terminating epoch... exploration noise will now have stddev {}".format(
                self._sigma_explore * 1/math.pow(self._epoch_idx + 2, 1/3)))
            return True
        else:
            return False
    def _get_input(self, state, rng):
        """Advance the dynamic controller one step and add decaying
        exploration noise to its output."""
        rng = self._get_rng(rng)
        A_k, B_k, C_k, D_k = self._current_K
        psit = self._controller_state
        if psit is None:
            psit = np.zeros((A_k.shape[0],))
        psitp1 = A_k.dot(psit) + B_k.dot(state)
        ctrl_input = C_k.dot(psit) + D_k.dot(state)
        self._controller_state = psitp1
        # Exploration stddev decays like (epoch+1)^(-1/3).
        sigma_explore_decay = 1/math.pow(self._epoch_idx + 1, 1/3)
        explore_input = self._sigma_explore * sigma_explore_decay * rng.normal(size=(self._p,))
        return ctrl_input + explore_input
class SLS_CommonLyapunovStrategy(AdaptiveMethod):
    """
    Adaptive control based on common Lyapunov relaxation
    of robust control problem

    Same epoch schedule as SLS_FIRStrategy, but synthesizes a *static* gain
    via the common Lyapunov LMI relaxation instead of FIR SLS.
    """
    def __init__(self, Q, R, A_star, B_star, sigma_w, rls_lam,
                 sigma_explore, reg, epoch_multiplier, actual_error_multiplier):
        super().__init__(Q, R, A_star, B_star, sigma_w, rls_lam)
        self._sigma_explore = sigma_explore
        self._reg = reg
        self._epoch_multiplier = epoch_multiplier
        self._actual_error_multiplier = actual_error_multiplier
        self._logger = logging.getLogger(__name__)
        self._midway_infeasible = 0
    def reset(self, rng):
        """Reset adaptive state and the infeasibility counter."""
        super().reset(rng)
        self._midway_infeasible = 0
    def _get_logger(self):
        return self._logger
    def _design_controller(self, states, inputs, transitions, rng):
        """Fit nominal dynamics and synthesize a static robust gain.

        Returns (Anom, Bnom, Jnom). Falls back to the previous gain when the
        LMI is infeasible, or raises SLSInfeasibleException if none exists.
        """
        logger = self._get_logger()
        Anom, Bnom, _ = utils.solve_least_squares(states, inputs, transitions, reg=self._reg)
        # True estimation errors, inflated by a configurable multiplier.
        eps_A = np.linalg.norm(Anom - self._A_star, ord=2)
        eps_B = np.linalg.norm(Bnom - self._B_star, ord=2)
        effective_eps_A = self._actual_error_multiplier * eps_A
        effective_eps_B = self._actual_error_multiplier * eps_B
        epoch_id = self._epoch_idx + 1 if self._has_primed else 0
        logger.info("_design_controller(epoch={}): effective_eps_A={}, effective_eps_B={}".format(epoch_id, effective_eps_A, effective_eps_B))
        is_feasible, _, _, K = sls_common_lyapunov(
            Anom, Bnom, self._Q, self._R,
            effective_eps_A, effective_eps_B, tau=0.999, logger=logger)
        if not is_feasible:
            try:
                self._current_K
                # keep current controller
                assert self._current_K is not None
                logger.warn("_design_controller(epoch={}): SLS not feasible: keeping current controller".format(epoch_id))
                self._midway_infeasible += 1
            except AttributeError:
                logger.warn("_design_controller(epoch={}): SLS not feasible: no existing controller to fallback on, effective_eps_A={}, effective_eps_B={}".format(epoch_id, effective_eps_A, effective_eps_B))
                raise SLSInfeasibleException()
        else:
            logger.info("_design_controller(epoch={}): SLS was feasible. updating controller".format(epoch_id))
            self._current_K = K
        # compute the infinite horizon cost of this controller
        Jnom = utils.LQR_cost(self._A_star, self._B_star, self._current_K, self._Q, self._R, self._sigma_w)
        return Anom, Bnom, Jnom
    def _should_terminate_epoch(self):
        """End the epoch once its (growing) iteration budget is exhausted."""
        if (self._iteration_within_epoch_idx >=
                self._epoch_multiplier * (self._epoch_idx + 1)):
            logger = self._get_logger()
            logger.debug("terminating epoch... exploration noise will now have stddev {}".format(
                self._sigma_explore * 1/math.pow(self._epoch_idx + 2, 1/3)))
            return True
        else:
            return False
    def _get_input(self, state, rng):
        """Apply the static gain plus decaying exploration noise."""
        rng = self._get_rng(rng)
        ctrl_input = self._current_K.dot(state)
        # Exploration stddev decays like (epoch+1)^(-1/3).
        sigma_explore_decay = 1/math.pow(self._epoch_idx + 1, 1/3)
        explore_input = self._sigma_explore * sigma_explore_decay * rng.normal(size=(self._p,))
        return ctrl_input + explore_input
def _main():
    """Smoke-run both adaptive strategies on the unstable Laplacian example."""
    import examples
    A_star, B_star = examples.unstable_laplacian_dynamics()
    # define costs
    Q = 1e-3 * np.eye(3)
    R = np.eye(3)
    # initial controller
    _, K_init = utils.dlqr(A_star, B_star, 1e-3 * np.eye(3), np.eye(3))
    rng = np.random
    shared_kwargs = dict(Q=Q,
                         R=R,
                         A_star=A_star,
                         B_star=B_star,
                         sigma_w=1,
                         sigma_explore=0.1,
                         reg=1e-5,
                         epoch_multiplier=10,
                         actual_error_multiplier=1,
                         rls_lam=None)
    # Run the FIR strategy first, then the common-Lyapunov one, exactly
    # as two sequential experiments.
    factories = (
        lambda: SLS_FIRStrategy(truncation_length=12, **shared_kwargs),
        lambda: SLS_CommonLyapunovStrategy(**shared_kwargs),
    )
    for make_strategy in factories:
        env = make_strategy()
        env.reset(rng)
        env.prime(250, K_init, 0.5, rng)
        for _ in range(500):
            env.step(rng)


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    np.set_printoptions(linewidth=200)
    _main()
| python/sls.py | 22,553 | Adaptive control based on common Lyapunov relaxation
of robust control problem
Adaptive control based on FIR truncated SLS
Gets the squared infinite horizon LQR cost for system
(A,B) in feedback with the controller defined by Phi_x
and Phi_u.
Converts FIR transfer functions to a state
space realization of the dynamic controller,
mapping states to inputs.
Apply an LTI controller K = (A_k,B_k,C_k,D_k)
Roll the true system (A, B) forward with the SS realization of the LTI
controller given. horizon is the length of the trajectory, and
sigma_w is the stddev of the Gaussian process noise.
Solves the common Lyapunov relaxation to the robust
synthesis problem.
Taken from
lstd-lqr/blob/master/code/policy_iteration.ipynb
learning-lqr/experiments/matlab/sls_synth_yalmip/common_lyap_synth_var2_alpha.m
Solves the SLS synthesis problem for length T FIR filters
using CVXPY
sls.py
An implementation of the robust adaptive controller.
Both FIR SLS version with CVXPY and the common
Lyapunov relaxation.
See Theorem 2 of: https://nikolaimatni.github.io/papers/sls_state_space.pdf Phi_x = \sum_{k=1}^{T} Phi_x[k] z^{-k} Phi_u = \sum_{k=1}^{T} Phi_u[k] z^{-k} htwo_cost subspace constraint: [zI - Ah, -Bh] * [Phi_x; Phi_u] = I Note that: z Phi_x = \sum_{k=0}^{T-1} Phi_x[k+1] z^{-k} This means that: 1) Phi_x[1] = I 2) Phi_x[k+1] = Ah*Phi_x[k] + Bh*Phi_u[k] for k=1, ..., T-1 3) Ah*Phi_x[T] + Bh*Phi_u[T] = 0 H2 constraint: By Parseval's identity, this is equal (up to constants) to frobenius_norm( [ Q_sqrt*Phi_x[1] ; ... Q_sqrt*Phi_x[T] ; R_sqrt*Phi_u[1] ; ... R_sqrt*Phi_u[T] ] ) <= htwo_cost TODO: what is the best way to implement this in cvxpy? H-infinity constraint We want to enforce ||H(z)||_inf <= gamma, where H(z) = \sum_{k=1}^{T} [ mult_x * Phi_x[k] ; mult_u * Phi_u[k] ] z^{-k}. Here, each of the FIR coefficients has size (n+p) x n. Since n+p>n, we enforce the constraint on the transpose system H^T(z). The LMI constraint for this comes from Theorem 5.8 of Positive trigonometric polynomials and signal processing applications (2007) by B. Dumitrescu. Here is a table to map the variable names in the text to this program Text Program Comment ------------------------------------------------------------- p n Output dim m n+p Input dim n T FIR horizon p(n+1) n(T+1) SDP variable size p(n+1) x m n(T+1) x (n+p) Hbar has size (T+1)*n x (n+p) Constraint (5.44) Case k==0: the block diag of Q has to sum to gamma^2 * eye(n) Case k>0: the block off-diag of Q has to sum to zero Constraint (5.45) inverse Lyapunov function -K*X scalar for tuning the H_inf constraint H2 cost: trace(W)=H2 cost H_infinity constraint constrain alpha to be in [0,1]: Solve! NOTE: the K returned here is meant to be used as A + BK **NOT** A - BK TODO(stephentu): the truncation length should grow with time, but for now we keep it constant Additionally, gamma should be searched over as an optimization variable. For how, we fix the value. 
Finally, the optimization problem should be modified to involve the variable V as in https://arxiv.org/abs/1805.09388 if SLS is not feasible, we fallback to the current control policy if it exists, otherwise we throw an SLSInfeasibleException bisect for gamma keep current controller compute the infinite horizon cost of this controller keep current controller compute the infinite horizon cost of this controller define costs initial controller | 3,609 | en | 0.77174 |
# partesanato/__init__.py
| src/partesanato/__init__.py | 27 | partesanato/__init__.py | 23 | es | 0.257923 |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys
import os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD, 0x87, 0xD8, 0x7E, 0xEB, 0x43])


def name_to_ipv6(addr):
    """Convert a textual node address into a 16-byte IPv6 bytearray.

    Accepts dotted-quad IPv4, IPv6, <base32>.onion, and the legacy
    little-endian 0x-prefixed IPv4 form; raises ValueError otherwise.
    """
    if addr.endswith('.onion') and len(addr) > 6:
        payload = b32decode(addr[0:-6], True)
        if len(payload) != 16 - len(pchOnionCat):
            raise ValueError('Invalid onion %s' % payload)
        return pchOnionCat + payload
    if '.' in addr:  # IPv4
        return pchIPv4 + bytearray(int(octet) for octet in addr.split('.'))
    if ':' in addr:  # IPv6
        halves = ([], [])  # groups before and after the '::' gap
        side = 0
        groups = addr.split(':')
        for pos, group in enumerate(groups):
            if group == '':
                if pos == 0 or pos == (len(groups) - 1):
                    # empty group from a leading ':...' or trailing '...:'
                    continue
                side += 1  # '::' encountered — start collecting the suffix
                assert side < 2
            else:
                word = int(group, 16)  # two bytes per group
                halves[side].append(word >> 8)
                halves[side].append(word & 0xff)
        missing = 16 - len(halves[0]) - len(halves[1])
        assert (side == 0 and missing == 0) or (side == 1 and missing > 0)
        return bytearray(halves[0] + ([0] * missing) + halves[1])
    if addr.startswith('0x'):  # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Split a node spec into (ipv6_bytearray, port).

    Understands '[ipv6]:port', bare ipv6, and 'host:port'; falls back
    to defaultport when no port is given.
    """
    m = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if m:  # bracketed ipv6, optional port
        host = m.group(1)
        port = m.group(2)
    elif s.count(':') > 1:  # bare ipv6, no port
        host = s
        port = ''
    else:
        host, _, port = s.partition(':')
    port = int(port) if port else defaultport
    return (name_to_ipv6(host), port)
def process_nodes(g, f, structname, defaultport):
    """Write the seed entries of file f as a C SeedSpec6 array to g."""
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    need_separator = False
    for raw in f:
        # drop trailing '#' comments and surrounding whitespace
        entry = raw.split('#', 1)[0].strip()
        if not entry:
            continue
        if need_separator:
            g.write(',\n')
        need_separator = True
        host, port = parse_spec(entry, defaultport)
        octets = ','.join(('0x%02x' % b) for b in host)
        g.write('    {{%s}, %i}' % (octets, port))
    g.write('\n};\n')
def main():
    """Emit the contents of src/chainparamsseeds.h to stdout."""
    if len(sys.argv) < 2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        sys.exit(1)
    out = sys.stdout
    indir = sys.argv[1]
    # Fixed file header / include guard.
    for text in (
        '#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n',
        '#define BITCOIN_CHAINPARAMSSEEDS_H\n',
        '/**\n',
        ' * List of fixed seed nodes for the bitcoin network\n',
        ' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n',
        ' *\n',
        ' * Each line contains a 16-byte IPv6 address and a port.\n',
        ' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n',
        ' */\n',
    ):
        out.write(text)
    with open(os.path.join(indir, 'nodes_main.txt'), 'r', encoding="utf8") as f:
        process_nodes(out, f, 'pnSeed6_main', 17771)
    out.write('\n')
    with open(os.path.join(indir, 'nodes_test.txt'), 'r', encoding="utf8") as f:
        process_nodes(out, f, 'pnSeed6_test', 27771)
    out.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')


if __name__ == '__main__':
    main()
| contrib/seeds/generate-seeds.py | 4,382 | Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
!/usr/bin/env python3 Copyright (c) 2014-2017 Wladimir J. van der Laan Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. ipv4 in ipv6 prefix tor-specific ipv6 prefix IPv4 IPv6 prefix, suffix skip empty component at beginning or end :: skips to suffix two bytes per component IPv4-in-little-endian ipv6 ipv6, no port | 989 | en | 0.603813 |
"""
Handles creation of genomes, either from scratch or by sexual or
asexual reproduction from parents.
"""
from __future__ import division
import math
import random
from itertools import count
from neat.config import ConfigParameter, DefaultClassConfig
from neat.math_util import mean
from neat.six_util import iteritems, itervalues
# TODO: Provide some sort of optional cross-species performance criteria, which
# are then used to control stagnation and possibly the mutation rate
# configuration. This scheme should be adaptive so that species do not evolve
# to become "cautious" and only make very slow progress.
class DefaultReproduction(DefaultClassConfig):
    """
    Implements the default NEAT-python reproduction scheme:
    explicit fitness sharing with fixed-time species stagnation.
    """

    @classmethod
    def parse_config(cls, param_dict):
        # Declare the configuration schema (name, type, default) for this scheme.
        return DefaultClassConfig(param_dict,
                                  [ConfigParameter('elitism', int, 0),
                                   ConfigParameter('survival_threshold', float, 0.2),
                                   ConfigParameter('min_species_size', int, 2)])

    def __init__(self, config, reporters, stagnation):
        # pylint: disable=super-init-not-called
        self.reproduction_config = config
        self.reporters = reporters
        self.genome_indexer = count(1)  # yields unique genome keys, starting at 1
        self.stagnation = stagnation
        self.ancestors = {}  # genome key -> tuple of parent keys (empty for new genomes)

    def create_new(self, genome_type, genome_config, num_genomes):
        """Create num_genomes fresh genomes with no ancestry."""
        new_genomes = {}
        for i in range(num_genomes):
            key = next(self.genome_indexer)
            g = genome_type(key)
            g.configure_new(genome_config)
            new_genomes[key] = g
            self.ancestors[key] = tuple()
        return new_genomes

    @staticmethod
    def compute_spawn(adjusted_fitness, previous_sizes, pop_size, min_species_size):
        """Compute the proper number of offspring per species (proportional to fitness)."""
        af_sum = sum(adjusted_fitness)
        spawn_amounts = []
        for af, ps in zip(adjusted_fitness, previous_sizes):
            if af_sum > 0:
                s = max(min_species_size, af / af_sum * pop_size)
            else:
                s = min_species_size
            # Move half-way from the previous size toward the fitness-proportional
            # target, but always by at least one member when they differ.
            d = (s - ps) * 0.5
            c = int(round(d))
            spawn = ps
            if abs(c) > 0:
                spawn += c
            elif d > 0:
                spawn += 1
            elif d < 0:
                spawn -= 1
            spawn_amounts.append(spawn)
        # Normalize the spawn amounts so that the next generation is roughly
        # the population size requested by the user.
        total_spawn = sum(spawn_amounts)
        norm = pop_size / total_spawn
        spawn_amounts = [max(min_species_size, int(round(n * norm))) for n in spawn_amounts]
        return spawn_amounts

    def reproduce(self, config, species, pop_size, generation):
        """
        Handles creation of genomes, either from scratch or by sexual or
        asexual reproduction from parents.
        """
        # TODO: I don't like this modification of the species and stagnation objects,
        # because it requires internal knowledge of the objects.

        # Filter out stagnated species, collect the set of non-stagnated
        # species members, and compute their average adjusted fitness.
        # The average adjusted fitness scheme (normalized to the interval
        # [0, 1]) allows the use of negative fitness values without
        # interfering with the shared fitness scheme.
        all_fitnesses = []
        remaining_species = []
        for stag_sid, stag_s, stagnant in self.stagnation.update(species, generation):
            if stagnant:
                self.reporters.species_stagnant(stag_sid, stag_s)
            else:
                all_fitnesses.extend(m.fitness for m in itervalues(stag_s.members))
                remaining_species.append(stag_s)
        # The above comment was not quite what was happening - now getting fitnesses
        # only from members of non-stagnated species.

        # No species left.
        if not remaining_species:
            species.species = {}
            return {}  # was []

        # Find minimum/maximum fitness across the entire population, for use in
        # species adjusted fitness computation.
        min_fitness = min(all_fitnesses)
        max_fitness = max(all_fitnesses)
        # Do not allow the fitness range to be zero, as we divide by it below.
        # TODO: The ``1.0`` below is rather arbitrary, and should be configurable.
        fitness_range = max(1.0, max_fitness - min_fitness)
        for afs in remaining_species:
            # Compute adjusted fitness.
            msf = mean([m.fitness for m in itervalues(afs.members)])
            af = (msf - min_fitness) / fitness_range
            afs.adjusted_fitness = af
        adjusted_fitnesses = [s.adjusted_fitness for s in remaining_species]
        avg_adjusted_fitness = mean(adjusted_fitnesses)  # type: float
        self.reporters.info("Average adjusted fitness: {:.3f}".format(avg_adjusted_fitness))

        # Compute the number of new members for each species in the new generation.
        previous_sizes = [len(s.members) for s in remaining_species]
        min_species_size = self.reproduction_config.min_species_size
        # Isn't the effective min_species_size going to be max(min_species_size,
        # self.reproduction_config.elitism)? That would probably produce more accurate tracking
        # of population sizes and relative fitnesses... doing. TODO: document.
        min_species_size = max(min_species_size, self.reproduction_config.elitism)
        # TODO: THIS PROBABLY CAUSES POPULATION TO DOUBLE. Is an array of 2s of len ~232 here but ~ 112 in original
        # TODO: BECAUSE OF ADJUSTED_FITNESSES ALSO BEING 232 INSTEAD OF 112
        # TODO: 232 is number of species.. so probably rather an effect of increased population, not the cause...
        spawn_amounts = self.compute_spawn(adjusted_fitnesses, previous_sizes,
                                           pop_size, min_species_size)
        new_population = {}
        species.species = {}
        for spawn, s in zip(spawn_amounts, remaining_species):
            # If elitism is enabled, each species always at least gets to retain its elites.
            spawn = max(spawn, self.reproduction_config.elitism)
            assert spawn > 0
            # The species has at least one member for the next generation, so retain it.
            old_members = list(iteritems(s.members))
            s.members = {}
            species.species[s.key] = s
            # Sort members in order of descending fitness.
            old_members.sort(reverse=True, key=lambda x: x[1].fitness)
            # Transfer elites to new generation.
            if self.reproduction_config.elitism > 0:
                for i, m in old_members[:self.reproduction_config.elitism]:
                    new_population[i] = m
                    spawn -= 1
            if spawn <= 0:
                continue
            # Only use the survival threshold fraction to use as parents for the next generation.
            repro_cutoff = int(math.ceil(self.reproduction_config.survival_threshold *
                                         len(old_members)))
            # Use at least two parents no matter what the threshold fraction result is.
            repro_cutoff = max(repro_cutoff, 2)
            old_members = old_members[:repro_cutoff]
            # Randomly choose parents and produce the number of offspring allotted to the species.
            while spawn > 0:
                spawn -= 1
                parent1_id, parent1 = random.choice(old_members)
                parent2_id, parent2 = random.choice(old_members)
                # Note that if the parents are not distinct, crossover will produce a
                # genetically identical clone of the parent (but with a different ID).
                gid = next(self.genome_indexer)
                child = config.genome_type(gid)
                child.configure_crossover(parent1, parent2, config.genome_config)
                child.mutate(config.genome_config)
                new_population[gid] = child
                self.ancestors[gid] = (parent1_id, parent2_id)
        return new_population
| neat_local/reproduction.py | 8,371 | Implements the default NEAT-python reproduction scheme:
explicit fitness sharing with fixed-time species stagnation.
Compute the proper number of offspring per species (proportional to fitness).
Handles creation of genomes, either from scratch or by sexual or
asexual reproduction from parents.
Handles creation of genomes, either from scratch or by sexual or
asexual reproduction from parents.
TODO: Provide some sort of optional cross-species performance criteria, which are then used to control stagnation and possibly the mutation rate configuration. This scheme should be adaptive so that species do not evolve to become "cautious" and only make very slow progress. pylint: disable=super-init-not-called Normalize the spawn amounts so that the next generation is roughly the population size requested by the user. TODO: I don't like this modification of the species and stagnation objects, because it requires internal knowledge of the objects. Filter out stagnated species, collect the set of non-stagnated species members, and compute their average adjusted fitness. The average adjusted fitness scheme (normalized to the interval [0, 1]) allows the use of negative fitness values without interfering with the shared fitness scheme. The above comment was not quite what was happening - now getting fitnesses only from members of non-stagnated species. No species left. was [] Find minimum/maximum fitness across the entire population, for use in species adjusted fitness computation. Do not allow the fitness range to be zero, as we divide by it below. TODO: The ``1.0`` below is rather arbitrary, and should be configurable. Compute adjusted fitness. type: float Compute the number of new members for each species in the new generation. Isn't the effective min_species_size going to be max(min_species_size, self.reproduction_config.elitism)? That would probably produce more accurate tracking of population sizes and relative fitnesses... doing. TODO: document. TODO: THIS PROBABLY CAUSES POPULATION TO DOUBLE. Is an array of 2s of len ~232 here but ~ 112 in original TODO: BECAUSE OF ADJUSTED_FITNESSES ALSO BEING 232 INSTEAD OF 112 TODO: 232 is number of species.. so probably rather an effect of increased population, not the cause... If elitism is enabled, each species always at least gets to retain its elites. 
The species has at least one member for the next generation, so retain it. Sort members in order of descending fitness. Transfer elites to new generation. Only use the survival threshold fraction to use as parents for the next generation. Use at least two parents no matter what the threshold fraction result is. Randomly choose parents and produce the number of offspring allotted to the species. Note that if the parents are not distinct, crossover will produce a genetically identical clone of the parent (but with a different ID). | 2,862 | en | 0.905464 |
import logging
from functools import reduce
from typing import Text, Set, Dict, Optional, List, Union, Any
import os
import rasa.shared.data
import rasa.shared.utils.io
from rasa.shared.core.domain import Domain
from rasa.shared.importers.importer import TrainingDataImporter
from rasa.shared.importers import utils
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.core.training_data.structures import StoryGraph
from rasa.shared.utils.common import mark_as_experimental_feature
from rasa.shared.core.training_data.story_reader.yaml_story_reader import (
YAMLStoryReader,
)
logger = logging.getLogger(__name__)
class MultiProjectImporter(TrainingDataImporter):
    """Importer that aggregates training data across multiple imported projects."""

    def __init__(
        self,
        config_file: Text,
        domain_path: Optional[Text] = None,
        training_data_paths: Optional[Union[List[Text], Text]] = None,
        project_directory: Optional[Text] = None,
    ):
        """Collect domain/NLU/story file paths from the config's ``imports``."""
        self.config = rasa.shared.utils.io.read_model_configuration(config_file)
        if domain_path:
            self._domain_paths = [domain_path]
        else:
            self._domain_paths = []
        self._story_paths = []
        self._e2e_story_paths = []
        self._nlu_paths = []
        self._imports = []
        self._additional_paths = training_data_paths or []
        self._project_directory = project_directory or os.path.dirname(config_file)
        self._init_from_dict(self.config, self._project_directory)
        # Extra training data passed explicitly, outside any imported project.
        extra_nlu_files = rasa.shared.data.get_data_files(
            training_data_paths, rasa.shared.data.is_nlu_file
        )
        extra_story_files = rasa.shared.data.get_data_files(
            training_data_paths, YAMLStoryReader.is_stories_file
        )
        self._story_paths += extra_story_files
        self._nlu_paths += extra_nlu_files
        logger.debug(
            "Selected projects: {}".format("".join([f"\n-{i}" for i in self._imports]))
        )
        mark_as_experimental_feature(feature_name="MultiProjectImporter")

    def get_config_file_for_auto_config(self) -> Optional[Text]:
        """Returns config file path for auto-config only if there is a single one."""
        return None

    def _init_from_path(self, path: Text) -> None:
        """Dispatch initialization to the file- or directory-based variant."""
        if os.path.isfile(path):
            self._init_from_file(path)
        elif os.path.isdir(path):
            self._init_from_directory(path)

    def _init_from_file(self, path: Text) -> None:
        """Read an imported project's config file and recurse into its imports."""
        path = os.path.abspath(path)
        if os.path.exists(path) and rasa.shared.data.is_config_file(path):
            config = rasa.shared.utils.io.read_config_file(path)
            parent_directory = os.path.dirname(path)
            self._init_from_dict(config, parent_directory)
        else:
            rasa.shared.utils.io.raise_warning(
                f"'{path}' does not exist or is not a valid config file."
            )

    def _init_from_dict(self, _dict: Dict[Text, Any], parent_directory: Text) -> None:
        """Register the ``imports`` of a config dict and recurse into each."""
        imports = _dict.get("imports") or []
        imports = [os.path.join(parent_directory, i) for i in imports]
        # clean out relative paths
        imports = [os.path.abspath(i) for i in imports]
        # remove duplication
        import_candidates = []
        for i in imports:
            if i not in import_candidates and not self._is_explicitly_imported(i):
                import_candidates.append(i)
        self._imports.extend(import_candidates)
        # import config files from paths which have not been processed so far
        for p in import_candidates:
            self._init_from_path(p)

    def _is_explicitly_imported(self, path: Text) -> bool:
        return not self.no_skills_selected() and self.is_imported(path)

    def _init_from_directory(self, path: Text) -> None:
        """Walk a directory tree and classify each imported file by content type."""
        for parent, _, files in os.walk(path, followlinks=True):
            for file in files:
                full_path = os.path.join(parent, file)
                if not self.is_imported(full_path):
                    # Check next file
                    continue
                if YAMLStoryReader.is_test_stories_file(full_path):
                    self._e2e_story_paths.append(full_path)
                elif Domain.is_domain_file(full_path):
                    self._domain_paths.append(full_path)
                elif rasa.shared.data.is_nlu_file(full_path):
                    self._nlu_paths.append(full_path)
                elif YAMLStoryReader.is_stories_file(full_path):
                    self._story_paths.append(full_path)
                elif rasa.shared.data.is_config_file(full_path):
                    self._init_from_file(full_path)

    def no_skills_selected(self) -> bool:
        """Returns `True` if no project was imported at all."""
        return not self._imports

    def training_paths(self) -> Set[Text]:
        """Returns the paths which should be searched for training data."""
        # only include extra paths if they are not part of the current project directory
        training_paths = {
            i
            for i in self._imports
            if not self._project_directory or self._project_directory not in i
        }
        if self._project_directory:
            training_paths.add(self._project_directory)
        return training_paths

    def is_imported(self, path: Text) -> bool:
        """
        Checks whether a path is imported by a skill.
        Args:
            path: File or directory path which should be checked.
        Returns:
            `True` if path is imported by a skill, `False` if not.
        """
        absolute_path = os.path.abspath(path)
        return (
            self.no_skills_selected()
            or self._is_in_project_directory(absolute_path)
            or self._is_in_additional_paths(absolute_path)
            or self._is_in_imported_paths(absolute_path)
        )

    def _is_in_project_directory(self, path: Text) -> bool:
        if os.path.isfile(path):
            parent_directory = os.path.abspath(os.path.dirname(path))
            return parent_directory == self._project_directory
        else:
            return path == self._project_directory

    def _is_in_additional_paths(self, path: Text) -> bool:
        included = path in self._additional_paths
        if not included and os.path.isfile(path):
            parent_directory = os.path.abspath(os.path.dirname(path))
            included = parent_directory in self._additional_paths
        return included

    def _is_in_imported_paths(self, path: Text) -> bool:
        # Generator instead of an intermediate list: any() short-circuits.
        return any(
            rasa.shared.utils.io.is_subdirectory(path, i) for i in self._imports
        )

    def get_domain(self) -> Domain:
        """Retrieves model domain (see parent class for full docstring)."""
        domains = [Domain.load(path) for path in self._domain_paths]
        return reduce(
            lambda merged, other: merged.merge(other), domains, Domain.empty()
        )

    def get_stories(self, exclusion_percentage: Optional[int] = None) -> StoryGraph:
        """Retrieves training stories / rules (see parent class for full docstring)."""
        return utils.story_graph_from_paths(
            self._story_paths, self.get_domain(), exclusion_percentage
        )

    def get_conversation_tests(self) -> StoryGraph:
        """Retrieves conversation test stories (see parent class for full docstring)."""
        return utils.story_graph_from_paths(self._e2e_story_paths, self.get_domain())

    def get_config(self) -> Dict:
        """Retrieves model config (see parent class for full docstring)."""
        return self.config

    def get_nlu_data(self, language: Optional[Text] = "en") -> TrainingData:
        """Retrieves NLU training data (see parent class for full docstring)."""
        return utils.training_data_from_paths(self._nlu_paths, language)
| rasa/shared/importers/multi_project.py | 7,787 | Retrieves model config (see parent class for full docstring).
Returns config file path for auto-config only if there is a single one.
Retrieves conversation test stories (see parent class for full docstring).
Retrieves model domain (see parent class for full docstring).
Retrieves NLU training data (see parent class for full docstring).
Retrieves training stories / rules (see parent class for full docstring).
Checks whether a path is imported by a skill.
Args:
path: File or directory path which should be checked.
Returns:
`True` if path is imported by a skill, `False` if not.
Returns the paths which should be searched for training data.
clean out relative paths remove duplication import config files from paths which have not been processed so far Check next file only include extra paths if they are not part of the current project directory | 861 | en | 0.840131 |
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# Copyright (C) 2009-2018 David Aguilar (davvid -at- gmail.com)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from __future__ import absolute_import, division, unicode_literals
import decimal
import warnings
import sys
import types
from itertools import chain, islice
from . import compat
from . import util
from . import tags
from . import handlers
from .backend import json
from .compat import numeric_types, string_types, PY3, PY2
def encode(
    value,
    unpicklable=True,
    make_refs=True,
    keys=False,
    max_depth=None,
    reset=True,
    backend=None,
    warn=False,
    context=None,
    max_iter=None,
    use_decimal=False,
    numeric_keys=False,
    use_base85=False,
    fail_safe=None,
    indent=None,
    separators=None,
):
    """Return a JSON formatted representation of value, a Python object.
    :param unpicklable: If set to False then the output will not contain the
        information necessary to turn the JSON data back into Python objects,
        but a simpler JSON stream is produced.
    :param max_depth: If set to a non-negative integer then jsonpickle will
        not recurse deeper than 'max_depth' steps into the object. Anything
        deeper than 'max_depth' is represented using a Python repr() of the
        object.
    :param make_refs: If set to False jsonpickle's referencing support is
        disabled. Objects that are id()-identical won't be preserved across
        encode()/decode(), but the resulting JSON stream will be conceptually
        simpler. jsonpickle detects cyclical objects and will break the cycle
        by calling repr() instead of recursing when make_refs is set False.
    :param keys: If set to True then jsonpickle will encode non-string
        dictionary keys instead of coercing them into strings via `repr()`.
        This is typically what you want if you need to support Integer or
        objects as dictionary keys.
    :param numeric_keys: Only use this option if the backend supports integer
        dict keys natively. This flag tells jsonpickle to leave numeric keys
        as-is rather than conforming them to json-friendly strings.
        Using ``keys=True`` is the typical solution for integer keys, so only
        use this if you have a specific use case where you want to allow the
        backend to handle serialization of numeric dict keys.
    :param warn: If set to True then jsonpickle will warn when it
        returns None for an object which it cannot pickle
        (e.g. file descriptors).
    :param max_iter: If set to a non-negative integer then jsonpickle will
        consume at most `max_iter` items when pickling iterators.
    :param use_decimal: If set to True jsonpickle will allow Decimal
        instances to pass-through, with the assumption that the simplejson
        backend will be used in `use_decimal` mode. In order to use this mode
        you will need to configure simplejson::

            jsonpickle.set_encoder_options('simplejson',
                                           use_decimal=True, sort_keys=True)
            jsonpickle.set_decoder_options('simplejson',
                                           use_decimal=True)
            jsonpickle.set_preferred_backend('simplejson')

        NOTE: A side-effect of the above settings is that float values will be
        converted to Decimal when converting to json.
    :param use_base85:
        If possible, use base85 to encode binary data. Base85 bloats binary data
        by 1/4 as opposed to base64, which expands it by 1/3. This argument is
        ignored on Python 2 because it doesn't support it.
    :param fail_safe: If set to a function exceptions are ignored when pickling
        and if a exception happens the function is called and the return value
        is used as the value for the object that caused the error
    :param indent: When `indent` is a non-negative integer, then JSON array
        elements and object members will be pretty-printed with that indent
        level. An indent level of 0 will only insert newlines. ``None`` is
        the most compact representation. Since the default item separator is
        ``(', ', ': ')``, the output might include trailing whitespace when
        ``indent`` is specified. You can use ``separators=(',', ': ')`` to
        avoid this. This value is passed directly to the active JSON backend
        library and not used by jsonpickle directly.
    :param separators:
        If ``separators`` is an ``(item_separator, dict_separator)`` tuple
        then it will be used instead of the default ``(', ', ': ')``
        separators. ``(',', ':')`` is the most compact JSON representation.
        This value is passed directly to the active JSON backend library and
        not used by jsonpickle directly.

    >>> encode('my string') == '"my string"'
    True
    >>> encode(36) == '36'
    True
    >>> encode({'foo': True}) == '{"foo": true}'
    True
    >>> encode({'foo': [1, 2, [3, 4]]}, max_depth=1)
    '{"foo": "[1, 2, [3, 4]]"}'
    """
    # Default to the configured json backend; a caller-provided ``context``
    # (an existing Pickler) takes precedence over the option arguments.
    backend = backend or json
    context = context or Pickler(
        unpicklable=unpicklable,
        make_refs=make_refs,
        keys=keys,
        backend=backend,
        max_depth=max_depth,
        warn=warn,
        max_iter=max_iter,
        numeric_keys=numeric_keys,
        use_decimal=use_decimal,
        use_base85=use_base85,
        fail_safe=fail_safe,
    )
    # Flatten to json-ready structures first, then let the backend serialize.
    return backend.encode(
        context.flatten(value, reset=reset), indent=indent, separators=separators
    )
class Pickler(object):
    """Flatten live Python objects into JSON-safe structures.

    The output uses only dicts, lists, strings, numbers, booleans and
    ``None`` so that any JSON backend can serialize it.  When
    ``unpicklable`` is True, extra ``py/*`` tagged entries (from ``tags``)
    record the information needed to reconstruct the original objects.
    """
    def __init__(
        self,
        unpicklable=True,
        make_refs=True,
        max_depth=None,
        backend=None,
        keys=False,
        warn=False,
        max_iter=None,
        numeric_keys=False,
        use_decimal=False,
        use_base85=False,
        fail_safe=None,
    ):
        """Configure a new Pickler.

        :param unpicklable: include the ``py/*`` metadata needed to restore
            objects; if False a simpler, lossy JSON structure is produced.
        :param make_refs: emit ``py/id`` references for repeated objects
            instead of re-serializing (or repr-ing) them.
        :param max_depth: recursion limit; objects deeper than this are
            replaced by their ``repr()``.
        :param backend: JSON backend; defaults to the module-level ``json``.
        :param keys: encode non-string dict keys instead of coercing them
            to strings via ``repr()``.
        :param warn: warn when an object cannot be pickled and is replaced
            with ``None``.
        :param max_iter: maximum number of items consumed from iterators.
        :param use_decimal: let ``decimal.Decimal`` values pass through.
        :param use_base85: prefer base85 over base64 for binary data
            (ignored on Python 2, which lacks base85 support).
        :param fail_safe: callable invoked with the exception when
            flattening an object fails; its return value replaces the
            object.  ``None`` means exceptions propagate.
        """
        self.unpicklable = unpicklable
        self.make_refs = make_refs
        self.backend = backend or json
        self.keys = keys
        self.warn = warn
        self.numeric_keys = numeric_keys
        self.use_base85 = use_base85 and (not PY2)
        # The current recursion depth
        self._depth = -1
        # The maximal recursion depth
        self._max_depth = max_depth
        # Maps id(obj) to reference IDs
        self._objs = {}
        # Avoids garbage collection
        self._seen = []
        # maximum amount of items to take from a pickled iterator
        self._max_iter = max_iter
        # Whether to allow decimals to pass-through
        self._use_decimal = use_decimal
        if self.use_base85:
            self._bytes_tag = tags.B85
            self._bytes_encoder = util.b85encode
        else:
            self._bytes_tag = tags.B64
            self._bytes_encoder = util.b64encode
        # ignore exceptions
        self.fail_safe = fail_safe
    def reset(self):
        """Clear per-run state (reference table, depth, seen objects)."""
        self._objs = {}
        self._depth = -1
        self._seen = []
    def _push(self):
        """Steps down one level in the namespace."""
        self._depth += 1
    def _pop(self, value):
        """Step up one level in the namespace and return the value.
        If we're at the root, reset the pickler's state.
        """
        self._depth -= 1
        if self._depth == -1:
            self.reset()
        return value
    def _log_ref(self, obj):
        """
        Log a reference to an in-memory object.
        Return True if this object is new and was assigned
        a new ID. Otherwise return False.
        """
        objid = id(obj)
        is_new = objid not in self._objs
        if is_new:
            # Reference IDs are assigned sequentially in first-seen order.
            new_id = len(self._objs)
            self._objs[objid] = new_id
        return is_new
    def _mkref(self, obj):
        """
        Log a reference to an in-memory object, and return
        if that object should be considered newly logged.
        """
        is_new = self._log_ref(obj)
        # Pretend the object is new
        pretend_new = not self.unpicklable or not self.make_refs
        return pretend_new or is_new
    def _getref(self, obj):
        """Return a ``{tags.ID: n}`` reference to a previously seen object."""
        return {tags.ID: self._objs.get(id(obj))}
    def flatten(self, obj, reset=True):
        """Takes an object and returns a JSON-safe representation of it.
        Simply returns any of the basic builtin datatypes
        >>> p = Pickler()
        >>> p.flatten('hello world') == 'hello world'
        True
        >>> p.flatten(49)
        49
        >>> p.flatten(350.0)
        350.0
        >>> p.flatten(True)
        True
        >>> p.flatten(False)
        False
        >>> r = p.flatten(None)
        >>> r is None
        True
        >>> p.flatten(False)
        False
        >>> p.flatten([1, 2, 3, 4])
        [1, 2, 3, 4]
        >>> p.flatten((1,2,))[tags.TUPLE]
        [1, 2]
        >>> p.flatten({'key': 'value'}) == {'key': 'value'}
        True
        """
        if reset:
            self.reset()
        return self._flatten(obj)
    def _flatten(self, obj):
        """Flatten a single object, tracking recursion depth around it."""
        #########################################
        # if obj is nonrecursive return immediately
        # for performance reasons we don't want to do recursive checks
        if PY2 and isinstance(obj, types.FileType):
            return self._flatten_file(obj)
        if util.is_bytes(obj):
            return self._flatten_bytestring(obj)
        if util.is_primitive(obj):
            return obj
        # Decimal is a primitive when use_decimal is True
        if self._use_decimal and isinstance(obj, decimal.Decimal):
            return obj
        #########################################
        self._push()
        return self._pop(self._flatten_obj(obj))
    def _max_reached(self):
        """Return True when the current depth hit the configured limit."""
        return self._depth == self._max_depth
    def _flatten_obj(self, obj):
        """Flatten a non-primitive object, honoring cycles and fail_safe."""
        self._seen.append(obj)
        max_reached = self._max_reached()
        try:
            in_cycle = _in_cycle(obj, self._objs, max_reached, self.make_refs)
            if in_cycle:
                # break the cycle
                flatten_func = repr
            else:
                flatten_func = self._get_flattener(obj)
            if flatten_func is None:
                self._pickle_warning(obj)
                return None
            return flatten_func(obj)
        except (KeyboardInterrupt, SystemExit) as e:
            raise e
        except Exception as e:
            if self.fail_safe is None:
                raise e
            else:
                # Best-effort mode: substitute fail_safe's return value.
                return self.fail_safe(e)
    def _list_recurse(self, obj):
        """Flatten each element of a sequence into a plain list."""
        return [self._flatten(v) for v in obj]
    def _get_flattener(self, obj):
        """Return the function used to flatten ``obj``, or None if
        the object cannot be pickled (e.g. bound methods, lambdas)."""
        list_recurse = self._list_recurse
        if util.is_list(obj):
            if self._mkref(obj):
                return list_recurse
            else:
                # NOTE(review): extra push appears to balance the _pop()
                # done by _flatten() for this already-seen list -- confirm.
                self._push()
                return self._getref
        # We handle tuples and sets by encoding them in a "(tuple|set)dict"
        if util.is_tuple(obj):
            if not self.unpicklable:
                return list_recurse
            return lambda obj: {tags.TUPLE: [self._flatten(v) for v in obj]}
        if util.is_set(obj):
            if not self.unpicklable:
                return list_recurse
            return lambda obj: {tags.SET: [self._flatten(v) for v in obj]}
        if util.is_dictionary(obj):
            return self._flatten_dict_obj
        if util.is_type(obj):
            return _mktyperef
        if util.is_object(obj):
            return self._ref_obj_instance
        if util.is_module_function(obj):
            return self._flatten_function
        # instance methods, lambdas, old style classes...
        self._pickle_warning(obj)
        return None
    def _ref_obj_instance(self, obj):
        """Reference an existing object or flatten if new"""
        if self.unpicklable:
            if self._mkref(obj):
                # We've never seen this object so return its
                # json representation.
                return self._flatten_obj_instance(obj)
            # We've seen this object before so place an object
            # reference tag in the data. This avoids infinite recursion
            # when processing cyclical objects.
            return self._getref(obj)
        else:
            max_reached = self._max_reached()
            in_cycle = _in_cycle(obj, self._objs, max_reached, False)
            if in_cycle:
                # A circular becomes None.
                return None
            self._mkref(obj)
            return self._flatten_obj_instance(obj)
    def _flatten_file(self, obj):
        """
        Special case file objects
        """
        assert not PY3 and isinstance(obj, types.FileType)
        return None
    def _flatten_bytestring(self, obj):
        """Encode bytes: UTF-8 text on Python 2, else base64/base85 tagged."""
        if PY2:
            try:
                return obj.decode('utf-8')
            except UnicodeDecodeError:
                pass
        return {self._bytes_tag: self._bytes_encoder(obj)}
    def _flatten_obj_instance(self, obj):
        """Recursively flatten an instance and return a json-friendly dict"""
        data = {}
        has_class = hasattr(obj, '__class__')
        has_dict = hasattr(obj, '__dict__')
        has_slots = not has_dict and hasattr(obj, '__slots__')
        has_getnewargs = util.has_method(obj, '__getnewargs__')
        has_getnewargs_ex = util.has_method(obj, '__getnewargs_ex__')
        has_getinitargs = util.has_method(obj, '__getinitargs__')
        has_reduce, has_reduce_ex = util.has_reduce(obj)
        # Support objects with __getstate__(); this ensures that
        # both __setstate__() and __getstate__() are implemented
        has_getstate = hasattr(obj, '__getstate__')
        # not using has_method since __getstate__() is handled separately below
        if has_class:
            cls = obj.__class__
        else:
            cls = type(obj)
        # Check for a custom handler
        class_name = util.importable_name(cls)
        handler = handlers.get(cls, handlers.get(class_name))
        if handler is not None:
            if self.unpicklable:
                data[tags.OBJECT] = class_name
            return handler(self).flatten(obj, data)
        reduce_val = None
        if self.unpicklable:
            if has_reduce and not has_reduce_ex:
                try:
                    reduce_val = obj.__reduce__()
                except TypeError:
                    # A lot of builtin types have a reduce which
                    # just raises a TypeError
                    # we ignore those
                    pass
            # test for a reduce implementation, and redirect before
            # doing anything else if that is what reduce requests
            elif has_reduce_ex:
                try:
                    # we're implementing protocol 2
                    reduce_val = obj.__reduce_ex__(2)
                except TypeError:
                    # A lot of builtin types have a reduce which
                    # just raises a TypeError
                    # we ignore those
                    pass
        if reduce_val and isinstance(reduce_val, string_types):
            # A string reduce value names a module-global to pickle instead.
            try:
                varpath = iter(reduce_val.split('.'))
                # curmod will be transformed by the
                # loop into the value to pickle
                curmod = sys.modules[next(varpath)]
                for modname in varpath:
                    curmod = getattr(curmod, modname)
                # replace obj with value retrieved
                return self._flatten(curmod)
            except KeyError:
                # well, we can't do anything with that, so we ignore it
                pass
        elif reduce_val:
            # at this point, reduce_val should be some kind of iterable
            # pad out to len 5
            rv_as_list = list(reduce_val)
            insufficiency = 5 - len(rv_as_list)
            if insufficiency:
                rv_as_list += [None] * insufficiency
            if getattr(rv_as_list[0], '__name__', '') == '__newobj__':
                rv_as_list[0] = tags.NEWOBJ
            f, args, state, listitems, dictitems = rv_as_list
            # check that getstate/setstate is sane
            if not (
                state
                and hasattr(obj, '__getstate__')
                and not hasattr(obj, '__setstate__')
                and not isinstance(obj, dict)
            ):
                # turn iterators to iterables for convenient serialization
                if rv_as_list[3]:
                    rv_as_list[3] = tuple(rv_as_list[3])
                if rv_as_list[4]:
                    rv_as_list[4] = tuple(rv_as_list[4])
                reduce_args = list(map(self._flatten, rv_as_list))
                # Trim trailing None entries (but keep callable + args).
                last_index = len(reduce_args) - 1
                while last_index >= 2 and reduce_args[last_index] is None:
                    last_index -= 1
                data[tags.REDUCE] = reduce_args[: last_index + 1]
                return data
        if has_class and not util.is_module(obj):
            if self.unpicklable:
                data[tags.OBJECT] = class_name
            if has_getnewargs_ex:
                data[tags.NEWARGSEX] = list(map(self._flatten, obj.__getnewargs_ex__()))
            if has_getnewargs and not has_getnewargs_ex:
                data[tags.NEWARGS] = self._flatten(obj.__getnewargs__())
            if has_getinitargs:
                data[tags.INITARGS] = self._flatten(obj.__getinitargs__())
        if has_getstate:
            try:
                state = obj.__getstate__()
            except TypeError:
                # Has getstate but it cannot be called, e.g. file descriptors
                # in Python3
                self._pickle_warning(obj)
                return None
            else:
                return self._getstate(state, data)
        if util.is_module(obj):
            if self.unpicklable:
                data[tags.REPR] = '{name}/{name}'.format(name=obj.__name__)
            else:
                data = compat.ustr(obj)
            return data
        if util.is_dictionary_subclass(obj):
            self._flatten_dict_obj(obj, data)
            return data
        if util.is_sequence_subclass(obj):
            return self._flatten_sequence_obj(obj, data)
        if util.is_iterator(obj):
            # force list in python 3
            data[tags.ITERATOR] = list(map(self._flatten, islice(obj, self._max_iter)))
            return data
        if has_dict:
            # Support objects that subclasses list and set
            if util.is_sequence_subclass(obj):
                return self._flatten_sequence_obj(obj, data)
            # hack for zope persistent objects; this unghostifies the object
            getattr(obj, '_', None)
            return self._flatten_dict_obj(obj.__dict__, data)
        if has_slots:
            return self._flatten_newstyle_with_slots(obj, data)
        # catchall return for data created above without a return
        # (e.g. __getnewargs__ is not supposed to be the end of the story)
        if data:
            return data
        self._pickle_warning(obj)
        return None
    def _flatten_function(self, obj):
        """Flatten a module-level function as a ``tags.FUNCTION`` reference."""
        if self.unpicklable:
            data = {tags.FUNCTION: util.importable_name(obj)}
        else:
            data = None
        return data
    def _flatten_dict_obj(self, obj, data=None):
        """Recursively call flatten() and return json-friendly dict"""
        if data is None:
            data = obj.__class__()
        # If we allow non-string keys then we have to do a two-phase
        # encoding to ensure that the reference IDs are deterministic.
        if self.keys:
            # Phase 1: serialize regular objects, ignore fancy keys.
            flatten = self._flatten_string_key_value_pair
            for k, v in util.items(obj):
                flatten(k, v, data)
            # Phase 2: serialize non-string keys.
            flatten = self._flatten_non_string_key_value_pair
            for k, v in util.items(obj):
                flatten(k, v, data)
        else:
            # If we have string keys only then we only need a single pass.
            flatten = self._flatten_key_value_pair
            for k, v in util.items(obj):
                flatten(k, v, data)
        # the collections.defaultdict protocol
        if hasattr(obj, 'default_factory') and callable(obj.default_factory):
            factory = obj.default_factory
            if util.is_type(factory):
                # Reference the class/type
                value = _mktyperef(factory)
            else:
                # The factory is not a type and could reference e.g. functions
                # or even the object instance itself, which creates a cycle.
                if self._mkref(factory):
                    # We've never seen this object before so pickle it in-place.
                    # Create an instance from the factory and assume that the
                    # resulting instance is a suitable exemplar.
                    value = self._flatten_obj_instance(handlers.CloneFactory(factory()))
                else:
                    # We've seen this object before.
                    # Break the cycle by emitting a reference.
                    value = self._getref(factory)
            data['default_factory'] = value
        # Sub-classes of dict
        if hasattr(obj, '__dict__') and self.unpicklable:
            dict_data = {}
            self._flatten_dict_obj(obj.__dict__, dict_data)
            data['__dict__'] = dict_data
        return data
    def _flatten_obj_attrs(self, obj, attrs, data):
        """Flatten the named attributes of ``obj`` into ``data``.

        Returns True if at least one attribute was serialized.
        """
        flatten = self._flatten_key_value_pair
        ok = False
        for k in attrs:
            try:
                value = getattr(obj, k)
                flatten(k, value, data)
            except AttributeError:
                # The attribute may have been deleted
                continue
            ok = True
        return ok
    def _flatten_newstyle_with_slots(self, obj, data):
        """Return a json-friendly dict for new-style objects with __slots__."""
        allslots = [
            _wrap_string_slot(getattr(cls, '__slots__', tuple()))
            for cls in obj.__class__.mro()
        ]
        if not self._flatten_obj_attrs(obj, chain(*allslots), data):
            # Fall back to public attributes when no slot was serializable.
            attrs = [
                x for x in dir(obj) if not x.startswith('__') and not x.endswith('__')
            ]
            self._flatten_obj_attrs(obj, attrs, data)
        return data
    def _flatten_key_value_pair(self, k, v, data):
        """Flatten a key/value pair into the passed-in dictionary."""
        if not util.is_picklable(k, v):
            return data
        if k is None:
            k = 'null'  # for compatibility with common json encoders
        if self.numeric_keys and isinstance(k, numeric_types):
            pass
        elif not isinstance(k, string_types):
            try:
                k = repr(k)
            except Exception:
                k = compat.ustr(k)
        data[k] = self._flatten(v)
        return data
    def _flatten_non_string_key_value_pair(self, k, v, data):
        """Flatten only non-string key/value pairs"""
        if not util.is_picklable(k, v):
            return data
        if self.keys and not isinstance(k, string_types):
            k = self._escape_key(k)
            data[k] = self._flatten(v)
        return data
    def _flatten_string_key_value_pair(self, k, v, data):
        """Flatten string key/value pairs only."""
        if not util.is_picklable(k, v):
            return data
        if self.keys:
            if not isinstance(k, string_types):
                return data
            elif k.startswith(tags.JSON_KEY):
                # Escape strings that collide with the escaped-key prefix.
                k = self._escape_key(k)
        else:
            if k is None:
                k = 'null'  # for compatibility with common json encoders
            if self.numeric_keys and isinstance(k, numeric_types):
                pass
            elif not isinstance(k, string_types):
                try:
                    k = repr(k)
                except Exception:
                    k = compat.ustr(k)
        data[k] = self._flatten(v)
        return data
    def _flatten_sequence_obj(self, obj, data):
        """Return a json-friendly dict for a sequence subclass."""
        if hasattr(obj, '__dict__'):
            self._flatten_dict_obj(obj.__dict__, data)
        value = [self._flatten(v) for v in obj]
        if self.unpicklable:
            data[tags.SEQ] = value
        else:
            return value
        return data
    def _escape_key(self, k):
        """Encode a non-string dict key as a ``tags.JSON_KEY``-prefixed string."""
        return tags.JSON_KEY + encode(
            k,
            reset=False,
            keys=True,
            context=self,
            backend=self.backend,
            make_refs=self.make_refs,
        )
    def _getstate(self, obj, data):
        """Flatten ``__getstate__`` output into ``data`` under ``tags.STATE``."""
        state = self._flatten(obj)
        if self.unpicklable:
            data[tags.STATE] = state
        else:
            data = state
        return data
    def _pickle_warning(self, obj):
        """Warn (when enabled) that ``obj`` was replaced with None."""
        if self.warn:
            msg = 'jsonpickle cannot pickle %r: replaced with None' % obj
            warnings.warn(msg)
def _in_cycle(obj, objs, max_reached, make_refs):
    """Detect cyclic structures that would lead to infinite recursion.

    Returns True when ``obj`` must be cut off: either the depth limit has
    been reached, or referencing is disabled and ``obj`` was already seen.
    Primitives and enums are never treated as cycle participants.
    """
    if util.is_primitive(obj) or util.is_enum(obj):
        return False
    if max_reached:
        return True
    # With make_refs disabled, a repeated id() means we are revisiting
    # an object already on the serialization path.
    return (not make_refs) and id(obj) in objs
def _mktyperef(obj):
    """Build a ``{tags.TYPE: <dotted name>}`` reference for a type.

    >>> _mktyperef(AssertionError) == {'py/type': 'builtins.AssertionError'}
    True
    """
    dotted_name = util.importable_name(obj)
    return {tags.TYPE: dotted_name}
def _wrap_string_slot(string):
    """Normalize a ``__slots__`` declaration to a sequence of names.

    A bare string such as ``__slots__ = 'a'`` becomes ``('a',)``; any
    other value (already a sequence of slot names) is returned unchanged.
    """
    is_bare_name = isinstance(string, string_types)
    return (string,) if is_bare_name else string
| jsonpickle/pickler.py | 26,149 | Recursively call flatten() and return json-friendly dict
Special case file objects
Flatten a key/value pair into the passed-in dictionary.
Return a json-friendly dict for new-style objects with __slots__.
Flatten only non-string key/value pairs
Recursively flatten an instance and return a json-friendly dict
Return a json-friendly dict for a sequence subclass.
Flatten string key/value pairs only.
Detect cyclic structures that would lead to infinite recursion
Log a reference to an in-memory object.
Return True if this object is new and was assigned
a new ID. Otherwise return False.
Log a reference to an in-memory object, and return
if that object should be considered newly logged.
Return a typeref dictionary
>>> _mktyperef(AssertionError) == {'py/type': 'builtins.AssertionError'}
True
Step up one level in the namespace and return the value.
If we're at the root, reset the pickler's state.
Steps down one level in the namespace.
Reference an existing object or flatten if new
Converts __slots__ = 'a' into __slots__ = ('a',)
Return a JSON formatted representation of value, a Python object.
:param unpicklable: If set to False then the output will not contain the
information necessary to turn the JSON data back into Python objects,
but a simpler JSON stream is produced.
:param max_depth: If set to a non-negative integer then jsonpickle will
not recurse deeper than 'max_depth' steps into the object. Anything
deeper than 'max_depth' is represented using a Python repr() of the
object.
:param make_refs: If set to False jsonpickle's referencing support is
disabled. Objects that are id()-identical won't be preserved across
encode()/decode(), but the resulting JSON stream will be conceptually
simpler. jsonpickle detects cyclical objects and will break the cycle
by calling repr() instead of recursing when make_refs is set False.
:param keys: If set to True then jsonpickle will encode non-string
dictionary keys instead of coercing them into strings via `repr()`.
This is typically what you want if you need to support Integer or
objects as dictionary keys.
:param numeric_keys: Only use this option if the backend supports integer
dict keys natively. This flag tells jsonpickle to leave numeric keys
as-is rather than conforming them to json-friendly strings.
Using ``keys=True`` is the typical solution for integer keys, so only
use this if you have a specific use case where you want to allow the
backend to handle serialization of numeric dict keys.
:param warn: If set to True then jsonpickle will warn when it
returns None for an object which it cannot pickle
(e.g. file descriptors).
:param max_iter: If set to a non-negative integer then jsonpickle will
consume at most `max_iter` items when pickling iterators.
:param use_decimal: If set to True jsonpickle will allow Decimal
instances to pass-through, with the assumption that the simplejson
backend will be used in `use_decimal` mode. In order to use this mode
you will need to configure simplejson::
jsonpickle.set_encoder_options('simplejson',
use_decimal=True, sort_keys=True)
jsonpickle.set_decoder_options('simplejson',
use_decimal=True)
jsonpickle.set_preferred_backend('simplejson')
NOTE: A side-effect of the above settings is that float values will be
converted to Decimal when converting to json.
:param use_base85:
If possible, use base85 to encode binary data. Base85 bloats binary data
by 1/4 as opposed to base64, which expands it by 1/3. This argument is
ignored on Python 2 because it doesn't support it.
:param fail_safe: If set to a function exceptions are ignored when pickling
and if an exception happens the function is called and the return value
is used as the value for the object that caused the error
:param indent: When `indent` is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that indent
level. An indent level of 0 will only insert newlines. ``None`` is
the most compact representation. Since the default item separator is
``(', ', ': ')``, the output might include trailing whitespace when
``indent`` is specified. You can use ``separators=(',', ': ')`` to
avoid this. This value is passed directly to the active JSON backend
library and not used by jsonpickle directly.
:param separators:
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')``
separators. ``(',', ':')`` is the most compact JSON representation.
This value is passed directly to the active JSON backend library and
not used by jsonpickle directly.
>>> encode('my string') == '"my string"'
True
>>> encode(36) == '36'
True
>>> encode({'foo': True}) == '{"foo": true}'
True
>>> encode({'foo': [1, 2, [3, 4]]}, max_depth=1)
'{"foo": "[1, 2, [3, 4]]"}'
Takes an object and returns a JSON-safe representation of it.
Simply returns any of the basic builtin datatypes
>>> p = Pickler()
>>> p.flatten('hello world') == 'hello world'
True
>>> p.flatten(49)
49
>>> p.flatten(350.0)
350.0
>>> p.flatten(True)
True
>>> p.flatten(False)
False
>>> r = p.flatten(None)
>>> r is None
True
>>> p.flatten(False)
False
>>> p.flatten([1, 2, 3, 4])
[1, 2, 3, 4]
>>> p.flatten((1,2,))[tags.TUPLE]
[1, 2]
>>> p.flatten({'key': 'value'}) == {'key': 'value'}
True
Copyright (C) 2008 John Paulett (john -at- paulett.org) Copyright (C) 2009-2018 David Aguilar (davvid -at- gmail.com) All rights reserved. This software is licensed as described in the file COPYING, which you should have received as part of this distribution. The current recursion depth The maximal recursion depth Maps id(obj) to reference IDs Avoids garbage collection maximum amount of items to take from a pickled iterator Whether to allow decimals to pass-through ignore exceptions Pretend the object is new if obj is nonrecursive return immediately for performance reasons we don't want to do recursive checks Decimal is a primitive when use_decimal is True break the cycle We handle tuples and sets by encoding them in a "(tuple|set)dict" instance methods, lambdas, old style classes... We've never seen this object so return its json representation. We've seen this object before so place an object reference tag in the data. This avoids infinite recursion when processing cyclical objects. A circular becomes None. Support objects with __getstate__(); this ensures that both __setstate__() and __getstate__() are implemented not using has_method since __getstate__() is handled separately below Check for a custom handler A lot of builtin types have a reduce which just raises a TypeError we ignore those test for a reduce implementation, and redirect before doing anything else if that is what reduce requests we're implementing protocol 2 A lot of builtin types have a reduce which just raises a TypeError we ignore those curmod will be transformed by the loop into the value to pickle replace obj with value retrieved well, we can't do anything with that, so we ignore it at this point, reduce_val should be some kind of iterable pad out to len 5 check that getstate/setstate is sane turn iterators to iterables for convenient serialization Has getstate but it cannot be called, e.g. 
file descriptors in Python3 force list in python 3 Support objects that subclasses list and set hack for zope persistent objects; this unghostifies the object catchall return for data created above without a return (e.g. __getnewargs__ is not supposed to be the end of the story) If we allow non-string keys then we have to do a two-phase encoding to ensure that the reference IDs are deterministic. Phase 1: serialize regular objects, ignore fancy keys. Phase 2: serialize non-string keys. If we have string keys only then we only need a single pass. the collections.defaultdict protocol Reference the class/type The factory is not a type and could reference e.g. functions or even the object instance itself, which creates a cycle. We've never seen this object before so pickle it in-place. Create an instance from the factory and assume that the resulting instance is a suitable examplar. We've seen this object before. Break the cycle by emitting a reference. Sub-classes of dict The attribute may have been deleted for compatibility with common json encoders for compatibility with common json encoders | 8,530 | en | 0.769188 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.