max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
dcn/__init__.py | draklowell/DCNNode | 0 | 6622151 | from . import packet
from . import server
from . import handler
from . import info | from . import packet
from . import server
from . import handler
from . import info | none | 1 | 1.114228 | 1 | |
PythonExercicios/ex101.py | raulgranja/Python-Course | 0 | 6622152 | def voto(ano):
from datetime import datetime
idade = datetime.now().year - ano
if 16 <= idade < 18 or idade > 65:
return f'Com {idade} anos: VOTO OPCIONAL'
elif idade >= 18:
return f'Com {idade} anos: VOTO OBRIGATÓRIO'
else:
return f'Com {idade} anos: VOTO NEGADO'
# main
nasc = int(input('Em que ano você nasceu? '))
print(voto(nasc))
| def voto(ano):
from datetime import datetime
idade = datetime.now().year - ano
if 16 <= idade < 18 or idade > 65:
return f'Com {idade} anos: VOTO OPCIONAL'
elif idade >= 18:
return f'Com {idade} anos: VOTO OBRIGATÓRIO'
else:
return f'Com {idade} anos: VOTO NEGADO'
# main
nasc = int(input('Em que ano você nasceu? '))
print(voto(nasc))
| none | 1 | 3.805221 | 4 | |
interpret.py | Timothy102/covid-ct | 1 | 6622153 | <reponame>Timothy102/covid-ct
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import savgol_filter
import seaborn as sns
from tqdm import tqdm
from config import OUTPUT_CSV
def parseArguments():
parser = argparse.ArgumentParser()
parser.add_argument("--path", type=str, default=TRAIN_PATH,
help="File path to the CSV file that contains walking data.")
parser.add_argument("--output_dir", type=str, default=OUTPUT_VIS,
help="Directory where to save outputs.")
args = parser.parse_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
else:
shutil.rmtree(args.output_dir)
os.makedirs(args.output_dir)
return args
class Interpreter():
def __init__(self, csv_file, output_csv = OUTPUT_CSV):
self.csv_file = csv_file
self.output_csv = output_csv
def get_data(self):
data = pd.DataFrame(self.csv_file)
data["all_percent"] = (data["ggo_vol"] + data["cons_vol"]) / data["lung_vol"]
data["ggo_percent"] = data["ggo_vol"] / data["lung_vol"]
data["cons_percent"] = data["cons_vol"] / data["lung_vol"]
dataA = data[data["label"] == "A"]
dataB = data[data["label"] == "B"]
dataC = data[data["label"] == "C"]
return dataA, dataB, dataC
def calculate_thresholds(self, epsilon=1e-7):
dataA, dataB, dataC = self.get_data()
num_A = len(dataA)
num_B = len(dataB)
num_C = len(dataC)
maximum = 0.0
thresholds = dict()
for i in tqdm(range(0,1000,1)):
for j in range(i,1000, 1):
temp = float(i) / 1000
j = float(j) / 1000
percA = float(len(dataA[dataA.all_percent < temp]))
percB = float(len(dataB[(dataB.all_percent >= temp) & (dataB.all_percent < j)]))
percC = float(len(dataC[dataC.all_percent >= j]))
if percA != 0.0:
percA = percA / num_A
if percB != 0.0:
percB = percB / num_B
if percC != 0.0:
percC = percC / num_C
total = percA + percB + percC
if total > maximum:
thresholds["AB"] = temp
thresholds["BC"] = j
thresholds["maximum"] = total / 3
maximum = total
return thresholds
def plot(self):
combined_df = self.get_data()
thresholds = self.calculate_thresholds(combined_df)
sns.violinplot(x="all_percent",y="label", data=combined_df, split=True, linewidth=1)
# Prvo je treba izračunat thresholde s calculate_thresholds()
plt.axvline(thresholds["AB"]) # AB diskriminacija
plt.axvline(thresholds["BC"]) # AC diskriminacija
print("Total discriminative power: ", thresholds["maximum"])
print(thresholds)
def output(self):
combined_df = self.get_data()
thresholds = self.calculate_thresholds(combined_df)
def toabc(x):
if x < thresholds["AB"]: return 'A'
if x >= thresholds["AB"] and x < thresholds["BC"]: return 'B'
return 'C'
combined_df["class"] = combined_df["all_percent"].apply(lambda x: toabc(x))
combined_df[["filename_img", "class"]].to_csv(self.output_csv, index=False)
def main(args = sys.argv[1:]):
args = parseArguments()
interpreter = Interpreter(args.path, args.output_dir)
interpreter.output()
if name == "__main__":
main()
| import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import savgol_filter
import seaborn as sns
from tqdm import tqdm
from config import OUTPUT_CSV
def parseArguments():
parser = argparse.ArgumentParser()
parser.add_argument("--path", type=str, default=TRAIN_PATH,
help="File path to the CSV file that contains walking data.")
parser.add_argument("--output_dir", type=str, default=OUTPUT_VIS,
help="Directory where to save outputs.")
args = parser.parse_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
else:
shutil.rmtree(args.output_dir)
os.makedirs(args.output_dir)
return args
class Interpreter():
def __init__(self, csv_file, output_csv = OUTPUT_CSV):
self.csv_file = csv_file
self.output_csv = output_csv
def get_data(self):
data = pd.DataFrame(self.csv_file)
data["all_percent"] = (data["ggo_vol"] + data["cons_vol"]) / data["lung_vol"]
data["ggo_percent"] = data["ggo_vol"] / data["lung_vol"]
data["cons_percent"] = data["cons_vol"] / data["lung_vol"]
dataA = data[data["label"] == "A"]
dataB = data[data["label"] == "B"]
dataC = data[data["label"] == "C"]
return dataA, dataB, dataC
def calculate_thresholds(self, epsilon=1e-7):
dataA, dataB, dataC = self.get_data()
num_A = len(dataA)
num_B = len(dataB)
num_C = len(dataC)
maximum = 0.0
thresholds = dict()
for i in tqdm(range(0,1000,1)):
for j in range(i,1000, 1):
temp = float(i) / 1000
j = float(j) / 1000
percA = float(len(dataA[dataA.all_percent < temp]))
percB = float(len(dataB[(dataB.all_percent >= temp) & (dataB.all_percent < j)]))
percC = float(len(dataC[dataC.all_percent >= j]))
if percA != 0.0:
percA = percA / num_A
if percB != 0.0:
percB = percB / num_B
if percC != 0.0:
percC = percC / num_C
total = percA + percB + percC
if total > maximum:
thresholds["AB"] = temp
thresholds["BC"] = j
thresholds["maximum"] = total / 3
maximum = total
return thresholds
def plot(self):
combined_df = self.get_data()
thresholds = self.calculate_thresholds(combined_df)
sns.violinplot(x="all_percent",y="label", data=combined_df, split=True, linewidth=1)
# Prvo je treba izračunat thresholde s calculate_thresholds()
plt.axvline(thresholds["AB"]) # AB diskriminacija
plt.axvline(thresholds["BC"]) # AC diskriminacija
print("Total discriminative power: ", thresholds["maximum"])
print(thresholds)
def output(self):
combined_df = self.get_data()
thresholds = self.calculate_thresholds(combined_df)
def toabc(x):
if x < thresholds["AB"]: return 'A'
if x >= thresholds["AB"] and x < thresholds["BC"]: return 'B'
return 'C'
combined_df["class"] = combined_df["all_percent"].apply(lambda x: toabc(x))
combined_df[["filename_img", "class"]].to_csv(self.output_csv, index=False)
def main(args = sys.argv[1:]):
args = parseArguments()
interpreter = Interpreter(args.path, args.output_dir)
interpreter.output()
if name == "__main__":
main() | sl | 0.260245 | # Prvo je treba izračunat thresholde s calculate_thresholds() # AB diskriminacija # AC diskriminacija | 2.660109 | 3 |
synclottery/sd.py | beiji-zhouqi/syncLottery | 2 | 6622154 | #!/usr/bin/env python
# encoding: utf-8
import re
import time
import datetime
from synclottery.requestData import GetData
'''
url: 使用的是360彩票官网接口数据,修改startTime和endTime获取期间数据
sd_re: 获取数据正则表达式
'''
def runSql(start_Time, end_Time):
url = "https://chart.cp.360.cn/kaijiang/sd?lotId=210053&spanType=2&span=" + start_Time + "_" + end_Time
sdRe = re.compile(r'<tr week=.*?<td>(.*?)</td><td>(.*?)</td>.*?<span .*?>(.*?)</span>.*?<span .*?>(.*?)</span>.*?<span .*?>(.*?)</span>.*?<td>(.*?)</td>.*?</tr>')
instance = GetData(url, sdRe)
data = instance.requestData()
for i in reversed(data):
period = i[0]
r = i[1][:10]
dataPeriod = i[1][:10]
testhaoma = i[5][:3]
haoma = i[2] + i[3] + i[4]
a = i[2]
b = i[3]
c = i[4]
ab = i[2] + i[3]
ac = i[2] + i[4]
bc = i[3] + i[4]
insertData = (str(period),str(dataPeriod),str(testhaoma),str(haoma),str(a),str(b),str(c),str(ab),str(ac),str(bc))
sql = "insert into sdhaoma(period,data_period,testhaoma,haoma,a,b,c,ab,ac,bc)values(\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\')"% insertData
instance.sqlExecute(sql, "insert")
def getYesterday():
today = datetime.date.today()
oneday = datetime.timedelta(days=1)
yesterday = today - oneday
return yesterday
def getTomorrow():
today = datetime.date.today()
oneday = datetime.timedelta(days=1)
tomorrow = today + oneday
return tomorrow
def sdRun():
instance = GetData('', '')
select_result = instance.sqlExecute("select data_period from sdhaoma order by data_period desc limit 1", "select")
timeArray = time.localtime(int(time.time()))
endTime = time.strftime("%Y-%m-%d",timeArray)
if len(select_result) == 0:
startTime = "2017-01-01"
runSql(startTime, endTime)
elif select_result[0][0] == getYesterday() and int(time.time()) > 79200:
runSql(getTomorrow(), endTime)
elif select_result[0][0] != getYesterday():
startTime = select_result[0][0]
runSql(startTime, endTime)
else:
print('no run_sql')
| #!/usr/bin/env python
# encoding: utf-8
import re
import time
import datetime
from synclottery.requestData import GetData
'''
url: 使用的是360彩票官网接口数据,修改startTime和endTime获取期间数据
sd_re: 获取数据正则表达式
'''
def runSql(start_Time, end_Time):
url = "https://chart.cp.360.cn/kaijiang/sd?lotId=210053&spanType=2&span=" + start_Time + "_" + end_Time
sdRe = re.compile(r'<tr week=.*?<td>(.*?)</td><td>(.*?)</td>.*?<span .*?>(.*?)</span>.*?<span .*?>(.*?)</span>.*?<span .*?>(.*?)</span>.*?<td>(.*?)</td>.*?</tr>')
instance = GetData(url, sdRe)
data = instance.requestData()
for i in reversed(data):
period = i[0]
r = i[1][:10]
dataPeriod = i[1][:10]
testhaoma = i[5][:3]
haoma = i[2] + i[3] + i[4]
a = i[2]
b = i[3]
c = i[4]
ab = i[2] + i[3]
ac = i[2] + i[4]
bc = i[3] + i[4]
insertData = (str(period),str(dataPeriod),str(testhaoma),str(haoma),str(a),str(b),str(c),str(ab),str(ac),str(bc))
sql = "insert into sdhaoma(period,data_period,testhaoma,haoma,a,b,c,ab,ac,bc)values(\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\')"% insertData
instance.sqlExecute(sql, "insert")
def getYesterday():
today = datetime.date.today()
oneday = datetime.timedelta(days=1)
yesterday = today - oneday
return yesterday
def getTomorrow():
today = datetime.date.today()
oneday = datetime.timedelta(days=1)
tomorrow = today + oneday
return tomorrow
def sdRun():
instance = GetData('', '')
select_result = instance.sqlExecute("select data_period from sdhaoma order by data_period desc limit 1", "select")
timeArray = time.localtime(int(time.time()))
endTime = time.strftime("%Y-%m-%d",timeArray)
if len(select_result) == 0:
startTime = "2017-01-01"
runSql(startTime, endTime)
elif select_result[0][0] == getYesterday() and int(time.time()) > 79200:
runSql(getTomorrow(), endTime)
elif select_result[0][0] != getYesterday():
startTime = select_result[0][0]
runSql(startTime, endTime)
else:
print('no run_sql')
| zh | 0.765544 | #!/usr/bin/env python # encoding: utf-8 url: 使用的是360彩票官网接口数据,修改startTime和endTime获取期间数据 sd_re: 获取数据正则表达式 | 2.810466 | 3 |
createtest_images.py | mrrocketraccoon/AdvancedLaneDetection | 0 | 6622155 | <gh_stars>0
from CameraCalibration import CameraCalibration
from Thresholds import abs_sobel_thresh, mag_thresh, dir_threshold, color_r_threshold
from SlidingWindows import sliding_windows
from FitPolynomial import fit_polynomial
import matplotlib.image as mpimg
import cv2
import numpy as np
import matplotlib.pyplot as plt
#Calibrate camera
image = mpimg.imread('test_images/test4.jpg')
#img = mpimg.imread('test_images/test4.jpg')
img_size = (image.shape[1], image.shape[0])
calibration = CameraCalibration()
objpoints, imgpoints = calibration.calibrate()
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
undst = cv2.undistort(image, mtx, dist, None, mtx)
#Threshold
#Sobel kernel size
ksize = 3
#Apply each of the thresholding functions
gradx = abs_sobel_thresh(undst, orient='x', sobel_kernel=ksize, thresh=(50, 255))
mag_binary = mag_thresh(undst, sobel_kernel=ksize, mag_thresh=(50, 255))
dir_binary = dir_threshold(undst, sobel_kernel=ksize, thresh=(0.7, 1.3))
color_binary = color_r_threshold(undst, thresh=(170, 255))
#Try a combination
combined = np.zeros_like(dir_binary)
combined[(gradx == 1 | ((mag_binary == 1) & (dir_binary == 1))) | color_binary == 1] = 1
#Perform perspective transform from source to bird's eyeview
src = np.float32([[600, 450], [720, 450], [1160, 720], [220, 720]])
dst = np.float32([[300,0], [980,0], [980,720], [300,720]])
M = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(combined, M, (undst.shape[1],undst.shape[0]), flags=cv2.INTER_LINEAR)
#cv2.imshow('test_images/calibrated_image.jpg',warped)
#cv2.waitKey(0)
######The histogram shows that the lanes are located at around x = 400 and x = 1020######
normalized_undst = warped/255
# Take a histogram of the bottom half of the image
histogram = np.sum(normalized_undst[normalized_undst.shape[0]//2:,:], axis=0)
#plt.plot(histogram)
#plt.show()
# Create an output image to draw on and visualize the result
out_img = np.dstack((warped, warped, warped))*255
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
#Set up windows and window hyperparameters
# HYPERPARAMETERS
# Choose the number of sliding windows
nwindows = 9
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
leftx, lefty, rightx, righty = sliding_windows(warped, nwindows, leftx_base, rightx_base, margin, out_img, minpix)
ploty = np.linspace(0, warped.shape[0]-1, warped.shape[0] )
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/650 # meters per pixel in x dimension
left_fitx, right_fitx, left_fit_cr, right_fit_cr = fit_polynomial(lefty, leftx, righty, rightx, ym_per_pix, xm_per_pix, ploty)
# Define y-value where we want radius of curvature
# We'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(out_img.shape[0])-1
##### TO-DO: Implement the calculation of R_curve (radius of curvature) #####
left_curverad = (1+(2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**(3/2)/(2*abs(left_fit_cr[0])) ## Implement the calculation of the left line here
right_curverad = (1+(2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**(3/2)/(2*abs(right_fit_cr[0])) ## Implement the calculation of the right line here
offset = (out_img.shape[1]/2 - (left_fitx[y_eval]+right_fitx[y_eval])/2)*xm_per_pix
print(left_curverad, 'm', right_curverad, 'm', offset, 'm')
# Create an image to draw the lines on
warp_zero = np.zeros_like(warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
Minv = cv2.getPerspectiveTransform(dst,src)
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (out_img.shape[1], out_img.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(undst, 1, newwarp, 0.3, 0)
cv2.putText(result,'Curve Radius [m]: '+str((left_curverad+right_curverad)/2)[:7],(40,70), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.6, (0,255,0),2,cv2.LINE_AA)
cv2.putText(result,'Center Offset [m]: '+str(offset)[:7],(40,150), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.6,(0,255,0),2,cv2.LINE_AA)
#plt.imshow(result)
#plt.axis('off')
#plt.show()
#plt.savefig('output_images/result.jpg', bbox_inches='tight', pad_inches=0)
# Plot the result
#f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
#f.tight_layout()
#ax1.imshow(img_mod1)
# Plots the left and right polynomials on the lane lines
#ax1.set_title('Undistorted image with src drawn', fontsize=50)
#ax2.imshow(img_mod2)
#ax2.set_title('Warped result with dst drawn', fontsize=50)
#plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
#plt.savefig('output_images/warped.jpg')
#f.savefig('')
| from CameraCalibration import CameraCalibration
from Thresholds import abs_sobel_thresh, mag_thresh, dir_threshold, color_r_threshold
from SlidingWindows import sliding_windows
from FitPolynomial import fit_polynomial
import matplotlib.image as mpimg
import cv2
import numpy as np
import matplotlib.pyplot as plt
#Calibrate camera
image = mpimg.imread('test_images/test4.jpg')
#img = mpimg.imread('test_images/test4.jpg')
img_size = (image.shape[1], image.shape[0])
calibration = CameraCalibration()
objpoints, imgpoints = calibration.calibrate()
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
undst = cv2.undistort(image, mtx, dist, None, mtx)
#Threshold
#Sobel kernel size
ksize = 3
#Apply each of the thresholding functions
gradx = abs_sobel_thresh(undst, orient='x', sobel_kernel=ksize, thresh=(50, 255))
mag_binary = mag_thresh(undst, sobel_kernel=ksize, mag_thresh=(50, 255))
dir_binary = dir_threshold(undst, sobel_kernel=ksize, thresh=(0.7, 1.3))
color_binary = color_r_threshold(undst, thresh=(170, 255))
#Try a combination
combined = np.zeros_like(dir_binary)
combined[(gradx == 1 | ((mag_binary == 1) & (dir_binary == 1))) | color_binary == 1] = 1
#Perform perspective transform from source to bird's eyeview
src = np.float32([[600, 450], [720, 450], [1160, 720], [220, 720]])
dst = np.float32([[300,0], [980,0], [980,720], [300,720]])
M = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(combined, M, (undst.shape[1],undst.shape[0]), flags=cv2.INTER_LINEAR)
#cv2.imshow('test_images/calibrated_image.jpg',warped)
#cv2.waitKey(0)
######The histogram shows that the lanes are located at around x = 400 and x = 1020######
normalized_undst = warped/255
# Take a histogram of the bottom half of the image
histogram = np.sum(normalized_undst[normalized_undst.shape[0]//2:,:], axis=0)
#plt.plot(histogram)
#plt.show()
# Create an output image to draw on and visualize the result
out_img = np.dstack((warped, warped, warped))*255
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
#Set up windows and window hyperparameters
# HYPERPARAMETERS
# Choose the number of sliding windows
nwindows = 9
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
leftx, lefty, rightx, righty = sliding_windows(warped, nwindows, leftx_base, rightx_base, margin, out_img, minpix)
ploty = np.linspace(0, warped.shape[0]-1, warped.shape[0] )
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/650 # meters per pixel in x dimension
left_fitx, right_fitx, left_fit_cr, right_fit_cr = fit_polynomial(lefty, leftx, righty, rightx, ym_per_pix, xm_per_pix, ploty)
# Define y-value where we want radius of curvature
# We'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(out_img.shape[0])-1
##### TO-DO: Implement the calculation of R_curve (radius of curvature) #####
left_curverad = (1+(2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**(3/2)/(2*abs(left_fit_cr[0])) ## Implement the calculation of the left line here
right_curverad = (1+(2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**(3/2)/(2*abs(right_fit_cr[0])) ## Implement the calculation of the right line here
offset = (out_img.shape[1]/2 - (left_fitx[y_eval]+right_fitx[y_eval])/2)*xm_per_pix
print(left_curverad, 'm', right_curverad, 'm', offset, 'm')
# Create an image to draw the lines on
warp_zero = np.zeros_like(warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
Minv = cv2.getPerspectiveTransform(dst,src)
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (out_img.shape[1], out_img.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(undst, 1, newwarp, 0.3, 0)
cv2.putText(result,'Curve Radius [m]: '+str((left_curverad+right_curverad)/2)[:7],(40,70), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.6, (0,255,0),2,cv2.LINE_AA)
cv2.putText(result,'Center Offset [m]: '+str(offset)[:7],(40,150), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.6,(0,255,0),2,cv2.LINE_AA)
#plt.imshow(result)
#plt.axis('off')
#plt.show()
#plt.savefig('output_images/result.jpg', bbox_inches='tight', pad_inches=0)
# Plot the result
#f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
#f.tight_layout()
#ax1.imshow(img_mod1)
# Plots the left and right polynomials on the lane lines
#ax1.set_title('Undistorted image with src drawn', fontsize=50)
#ax2.imshow(img_mod2)
#ax2.set_title('Warped result with dst drawn', fontsize=50)
#plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
#plt.savefig('output_images/warped.jpg')
#f.savefig('') | en | 0.595038 | #Calibrate camera #img = mpimg.imread('test_images/test4.jpg') #Threshold #Sobel kernel size #Apply each of the thresholding functions #Try a combination #Perform perspective transform from source to bird's eyeview #cv2.imshow('test_images/calibrated_image.jpg',warped) #cv2.waitKey(0) ######The histogram shows that the lanes are located at around x = 400 and x = 1020###### # Take a histogram of the bottom half of the image #plt.plot(histogram) #plt.show() # Create an output image to draw on and visualize the result # Find the peak of the left and right halves of the histogram # These will be the starting point for the left and right lines #Set up windows and window hyperparameters # HYPERPARAMETERS # Choose the number of sliding windows # Set the width of the windows +/- margin # Set minimum number of pixels found to recenter window # meters per pixel in y dimension # meters per pixel in x dimension # Define y-value where we want radius of curvature # We'll choose the maximum y-value, corresponding to the bottom of the image ##### TO-DO: Implement the calculation of R_curve (radius of curvature) ##### ## Implement the calculation of the left line here ## Implement the calculation of the right line here # Create an image to draw the lines on # Recast the x and y points into usable format for cv2.fillPoly() # Draw the lane onto the warped blank image # Warp the blank back to original image space using inverse perspective matrix (Minv) # Combine the result with the original image #plt.imshow(result) #plt.axis('off') #plt.show() #plt.savefig('output_images/result.jpg', bbox_inches='tight', pad_inches=0) # Plot the result #f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9)) #f.tight_layout() #ax1.imshow(img_mod1) # Plots the left and right polynomials on the lane lines #ax1.set_title('Undistorted image with src drawn', fontsize=50) #ax2.imshow(img_mod2) #ax2.set_title('Warped result with dst drawn', fontsize=50) 
#plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.) #plt.savefig('output_images/warped.jpg') #f.savefig('') | 2.363641 | 2 |
restore.py | CurryEleison/workdocs-disaster-recovery | 0 | 6622156 | <reponame>CurryEleison/workdocs-disaster-recovery
from argparse import ArgumentParser, ArgumentTypeError
from os.path import isdir
from pathlib import Path
import logging
from workdocs_dr.cli_arguments import clients_from_input, bucket_url_from_input, logging_setup, organization_id_from_input, wdfilter_from_input
from workdocs_dr.directory_restore import DirectoryRestoreRunner
rootlogger = logging.getLogger()
rootlogger.setLevel(logging.INFO)
def main():
parser = ArgumentParser()
parser.add_argument("--profile", help="AWS profile", default=None)
parser.add_argument("--region", help="AWS region", default=None)
parser.add_argument("--user-query", help="Query of user", default=None)
parser.add_argument("--folder", help="Folder(s) to restore", default=None)
parser.add_argument("--organization-id",
help="Workdocs organization id (directory id)", default=None)
parser.add_argument(
"--prefix", help="Prefix for bucket access", default=None)
parser.add_argument("--bucket-name", help="Name of bucket", default=None)
parser.add_argument("--path", type=dir_path, default=Path("."))
parser.add_argument(
"--bucket-role-arn",
help="ARN of role that puts/gets disaster recovery documents", default=None)
parser.add_argument("--verbose", help="Verbose output",
dest="verbose", action="store_true")
args = parser.parse_args()
clients = clients_from_input(profile_name=args.profile, region_name=args.region,
workdocs_role_arn=None, bucket_role_arn=args.bucket_role_arn)
bucket = bucket_url_from_input(args.bucket_name, args.prefix)
filter = wdfilter_from_input(args.user_query, args.folder)
organization_id = organization_id_from_input(args.organization_id)
# Restorer goes here
drr = DirectoryRestoreRunner(
clients,
organization_id,
bucket,
filter,
args.path
)
drr.runall()
logging_setup(rootlogger=rootlogger, verbose=args.verbose)
def dir_path(path):
if isdir(path):
return path
else:
raise ArgumentTypeError(f"readable_dir:{path} is not a valid path")
if __name__ == '__main__':
main()
| from argparse import ArgumentParser, ArgumentTypeError
from os.path import isdir
from pathlib import Path
import logging
from workdocs_dr.cli_arguments import clients_from_input, bucket_url_from_input, logging_setup, organization_id_from_input, wdfilter_from_input
from workdocs_dr.directory_restore import DirectoryRestoreRunner
rootlogger = logging.getLogger()
rootlogger.setLevel(logging.INFO)
def main():
parser = ArgumentParser()
parser.add_argument("--profile", help="AWS profile", default=None)
parser.add_argument("--region", help="AWS region", default=None)
parser.add_argument("--user-query", help="Query of user", default=None)
parser.add_argument("--folder", help="Folder(s) to restore", default=None)
parser.add_argument("--organization-id",
help="Workdocs organization id (directory id)", default=None)
parser.add_argument(
"--prefix", help="Prefix for bucket access", default=None)
parser.add_argument("--bucket-name", help="Name of bucket", default=None)
parser.add_argument("--path", type=dir_path, default=Path("."))
parser.add_argument(
"--bucket-role-arn",
help="ARN of role that puts/gets disaster recovery documents", default=None)
parser.add_argument("--verbose", help="Verbose output",
dest="verbose", action="store_true")
args = parser.parse_args()
clients = clients_from_input(profile_name=args.profile, region_name=args.region,
workdocs_role_arn=None, bucket_role_arn=args.bucket_role_arn)
bucket = bucket_url_from_input(args.bucket_name, args.prefix)
filter = wdfilter_from_input(args.user_query, args.folder)
organization_id = organization_id_from_input(args.organization_id)
# Restorer goes here
drr = DirectoryRestoreRunner(
clients,
organization_id,
bucket,
filter,
args.path
)
drr.runall()
logging_setup(rootlogger=rootlogger, verbose=args.verbose)
def dir_path(path):
if isdir(path):
return path
else:
raise ArgumentTypeError(f"readable_dir:{path} is not a valid path")
if __name__ == '__main__':
main() | en | 0.677008 | # Restorer goes here | 2.380693 | 2 |
lib3to2/tests/test_itertools.py | hajs/lib3to2_fork | 3 | 6622157 | <filename>lib3to2/tests/test_itertools.py
from lib3to2.tests.support import lib3to2FixerTestCase
class Test_itertoools(lib3to2FixerTestCase):
fixer = "itertools"
def test_map(self):
b = """map(a, b)"""
a = """from itertools import imap\nimap(a, b)"""
self.check(b, a)
def test_unchanged_nobuiltin(self):
s = """obj.filter(a, b)"""
self.unchanged(s)
s = """
def map():
pass
"""
self.unchanged(s)
def test_filter(self):
b = "a = filter( a, b)"
a = "from itertools import ifilter\na = ifilter( a, b)"
self.check(b, a)
def test_zip(self):
b = """for key, val in zip(a, b):\n\tdct[key] = val"""
a = """from itertools import izip\nfor key, val in izip(a, b):\n\tdct[key] = val"""
self.check(b, a)
def test_filterfalse(self):
b = """from itertools import function, filterfalse, other_function"""
a = """from itertools import function, ifilterfalse, other_function"""
self.check( b, a)
b = """filterfalse(a, b)"""
a = """ifilterfalse(a, b)"""
self.check(b, a )
| <filename>lib3to2/tests/test_itertools.py
from lib3to2.tests.support import lib3to2FixerTestCase
class Test_itertoools(lib3to2FixerTestCase):
fixer = "itertools"
def test_map(self):
b = """map(a, b)"""
a = """from itertools import imap\nimap(a, b)"""
self.check(b, a)
def test_unchanged_nobuiltin(self):
s = """obj.filter(a, b)"""
self.unchanged(s)
s = """
def map():
pass
"""
self.unchanged(s)
def test_filter(self):
b = "a = filter( a, b)"
a = "from itertools import ifilter\na = ifilter( a, b)"
self.check(b, a)
def test_zip(self):
b = """for key, val in zip(a, b):\n\tdct[key] = val"""
a = """from itertools import izip\nfor key, val in izip(a, b):\n\tdct[key] = val"""
self.check(b, a)
def test_filterfalse(self):
b = """from itertools import function, filterfalse, other_function"""
a = """from itertools import function, ifilterfalse, other_function"""
self.check( b, a)
b = """filterfalse(a, b)"""
a = """ifilterfalse(a, b)"""
self.check(b, a )
| en | 0.408972 | map(a, b) from itertools import imap\nimap(a, b) obj.filter(a, b) def map(): pass for key, val in zip(a, b):\n\tdct[key] = val from itertools import izip\nfor key, val in izip(a, b):\n\tdct[key] = val from itertools import function, filterfalse, other_function from itertools import function, ifilterfalse, other_function filterfalse(a, b) ifilterfalse(a, b) | 2.549675 | 3 |
plugin/utils/include_parser.py | LexouDuck/EasyClangComplete | 648 | 6622158 | """Find all includes."""
import os
import logging
from os import path
import sublime
from ..utils import thread_job
log = logging.getLogger("ECC")
FILE_TAG = "📄 "
FOLDER_TAG = "📂 "
class IncludeCompleter():
    """Drive `#include` completion through Sublime's quick panel.

    The completer lists the headers and sub-folders found in a set of
    include directories; picking a folder recurses into it, picking a
    file commits the accumulated include path into the view.
    """
    # Closing character to insert for each possible opening character.
    MATCHING_CHAR = {
        '<': '>',
        '"': '"'
    }
    def __init__(self, view, opening_char, thread_pool):
        """Initialize the object.

        Args:
            view: the sublime.View the include is being typed into.
            opening_char: the character the user typed ('<' or '"').
            thread_pool: pool used to scan folders off the UI thread.
        """
        self.view = view
        self.opening_char = opening_char
        self.thread_pool = thread_pool
        self.folders_and_headers = None  # [[tag, name, [paths]]] backing the current panel.
        self.max_lines_per_item = 1  # Rows per panel entry (the panel needs them equal).
        self.full_include_path = None  # Path accumulated while drilling into folders.
    def start_completion(self, initial_folders, force_unix_includes=False):
        """Scan ``initial_folders`` in the background and open the panel."""
        job = thread_job.ThreadJob(
            name=thread_job.ThreadJob.COMPLETE_INCLUDES_TAG,
            function=IncludeCompleter.__get_all_headers,
            callback=self.__on_folders_loaded,
            args=[initial_folders, force_unix_includes])
        self.thread_pool.new_job(job)
    def on_include_picked(self, idx):
        """Handle the quick-panel selection at index ``idx``.

        Picking a folder starts a new completion round inside it;
        picking a file (or cancelling, idx == -1) commits whatever
        include path was accumulated so far.
        """
        log.debug("Picked index: %s", idx)
        if not self.folders_and_headers:
            log.debug("No folders to show for includes yet.")
            return IncludeCompleter.__commit_include_path(
                self.view, self.opening_char)
        if idx < 0 or idx >= len(self.folders_and_headers):
            # Panel cancelled or index out of range: insert just the opening char.
            return IncludeCompleter.__commit_include_path(
                self.view, self.opening_char)
        tag, name, paths = self.folders_and_headers[idx]
        if not self.full_include_path:
            self.full_include_path = ''
        self.full_include_path = path.join(self.full_include_path, name)
        if tag == FOLDER_TAG:
            # A folder was picked: complete within it, keeping the prefix.
            self.start_completion(paths)
            return None
        return IncludeCompleter.__commit_include_path(
            self.view, self.opening_char, self.full_include_path)
    @staticmethod
    def __commit_include_path(view, opening_char, contents=None):
        # Insert either the full include (opening char + path + matching
        # closing char) or just the opening character when there is
        # nothing to complete.
        if contents:
            full_include_str = "{opening_char}{path}{closing_char}".format(
                opening_char=opening_char,
                path=contents,
                closing_char=IncludeCompleter.MATCHING_CHAR[opening_char])
        else:
            full_include_str = opening_char
        view.run_command("insert", {"characters": full_include_str})
    def __on_folders_loaded(self, future):
        # Background scan finished: build the panel model and show it.
        if future.cancelled() or not future.done():
            log.debug("Could not load includes -> cancelled")
            return
        loaded_includes_dict = future.result().items()
        self.folders_and_headers = []
        if loaded_includes_dict:
            self.folders_and_headers = [
                [tag, name, list(paths)]
                for (tag, name), paths in loaded_includes_dict]
            self.max_lines_per_item = max(
                [len(paths) for (_, _), paths in loaded_includes_dict])
        self.view.window().show_quick_panel(
            self.__generate_items_to_show(),
            self.on_include_picked,
            sublime.MONOSPACE_FONT, 0)
    def __generate_items_to_show(self):
        # Each entry: tagged name followed by its paths, padded with empty
        # strings so all entries have the same number of rows.
        if not self.folders_and_headers:
            return []
        contents = []
        for tag, name, paths in self.folders_and_headers:
            padding = self.max_lines_per_item - len(paths)
            contents.append([tag + name] + paths + [''] * padding)
        return contents
    @staticmethod
    def __get_all_headers(folders, force_unix_includes):
        """Scan ``folders`` and map (tag, name) -> set of full paths.

        Sub-folders are tagged FOLDER_TAG; files with no extension or an
        extension starting with ".h" are tagged FILE_TAG.
        """
        def to_platform_specific_paths(folders):
            """Normalize separators (back slashes instead of slashes on Windows)."""
            for idx, folder in enumerate(folders):
                folders[idx] = path.normpath(folder)
            return folders
        matches = {}
        if force_unix_includes:
            folders = to_platform_specific_paths(folders)
        for folder in folders:
            if not path.exists(folder) or not path.isdir(folder):
                continue
            log.debug("Going through: %s", folder)
            for name in os.listdir(folder):
                full_path = path.realpath(path.join(folder, name))
                if path.isdir(full_path):
                    key = (FOLDER_TAG, name)
                    if key not in matches:
                        matches[key] = set([full_path])
                    else:
                        matches[key].add(full_path)
                    continue
                _, ext = path.splitext(name)
                if not ext or ext.startswith(".h"):
                    key = (FILE_TAG, name)
                    if key not in matches:
                        matches[key] = set([full_path])
                    else:
                        matches[key].add(full_path)
                    continue
        log.debug("Includes completion list size: %s", len(matches))
        return matches
| """Find all includes."""
import os
import logging
from os import path
import sublime
from ..utils import thread_job
# Shared plugin logger.
log = logging.getLogger("ECC")
# Emoji tags prepended to quick-panel entries to distinguish files from folders.
FILE_TAG = "📄 "
FOLDER_TAG = "📂 "
class IncludeCompleter():
    """Drive `#include` completion through Sublime's quick panel.

    The completer lists the headers and sub-folders found in a set of
    include directories; picking a folder recurses into it, picking a
    file commits the accumulated include path into the view.
    """
    # Closing character to insert for each possible opening character.
    MATCHING_CHAR = {
        '<': '>',
        '"': '"'
    }
    def __init__(self, view, opening_char, thread_pool):
        """Initialize the object.

        Args:
            view: the sublime.View the include is being typed into.
            opening_char: the character the user typed ('<' or '"').
            thread_pool: pool used to scan folders off the UI thread.
        """
        self.view = view
        self.opening_char = opening_char
        self.thread_pool = thread_pool
        self.folders_and_headers = None  # [[tag, name, [paths]]] backing the current panel.
        self.max_lines_per_item = 1  # Rows per panel entry (the panel needs them equal).
        self.full_include_path = None  # Path accumulated while drilling into folders.
    def start_completion(self, initial_folders, force_unix_includes=False):
        """Scan ``initial_folders`` in the background and open the panel."""
        job = thread_job.ThreadJob(
            name=thread_job.ThreadJob.COMPLETE_INCLUDES_TAG,
            function=IncludeCompleter.__get_all_headers,
            callback=self.__on_folders_loaded,
            args=[initial_folders, force_unix_includes])
        self.thread_pool.new_job(job)
    def on_include_picked(self, idx):
        """Handle the quick-panel selection at index ``idx``.

        Picking a folder starts a new completion round inside it;
        picking a file (or cancelling, idx == -1) commits whatever
        include path was accumulated so far.
        """
        log.debug("Picked index: %s", idx)
        if not self.folders_and_headers:
            log.debug("No folders to show for includes yet.")
            return IncludeCompleter.__commit_include_path(
                self.view, self.opening_char)
        if idx < 0 or idx >= len(self.folders_and_headers):
            # Panel cancelled or index out of range: insert just the opening char.
            return IncludeCompleter.__commit_include_path(
                self.view, self.opening_char)
        tag, name, paths = self.folders_and_headers[idx]
        if not self.full_include_path:
            self.full_include_path = ''
        self.full_include_path = path.join(self.full_include_path, name)
        if tag == FOLDER_TAG:
            # A folder was picked: complete within it, keeping the prefix.
            self.start_completion(paths)
            return None
        return IncludeCompleter.__commit_include_path(
            self.view, self.opening_char, self.full_include_path)
    @staticmethod
    def __commit_include_path(view, opening_char, contents=None):
        # Insert either the full include (opening char + path + matching
        # closing char) or just the opening character when there is
        # nothing to complete.
        if contents:
            full_include_str = "{opening_char}{path}{closing_char}".format(
                opening_char=opening_char,
                path=contents,
                closing_char=IncludeCompleter.MATCHING_CHAR[opening_char])
        else:
            full_include_str = opening_char
        view.run_command("insert", {"characters": full_include_str})
    def __on_folders_loaded(self, future):
        # Background scan finished: build the panel model and show it.
        if future.cancelled() or not future.done():
            log.debug("Could not load includes -> cancelled")
            return
        loaded_includes_dict = future.result().items()
        self.folders_and_headers = []
        if loaded_includes_dict:
            self.folders_and_headers = [
                [tag, name, list(paths)]
                for (tag, name), paths in loaded_includes_dict]
            self.max_lines_per_item = max(
                [len(paths) for (_, _), paths in loaded_includes_dict])
        self.view.window().show_quick_panel(
            self.__generate_items_to_show(),
            self.on_include_picked,
            sublime.MONOSPACE_FONT, 0)
    def __generate_items_to_show(self):
        # Each entry: tagged name followed by its paths, padded with empty
        # strings so all entries have the same number of rows.
        if not self.folders_and_headers:
            return []
        contents = []
        for tag, name, paths in self.folders_and_headers:
            padding = self.max_lines_per_item - len(paths)
            contents.append([tag + name] + paths + [''] * padding)
        return contents
    @staticmethod
    def __get_all_headers(folders, force_unix_includes):
        """Scan ``folders`` and map (tag, name) -> set of full paths.

        Sub-folders are tagged FOLDER_TAG; files with no extension or an
        extension starting with ".h" are tagged FILE_TAG.
        """
        def to_platform_specific_paths(folders):
            """Normalize separators (back slashes instead of slashes on Windows)."""
            for idx, folder in enumerate(folders):
                folders[idx] = path.normpath(folder)
            return folders
        matches = {}
        if force_unix_includes:
            folders = to_platform_specific_paths(folders)
        for folder in folders:
            if not path.exists(folder) or not path.isdir(folder):
                continue
            log.debug("Going through: %s", folder)
            for name in os.listdir(folder):
                full_path = path.realpath(path.join(folder, name))
                if path.isdir(full_path):
                    key = (FOLDER_TAG, name)
                    if key not in matches:
                        matches[key] = set([full_path])
                    else:
                        matches[key].add(full_path)
                    continue
                _, ext = path.splitext(name)
                if not ext or ext.startswith(".h"):
                    key = (FILE_TAG, name)
                    if key not in matches:
                        matches[key] = set([full_path])
                    else:
                        matches[key].add(full_path)
                    continue
        log.debug("Includes completion list size: %s", len(matches))
        return matches
| en | 0.883893 | Find all includes. Handle the include completion in the quick panel. Initialize the object. Start completing includes. Pick this error to navigate to a file. Parse all the folders and return all headers. We might want to have back slashes intead of slashes. | 2.790229 | 3 |
train.py | xinyuan-liu/NL2PL | 0 | 6622159 | <reponame>xinyuan-liu/NL2PL<filename>train.py
import torch
from torch.utils.data import Dataset, DataLoader, TensorDataset
import dataset
from transformer import *
# Run on the first GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Dataset wrapper: provides numpy arrays per split plus the source (NL)
# and target (PL) vocabularies.
hs=dataset.hearthstone()
X_train,Y_train=hs.dataset('train')
trainloader = DataLoader(TensorDataset(torch.from_numpy(X_train),torch.from_numpy(Y_train)), batch_size=32,shuffle=True, num_workers=12)
X_dev,Y_dev=hs.dataset('dev')
devloader = DataLoader(TensorDataset(torch.from_numpy(X_dev),torch.from_numpy(Y_dev)), batch_size=32,shuffle=True, num_workers=12)
# encoder
input_dim = len(hs.NL_voc)
hid_dim = 128 * 3
n_layers = 6
n_heads = 8
pf_dim = 2048
dropout = 0.1
enc = Encoder(input_dim, hid_dim, n_layers, n_heads, pf_dim, EncoderLayer, SelfAttention, PositionwiseFeedforward, dropout, device)
# decoder
output_dim = len(hs.PL_voc)
hid_dim = 128 * 3
n_layers = 6
n_heads = 8
pf_dim = 2048
dropout = 0.1
dec = Decoder(output_dim, hid_dim, n_layers, n_heads, pf_dim, DecoderLayer, SelfAttention, PositionwiseFeedforward, dropout, device)
pad_idx = hs.NL_voc[dataset.PAD]
model = Seq2Seq(enc, dec, pad_idx, device)
#model = torch.nn.DataParallel(model)
model.to(device)
print('The model has %d trainable parameters'%sum(p.numel() for p in model.parameters() if p.requires_grad))
# Xavier-initialise every matrix-shaped parameter; 1-D parameters
# (biases, norm gains) keep their default initialisation.
for p in model.parameters():
    if p.dim() > 1:
        nn.init.xavier_uniform_(p)
class NoamOpt:
    """Optimizer wrapper implementing the "Noam" learning-rate schedule
    from "Attention Is All You Need": the rate warms up linearly for
    ``warmup`` steps and then decays with the inverse square root of the
    step count, scaled by ``model_size ** -0.5`` and ``factor``.
    """
    def __init__(self, model_size, factor, warmup, optimizer):
        self.optimizer = optimizer
        self._step = 0
        self.warmup = warmup
        self.factor = factor
        self.model_size = model_size
        self._rate = 0
    def step(self):
        """Advance one step: refresh every param group's lr, then step the optimizer."""
        self._step += 1
        current_rate = self.rate()
        for group in self.optimizer.param_groups:
            group['lr'] = current_rate
        self._rate = current_rate
        self.optimizer.step()
    def rate(self, step = None):
        """Return the learning rate for ``step`` (defaults to the current step)."""
        if step is None:
            step = self._step
        scale = self.model_size ** (-0.5)
        schedule = min(step ** (-0.5), step * self.warmup ** (-1.5))
        return self.factor * scale * schedule
# Noam schedule wrapped around Adam (lr is driven entirely by the schedule).
optimizer = NoamOpt(hid_dim, 1, 2000, torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
# NOTE(review): pad_idx comes from the *source* (NL) vocabulary but is used
# to ignore *target* tokens here — confirm both vocabularies share the PAD id.
criterion = nn.CrossEntropyLoss(ignore_index=pad_idx)
def train(model, iterator, optimizer, criterion, clip):
    """Run one training epoch and return the mean per-batch loss.

    Each target tensor carries three aligned rows (parent, name, node),
    split along dim 1; the model is fed the first len-1 positions of each
    row and scored against the node row shifted left by one (teacher
    forcing). Gradients are clipped to ``clip`` before each update.
    """
    model.train()
    epoch_loss = 0
    steps=len(iterator)
    for i, sample_batched in enumerate(iterator):
        src,trg=sample_batched
        src,trg=src.to(device),trg.to(device)
        optimizer.optimizer.zero_grad()
        # Split the stacked target into its three rows and drop the
        # singleton dim in place.
        parent,name,trg = trg.split(1, 1)
        parent.squeeze_()
        name.squeeze_()
        trg.squeeze_()
        output = model(src, parent[:,:-1], name[:,:-1], trg[:,:-1])
        # Flatten to (batch*seq, vocab) vs (batch*seq,) for cross-entropy.
        output = output.contiguous().view(-1, output.shape[-1])
        trg = trg[:,1:].contiguous().view(-1)
        loss = criterion(output, trg)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        epoch_loss += loss.item()
    return (epoch_loss / steps)
def evaluate(model, iterator, criterion):
    """Compute the mean validation loss (no gradient, no weight update).

    Mirrors train(): the target is split into parent/name/node rows and
    the node row shifted by one is the gold sequence.
    """
    model.eval()
    epoch_loss = 0
    with torch.no_grad():
        for i, batch in enumerate(iterator):
            src,trg=batch
            src,trg=src.to(device),trg.to(device)
            parent,name,trg = trg.split(1, 1)
            parent.squeeze_()
            name.squeeze_()
            trg.squeeze_()
            output = model(src, parent[:,:-1], name[:,:-1], trg[:,:-1])
            output = output.contiguous().view(-1, output.shape[-1])
            trg = trg[:,1:].contiguous().view(-1)
            loss = criterion(output, trg)
            epoch_loss += loss.item()
    return epoch_loss / len(iterator)
def decode(model, src):
    """Decoding stub.

    NOTE(review): this function is unfinished — it builds the initial
    parent/name/target token lists but never uses them, and the final
    model call passes only ``src``. It is not invoked anywhere in this
    script.
    """
    model.eval()
    with torch.no_grad():
        src=torch.from_numpy(src).to(device)
        parent0=[hs.PL_voc['root']]
        name0=[hs.PL_voc['root']]
        trg0=[hs.PL_voc[dataset.SOS]]
        output = model(src, )
# Gradient-clipping threshold and training schedule.
clip=1
num_epochs=50
best=1000
for epoch in range(num_epochs):
    train_loss = train(model, trainloader, optimizer, criterion, clip)
    valid_loss = evaluate(model, devloader, criterion)
    print("epoch:%s train_loss:%s valid_loss:%s"%(epoch,train_loss,valid_loss))
    # Keep only the checkpoint with the best validation loss seen so far.
    if valid_loss<best:
        best=valid_loss
        torch.save(model,'model.weights')
| import torch
from torch.utils.data import Dataset, DataLoader, TensorDataset
import dataset
from transformer import *
# Run on the first GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Dataset wrapper: provides numpy arrays per split plus the source (NL)
# and target (PL) vocabularies.
hs=dataset.hearthstone()
X_train,Y_train=hs.dataset('train')
trainloader = DataLoader(TensorDataset(torch.from_numpy(X_train),torch.from_numpy(Y_train)), batch_size=32,shuffle=True, num_workers=12)
X_dev,Y_dev=hs.dataset('dev')
devloader = DataLoader(TensorDataset(torch.from_numpy(X_dev),torch.from_numpy(Y_dev)), batch_size=32,shuffle=True, num_workers=12)
# encoder
input_dim = len(hs.NL_voc)
hid_dim = 128 * 3
n_layers = 6
n_heads = 8
pf_dim = 2048
dropout = 0.1
enc = Encoder(input_dim, hid_dim, n_layers, n_heads, pf_dim, EncoderLayer, SelfAttention, PositionwiseFeedforward, dropout, device)
# decoder
output_dim = len(hs.PL_voc)
hid_dim = 128 * 3
n_layers = 6
n_heads = 8
pf_dim = 2048
dropout = 0.1
dec = Decoder(output_dim, hid_dim, n_layers, n_heads, pf_dim, DecoderLayer, SelfAttention, PositionwiseFeedforward, dropout, device)
pad_idx = hs.NL_voc[dataset.PAD]
model = Seq2Seq(enc, dec, pad_idx, device)
#model = torch.nn.DataParallel(model)
model.to(device)
print('The model has %d trainable parameters'%sum(p.numel() for p in model.parameters() if p.requires_grad))
# Xavier-initialise every matrix-shaped parameter; 1-D parameters
# (biases, norm gains) keep their default initialisation.
for p in model.parameters():
    if p.dim() > 1:
        nn.init.xavier_uniform_(p)
class NoamOpt:
    """Optimizer wrapper implementing the "Noam" learning-rate schedule
    from "Attention Is All You Need": the rate warms up linearly for
    ``warmup`` steps and then decays with the inverse square root of the
    step count, scaled by ``model_size ** -0.5`` and ``factor``.
    """
    def __init__(self, model_size, factor, warmup, optimizer):
        self.optimizer = optimizer
        self._step = 0
        self.warmup = warmup
        self.factor = factor
        self.model_size = model_size
        self._rate = 0
    def step(self):
        """Advance one step: refresh every param group's lr, then step the optimizer."""
        self._step += 1
        current_rate = self.rate()
        for group in self.optimizer.param_groups:
            group['lr'] = current_rate
        self._rate = current_rate
        self.optimizer.step()
    def rate(self, step = None):
        """Return the learning rate for ``step`` (defaults to the current step)."""
        if step is None:
            step = self._step
        scale = self.model_size ** (-0.5)
        schedule = min(step ** (-0.5), step * self.warmup ** (-1.5))
        return self.factor * scale * schedule
# Noam schedule wrapped around Adam (lr is driven entirely by the schedule).
optimizer = NoamOpt(hid_dim, 1, 2000, torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
# NOTE(review): pad_idx comes from the *source* (NL) vocabulary but is used
# to ignore *target* tokens here — confirm both vocabularies share the PAD id.
criterion = nn.CrossEntropyLoss(ignore_index=pad_idx)
def train(model, iterator, optimizer, criterion, clip):
    """Run one training epoch and return the mean per-batch loss.

    Each target tensor carries three aligned rows (parent, name, node),
    split along dim 1; the model is fed the first len-1 positions of each
    row and scored against the node row shifted left by one (teacher
    forcing). Gradients are clipped to ``clip`` before each update.
    """
    model.train()
    epoch_loss = 0
    steps=len(iterator)
    for i, sample_batched in enumerate(iterator):
        src,trg=sample_batched
        src,trg=src.to(device),trg.to(device)
        optimizer.optimizer.zero_grad()
        # Split the stacked target into its three rows and drop the
        # singleton dim in place.
        parent,name,trg = trg.split(1, 1)
        parent.squeeze_()
        name.squeeze_()
        trg.squeeze_()
        output = model(src, parent[:,:-1], name[:,:-1], trg[:,:-1])
        # Flatten to (batch*seq, vocab) vs (batch*seq,) for cross-entropy.
        output = output.contiguous().view(-1, output.shape[-1])
        trg = trg[:,1:].contiguous().view(-1)
        loss = criterion(output, trg)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        epoch_loss += loss.item()
    return (epoch_loss / steps)
def evaluate(model, iterator, criterion):
    """Compute the mean validation loss (no gradient, no weight update).

    Mirrors train(): the target is split into parent/name/node rows and
    the node row shifted by one is the gold sequence.
    """
    model.eval()
    epoch_loss = 0
    with torch.no_grad():
        for i, batch in enumerate(iterator):
            src,trg=batch
            src,trg=src.to(device),trg.to(device)
            parent,name,trg = trg.split(1, 1)
            parent.squeeze_()
            name.squeeze_()
            trg.squeeze_()
            output = model(src, parent[:,:-1], name[:,:-1], trg[:,:-1])
            output = output.contiguous().view(-1, output.shape[-1])
            trg = trg[:,1:].contiguous().view(-1)
            loss = criterion(output, trg)
            epoch_loss += loss.item()
    return epoch_loss / len(iterator)
def decode(model, src):
    """Decoding stub.

    NOTE(review): this function is unfinished — it builds the initial
    parent/name/target token lists but never uses them, and the final
    model call passes only ``src``. It is not invoked anywhere in this
    script.
    """
    model.eval()
    with torch.no_grad():
        src=torch.from_numpy(src).to(device)
        parent0=[hs.PL_voc['root']]
        name0=[hs.PL_voc['root']]
        trg0=[hs.PL_voc[dataset.SOS]]
        output = model(src, )
# Gradient-clipping threshold and training schedule.
clip=1
num_epochs=50
best=1000
for epoch in range(num_epochs):
    train_loss = train(model, trainloader, optimizer, criterion, clip)
    valid_loss = evaluate(model, devloader, criterion)
    print("epoch:%s train_loss:%s valid_loss:%s"%(epoch,train_loss,valid_loss))
    # Keep only the checkpoint with the best validation loss seen so far.
    if valid_loss<best:
        best=valid_loss
torch.save(model,'model.weights') | en | 0.379099 | # encoder # decoder #model = torch.nn.DataParallel(model) | 2.339157 | 2 |
diagrams/firebase/extentions.py | bry-c/diagrams | 17,037 | 6622160 | <filename>diagrams/firebase/extentions.py<gh_stars>1000+
# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Firebase
class _Extentions(_Firebase):
    """Category base for Firebase "extentions" diagram nodes (sic: the
    resource folder keeps the original misspelling)."""
    _type = "extentions"
    _icon_dir = "resources/firebase/extentions"
class Extensions(_Extentions):
    """Firebase Extensions node, rendered with extensions.png."""
    _icon = "extensions.png"
# Aliases
| <filename>diagrams/firebase/extentions.py<gh_stars>1000+
# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Firebase
class _Extentions(_Firebase):
    """Category base for Firebase "extentions" diagram nodes (sic: the
    resource folder keeps the original misspelling)."""
    _type = "extentions"
    _icon_dir = "resources/firebase/extentions"
class Extensions(_Extentions):
    """Firebase Extensions node, rendered with extensions.png."""
    _icon = "extensions.png"
# Aliases
| en | 0.645514 | # This module is automatically generated by autogen.sh. DO NOT EDIT. # Aliases | 1.288313 | 1 |
pipeline/0x00-pandas/3-rename.py | Naouali/holbertonschool-machine_learning | 0 | 6622161 | #!/usr/bin/env python3
import pandas as pd
# Loader from the previous exercise: reads a CSV into a DataFrame.
from_file = __import__('2-from_file').from_file
df = from_file('coinbaseUSD_1-min_data_2014-12-01_to_2019-01-09.csv', ',')
# Rename the timestamp column, convert it to pandas datetimes, and keep
# only the Datetime and Close columns before printing the tail.
df.rename(columns={"Timestamp": "Datetime"}, inplace=True)
df['Datetime'] = pd.to_datetime(df['Datetime'])
df = df[['Datetime', 'Close']]
print(df.tail())
| #!/usr/bin/env python3
import pandas as pd
# Loader from the previous exercise: reads a CSV into a DataFrame.
from_file = __import__('2-from_file').from_file
df = from_file('coinbaseUSD_1-min_data_2014-12-01_to_2019-01-09.csv', ',')
# Rename the timestamp column, convert it to pandas datetimes, and keep
# only the Datetime and Close columns before printing the tail.
df.rename(columns={"Timestamp": "Datetime"}, inplace=True)
df['Datetime'] = pd.to_datetime(df['Datetime'])
df = df[['Datetime', 'Close']]
print(df.tail())
| fr | 0.272059 | #!/usr/bin/env python3 # YOUR CODE HERE | 3.214015 | 3 |
robotics/openrave/fixed_tamp_holding.py | nishadg246/stripstream-ivan-nishad | 0 | 6622162 | <gh_stars>0
from manipulation.motion.single_query import cspace_traj_helper, vector_traj_helper
from stripstream.pddl.examples.openrave.utils import solve_inverse_kinematics, \
object_trans_from_manip_trans, set_manipulator_conf, Conf, \
sample_manipulator_trajectory
from manipulation.bodies.robot import manip_from_pose_grasp
from manipulation.primitives.transforms import set_pose
# Debug switches: planning can be stubbed out to speed up testing.
# Disabling trajectories implies trajectory collisions must also be
# disabled (there is no trajectory to check), hence the assert.
DISABLE_TRAJECTORIES = True
DISABLE_TRAJ_COLLISIONS = True
assert not DISABLE_TRAJECTORIES or DISABLE_TRAJ_COLLISIONS
if DISABLE_TRAJECTORIES:
    print 'Warning: trajectories are disabled'
if DISABLE_TRAJ_COLLISIONS:
    print 'Warning: trajectory collisions are disabled'
def enable_all(all_bodies, enable):
    """Set the collision-checking Enable flag of every body in *all_bodies*."""
    for some_body in all_bodies:
        some_body.Enable(enable)
####################
def cfree_pose_fn(env, body1, body2):
    """Build a closure testing that body1 at pose1 and body2 at pose2 do
    not collide in ``env``."""
    def cfree_pose(pose1, pose2): # Collision free test between an object at pose1 and an object at pose2
        body1.Enable(True)
        set_pose(body1, pose1.value)
        body2.Enable(True)
        set_pose(body2, pose2.value)
        return not env.CheckCollision(body1, body2)
    return cfree_pose
####################
def cfree_traj_fn(env, robot, manipulator, body1, body2, all_bodies):
    """Build a closure testing that executing a trajectory (possibly while
    holding body1) stays collision free w.r.t. body2 at a given pose."""
    def _cfree_traj_pose(traj, pose): # Collision free test between a robot executing traj and an object at pose
        enable_all(all_bodies, False)
        body2.Enable(True)
        set_pose(body2, pose.value)
        for conf in traj.path():
            set_manipulator_conf(manipulator, conf)
            if env.CheckCollision(robot, body2):
                return False
        return True
    def _cfree_traj_grasp_pose(traj, grasp, pose): # Collision free test between an object held at grasp while executing traj and an object at pose
        enable_all(all_bodies, False)
        body1.Enable(True)
        body2.Enable(True)
        set_pose(body2, pose.value)
        for conf in traj.path():
            set_manipulator_conf(manipulator, conf)
            manip_trans = manipulator.GetTransform()
            # Move the held object along with the hand before checking.
            set_pose(body1, object_trans_from_manip_trans(manip_trans, grasp.grasp_trans))
            if env.CheckCollision(body1, body2):
                return False
        return True
    def cfree_traj(traj, pose): # Collision free test between a robot executing traj (which may or may not involve a grasp) and an object at pose
        if DISABLE_TRAJ_COLLISIONS:
            return True
        return _cfree_traj_pose(traj, pose) and (traj.grasp is None or
            _cfree_traj_grasp_pose(traj, traj.grasp, pose))
    return cfree_traj
####################
def sample_grasp_traj_fn(env, robot, manipulator, body1, all_bodies):
    """Build a generator that, for an object pose and a grasp, yields a
    (pregrasp configuration, grasp trajectory) pair (or nothing when IK
    or planning fails)."""
    def sample_grasp_traj(pose, grasp): # Sample pregrasp config and motion plan that performs a grasp
        enable_all(all_bodies, False)
        body1.Enable(True)
        set_pose(body1, pose.value)
        manip_trans, approach_vector = manip_from_pose_grasp(pose, grasp)
        grasp_conf = solve_inverse_kinematics(env, manipulator, manip_trans) # Grasp configuration
        if grasp_conf is None: return
        if DISABLE_TRAJECTORIES:
            # Trajectories stubbed out: yield a placeholder traj object.
            yield [(Conf(grasp_conf), object())]
            return
        set_manipulator_conf(manipulator, grasp_conf)
        robot.Grab(body1)
        grasp_traj = vector_traj_helper(env, robot, approach_vector) # Trajectory from grasp configuration to pregrasp
        #grasp_traj = workspace_traj_helper(base_manip, approach_vector)
        robot.Release(body1)
        if grasp_traj is None: return
        grasp_traj.grasp = grasp
        pregrasp_conf = Conf(grasp_traj.end()) # Pregrasp configuration
        yield [(pregrasp_conf, grasp_traj)]
    return sample_grasp_traj
####################
def sample_free_motion_fn(manipulator, base_manip, cspace, all_bodies):
    """Build a generator sampling a trajectory between two configurations
    while the hand is empty (traj.grasp is set to None)."""
    def sample_free_motion(conf1, conf2): # Sample motion while not holding
        if DISABLE_TRAJECTORIES:
            yield [(object(),)] # [(True,)]
            return
        enable_all(all_bodies, False)
        set_manipulator_conf(manipulator, conf1.value)
        #traj = motion_plan(env, cspace, conf2.value, self_collisions=True)
        traj = cspace_traj_helper(base_manip, cspace, conf2.value, max_iterations=10)
        if not traj: return
        traj.grasp = None
        yield [(traj,)]
    return sample_free_motion
####################
def sample_holding_motion_fn(robot, manipulator, base_manip, cspace, body1, all_bodies):
    """Build a generator sampling a trajectory between two configurations
    while body1 is held at the given grasp (traj.grasp records it)."""
    def sample_holding_motion(conf1, conf2, grasp): # Sample motion while holding
        if DISABLE_TRAJECTORIES:
            yield [(object(),)] # [(True,)]
            return
        enable_all(all_bodies, False)
        body1.Enable(True)
        set_manipulator_conf(manipulator, conf1.value)
        manip_trans = manipulator.GetTransform()
        # Attach the object at its grasped pose before planning.
        set_pose(body1, object_trans_from_manip_trans(manip_trans, grasp.grasp_trans))
        robot.Grab(body1)
        #traj = motion_plan(env, cspace, conf2.value, self_collisions=True)
        traj = cspace_traj_helper(base_manip, cspace, conf2.value, max_iterations=10)
        robot.Release(body1)
        if not traj: return
        traj.grasp = grasp
        yield [(traj,)]
    return sample_holding_motion
####################
def visualize_solution(env, problem, initial_conf, robot, manipulator, bodies, plan):
    """Replay a task plan interactively in the viewer, stepping through
    each action's trajectory on Enter."""
    def execute_traj(traj):
        #for j, conf in enumerate(traj.path()):
        #for j, conf in enumerate([traj.end()]):
        path = list(sample_manipulator_trajectory(manipulator, traj.traj()))
        for j, conf in enumerate(path):
            set_manipulator_conf(manipulator, conf)
            raw_input('%s/%s) Step?'%(j, len(path)))
    # Resets the initial state
    set_manipulator_conf(manipulator, initial_conf.value)
    for obj, pose in problem.initial_poses.iteritems():
        set_pose(bodies[obj], pose.value)
    for i, (action, args) in enumerate(plan):
        raw_input('\n%s/%s) Next?'%(i, len(plan)))
        if action.name == 'move':
            _, _, traj = args
            execute_traj(traj)
        elif action.name == 'move_holding':
            _, _, traj, _, _ = args
            execute_traj(traj)
        elif action.name == 'pick':
            # Approach along the reversed grasp trajectory, grab, retreat.
            obj, _, _, _, traj = args
            execute_traj(traj.reverse())
            robot.Grab(bodies[obj])
            execute_traj(traj)
        elif action.name == 'place':
            obj, _, _, _, traj = args
            execute_traj(traj.reverse())
            robot.Release(bodies[obj])
            execute_traj(traj)
        else:
            raise ValueError(action.name)
        env.UpdatePublishedBodies()
| from manipulation.motion.single_query import cspace_traj_helper, vector_traj_helper
from stripstream.pddl.examples.openrave.utils import solve_inverse_kinematics, \
object_trans_from_manip_trans, set_manipulator_conf, Conf, \
sample_manipulator_trajectory
from manipulation.bodies.robot import manip_from_pose_grasp
from manipulation.primitives.transforms import set_pose
# Debug switches: planning can be stubbed out to speed up testing.
# Disabling trajectories implies trajectory collisions must also be
# disabled (there is no trajectory to check), hence the assert.
DISABLE_TRAJECTORIES = True
DISABLE_TRAJ_COLLISIONS = True
assert not DISABLE_TRAJECTORIES or DISABLE_TRAJ_COLLISIONS
if DISABLE_TRAJECTORIES:
    print 'Warning: trajectories are disabled'
if DISABLE_TRAJ_COLLISIONS:
    print 'Warning: trajectory collisions are disabled'
def enable_all(all_bodies, enable):
    """Set the collision-checking Enable flag of every body in *all_bodies*."""
    for some_body in all_bodies:
        some_body.Enable(enable)
####################
def cfree_pose_fn(env, body1, body2):
    """Build a closure testing that body1 at pose1 and body2 at pose2 do
    not collide in ``env``."""
    def cfree_pose(pose1, pose2): # Collision free test between an object at pose1 and an object at pose2
        body1.Enable(True)
        set_pose(body1, pose1.value)
        body2.Enable(True)
        set_pose(body2, pose2.value)
        return not env.CheckCollision(body1, body2)
    return cfree_pose
####################
def cfree_traj_fn(env, robot, manipulator, body1, body2, all_bodies):
    """Build a closure testing that executing a trajectory (possibly while
    holding body1) stays collision free w.r.t. body2 at a given pose."""
    def _cfree_traj_pose(traj, pose): # Collision free test between a robot executing traj and an object at pose
        enable_all(all_bodies, False)
        body2.Enable(True)
        set_pose(body2, pose.value)
        for conf in traj.path():
            set_manipulator_conf(manipulator, conf)
            if env.CheckCollision(robot, body2):
                return False
        return True
    def _cfree_traj_grasp_pose(traj, grasp, pose): # Collision free test between an object held at grasp while executing traj and an object at pose
        enable_all(all_bodies, False)
        body1.Enable(True)
        body2.Enable(True)
        set_pose(body2, pose.value)
        for conf in traj.path():
            set_manipulator_conf(manipulator, conf)
            manip_trans = manipulator.GetTransform()
            # Move the held object along with the hand before checking.
            set_pose(body1, object_trans_from_manip_trans(manip_trans, grasp.grasp_trans))
            if env.CheckCollision(body1, body2):
                return False
        return True
    def cfree_traj(traj, pose): # Collision free test between a robot executing traj (which may or may not involve a grasp) and an object at pose
        if DISABLE_TRAJ_COLLISIONS:
            return True
        return _cfree_traj_pose(traj, pose) and (traj.grasp is None or
            _cfree_traj_grasp_pose(traj, traj.grasp, pose))
    return cfree_traj
####################
def sample_grasp_traj_fn(env, robot, manipulator, body1, all_bodies):
    """Build a generator that, for an object pose and a grasp, yields a
    (pregrasp configuration, grasp trajectory) pair (or nothing when IK
    or planning fails)."""
    def sample_grasp_traj(pose, grasp): # Sample pregrasp config and motion plan that performs a grasp
        enable_all(all_bodies, False)
        body1.Enable(True)
        set_pose(body1, pose.value)
        manip_trans, approach_vector = manip_from_pose_grasp(pose, grasp)
        grasp_conf = solve_inverse_kinematics(env, manipulator, manip_trans) # Grasp configuration
        if grasp_conf is None: return
        if DISABLE_TRAJECTORIES:
            # Trajectories stubbed out: yield a placeholder traj object.
            yield [(Conf(grasp_conf), object())]
            return
        set_manipulator_conf(manipulator, grasp_conf)
        robot.Grab(body1)
        grasp_traj = vector_traj_helper(env, robot, approach_vector) # Trajectory from grasp configuration to pregrasp
        #grasp_traj = workspace_traj_helper(base_manip, approach_vector)
        robot.Release(body1)
        if grasp_traj is None: return
        grasp_traj.grasp = grasp
        pregrasp_conf = Conf(grasp_traj.end()) # Pregrasp configuration
        yield [(pregrasp_conf, grasp_traj)]
    return sample_grasp_traj
####################
def sample_free_motion_fn(manipulator, base_manip, cspace, all_bodies):
    """Build a generator sampling a trajectory between two configurations
    while the hand is empty (traj.grasp is set to None)."""
    def sample_free_motion(conf1, conf2): # Sample motion while not holding
        if DISABLE_TRAJECTORIES:
            yield [(object(),)] # [(True,)]
            return
        enable_all(all_bodies, False)
        set_manipulator_conf(manipulator, conf1.value)
        #traj = motion_plan(env, cspace, conf2.value, self_collisions=True)
        traj = cspace_traj_helper(base_manip, cspace, conf2.value, max_iterations=10)
        if not traj: return
        traj.grasp = None
        yield [(traj,)]
    return sample_free_motion
####################
def sample_holding_motion_fn(robot, manipulator, base_manip, cspace, body1, all_bodies):
    """Build a generator sampling a trajectory between two configurations
    while body1 is held at the given grasp (traj.grasp records it)."""
    def sample_holding_motion(conf1, conf2, grasp): # Sample motion while holding
        if DISABLE_TRAJECTORIES:
            yield [(object(),)] # [(True,)]
            return
        enable_all(all_bodies, False)
        body1.Enable(True)
        set_manipulator_conf(manipulator, conf1.value)
        manip_trans = manipulator.GetTransform()
        # Attach the object at its grasped pose before planning.
        set_pose(body1, object_trans_from_manip_trans(manip_trans, grasp.grasp_trans))
        robot.Grab(body1)
        #traj = motion_plan(env, cspace, conf2.value, self_collisions=True)
        traj = cspace_traj_helper(base_manip, cspace, conf2.value, max_iterations=10)
        robot.Release(body1)
        if not traj: return
        traj.grasp = grasp
        yield [(traj,)]
    return sample_holding_motion
####################
def visualize_solution(env, problem, initial_conf, robot, manipulator, bodies, plan):
    """Replay a task plan interactively in the viewer, stepping through
    each action's trajectory on Enter."""
    def execute_traj(traj):
        #for j, conf in enumerate(traj.path()):
        #for j, conf in enumerate([traj.end()]):
        path = list(sample_manipulator_trajectory(manipulator, traj.traj()))
        for j, conf in enumerate(path):
            set_manipulator_conf(manipulator, conf)
            raw_input('%s/%s) Step?'%(j, len(path)))
    # Resets the initial state
    set_manipulator_conf(manipulator, initial_conf.value)
    for obj, pose in problem.initial_poses.iteritems():
        set_pose(bodies[obj], pose.value)
    for i, (action, args) in enumerate(plan):
        raw_input('\n%s/%s) Next?'%(i, len(plan)))
        if action.name == 'move':
            _, _, traj = args
            execute_traj(traj)
        elif action.name == 'move_holding':
            _, _, traj, _, _ = args
            execute_traj(traj)
        elif action.name == 'pick':
            # Approach along the reversed grasp trajectory, grab, retreat.
            obj, _, _, _, traj = args
            execute_traj(traj.reverse())
            robot.Grab(bodies[obj])
            execute_traj(traj)
        elif action.name == 'place':
            obj, _, _, _, traj = args
            execute_traj(traj.reverse())
            robot.Release(bodies[obj])
            execute_traj(traj)
        else:
            raise ValueError(action.name)
env.UpdatePublishedBodies() | en | 0.667034 | # Enables or disables all bodies for collision checking #################### # Collision free test between an object at pose1 and an object at pose2 #################### # Collision free test between a robot executing traj and an object at pose # Collision free test between an object held at grasp while executing traj and an object at pose # Collision free test between a robot executing traj (which may or may not involve a grasp) and an object at pose #################### # Sample pregrasp config and motion plan that performs a grasp # Grasp configuration # Trajectory from grasp configuration to pregrasp #grasp_traj = workspace_traj_helper(base_manip, approach_vector) # Pregrasp configuration #################### # Sample motion while not holding # [(True,)] #traj = motion_plan(env, cspace, conf2.value, self_collisions=True) #################### # Sample motion while holding # [(True,)] #traj = motion_plan(env, cspace, conf2.value, self_collisions=True) #################### #for j, conf in enumerate(traj.path()): #for j, conf in enumerate([traj.end()]): # Resets the initial state | 2.454939 | 2 |
standaloneBeta/DIGFL_vfl/DIG-FL_LinR.py | qmkakaxi/DIG_FL | 1 | 6622163 | import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import pickle
class LinearRegression_DIGFL():
    """Linear regression over vertically-partitioned data with DIG-FL
    contribution estimation.

    Each of the ``num_participant`` parties owns one slice of the feature
    columns: party ``i`` holds the feature block ``X[i]`` and the weight
    column ``self.w[i]``, and the joint prediction is ``sum_i X[i] @ w[i]``.
    After every epoch the model scores each party's contribution as the
    inner product between its training-loss gradient and its test-loss
    gradient.
    """
    def __init__(self, n_iterations=3000, learning_rate=0.00001, num_participant=1, gradient=True):
        """Store the hyper-parameters.

        Args:
            n_iterations: number of gradient-descent epochs.
            learning_rate: step size of the weight update.
            num_participant: number of vertical feature partitions.
            gradient: train by gradient descent when True; otherwise use
                the legacy closed-form normal-equation path.
        """
        self.n_iterations = n_iterations
        self.learning_rate = learning_rate
        self.gradient = gradient
        self.num_participant = num_participant
    def initialize_weights(self, X):
        """Create one zero-initialised weight column per participant.

        Args:
            X: list of per-participant feature matrices, each of shape
                (m_samples, n_features_i).
        """
        self.w = []
        for i in range(self.num_participant):
            _, n_feature = X[i].shape
            # np.random.uniform(0, 0, ...) in the original is just zeros;
            # say so explicitly.  (The original also accumulated a total
            # feature count that was never used.)
            self.w.append(np.zeros((n_feature, 1)))
    def calculate_contribution(self, X, X_test, y, y_test):
        """Estimate every participant's contribution under the current weights.

        Participant ``j``'s raw score is the inner product of the training
        residual gradient and the test residual gradient restricted to its
        feature block; the scores are then normalised in place so they sum
        to one (matching the original's aliasing behaviour).

        Args:
            X: list of per-participant training feature blocks.
            X_test: list of per-participant test feature blocks.
            y: training targets of shape (m_samples, 1).
            y_test: test targets (reshaped to a column internally).

        Returns:
            tuple: (list of normalised per-participant scores, mean test loss).
        """
        # Concatenated view of features/weights for the training pass.
        if self.num_participant > 1:
            X_c = np.concatenate(X, axis=1)
            t = [self.w[i] for i in range(self.num_participant)]
            w = np.concatenate(t, axis=0)
        else:
            X_c = X[0]
            w = self.w[0]
        y_test = np.reshape(y_test, (len(y_test), 1))
        # Test residual and per-participant test-loss gradients.
        y_pred = np.zeros(y_test.shape)
        for j in range(self.num_participant):
            y_pred = y_pred + X_test[j].dot(self.w[j])
        z = y_pred - y_test
        grad = []
        for j in range(self.num_participant):
            grad.append(z.T.dot(X_test[j]))
        loss_test = np.mean(0.5 * (z) ** 2)
        # Training residual under the same (concatenated) weights.
        y_pred = X_c.dot(w)
        z = y_pred - y
        contribution_epoch = []
        for j in range(len(grad)):
            # Alignment of train and test gradients for block j.
            c_temp = z.T.dot(X[j]) * grad[j]
            contribution_epoch.append(sum(sum(c_temp), 0))
        print(contribution_epoch)
        # Normalise the scores so they sum to one.  The original guarded
        # the division with `list != 0` (always True for a non-empty
        # list); guard on the actual sum to avoid dividing by zero.
        contribution_epoch_sum = sum(contribution_epoch)
        if contribution_epoch_sum != 0:
            for i in range(self.num_participant):
                contribution_epoch[i] = contribution_epoch[i] / contribution_epoch_sum
        print("contribution_epoch:", contribution_epoch)
        return contribution_epoch, loss_test
    def fit(self, X, y, X_test, y_test):
        """Train the model, recording per-epoch contributions and test loss.

        Args:
            X, y: per-participant training feature blocks and targets.
            X_test, y_test: held-out data for the contribution estimate.

        Side effects:
            When ``self.gradient`` is truthy, dumps the per-epoch
            contribution lists and test losses to hard-coded pickle files
            under ``data/LinR/house/`` (the directory must exist).
        """
        m_samples = len(y)
        print(m_samples)
        self.initialize_weights(X)
        y = np.reshape(y, (m_samples, 1))
        self.training_errors = []
        contribution_epoch = []
        loss_test = []
        if self.gradient:
            for i in range(self.n_iterations):
                # Joint prediction assembled from every participant's block.
                y_pred = np.zeros(y.shape)
                for j in range(self.num_participant):
                    y_pred = y_pred + X[j].dot(self.w[j])
                loss = np.mean(0.5 * (y_pred - y) ** 2)
                z = y_pred - y
                print("iteration: ", i, " train loss :", loss)
                self.training_errors.append(loss)
                w, loss_test_ = self.calculate_contribution(X, X_test, y, y_test)
                contribution_epoch.append(w)
                loss_test.append(loss_test_)
                # Plain batch gradient-descent update, one block at a time.
                for j in range(self.num_participant):
                    w_grad = X[j].T.dot(z)
                    self.w[j] = self.w[j] - self.learning_rate * w_grad
            # Persist the per-epoch records.  with-blocks close the files
            # even when pickling raises (the original leaked them then).
            with open(r"data/LinR/house/contribution_epoch.pickle", 'wb') as f1:
                pickle.dump(contribution_epoch, f1)
            with open(r"data/LinR/house/loss_test.pickle", 'wb') as f2:
                pickle.dump(loss_test, f2)
        else:
            # Legacy closed-form path: normal equation via np.matrix
            # (deprecated; expects X as a single dense matrix rather than
            # the per-participant list used by the gradient path).
            X = np.matrix(X)
            y = np.matrix(y)
            X_T_X = X.T.dot(X)
            X_T_X_I_X_T = X_T_X.I.dot(X.T)
            self.w = X_T_X_I_X_T.dot(y)
def main():
    """Load the house-price data, split its columns evenly among 8
    participants, and fit a DIG-FL linear regression on the vertical
    partition."""
    house = pd.read_csv('data/LinR/house/house_data.csv')
    house = house.drop(index=[0])  # drop the first record
    num_participant = 8
    data = house.iloc[:, :-1]
    target = house["Price"]
    # Standardise features and target to zero mean / unit variance.
    data = preprocessing.scale(data)
    target = preprocessing.scale(target)
    target = np.array(target)
    X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.1, random_state=0)
    # Vertical partition: each participant owns an equal column slice.
    # (The original's redundant `y_train = y_train` / `y_test = y_test`
    # self-assignments are removed.)
    X_train = np.split(np.array(X_train), num_participant, axis=1)
    X_test = np.split(np.array(X_test), num_participant, axis=1)
    model = LinearRegression_DIGFL(n_iterations=200, num_participant=num_participant)
    model.fit(X_train, y_train, X_test, y_test)
# Run the experiment only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import pickle
class LinearRegression_DIGFL():
def __init__(self, n_iterations=3000, learning_rate=0.00001, num_participant=1, gradient=True):
self.n_iterations = n_iterations
self.learning_rate = learning_rate
self.gradient = gradient
self.num_participant = num_participant
def initialize_weights(self, X):
n_features = 0
for i in range(self.num_participant):
_,temp = X[i].shape
n_features = n_features +temp
self.w = []
for i in range(self.num_participant):
_,n_feature = X[i].shape
w = np.random.uniform(0, 0, (n_feature, 1))
self.w.append(w)
def calculate_contribution(self,X,X_test,y,y_test):
if self.num_participant >1 :
X_c = np.concatenate(X,axis=1)
X_test_c = np.concatenate(X_test,axis=1)
t = [ self.w[i] for i in range(self.num_participant)]
w = np.concatenate(t,axis=0)
else:
X_c = X[0]
X_test_c = X_test[0]
w = self.w[0]
y_test = np.reshape(y_test, (len(y_test), 1))
y_pred = np.zeros(y_test.shape)
for j in range(self.num_participant):
y_pred = y_pred+X_test[j].dot(self.w[j])
z = y_pred - y_test
grad = []
for j in range(self.num_participant):
temp = z.T.dot(X_test[j])
grad.append(temp)
loss_test = np.mean(0.5*(z)**2)
y_pred = X_c.dot(w)
z = y_pred - y
contribution_epoch = []
for j in range(len(grad)):
c_temp = z.T.dot(X[j])*grad[j]
contribution_epoch.append(sum(sum(c_temp),0))
print(contribution_epoch)
contribution_epoch_normalization = contribution_epoch
contribution_epoch_normalization_sum = sum(contribution_epoch_normalization)
if contribution_epoch_normalization != 0 :
for i in range(self.num_participant):
contribution_epoch_normalization[i] = contribution_epoch_normalization[i]/contribution_epoch_normalization_sum
print("contribution_epoch:",contribution_epoch_normalization)
return contribution_epoch, loss_test
def fit(self, X, y, X_test, y_test):
m_samples = len(y)
print(m_samples)
self.initialize_weights(X)
y = np.reshape(y, (m_samples, 1))
self.training_errors = []
contribution_epoch = []
loss_test = []
if self.gradient == True:
for i in range(self.n_iterations):
y_pred = np.zeros(y.shape)
for j in range(self.num_participant):
y_pred = y_pred+X[j].dot(self.w[j])
loss = np.mean(0.5 * (y_pred -y )** 2)
z = y_pred - y
print("iteration: ",i," train loss :",loss)
self.training_errors.append(loss)
w ,loss_test_ = self.calculate_contribution(X,X_test,y,y_test)
contribution_epoch.append(w)
loss_test.append(loss_test_)
for j in range(self.num_participant):
w_grad = X[j].T.dot(z)
self.w[j] = self.w[j] - self.learning_rate * w_grad
#save
f1 = open(r"data/LinR/house/contribution_epoch.pickle",'wb')
pickle.dump(contribution_epoch,f1)
f1.close()
f2 = open(r"data/LinR/house/loss_test.pickle",'wb')
pickle.dump(loss_test,f2)
f2.close()
else:
X = np.matrix(X)
y = np.matrix(y)
X_T_X = X.T.dot(X)
X_T_X_I_X_T = X_T_X.I.dot(X.T)
X_T_X_I_X_T_X_T_y = X_T_X_I_X_T.dot(y)
self.w = X_T_X_I_X_T_X_T_y
def main():
house = pd.read_csv('data/LinR/house/house_data.csv')
house = house.drop(index=[0])
num_participant = 8
data = house.iloc[:,:-1]
target = house["Price"]
data = preprocessing.scale(data)
target = preprocessing.scale(target)
target = np.array(target)
X_train,X_test, y_train, y_test = train_test_split(data,target,test_size=0.1, random_state=0)
X_train = np.split(np.array(X_train),num_participant, axis=1)
y_train = y_train
X_test= np.split(np.array(X_test),num_participant, axis=1)
y_test = y_test
model = LinearRegression_DIGFL(n_iterations=200, num_participant=num_participant)
model.fit(X_train, y_train, X_test, y_test)
if __name__ == "__main__":
main()
| none | 1 | 2.800606 | 3 | |
src/onegov/onboarding/models/assistant.py | politbuero-kampagnen/onegov-cloud | 0 | 6622164 | <gh_stars>0
import inspect
import time
class Assistant(object):
""" Describes an assistant guiding a user through onboarding. """
def __init__(self, app, current_step_number=1):
self.app = app
methods = (fn[1] for fn in inspect.getmembers(self))
methods = (fn for fn in methods if inspect.ismethod(fn))
methods = (fn for fn in methods if hasattr(fn, 'is_step'))
self.steps = [Step(fn, fn.order, fn.form) for fn in methods]
self.steps.sort()
if current_step_number < 1:
raise KeyError("Invalid current step")
if current_step_number > len(self.steps):
raise KeyError("Invalid current step")
self.current_step_number = current_step_number
@property
def current_step(self):
return self.steps[self.current_step_number - 1]
@property
def progress(self):
return self.current_step_number, len(self.steps)
@property
def is_first_step(self):
return self.current_step_number == 1
@property
def is_last_step(self):
return self.current_step_number == len(self.steps)
def for_next_step(self):
assert not self.is_last_step
return self.__class__(self.app, self.current_step_number + 1)
def for_prev_step(self):
assert not self.is_first_step
return self.__class__(self.app, self.current_step_number - 1)
def for_first_step(self):
return self.__class__(self.app, 1)
@classmethod
def step(cls, form=None):
def decorator(fn):
fn.is_step = True
fn.order = time.process_time()
fn.form = form
return fn
return decorator
class Step(object):
""" Describes a step in an assistant. """
def __init__(self, view_handler, order, form):
self.view_handler = view_handler
self.order = order
self.form = form
def __lt__(self, other):
return self.order < other.order
def handle_view(self, request, form):
if form is None:
return self.view_handler(request)
else:
return self.view_handler(request, form)
class DefaultAssistant(object):
def __init__(self, assistant):
self.assistant = assistant
| import inspect
import time
class Assistant(object):
""" Describes an assistant guiding a user through onboarding. """
def __init__(self, app, current_step_number=1):
self.app = app
methods = (fn[1] for fn in inspect.getmembers(self))
methods = (fn for fn in methods if inspect.ismethod(fn))
methods = (fn for fn in methods if hasattr(fn, 'is_step'))
self.steps = [Step(fn, fn.order, fn.form) for fn in methods]
self.steps.sort()
if current_step_number < 1:
raise KeyError("Invalid current step")
if current_step_number > len(self.steps):
raise KeyError("Invalid current step")
self.current_step_number = current_step_number
@property
def current_step(self):
return self.steps[self.current_step_number - 1]
@property
def progress(self):
return self.current_step_number, len(self.steps)
@property
def is_first_step(self):
return self.current_step_number == 1
@property
def is_last_step(self):
return self.current_step_number == len(self.steps)
def for_next_step(self):
assert not self.is_last_step
return self.__class__(self.app, self.current_step_number + 1)
def for_prev_step(self):
assert not self.is_first_step
return self.__class__(self.app, self.current_step_number - 1)
def for_first_step(self):
return self.__class__(self.app, 1)
@classmethod
def step(cls, form=None):
def decorator(fn):
fn.is_step = True
fn.order = time.process_time()
fn.form = form
return fn
return decorator
class Step(object):
""" Describes a step in an assistant. """
def __init__(self, view_handler, order, form):
self.view_handler = view_handler
self.order = order
self.form = form
def __lt__(self, other):
return self.order < other.order
def handle_view(self, request, form):
if form is None:
return self.view_handler(request)
else:
return self.view_handler(request, form)
class DefaultAssistant(object):
def __init__(self, assistant):
self.assistant = assistant | en | 0.867129 | Describes an assistant guiding a user through onboarding. Describes a step in an assistant. | 2.889534 | 3 |
libs/jinja/template_var.py | janbodnar/Python-Course | 13 | 6622165 | #!/usr/bin/python
from jinja2 import Template
tm = Template("{% set name='Peter' -%} My name is {{ name }}")
msg = tm.render()
print(msg)
| #!/usr/bin/python
from jinja2 import Template
tm = Template("{% set name='Peter' -%} My name is {{ name }}")
msg = tm.render()
print(msg)
| ru | 0.258958 | #!/usr/bin/python | 2.543889 | 3 |
stairs/solutions/stairs_ns_ok1.py | upmltech/hmopen2019 | 0 | 6622166 | n, x = map(int, input().split())
a = list(map(int, input().split()))
ans = 0
for i in range(n - 1):
ans += (a[i + 1] - a[i] - 1) // x
print(ans)
| n, x = map(int, input().split())
a = list(map(int, input().split()))
ans = 0
for i in range(n - 1):
ans += (a[i + 1] - a[i] - 1) // x
print(ans)
| none | 1 | 2.765993 | 3 | |
accounts/urls.py | Wings30306/callingmrschristmas | 4 | 6622167 | from django.urls import path, reverse_lazy
from django.contrib.auth.views import (
PasswordResetView, PasswordResetDoneView)
from .views import logout, login, register, user_profile
app_name = "accounts"
urlpatterns = [
path('logout', logout, name="logout"),
path('login', login, name="login"),
path('register', register, name="register"),
path('profile', user_profile, name="profile"),
path('password-reset/', PasswordResetView.as_view(
success_url=reverse_lazy('accounts:password_reset_done'),
template_name="password_reset_form.html"),
name="password_reset"),
path('password-reset/done', PasswordResetDoneView.as_view(
template_name="password_reset_done.html"),
name='password_reset_done'),
]
| from django.urls import path, reverse_lazy
from django.contrib.auth.views import (
PasswordResetView, PasswordResetDoneView)
from .views import logout, login, register, user_profile
app_name = "accounts"
urlpatterns = [
path('logout', logout, name="logout"),
path('login', login, name="login"),
path('register', register, name="register"),
path('profile', user_profile, name="profile"),
path('password-reset/', PasswordResetView.as_view(
success_url=reverse_lazy('accounts:password_reset_done'),
template_name="password_reset_form.html"),
name="password_reset"),
path('password-reset/done', PasswordResetDoneView.as_view(
template_name="password_reset_done.html"),
name='password_reset_done'),
]
| none | 1 | 1.946119 | 2 | |
proxies/detran/tests/test_views.py | MinisterioPublicoRJ/api-cadg | 6 | 6622168 | <reponame>MinisterioPublicoRJ/api-cadg<gh_stars>1-10
from unittest import mock
from django.conf import settings
from django.test import TestCase, override_settings
from django.urls import reverse
from proxies.exceptions import (
DataDoesNotExistException,
DetranAPIClientError,
WaitDBException,
)
TEST_TOKEN = "<PASSWORD>"
@override_settings(SIMPLE_AUTH_TOKEN=TEST_TOKEN)
class TestDetranProxyView(TestCase):
@mock.patch("proxies.detran.views.ImpalaGate")
@mock.patch("proxies.detran.views.HBaseGate")
@mock.patch("proxies.detran.views.DataTrafficController")
def test_correct_response(self, _DataController, _HBase, _Impala):
_HBase.return_value = "hbase object"
_Impala.return_value = "impala object"
controller_mock = mock.Mock()
controller_mock.get_data.return_value = {"data": 1}
_DataController.return_value = controller_mock
# View must remove padding zero
rg = "012345"
url = reverse("proxies:foto-detran", kwargs={"rg": rg})
resp = self.client.get(url, {"proxy-token": TEST_TOKEN})
expected_used_rg = str(int(rg))
_DataController.assert_called_once_with(
rg=expected_used_rg,
data_dao=_Impala.return_value,
photo_dao=_HBase.return_value,
)
_Impala.assert_called_once_with(
table_name=settings.EXADATA_DETRAN_DATA_ORIGIN,
)
_HBase.assert_called_once_with(
table_name=settings.EXADATA_DETRAN_PHOTO_ORIGIN,
server=settings.HBASE_SERVER,
timeout=settings.HBASE_TIMEOUT,
)
assert resp.status_code == 200
assert resp.json() == {"data": 1}
@mock.patch("proxies.detran.views.DataTrafficController")
def test_exception_detran_api(self, _DataController):
controller_mock = mock.Mock()
controller_mock.get_data.side_effect = DetranAPIClientError
_DataController.return_value = controller_mock
rg = "12345"
url = reverse("proxies:foto-detran", kwargs={"rg": rg})
resp = self.client.get(url, {"proxy-token": TEST_TOKEN})
assert resp.status_code == 503
@mock.patch("proxies.detran.views.DataTrafficController")
def test_data_do_not_exist(self, _DataController):
controller_mock = mock.Mock()
controller_mock.get_data.side_effect = DataDoesNotExistException
_DataController.return_value = controller_mock
rg = "12345"
url = reverse("proxies:foto-detran", kwargs={"rg": rg})
resp = self.client.get(url, {"proxy-token": TEST_TOKEN})
assert resp.status_code == 404
assert resp.json() == {"detail": f"Dado não encontrado para RG: {rg}"}
@mock.patch("proxies.detran.views.DataTrafficController")
def test_wait_database_exception(self, _DataController):
controller_mock = mock.Mock()
controller_mock.get_data.side_effect = WaitDBException
_DataController.return_value = controller_mock
rg = "12345"
url = reverse("proxies:foto-detran", kwargs={"rg": rg})
resp = self.client.get(url, {"proxy-token": TEST_TOKEN})
assert resp.status_code == 503
assert resp.json() == {
"detail": "Tempo de busca dos dados excedeu o tempo máximo"
}
@override_settings(SIMPLE_AUTH_TOKEN="very-secure-token")
def test_no_token_permission_denied(self):
rg = "12345"
url = reverse("proxies:foto-detran", kwargs={"rg": rg})
resp = self.client.get(url)
assert resp.status_code == 403
@override_settings(SIMPLE_AUTH_TOKEN="even-more-secure-token")
@mock.patch("proxies.detran.views.DataTrafficController")
def test_with_token_permission_granted(self, _DataController):
_DataController.return_value.get_data.return_value = {"data": 1}
rg = "12345"
url = reverse("proxies:foto-detran", kwargs={"rg": rg})
resp = self.client.get(url, {"proxy-token": "<PASSWORD>"})
assert resp.status_code == 200
assert resp.data == {"data": 1}
| from unittest import mock
from django.conf import settings
from django.test import TestCase, override_settings
from django.urls import reverse
from proxies.exceptions import (
DataDoesNotExistException,
DetranAPIClientError,
WaitDBException,
)
TEST_TOKEN = "<PASSWORD>"
@override_settings(SIMPLE_AUTH_TOKEN=TEST_TOKEN)
class TestDetranProxyView(TestCase):
@mock.patch("proxies.detran.views.ImpalaGate")
@mock.patch("proxies.detran.views.HBaseGate")
@mock.patch("proxies.detran.views.DataTrafficController")
def test_correct_response(self, _DataController, _HBase, _Impala):
_HBase.return_value = "hbase object"
_Impala.return_value = "impala object"
controller_mock = mock.Mock()
controller_mock.get_data.return_value = {"data": 1}
_DataController.return_value = controller_mock
# View must remove padding zero
rg = "012345"
url = reverse("proxies:foto-detran", kwargs={"rg": rg})
resp = self.client.get(url, {"proxy-token": TEST_TOKEN})
expected_used_rg = str(int(rg))
_DataController.assert_called_once_with(
rg=expected_used_rg,
data_dao=_Impala.return_value,
photo_dao=_HBase.return_value,
)
_Impala.assert_called_once_with(
table_name=settings.EXADATA_DETRAN_DATA_ORIGIN,
)
_HBase.assert_called_once_with(
table_name=settings.EXADATA_DETRAN_PHOTO_ORIGIN,
server=settings.HBASE_SERVER,
timeout=settings.HBASE_TIMEOUT,
)
assert resp.status_code == 200
assert resp.json() == {"data": 1}
@mock.patch("proxies.detran.views.DataTrafficController")
def test_exception_detran_api(self, _DataController):
controller_mock = mock.Mock()
controller_mock.get_data.side_effect = DetranAPIClientError
_DataController.return_value = controller_mock
rg = "12345"
url = reverse("proxies:foto-detran", kwargs={"rg": rg})
resp = self.client.get(url, {"proxy-token": TEST_TOKEN})
assert resp.status_code == 503
@mock.patch("proxies.detran.views.DataTrafficController")
def test_data_do_not_exist(self, _DataController):
controller_mock = mock.Mock()
controller_mock.get_data.side_effect = DataDoesNotExistException
_DataController.return_value = controller_mock
rg = "12345"
url = reverse("proxies:foto-detran", kwargs={"rg": rg})
resp = self.client.get(url, {"proxy-token": TEST_TOKEN})
assert resp.status_code == 404
assert resp.json() == {"detail": f"Dado não encontrado para RG: {rg}"}
@mock.patch("proxies.detran.views.DataTrafficController")
def test_wait_database_exception(self, _DataController):
controller_mock = mock.Mock()
controller_mock.get_data.side_effect = WaitDBException
_DataController.return_value = controller_mock
rg = "12345"
url = reverse("proxies:foto-detran", kwargs={"rg": rg})
resp = self.client.get(url, {"proxy-token": TEST_TOKEN})
assert resp.status_code == 503
assert resp.json() == {
"detail": "Tempo de busca dos dados excedeu o tempo máximo"
}
@override_settings(SIMPLE_AUTH_TOKEN="very-secure-token")
def test_no_token_permission_denied(self):
rg = "12345"
url = reverse("proxies:foto-detran", kwargs={"rg": rg})
resp = self.client.get(url)
assert resp.status_code == 403
@override_settings(SIMPLE_AUTH_TOKEN="even-more-secure-token")
@mock.patch("proxies.detran.views.DataTrafficController")
def test_with_token_permission_granted(self, _DataController):
_DataController.return_value.get_data.return_value = {"data": 1}
rg = "12345"
url = reverse("proxies:foto-detran", kwargs={"rg": rg})
resp = self.client.get(url, {"proxy-token": "<PASSWORD>"})
assert resp.status_code == 200
assert resp.data == {"data": 1} | en | 0.174723 | # View must remove padding zero | 2.374443 | 2 |
code/backend/billing/serializers.py | rollethu/noe | 16 | 6622169 | <filename>code/backend/billing/serializers.py
from django.utils.translation import gettext as _
from rest_framework.exceptions import ValidationError
from rest_framework import serializers
from . import models as m
class BillingDetailSerializer(serializers.HyperlinkedModelSerializer):
is_company = serializers.BooleanField(write_only=True, default=False)
class Meta:
model = m.BillingDetail
fields = [
"appointment",
"company_name",
"country",
"address_line1",
"address_line2",
"post_code",
"state",
"city",
"tax_number",
"is_company",
]
extra_kwargs = {"tax_number": {"required": False, "allow_blank": True}}
def create(self, validated_data):
is_company = validated_data.pop("is_company", False)
if is_company and not validated_data.get("tax_number"):
raise ValidationError({"tax_number": _("This field is required.")})
return super().create(validated_data)
| <filename>code/backend/billing/serializers.py
from django.utils.translation import gettext as _
from rest_framework.exceptions import ValidationError
from rest_framework import serializers
from . import models as m
class BillingDetailSerializer(serializers.HyperlinkedModelSerializer):
is_company = serializers.BooleanField(write_only=True, default=False)
class Meta:
model = m.BillingDetail
fields = [
"appointment",
"company_name",
"country",
"address_line1",
"address_line2",
"post_code",
"state",
"city",
"tax_number",
"is_company",
]
extra_kwargs = {"tax_number": {"required": False, "allow_blank": True}}
def create(self, validated_data):
is_company = validated_data.pop("is_company", False)
if is_company and not validated_data.get("tax_number"):
raise ValidationError({"tax_number": _("This field is required.")})
return super().create(validated_data)
| none | 1 | 2.15293 | 2 | |
mittens/interfaces/spatial.py | pfotiad/MITTENS | 7 | 6622170 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, unicode_literals, absolute_import
from .base import MittensBaseInterface, IFLOGGER
import os.path as op
import numpy as np
from nipype.interfaces.base import (traits, File, isdefined,
BaseInterfaceInputSpec, TraitedSpec)
from glob import glob
class FixAffineInputSpec(BaseInterfaceInputSpec):
dsi_studio_image = File(exists=True, usedefault=True,
desc=('NIfTI image with DSI Studio affine'))
real_affine_image = File(exists=True, mandatory=True,
desc=('NIfTI image with real affine to use'))
output_name = traits.Str("real_affine.nii.gz", mandatory=False, usedefault=True,
desc=('File name for output (ends with nii[.gz])'))
class FixAffineOutputSpec(TraitedSpec):
fixed_affine_image = File(desc='Data from DSI Studio image with a real affine')
class FixAffine(MittensBaseInterface):
"""
Replaces a DSI Studio affine with a real affine.
"""
input_spec = FixAffineInputSpec
output_spec = FixAffineOutputSpec
def _run_interface(self, runtime):
from os.path import abspath
import nibabel as nib
dsi_img = nib.load(self.inputs.dsi_studio_image)
ants_img = nib.load(self.inputs.real_affine_image)
dsi_affine = dsi_img.affine
ants_affine = ants_img.affine
data = dsi_img.get_data()
if np.sign(dsi_affine[0,0]) != np.sign(ants_affine[0,0]):
data = data[::-1,:,:]
if np.sign(dsi_affine[1,1]) != np.sign(ants_affine[1,1]):
data = data[:,::-1,:]
if np.sign(dsi_affine[2,2]) != np.sign(ants_affine[2,2]):
data = data[:,:,::-1]
nib.Nifti1Image(data,ants_affine,header=ants_img.get_header()
).to_filename(self.inputs.output_name)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['fixed_affine_image'] = op.abspath(self.inputs.output_name)
return outputs
| # -*- coding: utf-8 -*-
from __future__ import print_function, division, unicode_literals, absolute_import
from .base import MittensBaseInterface, IFLOGGER
import os.path as op
import numpy as np
from nipype.interfaces.base import (traits, File, isdefined,
BaseInterfaceInputSpec, TraitedSpec)
from glob import glob
class FixAffineInputSpec(BaseInterfaceInputSpec):
dsi_studio_image = File(exists=True, usedefault=True,
desc=('NIfTI image with DSI Studio affine'))
real_affine_image = File(exists=True, mandatory=True,
desc=('NIfTI image with real affine to use'))
output_name = traits.Str("real_affine.nii.gz", mandatory=False, usedefault=True,
desc=('File name for output (ends with nii[.gz])'))
class FixAffineOutputSpec(TraitedSpec):
fixed_affine_image = File(desc='Data from DSI Studio image with a real affine')
class FixAffine(MittensBaseInterface):
"""
Replaces a DSI Studio affine with a real affine.
"""
input_spec = FixAffineInputSpec
output_spec = FixAffineOutputSpec
def _run_interface(self, runtime):
from os.path import abspath
import nibabel as nib
dsi_img = nib.load(self.inputs.dsi_studio_image)
ants_img = nib.load(self.inputs.real_affine_image)
dsi_affine = dsi_img.affine
ants_affine = ants_img.affine
data = dsi_img.get_data()
if np.sign(dsi_affine[0,0]) != np.sign(ants_affine[0,0]):
data = data[::-1,:,:]
if np.sign(dsi_affine[1,1]) != np.sign(ants_affine[1,1]):
data = data[:,::-1,:]
if np.sign(dsi_affine[2,2]) != np.sign(ants_affine[2,2]):
data = data[:,:,::-1]
nib.Nifti1Image(data,ants_affine,header=ants_img.get_header()
).to_filename(self.inputs.output_name)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['fixed_affine_image'] = op.abspath(self.inputs.output_name)
return outputs
| en | 0.6807 | # -*- coding: utf-8 -*- Replaces a DSI Studio affine with a real affine. | 1.959779 | 2 |
desafio109/moeda109.py | marcelocmedeiros/RevisaoPython | 0 | 6622171 | # <NAME>
# ADS UNIFIP
# REVISÃO DE PYTHON
# AULA 22 Modularização---> <NAME>
'''
Modifique as funções que foram criadas no desafio 107 para que elas aceitem um parâmetro a mais,
informando se o valor retornado por elas vai ser ou não formatado pela função moeda(),
desenvolvida no desafio 108.
'''
print('='*30)
print('{:*^30}'.format(' Módulo Moeda109.py '))
print('='*30)
print()
# def passa ter 3 parametros preço taxa e formatado(foramtado=False) p
def aumentar(preço = 0, taxa = 0, formatado = False):
""" Módulo def de moeda formatado
Keyword Arguments:
preço vai receber o valor
taxa vai receber a porcentagem
formatado inicia com {False} sem formatação
Returns:
operação --> res if formatado is False else moeda(res)
"""
res = preço + (preço * taxa/100)
# retorna res se False/ se não retorne moeda(res)
return res if formatado is False else moeda(res)
def diminuir(preço = 0, taxa = 0, formatado = False):
res = preço - (preço * taxa/100)
return res if formatado is False else moeda(res)
def dobro(preço = 0, formatado = False):
res = preço * 2
# if not formatado == if formatado is False
return res if not formatado else moeda(res)
def metade(preço = 0, formatado = False):
res = preço / 2
return res if not formatado else moeda(res)
def moeda(preço = 0, moeda = 'R$'):
return f'{moeda}{preço:.2f}'.replace('.', ',') | # <NAME>
# ADS UNIFIP
# REVISÃO DE PYTHON
# AULA 22 Modularização---> <NAME>
'''
Modifique as funções que foram criadas no desafio 107 para que elas aceitem um parâmetro a mais,
informando se o valor retornado por elas vai ser ou não formatado pela função moeda(),
desenvolvida no desafio 108.
'''
print('='*30)
print('{:*^30}'.format(' Módulo Moeda109.py '))
print('='*30)
print()
# def passa ter 3 parametros preço taxa e formatado(foramtado=False) p
def aumentar(preço = 0, taxa = 0, formatado = False):
""" Módulo def de moeda formatado
Keyword Arguments:
preço vai receber o valor
taxa vai receber a porcentagem
formatado inicia com {False} sem formatação
Returns:
operação --> res if formatado is False else moeda(res)
"""
res = preço + (preço * taxa/100)
# retorna res se False/ se não retorne moeda(res)
return res if formatado is False else moeda(res)
def diminuir(preço = 0, taxa = 0, formatado = False):
res = preço - (preço * taxa/100)
return res if formatado is False else moeda(res)
def dobro(preço = 0, formatado = False):
res = preço * 2
# if not formatado == if formatado is False
return res if not formatado else moeda(res)
def metade(preço = 0, formatado = False):
res = preço / 2
return res if not formatado else moeda(res)
def moeda(preço = 0, moeda = 'R$'):
return f'{moeda}{preço:.2f}'.replace('.', ',') | pt | 0.95499 | # <NAME> # ADS UNIFIP # REVISÃO DE PYTHON # AULA 22 Modularização---> <NAME> Modifique as funções que foram criadas no desafio 107 para que elas aceitem um parâmetro a mais, informando se o valor retornado por elas vai ser ou não formatado pela função moeda(), desenvolvida no desafio 108. # def passa ter 3 parametros preço taxa e formatado(foramtado=False) p Módulo def de moeda formatado Keyword Arguments: preço vai receber o valor taxa vai receber a porcentagem formatado inicia com {False} sem formatação Returns: operação --> res if formatado is False else moeda(res) # retorna res se False/ se não retorne moeda(res) # if not formatado == if formatado is False | 3.990495 | 4 |
face.py | PDahal2871/Emotion-Detection | 0 | 6622172 | <gh_stars>0
import cv2
import numpy as np
from tensorflow.keras.models import load_model
classifier = load_model('model1.h5')
cap = cv2.VideoCapture(0)
haar = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
while True:
ret, frame = cap.read()
if ret:
fcs=[]
bbx=[]
preds=[]
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = haar.detectMultiScale(gray)
for face in faces:
x,y,w,h = face
x2 = x+w
y2 = y+h
fc = gray[y:y2, x:x2]
fc = cv2.resize(fc, (150, 150))
fc = np.array(fc, dtype='float32')
fc = np.reshape(fc, (150, 150, 1)) #reshaping from (1,150,150) to (150,150,1)
fc = np.expand_dims(fc, axis=0) # Changing to 4d for CNN
fcs.append(fc)
bbx.append((x,y,x2,y2))
preds = []
if(len(fcs))>0:
for fc in fcs:
pred = classifier.predict(fc)
preds.append(pred)
for (box,pred) in zip(bbx,preds):
(x,y,x2,y2) = box
prediction = np.argmax(pred)
if prediction == 0:
emotion = "Angry"
color = (0, 0, 255)
elif prediction == 1:
emotion = "Disgusted"
color = (0, 255, 255)
elif prediction == 2:
emotion = "Fearful"
color = (255, 25, 25)
elif prediction == 3:
emotion = "Happy"
color = (0, 255, 0)
elif prediction == 4:
emotion = "Neutral"
color = (100, 255, 10)
elif prediction == 5:
emotion = "Sad"
color = (100, 50, 150)
else:
emotion="Surprised"
color = (250, 255, 0)
cv2.rectangle(frame, (x, y), (x2, y2), color, 3) # Putting rectangle of bbox in frames
cv2.rectangle(frame, (x, y - 40), (x2, y), color, -1)
cv2.putText(frame, emotion, (x+100, y - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2) # Putting text in live frames
cv2.imshow('frame', frame)
if (cv2.waitKey(20) == ord('q')) or (cv2.waitKey(20) == 27):
# pressing 'q' or 'esc' keys destroys the window
break
else:
print("No faces detected")
else:
print("No frames detected")
break
cap.release()
cv2.destroyAllWindows()
"""
The accuracy of the classifier can be increased by changing it with a new, more accurate classifier
or fine tuning the current one again
"""
| import cv2
import numpy as np
from tensorflow.keras.models import load_model
classifier = load_model('model1.h5')
cap = cv2.VideoCapture(0)
haar = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
while True:
ret, frame = cap.read()
if ret:
fcs=[]
bbx=[]
preds=[]
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = haar.detectMultiScale(gray)
for face in faces:
x,y,w,h = face
x2 = x+w
y2 = y+h
fc = gray[y:y2, x:x2]
fc = cv2.resize(fc, (150, 150))
fc = np.array(fc, dtype='float32')
fc = np.reshape(fc, (150, 150, 1)) #reshaping from (1,150,150) to (150,150,1)
fc = np.expand_dims(fc, axis=0) # Changing to 4d for CNN
fcs.append(fc)
bbx.append((x,y,x2,y2))
preds = []
if(len(fcs))>0:
for fc in fcs:
pred = classifier.predict(fc)
preds.append(pred)
for (box,pred) in zip(bbx,preds):
(x,y,x2,y2) = box
prediction = np.argmax(pred)
if prediction == 0:
emotion = "Angry"
color = (0, 0, 255)
elif prediction == 1:
emotion = "Disgusted"
color = (0, 255, 255)
elif prediction == 2:
emotion = "Fearful"
color = (255, 25, 25)
elif prediction == 3:
emotion = "Happy"
color = (0, 255, 0)
elif prediction == 4:
emotion = "Neutral"
color = (100, 255, 10)
elif prediction == 5:
emotion = "Sad"
color = (100, 50, 150)
else:
emotion="Surprised"
color = (250, 255, 0)
cv2.rectangle(frame, (x, y), (x2, y2), color, 3) # Putting rectangle of bbox in frames
cv2.rectangle(frame, (x, y - 40), (x2, y), color, -1)
cv2.putText(frame, emotion, (x+100, y - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2) # Putting text in live frames
cv2.imshow('frame', frame)
if (cv2.waitKey(20) == ord('q')) or (cv2.waitKey(20) == 27):
# pressing 'q' or 'esc' keys destroys the window
break
else:
print("No faces detected")
else:
print("No frames detected")
break
cap.release()
cv2.destroyAllWindows()
"""
The accuracy of the classifier can be increased by changing it with a new, more accurate classifier
or fine tuning the current one again
""" | en | 0.85476 | #reshaping from (1,150,150) to (150,150,1) # Changing to 4d for CNN # Putting rectangle of bbox in frames # Putting text in live frames # pressing 'q' or 'esc' keys destroys the window The accuracy of the classifier can be increased by changing it with a new, more accurate classifier or fine tuning the current one again | 2.894622 | 3 |
MIHF/Bayes/BayesMatrixFactorEvaluator.py | revygabor/HWTester | 0 | 6622173 | import Evaluator
from io import StringIO
import numpy as np
class BayesMatrixFactorEvaluator(Evaluator.Evaluator):
def __init__(self, details):
pass
def evaluate(self, input, target_output, output, log):
try:
M = output.split('\n\n')
U = np.loadtxt(StringIO(unicode(M[0], "utf-8")), delimiter=',')
V = np.loadtxt(StringIO(unicode(M[1], "utf-8")), delimiter=',')
RMSE = np.sqrt(np.mean((np.dot(U, V.T) - input["R"]) ** 2))
ok = RMSE < input["RMSE_max"]
score = float(ok)
return (score, "RMSE: %f, required at most %f, accepted: %r\n" % (RMSE, input["RMSE_max"], ok))
except ValueError as err:
return (0, err.message)
except:
return (0, "Unknown error")
| import Evaluator
from io import StringIO
import numpy as np
class BayesMatrixFactorEvaluator(Evaluator.Evaluator):
def __init__(self, details):
pass
def evaluate(self, input, target_output, output, log):
try:
M = output.split('\n\n')
U = np.loadtxt(StringIO(unicode(M[0], "utf-8")), delimiter=',')
V = np.loadtxt(StringIO(unicode(M[1], "utf-8")), delimiter=',')
RMSE = np.sqrt(np.mean((np.dot(U, V.T) - input["R"]) ** 2))
ok = RMSE < input["RMSE_max"]
score = float(ok)
return (score, "RMSE: %f, required at most %f, accepted: %r\n" % (RMSE, input["RMSE_max"], ok))
except ValueError as err:
return (0, err.message)
except:
return (0, "Unknown error")
| none | 1 | 2.857721 | 3 | |
KNN.py | kongxiaoshuang/KNN | 2 | 6622174 | <reponame>kongxiaoshuang/KNN
#-*- coding: utf-8 -*-
from numpy import *
import operator
import matplotlib
import matplotlib.pyplot as plt
def createDataSet():
    """Return a tiny hard-coded sample set and its class labels.

    Returns:
        samples -- (4, 2) float array of feature vectors
        tags    -- list of 4 class labels, one per row of *samples*
    """
    samples = array([[1.0, 1.1],
                     [1.0, 1.0],
                     [0.0, 0.0],
                     [0.0, 0.1]])
    tags = ['A', 'A', 'B', 'B']
    return samples, tags
def classify0(inX, dataSet, labels, k): #inX为用于分类的输入向量,dataSet为输入的训练样本集, labels为训练标签,k表示用于选择最近的数目
dataSetSize = dataSet.shape[0] #dataSet的行数
diffMat = tile(inX, (dataSetSize, 1)) - dataSet #将inX数组复制成与dataSet相同行数,与dataSet相减,求坐标差
sqDiffMat = diffMat**2 #diffMat的平方
sqDistances = sqDiffMat.sum(axis=1) #将sqDiffMat每一行的所有数相加
distances = sqDistances**0.5 #开根号,求点和点之间的欧式距离
sortedDistIndicies = distances.argsort() #将distances中的元素从小到大排列,提取其对应的index,然后输出到sortedDistIndicies
classCount = {} #创建字典
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]] #前k个标签数据
classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1 #判断classCount中有没有对应的voteIlabel,
# 如果有返回voteIlabel对应的值,如果没有则返回0,在最后加1。为了计算k个标签的类别数量
sortedClassCount = sorted(classCount.items(),
key=operator.itemgetter(1), reverse=True) #生成classCount的迭代器,进行排序,
# operator.itemgetter(1)以标签的个数降序排序
return sortedClassCount[0][0] #返回个数最多的标签
def file2matrix(filename):
fr = open(filename)
arrayOLines = fr.readlines() #读入所有行
numberOfLines = len(arrayOLines) #行数
returnMat = zeros((numberOfLines, 3)) #创建数组,数据集
classLabelVector = [] #标签集
index = 0
for line in arrayOLines:
line = line.strip() #移除所有的回车符
listFromLine = line.split('\t') #把一个字符串按\t分割成字符串数组
returnMat[index,:] = listFromLine[0:3] #取listFromLine的前三个元素放入returnMat
classLabelVector.append(int(listFromLine[-1])) #选取listFromLine的最后一个元素依次存入classLabelVector列表中
index += 1
return returnMat, classLabelVector
def autoNorm(dataSet):
minVals = dataSet.min(0) #0表示从列中选取最小值
maxVals = dataSet.max(0) #选取最大值
ranges = maxVals-minVals
normDataSet = zeros(shape(dataSet)) #创建一个与dataSet大小相同的零矩阵
m = dataSet.shape[0] #取dataSet得行数
normDataSet = dataSet - tile(minVals, (m, 1)) #将minVals复制m行 与dataSet数据集相减
#归一化相除
normDataSet = normDataSet/tile(ranges, (m, 1)) #将最大值-最小值的值复制m行 与normDataSet相除,即归一化
return normDataSet, ranges, minVals #normDataSet为归一化特征值,ranges为最大值-最小值
def datingClassTest():
hoRatio = 0.10 #测试数据占总数据的百分比
datingDataMat, datingLabels = file2matrix('datingTestSet2.txt') #将文本信息转成numpy格式
#datingDataMat为数据集,datingLabels为标签集
normMat, ranges, minVals = autoNorm(datingDataMat) #将datingDataMat数据归一化
#normMat为归一化数据特征值,ranges为特征最大值-最小值,minVals为最小值
m = normMat.shape[0] #取normMat的行数
numTestVecs = int(m*hoRatio) #测试数据的行数
errorCount = 0.0 #错误数据数量
for i in range(numTestVecs):
classifierResult = classify0(normMat[i,:], normMat[numTestVecs:m, :], datingLabels[numTestVecs:m], 3)
#classify0为kNN分类器,normMat为用于分类的输入向量,normMat为输入的训练样本集(剩余的90%)
#datingLabels为训练标签,3表示用于选择最近邻居的数目
print("the classifier came back with: %d, the real answer is: %d" %(classifierResult, datingLabels[i]))
if (classifierResult != datingLabels[i]):errorCount += 1.0 #分类器结果和原标签不一样,则errorCount加1
print("the total error rate is : %f" %(errorCount/float(numTestVecs)))
# datingClassTest()
# datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
#
# normDataSet, ranges, minVals = autoNorm(datingDataMat)
# fig = plt.figure()
# ax = fig.add_subplot(111) #一行一列一个
# ax.scatter(datingDataMat[:,1], datingDataMat[:,2],
# 15.0*array(datingLabels), 15.0*array(datingLabels)) #scatter画散点图,使用标签属性绘制不同颜色不同大小的点
# plt.show()
# #测试分类器
# group, labels = createDataSet()
# label = classify0([1,1], group, labels, 3)
# print(label)
from os import listdir
def img2vector (filename):
returnVect = zeros((1, 1024)) #创建一个1*1024的数组
fr = open(filename)
for i in range(32):
lineStr = fr.readline() #每次读入一行
for j in range(32):
returnVect[0, 32*i+j] = int(lineStr[j])
return returnVect
def handwritingClassTest():
hwLabels = [] #标签集
trainingFileList = listdir('E:/digits/trainingDigits') #listdir获取训练集的文件目录
m = len(trainingFileList) #文件数量
trainingMat = zeros((m, 1024)) #一个数字1024个字符,创建m*1024的数组
for i in range(m):
fileNameStr = trainingFileList[i] #获取文件名
fileStr = fileNameStr.split('.')[0] #以'.'将字符串分割,并取第一项,即0_0.txt取0_0
classNumStr = int(fileStr.split('_')[0]) #以'_'将字符串分割,并取第一项
hwLabels.append(classNumStr) #依次存入hwLabels标签集
trainingMat[i, :] = img2vector('E:/digits/trainingDigits/%s' % fileNameStr) #将每个数字的字符值依次存入trainingMat
testFileList = listdir('E:/digits/testDigits') #读入测试数据集
errorCount = 0.0 #测试错误数量
mTest = len(testFileList) #测试集的数量
for i in range(mTest):
fileNameStr = testFileList[i]
fileStr = fileNameStr.split('.')[0]
classNumStr = int(fileStr.split('_')[0]) #测试数据标签
vectorUnderTest = img2vector('E:/digits/testDigits/%s' % fileNameStr) #读入测试数据
classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3) #分类器kNN算法,3为最近邻数目
print("the calssifier came back with: %d, the real answer is : %d" %(classifierResult, classNumStr))
if (classifierResult != classNumStr): errorCount +=1.0
print("\nthe total number of errors is : %f" % errorCount)
print("\nthe total error rate is :%f" % (errorCount/float(mTest)))
handwritingClassTest() | #-*- coding: utf-8 -*-
from numpy import *
import operator
import matplotlib
import matplotlib.pyplot as plt
def createDataSet():
group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
labels = ['A', 'A', 'B', 'B']
return group, labels
def classify0(inX, dataSet, labels, k): #inX为用于分类的输入向量,dataSet为输入的训练样本集, labels为训练标签,k表示用于选择最近的数目
dataSetSize = dataSet.shape[0] #dataSet的行数
diffMat = tile(inX, (dataSetSize, 1)) - dataSet #将inX数组复制成与dataSet相同行数,与dataSet相减,求坐标差
sqDiffMat = diffMat**2 #diffMat的平方
sqDistances = sqDiffMat.sum(axis=1) #将sqDiffMat每一行的所有数相加
distances = sqDistances**0.5 #开根号,求点和点之间的欧式距离
sortedDistIndicies = distances.argsort() #将distances中的元素从小到大排列,提取其对应的index,然后输出到sortedDistIndicies
classCount = {} #创建字典
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]] #前k个标签数据
classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1 #判断classCount中有没有对应的voteIlabel,
# 如果有返回voteIlabel对应的值,如果没有则返回0,在最后加1。为了计算k个标签的类别数量
sortedClassCount = sorted(classCount.items(),
key=operator.itemgetter(1), reverse=True) #生成classCount的迭代器,进行排序,
# operator.itemgetter(1)以标签的个数降序排序
return sortedClassCount[0][0] #返回个数最多的标签
def file2matrix(filename):
fr = open(filename)
arrayOLines = fr.readlines() #读入所有行
numberOfLines = len(arrayOLines) #行数
returnMat = zeros((numberOfLines, 3)) #创建数组,数据集
classLabelVector = [] #标签集
index = 0
for line in arrayOLines:
line = line.strip() #移除所有的回车符
listFromLine = line.split('\t') #把一个字符串按\t分割成字符串数组
returnMat[index,:] = listFromLine[0:3] #取listFromLine的前三个元素放入returnMat
classLabelVector.append(int(listFromLine[-1])) #选取listFromLine的最后一个元素依次存入classLabelVector列表中
index += 1
return returnMat, classLabelVector
def autoNorm(dataSet):
minVals = dataSet.min(0) #0表示从列中选取最小值
maxVals = dataSet.max(0) #选取最大值
ranges = maxVals-minVals
normDataSet = zeros(shape(dataSet)) #创建一个与dataSet大小相同的零矩阵
m = dataSet.shape[0] #取dataSet得行数
normDataSet = dataSet - tile(minVals, (m, 1)) #将minVals复制m行 与dataSet数据集相减
#归一化相除
normDataSet = normDataSet/tile(ranges, (m, 1)) #将最大值-最小值的值复制m行 与normDataSet相除,即归一化
return normDataSet, ranges, minVals #normDataSet为归一化特征值,ranges为最大值-最小值
def datingClassTest():
hoRatio = 0.10 #测试数据占总数据的百分比
datingDataMat, datingLabels = file2matrix('datingTestSet2.txt') #将文本信息转成numpy格式
#datingDataMat为数据集,datingLabels为标签集
normMat, ranges, minVals = autoNorm(datingDataMat) #将datingDataMat数据归一化
#normMat为归一化数据特征值,ranges为特征最大值-最小值,minVals为最小值
m = normMat.shape[0] #取normMat的行数
numTestVecs = int(m*hoRatio) #测试数据的行数
errorCount = 0.0 #错误数据数量
for i in range(numTestVecs):
classifierResult = classify0(normMat[i,:], normMat[numTestVecs:m, :], datingLabels[numTestVecs:m], 3)
#classify0为kNN分类器,normMat为用于分类的输入向量,normMat为输入的训练样本集(剩余的90%)
#datingLabels为训练标签,3表示用于选择最近邻居的数目
print("the classifier came back with: %d, the real answer is: %d" %(classifierResult, datingLabels[i]))
if (classifierResult != datingLabels[i]):errorCount += 1.0 #分类器结果和原标签不一样,则errorCount加1
print("the total error rate is : %f" %(errorCount/float(numTestVecs)))
# datingClassTest()
# datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
#
# normDataSet, ranges, minVals = autoNorm(datingDataMat)
# fig = plt.figure()
# ax = fig.add_subplot(111) #一行一列一个
# ax.scatter(datingDataMat[:,1], datingDataMat[:,2],
# 15.0*array(datingLabels), 15.0*array(datingLabels)) #scatter画散点图,使用标签属性绘制不同颜色不同大小的点
# plt.show()
# #测试分类器
# group, labels = createDataSet()
# label = classify0([1,1], group, labels, 3)
# print(label)
from os import listdir
def img2vector (filename):
returnVect = zeros((1, 1024)) #创建一个1*1024的数组
fr = open(filename)
for i in range(32):
lineStr = fr.readline() #每次读入一行
for j in range(32):
returnVect[0, 32*i+j] = int(lineStr[j])
return returnVect
def handwritingClassTest():
hwLabels = [] #标签集
trainingFileList = listdir('E:/digits/trainingDigits') #listdir获取训练集的文件目录
m = len(trainingFileList) #文件数量
trainingMat = zeros((m, 1024)) #一个数字1024个字符,创建m*1024的数组
for i in range(m):
fileNameStr = trainingFileList[i] #获取文件名
fileStr = fileNameStr.split('.')[0] #以'.'将字符串分割,并取第一项,即0_0.txt取0_0
classNumStr = int(fileStr.split('_')[0]) #以'_'将字符串分割,并取第一项
hwLabels.append(classNumStr) #依次存入hwLabels标签集
trainingMat[i, :] = img2vector('E:/digits/trainingDigits/%s' % fileNameStr) #将每个数字的字符值依次存入trainingMat
testFileList = listdir('E:/digits/testDigits') #读入测试数据集
errorCount = 0.0 #测试错误数量
mTest = len(testFileList) #测试集的数量
for i in range(mTest):
fileNameStr = testFileList[i]
fileStr = fileNameStr.split('.')[0]
classNumStr = int(fileStr.split('_')[0]) #测试数据标签
vectorUnderTest = img2vector('E:/digits/testDigits/%s' % fileNameStr) #读入测试数据
classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3) #分类器kNN算法,3为最近邻数目
print("the calssifier came back with: %d, the real answer is : %d" %(classifierResult, classNumStr))
if (classifierResult != classNumStr): errorCount +=1.0
print("\nthe total number of errors is : %f" % errorCount)
print("\nthe total error rate is :%f" % (errorCount/float(mTest)))
handwritingClassTest() | zh | 0.841819 | #-*- coding: utf-8 -*- #inX为用于分类的输入向量,dataSet为输入的训练样本集, labels为训练标签,k表示用于选择最近的数目 #dataSet的行数 #将inX数组复制成与dataSet相同行数,与dataSet相减,求坐标差 #diffMat的平方 #将sqDiffMat每一行的所有数相加 #开根号,求点和点之间的欧式距离 #将distances中的元素从小到大排列,提取其对应的index,然后输出到sortedDistIndicies #创建字典 #前k个标签数据 #判断classCount中有没有对应的voteIlabel, # 如果有返回voteIlabel对应的值,如果没有则返回0,在最后加1。为了计算k个标签的类别数量 #生成classCount的迭代器,进行排序, # operator.itemgetter(1)以标签的个数降序排序 #返回个数最多的标签 #读入所有行 #行数 #创建数组,数据集 #标签集 #移除所有的回车符 #把一个字符串按\t分割成字符串数组 #取listFromLine的前三个元素放入returnMat #选取listFromLine的最后一个元素依次存入classLabelVector列表中 #0表示从列中选取最小值 #选取最大值 #创建一个与dataSet大小相同的零矩阵 #取dataSet得行数 #将minVals复制m行 与dataSet数据集相减 #归一化相除 #将最大值-最小值的值复制m行 与normDataSet相除,即归一化 #normDataSet为归一化特征值,ranges为最大值-最小值 #测试数据占总数据的百分比 #将文本信息转成numpy格式 #datingDataMat为数据集,datingLabels为标签集 #将datingDataMat数据归一化 #normMat为归一化数据特征值,ranges为特征最大值-最小值,minVals为最小值 #取normMat的行数 #测试数据的行数 #错误数据数量 #classify0为kNN分类器,normMat为用于分类的输入向量,normMat为输入的训练样本集(剩余的90%) #datingLabels为训练标签,3表示用于选择最近邻居的数目 #分类器结果和原标签不一样,则errorCount加1 # datingClassTest() # datingDataMat, datingLabels = file2matrix('datingTestSet2.txt') # # normDataSet, ranges, minVals = autoNorm(datingDataMat) # fig = plt.figure() # ax = fig.add_subplot(111) #一行一列一个 # ax.scatter(datingDataMat[:,1], datingDataMat[:,2], # 15.0*array(datingLabels), 15.0*array(datingLabels)) #scatter画散点图,使用标签属性绘制不同颜色不同大小的点 # plt.show() # #测试分类器 # group, labels = createDataSet() # label = classify0([1,1], group, labels, 3) # print(label) #创建一个1*1024的数组 #每次读入一行 #标签集 #listdir获取训练集的文件目录 #文件数量 #一个数字1024个字符,创建m*1024的数组 #获取文件名 #以'.'将字符串分割,并取第一项,即0_0.txt取0_0 #以'_'将字符串分割,并取第一项 #依次存入hwLabels标签集 #将每个数字的字符值依次存入trainingMat #读入测试数据集 #测试错误数量 #测试集的数量 #测试数据标签 #读入测试数据 #分类器kNN算法,3为最近邻数目 | 2.993564 | 3 |
DOSELECT/script.py | ritwik1503/Competitive-Coding-1 | 29 | 6622175 | import json
import requests
| import json
import requests
| none | 1 | 1.020308 | 1 | |
src/mta/model/mf.py | JalexChang/cross-media-attribution | 0 | 6622176 | <reponame>JalexChang/cross-media-attribution
import numpy
from copy import copy
from mta.dataset import Dataset
from mta.ds.rating_row import RatingRow
from mta.ds.touch_row import TouchRow
import time
class MF:
trained = False
dataset_loaded = False
def __init__ (self,max_iters=100, user_biased =False, item_biased =False, alpha=0.001, beta=0.01, delta=0.001,verbose=False):
self.max_iters = max_iters
self.user_biased = user_biased
self.item_biased = item_biased
self.alpha = alpha
self.beta = beta
self.delta = delta
self.verbose = verbose
def load_dataset(self,dataset):
matrix_shape = dataset.matrix_shape()
self._size_user = matrix_shape[0]
self._size_factor = matrix_shape[1]
self._size_item = matrix_shape[2]
self.ratings = copy(dataset.ratings)
self.touchs = copy(dataset.touchs)
self.dataset_loaded = True
if not self.trained :
self.trained_transaction_on_item = []
for i_id in range(self._size_item):
self.trained_transaction_on_item.append([])
if self.verbose:
print('dataset:', matrix_shape, ' is loaded')
def _init_latent_factors(self):
if not self.trained:
init_mean = self.ratings.mean() - self.mean
init_std = self.ratings.std()
self.W = numpy.zeros([self._size_user,self._size_factor])
for u_id,f_id in self.touchs.to_list():
self.W[u_id][f_id] = numpy.random.normal(init_mean, init_std)
self.H = numpy.random.normal(init_mean, init_std,(self._size_factor,self._size_item))
if self.verbose:
print('latent factors has been initialized')
def _init_biases(self):
if not self.trained :
R = self.ratings.to_matrix()
self.mean = 0
self.bias_user = numpy.zeros(self._size_user)
self.bias_item = numpy.zeros(self._size_item)
if self.user_biased or self.item_biased:
self.mean = self.ratings.mean()
if self.verbose:
print('biases has been initialized')
def _mark_matrix(self):
for u_id, i_id, rating in self.ratings.to_list():
self.trained_transaction_on_item[i_id].append(u_id)
def fit(self):
self._init_biases()
self._init_latent_factors()
self._mark_matrix()
best_W = numpy.copy(self.W)
best_H = numpy.copy(self.H)
R_list = self.ratings.to_list()
#R_predicted = self.predict(R_list)
for iters in range(self.max_iters):
begin_time = time.time()
#update features by stochastic gradient descent
self._update_sgd(R_list)
#calculate overall error (with regularization)
R_predicted = self.predict(R_list)
total_cost = self._calculate_cost(R_list,R_predicted)
end_time= time.time()
if self.verbose :
print ('iters-',iters+1,' cost:',total_cost,' time:', end_time - begin_time)
if total_cost < self.delta:
break
self.trained = True
def _update_sgd(self,R_list):
for u_id, i_id, rating in R_list:
predicted_rating = self._predict_one_element(u_id, i_id, p_type = "normal")
error = rating - predicted_rating
#updated factors
for f_id in range(self._size_factor):
if self.W[u_id][f_id] > 0:
w_uf = self.W[u_id][f_id]
h_fi = self.H[f_id][i_id]
self.W[u_id][f_id] += self.alpha *(error * h_fi + self.beta * w_uf)
self.H[f_id][i_id] += self.alpha *(error * w_uf + self.beta * h_fi)
#update biases
if self.item_biased:
self.bias_item[i_id] += self.alpha *( error - self.beta * self.bias_item[i_id])
if self.user_biased:
self.bias_user[u_id] += self.alpha *( error - self.beta * self.bias_user[u_id])
def _calculate_cost(self,R_list,R_predicted):
total_cost =0.
# prediction errors
for u_id, i_id, rating in R_list:
error = rating - R_predicted[u_id][i_id]
total_cost += pow(error,2)
# regularization errors
for u_id in range(self._size_user):
total_cost += self.beta*(numpy.dot(self.W[u_id,:],self.W[u_id,:]) + pow(self.bias_user[u_id],2))
for i_id in range(self._size_item):
total_cost += self.beta*(numpy.dot(self.H[:,i_id],self.H[:,i_id]) + pow(self.bias_item[i_id],2))
return total_cost
def predict(self, R_list = None):
return self._predict(R_list, "normal")
def predict_average(self, R_list = None):
return self._predict(R_list, "avg")
def _predict(self, R_list = None, p_type ="normal"):
R_predicted = numpy.zeros((self._size_user,self._size_item))
if R_list is None:
for u_id in range(self._size_user):
for i_id in range(self._size_item):
R_predicted[u_id][i_id] = self._predict_one_element(u_id, i_id, p_type)
else:
for u_id, i_id, rating in R_list:
R_predicted[u_id][i_id] = self._predict_one_element(u_id, i_id, p_type)
return R_predicted
def _predict_one_element(self, u_id, i_id, p_type ="normal"):
predicted_element = 0.
if p_type == "normal":
predicted_element = numpy.dot(self.W[u_id,:], self.H[:,i_id])
elif p_type == "avg" :
predicted_w = self.average_w(u_id, i_id)
predicted_element = numpy.dot(predicted_w, self.H[:,i_id])
predicted_element += self.mean + self.bias_user[u_id] + self.bias_item[i_id]
return predicted_element
def average_w(self, u_id, i_id):
users = self.trained_transaction_on_item[i_id]
len_user = len(users)
w = numpy.zeros(self._size_factor)
if len_user >0:
for user_id in users:
w += self.W[user_id]
for f_id in range(self._size_factor):
w[f_id] = w[f_id]/ len_user if self.W[u_id][f_id] >0 else 0
return w
def matrix_shape(self):
return (self._size_user,self._size_factor,self._size_item)
def factor_attribution(self, R_list = None):
attribution = numpy.zeros(self._size_factor)
attribution_matrix = self.factor_item_attribution(R_list)
for f_id in range(self._size_factor):
attribution[f_id] = attribution_matrix[f_id].sum()
return attribution
def factor_item_attribution(self, R_list = None):
attribution_matrix = numpy.zeros([self._size_factor,self._size_item])
if R_list is None:
R_list = self.ratings.to_list()
for u_id, i_id, rating in R_list:
total_weight = numpy.inner(self.W[u_id,:], self.H[:,i_id])
for f_id in range(self._size_factor):
attributed_weight = self.W[u_id][f_id] * self.H[f_id][i_id]
if total_weight != 0.:
attribution_matrix[f_id][i_id] += rating * (attributed_weight / total_weight)
return attribution_matrix
| import numpy
from copy import copy
from mta.dataset import Dataset
from mta.ds.rating_row import RatingRow
from mta.ds.touch_row import TouchRow
import time
class MF:
trained = False
dataset_loaded = False
def __init__ (self,max_iters=100, user_biased =False, item_biased =False, alpha=0.001, beta=0.01, delta=0.001,verbose=False):
self.max_iters = max_iters
self.user_biased = user_biased
self.item_biased = item_biased
self.alpha = alpha
self.beta = beta
self.delta = delta
self.verbose = verbose
def load_dataset(self,dataset):
matrix_shape = dataset.matrix_shape()
self._size_user = matrix_shape[0]
self._size_factor = matrix_shape[1]
self._size_item = matrix_shape[2]
self.ratings = copy(dataset.ratings)
self.touchs = copy(dataset.touchs)
self.dataset_loaded = True
if not self.trained :
self.trained_transaction_on_item = []
for i_id in range(self._size_item):
self.trained_transaction_on_item.append([])
if self.verbose:
print('dataset:', matrix_shape, ' is loaded')
def _init_latent_factors(self):
if not self.trained:
init_mean = self.ratings.mean() - self.mean
init_std = self.ratings.std()
self.W = numpy.zeros([self._size_user,self._size_factor])
for u_id,f_id in self.touchs.to_list():
self.W[u_id][f_id] = numpy.random.normal(init_mean, init_std)
self.H = numpy.random.normal(init_mean, init_std,(self._size_factor,self._size_item))
if self.verbose:
print('latent factors has been initialized')
def _init_biases(self):
if not self.trained :
R = self.ratings.to_matrix()
self.mean = 0
self.bias_user = numpy.zeros(self._size_user)
self.bias_item = numpy.zeros(self._size_item)
if self.user_biased or self.item_biased:
self.mean = self.ratings.mean()
if self.verbose:
print('biases has been initialized')
def _mark_matrix(self):
for u_id, i_id, rating in self.ratings.to_list():
self.trained_transaction_on_item[i_id].append(u_id)
def fit(self):
self._init_biases()
self._init_latent_factors()
self._mark_matrix()
best_W = numpy.copy(self.W)
best_H = numpy.copy(self.H)
R_list = self.ratings.to_list()
#R_predicted = self.predict(R_list)
for iters in range(self.max_iters):
begin_time = time.time()
#update features by stochastic gradient descent
self._update_sgd(R_list)
#calculate overall error (with regularization)
R_predicted = self.predict(R_list)
total_cost = self._calculate_cost(R_list,R_predicted)
end_time= time.time()
if self.verbose :
print ('iters-',iters+1,' cost:',total_cost,' time:', end_time - begin_time)
if total_cost < self.delta:
break
self.trained = True
def _update_sgd(self,R_list):
for u_id, i_id, rating in R_list:
predicted_rating = self._predict_one_element(u_id, i_id, p_type = "normal")
error = rating - predicted_rating
#updated factors
for f_id in range(self._size_factor):
if self.W[u_id][f_id] > 0:
w_uf = self.W[u_id][f_id]
h_fi = self.H[f_id][i_id]
self.W[u_id][f_id] += self.alpha *(error * h_fi + self.beta * w_uf)
self.H[f_id][i_id] += self.alpha *(error * w_uf + self.beta * h_fi)
#update biases
if self.item_biased:
self.bias_item[i_id] += self.alpha *( error - self.beta * self.bias_item[i_id])
if self.user_biased:
self.bias_user[u_id] += self.alpha *( error - self.beta * self.bias_user[u_id])
def _calculate_cost(self,R_list,R_predicted):
total_cost =0.
# prediction errors
for u_id, i_id, rating in R_list:
error = rating - R_predicted[u_id][i_id]
total_cost += pow(error,2)
# regularization errors
for u_id in range(self._size_user):
total_cost += self.beta*(numpy.dot(self.W[u_id,:],self.W[u_id,:]) + pow(self.bias_user[u_id],2))
for i_id in range(self._size_item):
total_cost += self.beta*(numpy.dot(self.H[:,i_id],self.H[:,i_id]) + pow(self.bias_item[i_id],2))
return total_cost
def predict(self, R_list = None):
return self._predict(R_list, "normal")
def predict_average(self, R_list = None):
return self._predict(R_list, "avg")
def _predict(self, R_list = None, p_type ="normal"):
R_predicted = numpy.zeros((self._size_user,self._size_item))
if R_list is None:
for u_id in range(self._size_user):
for i_id in range(self._size_item):
R_predicted[u_id][i_id] = self._predict_one_element(u_id, i_id, p_type)
else:
for u_id, i_id, rating in R_list:
R_predicted[u_id][i_id] = self._predict_one_element(u_id, i_id, p_type)
return R_predicted
def _predict_one_element(self, u_id, i_id, p_type ="normal"):
predicted_element = 0.
if p_type == "normal":
predicted_element = numpy.dot(self.W[u_id,:], self.H[:,i_id])
elif p_type == "avg" :
predicted_w = self.average_w(u_id, i_id)
predicted_element = numpy.dot(predicted_w, self.H[:,i_id])
predicted_element += self.mean + self.bias_user[u_id] + self.bias_item[i_id]
return predicted_element
def average_w(self, u_id, i_id):
users = self.trained_transaction_on_item[i_id]
len_user = len(users)
w = numpy.zeros(self._size_factor)
if len_user >0:
for user_id in users:
w += self.W[user_id]
for f_id in range(self._size_factor):
w[f_id] = w[f_id]/ len_user if self.W[u_id][f_id] >0 else 0
return w
def matrix_shape(self):
return (self._size_user,self._size_factor,self._size_item)
def factor_attribution(self, R_list = None):
attribution = numpy.zeros(self._size_factor)
attribution_matrix = self.factor_item_attribution(R_list)
for f_id in range(self._size_factor):
attribution[f_id] = attribution_matrix[f_id].sum()
return attribution
def factor_item_attribution(self, R_list = None):
attribution_matrix = numpy.zeros([self._size_factor,self._size_item])
if R_list is None:
R_list = self.ratings.to_list()
for u_id, i_id, rating in R_list:
total_weight = numpy.inner(self.W[u_id,:], self.H[:,i_id])
for f_id in range(self._size_factor):
attributed_weight = self.W[u_id][f_id] * self.H[f_id][i_id]
if total_weight != 0.:
attribution_matrix[f_id][i_id] += rating * (attributed_weight / total_weight)
return attribution_matrix | en | 0.765185 | #R_predicted = self.predict(R_list) #update features by stochastic gradient descent #calculate overall error (with regularization) #updated factors #update biases # prediction errors # regularization errors | 2.198554 | 2 |
oldtests/test_value.py | tokikanno/mosql | 85 | 6622177 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from getpass import getuser
from itertools import product
import mosql.util
import mosql.std
import mosql.mysql
def connect_to_postgresql():
    """Open a psycopg2 connection as the current OS user.

    Both ``server_encoding`` and ``client_encoding`` are asserted to be UTF8;
    otherwise the byte-level round-trip comparisons in the tests below would
    be meaningless.
    """
    import psycopg2
    connection = psycopg2.connect(user=getuser())
    cursor = connection.cursor()
    # each query returns a single one-column row holding the encoding name
    for query in ('show server_encoding', 'show client_encoding'):
        cursor.execute(query)
        encoding, = cursor.fetchone()
        assert encoding == 'UTF8'
    cursor.close()
    return connection
def _check_roundtrip_postgresql(cur, key, expected_text, use_mosql):
    """Insert *expected_text* under primary key *key* and assert the value
    read back from PostgreSQL decodes to exactly the same text.

    When *use_mosql* is true the value is rendered into the SQL statement by
    ``mosql.util.value``; otherwise it is passed as a driver parameter, which
    serves as the known-good baseline.
    """
    if use_mosql:
        cur.execute('''
            insert into
                _test_value_in_postgresql
            values (
                %s,
                %s
            )
        ''' % (mosql.util.value(key), mosql.util.value(expected_text)))
    else:
        cur.execute('''
            insert into
                _test_value_in_postgresql
            values (
                %s,
                %s
            )
        ''', (key, expected_text))
    cur.execute(
        'select v from _test_value_in_postgresql where k = %s', (key, )
    )
    fetched_bytes, = cur.fetchone()
    assert fetched_bytes.decode('utf-8') == expected_text


def test_value_in_postgresql():
    """Round-trip tricky unicode text through PostgreSQL, comparing MoSQL's
    ``value()`` rendering against the driver's own parameter escaping.
    """
    mosql.std.patch()

    conn = connect_to_postgresql()
    cur = conn.cursor()
    try:
        cur.execute('''
            create temporary table _test_value_in_postgresql (
                k varchar(128) primary key,
                v text
            )
        ''')

        # Test V-P-1: Value - PostgreSQL - All BMP Chars
        #
        # All BMP chars, except
        #
        # 1. the null byte (U+0000)
        # 2. utf-16 surrogates (U+D800-U+DBFF, U+DC00-U+DFFF)
        #
        # which are not valid string constants in PostgreSQL.
        #
        # ref: http://www.postgresql.org/docs/9.3/static/sql-syntax-lexical.html#SQL-SYNTAX-STRINGS-ESCAPE
        bmp_text = u''.join(unichr(i) for i in xrange(0x0001, 0xd800))
        bmp_text += u''.join(unichr(i) for i in xrange(0xe000, 0xffff + 1))
        _check_roundtrip_postgresql(cur, 'raw-sql-bmp', bmp_text, use_mosql=False)
        _check_roundtrip_postgresql(cur, 'mosql-bmp', bmp_text, use_mosql=True)

        # Test V-P-2: Value - PostgreSQL - The Double ASCII Char's Dot Product
        #
        # Covers '\' + any ASCII char, and "'" + any ASCII char.
        #
        # dot product: dot_product(XY, AB) -> XAXBYAYB
        ascii_chars = [unichr(i) for i in xrange(0x01, 0x7f + 1)]
        pair_text = u''.join(a + b for a, b in product(ascii_chars, ascii_chars))
        _check_roundtrip_postgresql(cur, 'raw-sql-2-ascii', pair_text, use_mosql=False)
        _check_roundtrip_postgresql(cur, 'mosql-2-ascii', pair_text, use_mosql=True)
    finally:
        # close even when an assertion fails, so the connection is not leaked
        cur.close()
        conn.close()
def connect_to_mysql():
    """Open a MySQLdb connection to the ``root`` database as ``root``.

    Both ``character_set_database`` and ``character_set_connection`` are
    asserted to be utf8 so the byte-level round-trip comparisons in the tests
    below are meaningful.
    """
    import MySQLdb
    connection = MySQLdb.connect(user='root', db='root')
    cursor = connection.cursor()
    # each ``show variables`` row is (variable_name, value)
    for query in ("show variables where variable_name = 'character_set_database' ",
                  "show variables where variable_name = 'character_set_connection' "):
        cursor.execute(query)
        _, value = cursor.fetchone()
        assert value == 'utf8'
    cursor.close()
    return connection
def _check_roundtrip_mysql(cur, key, expected_text, use_mosql):
    """Insert *expected_text* under primary key *key* and assert the value
    read back from MySQL decodes to exactly the same text.

    When *use_mosql* is true the value is rendered into the SQL statement by
    ``mosql.util.value`` (with the MySQL patch active); otherwise it is passed
    as a driver parameter, which serves as the known-good baseline.
    """
    if use_mosql:
        cur.execute('''
            insert into
                _test_value_in_mysql
            values (
                %s,
                %s
            )
        ''' % (mosql.util.value(key), mosql.util.value(expected_text)))
    else:
        cur.execute('''
            insert into
                _test_value_in_mysql
            values (
                %s,
                %s
            )
        ''', (key, expected_text))
    cur.execute('select v from _test_value_in_mysql where k = %s', (key, ))
    fetched_bytes, = cur.fetchone()
    assert fetched_bytes.decode('utf-8') == expected_text


def test_value_in_mysql():
    """Round-trip tricky unicode text through MySQL, comparing MoSQL's
    MySQL-patched ``value()`` rendering against the driver's own escaping.
    """
    mosql.mysql.patch()

    conn = connect_to_mysql()
    cur = conn.cursor()
    try:
        cur.execute('''
            create temporary table _test_value_in_mysql (
                k varchar(128) primary key,
                v mediumtext
            )
        ''')

        # Test V-M-1: Value - MySQL - All BMP Chars
        #
        # All BMP chars, except
        #
        # 1. the utf-16 low surrogates (U+DC00-U+DFFF)
        #
        # which are not valid strings in MySQL.
        #
        # ref: http://dev.mysql.com/doc/refman/5.7/en/string-literals.html
        bmp_text = u''.join(unichr(i) for i in xrange(0x0000, 0xdc00))
        bmp_text += u''.join(unichr(i) for i in xrange(0xe000, 0xffff + 1))
        _check_roundtrip_mysql(cur, 'raw-sql-bmp', bmp_text, use_mosql=False)
        _check_roundtrip_mysql(cur, 'mosql-bmp', bmp_text, use_mosql=True)

        # Test V-M-2: Value - MySQL - The Double ASCII Char's Dot Product
        #
        # Covers '\' + any ASCII char, and "'" + any ASCII char.
        #
        # dot product: dot_product(XY, AB) -> XAXBYAYB
        ascii_chars = [unichr(i) for i in xrange(0x01, 0x7f + 1)]
        pair_text = u''.join(a + b for a, b in product(ascii_chars, ascii_chars))
        _check_roundtrip_mysql(cur, 'raw-sql-2-ascii', pair_text, use_mosql=False)
        _check_roundtrip_mysql(cur, 'mosql-2-ascii', pair_text, use_mosql=True)
    finally:
        # close even when an assertion fails, so the connection is not leaked
        cur.close()
        conn.close()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from getpass import getuser
from itertools import product
import mosql.util
import mosql.std
import mosql.mysql
def connect_to_postgresql():
    """Open a psycopg2 connection as the current OS user.

    Both ``server_encoding`` and ``client_encoding`` are asserted to be UTF8;
    otherwise the byte-level round-trip comparisons in the tests below would
    be meaningless.
    """
    import psycopg2
    connection = psycopg2.connect(user=getuser())
    cursor = connection.cursor()
    # each query returns a single one-column row holding the encoding name
    for query in ('show server_encoding', 'show client_encoding'):
        cursor.execute(query)
        encoding, = cursor.fetchone()
        assert encoding == 'UTF8'
    cursor.close()
    return connection
def _check_roundtrip_postgresql(cur, key, expected_text, use_mosql):
    """Insert *expected_text* under primary key *key* and assert the value
    read back from PostgreSQL decodes to exactly the same text.

    When *use_mosql* is true the value is rendered into the SQL statement by
    ``mosql.util.value``; otherwise it is passed as a driver parameter, which
    serves as the known-good baseline.
    """
    if use_mosql:
        cur.execute('''
            insert into
                _test_value_in_postgresql
            values (
                %s,
                %s
            )
        ''' % (mosql.util.value(key), mosql.util.value(expected_text)))
    else:
        cur.execute('''
            insert into
                _test_value_in_postgresql
            values (
                %s,
                %s
            )
        ''', (key, expected_text))
    cur.execute(
        'select v from _test_value_in_postgresql where k = %s', (key, )
    )
    fetched_bytes, = cur.fetchone()
    assert fetched_bytes.decode('utf-8') == expected_text


def test_value_in_postgresql():
    """Round-trip tricky unicode text through PostgreSQL, comparing MoSQL's
    ``value()`` rendering against the driver's own parameter escaping.
    """
    mosql.std.patch()

    conn = connect_to_postgresql()
    cur = conn.cursor()
    try:
        cur.execute('''
            create temporary table _test_value_in_postgresql (
                k varchar(128) primary key,
                v text
            )
        ''')

        # Test V-P-1: Value - PostgreSQL - All BMP Chars
        #
        # All BMP chars, except
        #
        # 1. the null byte (U+0000)
        # 2. utf-16 surrogates (U+D800-U+DBFF, U+DC00-U+DFFF)
        #
        # which are not valid string constants in PostgreSQL.
        #
        # ref: http://www.postgresql.org/docs/9.3/static/sql-syntax-lexical.html#SQL-SYNTAX-STRINGS-ESCAPE
        bmp_text = u''.join(unichr(i) for i in xrange(0x0001, 0xd800))
        bmp_text += u''.join(unichr(i) for i in xrange(0xe000, 0xffff + 1))
        _check_roundtrip_postgresql(cur, 'raw-sql-bmp', bmp_text, use_mosql=False)
        _check_roundtrip_postgresql(cur, 'mosql-bmp', bmp_text, use_mosql=True)

        # Test V-P-2: Value - PostgreSQL - The Double ASCII Char's Dot Product
        #
        # Covers '\' + any ASCII char, and "'" + any ASCII char.
        #
        # dot product: dot_product(XY, AB) -> XAXBYAYB
        ascii_chars = [unichr(i) for i in xrange(0x01, 0x7f + 1)]
        pair_text = u''.join(a + b for a, b in product(ascii_chars, ascii_chars))
        _check_roundtrip_postgresql(cur, 'raw-sql-2-ascii', pair_text, use_mosql=False)
        _check_roundtrip_postgresql(cur, 'mosql-2-ascii', pair_text, use_mosql=True)
    finally:
        # close even when an assertion fails, so the connection is not leaked
        cur.close()
        conn.close()
def connect_to_mysql():
    """Open a root MySQL connection and verify it speaks utf-8.

    Both the database and connection character sets must already be
    ``utf8``; otherwise the unicode round-trip tests below are meaningless.
    """
    import MySQLdb
    conn = MySQLdb.connect(user='root', db='root')
    cur = conn.cursor()
    # Each fetched row is (variable_name, value).
    for var_name in ('character_set_database', 'character_set_connection'):
        cur.execute('''show variables where variable_name = '%s' ''' % var_name)
        _, charset = cur.fetchone()
        assert charset == 'utf8'
    cur.close()
    return conn
def test_value_in_mysql():
    """Round-trip tricky unicode payloads through MySQL via mosql.

    Each payload is inserted twice -- once with the driver's own parameter
    binding (the reference behaviour) and once as a literal produced by
    ``mosql.util.value`` -- and both fetched values must equal the input.
    """
    # Activate mosql's MySQL-specific escaping rules before building literals.
    mosql.mysql.patch()
    conn = connect_to_mysql()
    cur = conn.cursor()
    cur.execute('''
        create temporary table _test_value_in_mysql (
            k varchar(128) primary key,
            v mediumtext
        )
    ''')
    # Test V-M-1: Value - MySQL - All BMP Chars
    #
    # It will include all BMP chars, except
    #
    # 1. the utf-16 low surrogates (U+DC00-U+DFFF)
    #
    # which are not valid string in MySQL.
    #
    # ref: http://dev.mysql.com/doc/refman/5.7/en/string-literals.html
    expected_text = u''.join(unichr(i) for i in xrange(0x0000, 0xdc00))
    expected_text += u''.join(unichr(i) for i in xrange(0xe000, 0xffff+1))
    # Test V-M-1-1: Value - MySQL - All BMP Chars - Raw SQL
    cur.execute('''
        insert into
            _test_value_in_mysql
        values (
            'raw-sql-bmp',
            %s
        )
    ''', (expected_text, ))
    cur.execute('''select v from _test_value_in_mysql where k = 'raw-sql-bmp' ''')
    fetched_bytes, = cur.fetchone()
    fetched_text = fetched_bytes.decode('utf-8')
    assert fetched_text == expected_text
    # Test V-M-1-2: Value - MySQL - All BMP Chars - MoSQL's Value Function
    cur.execute('''
        insert into
            _test_value_in_mysql
        values (
            'mosql-bmp',
            {}
        )
    '''.format(mosql.util.value(expected_text)))
    cur.execute('''select v from _test_value_in_mysql where k = 'mosql-bmp' ''')
    fetched_bytes, = cur.fetchone()
    fetched_text = fetched_bytes.decode('utf-8')
    assert fetched_text == expected_text
    # Test V-M-2: Value - MySQL - The Double ASCII Char's Dot Product
    #
    # It will include '\' + any ASCII char, and "'" + any ASCII char.
    #
    # dot product: dot_product(XY, AB) -> XAXBYAYB
    ascii_chars = [unichr(i) for i in xrange(0x01, 0x7f+1)]
    expected_text = u''.join(a+b for a, b in product(ascii_chars, ascii_chars))
    # Test V-M-2-1: Value - MySQL - The Double ASCII Char's Dot Product - Raw SQL
    cur.execute('''
        insert into
            _test_value_in_mysql
        values (
            'raw-sql-2-ascii',
            %s
        )
    ''', (expected_text, ))
    cur.execute('''select v from _test_value_in_mysql where k = 'raw-sql-2-ascii' ''')
    fetched_bytes, = cur.fetchone()
    fetched_text = fetched_bytes.decode('utf-8')
    assert fetched_text == expected_text
    # Test V-M-2-2: Value - MySQL - The Double ASCII Char's Dot Product - MoSQL's Value Function
    cur.execute('''
        insert into
            _test_value_in_mysql
        values (
            'mosql-2-ascii',
            {}
        )
    '''.format(mosql.util.value(expected_text)))
    cur.execute('''select v from _test_value_in_mysql where k = 'mosql-2-ascii' ''')
    fetched_bytes, = cur.fetchone()
    fetched_text = fetched_bytes.decode('utf-8')
    assert fetched_text == expected_text
    cur.close()
    conn.close()
| en | 0.319585 | #!/usr/bin/env python # -*- coding: utf-8 -*- create temporary table _test_value_in_postgresql ( k varchar(128) primary key, v text ) # Test V-P-1: Value - PostgreSQL - All BMP Chars # # It will include all BMP chars, except # # 1. the null byte (U+0000) # 2. utf-16 surrogates (U+D800-U+DBFF, U+DC00-U+DFFF) # # which are not valid string constant in PostgreSQL. # # ref: http://www.postgresql.org/docs/9.3/static/sql-syntax-lexical.html#SQL-SYNTAX-STRINGS-ESCAPE # Test V-P-1-1: Value - PostgreSQL - All BMP Chars - Raw SQL insert into _test_value_in_postgresql values ( 'raw-sql-bmp', %s ) select v from _test_value_in_postgresql where k = 'raw-sql-bmp' # Test V-P-1-2: Value - PostgreSQL - All BMP Chars - MoSQL's Value Function insert into _test_value_in_postgresql values ( 'mosql-bmp', {} ) select v from _test_value_in_postgresql where k = 'mosql-bmp' # Test V-P-2: Value - PostgreSQL - The Double ASCII Char's Dot Product # # It will include '\' + any ASCII char, and "'" + any ASCII char. # # dot product: dot_product(XY, AB) -> XAXBYAYB # Test V-P-2-1: Value - PostgreSQL - The Double ASCII Char's Dot Product - Raw SQL insert into _test_value_in_postgresql values ( 'raw-sql-2-ascii', %s ) select v from _test_value_in_postgresql where k = 'raw-sql-2-ascii' # Test V-P-2-2: Value - PostgreSQL - The Double ASCII Char's Dot Product - MoSQL's Value Function insert into _test_value_in_postgresql values ( 'mosql-2-ascii', {} ) select v from _test_value_in_postgresql where k = 'mosql-2-ascii' # the columns: variable_name, value show variables where variable_name = 'character_set_database' show variables where variable_name = 'character_set_connection' create temporary table _test_value_in_mysql ( k varchar(128) primary key, v mediumtext ) # Test V-M-1: Value - MySQL - All BMP Chars # # It will include all BMP chars, except # # 1. the utf-16 low surrogates (U+DC00-U+DFFF) # # which are not valid string in MySQL. 
# # ref: http://dev.mysql.com/doc/refman/5.7/en/string-literals.html # Test V-M-1-1: Value - MySQL - All BMP Chars - Raw SQL insert into _test_value_in_mysql values ( 'raw-sql-bmp', %s ) select v from _test_value_in_mysql where k = 'raw-sql-bmp' # Test V-M-1-2: Value - MySQL - All BMP Chars - MoSQL's Value Function insert into _test_value_in_mysql values ( 'mosql-bmp', {} ) select v from _test_value_in_mysql where k = 'mosql-bmp' # Test V-M-2: Value - MySQL - The Double ASCII Char's Dot Product # # It will include '\' + any ASCII char, and "'" + any ASCII char. # # dot product: dot_product(XY, AB) -> XAXBYAYB # Test V-M-2-1: Value - MySQL - The Double ASCII Char's Dot Product - Raw SQL insert into _test_value_in_mysql values ( 'raw-sql-2-ascii', %s ) select v from _test_value_in_mysql where k = 'raw-sql-2-ascii' # Test V-M-2-2: Value - MySQL - The Double ASCII Char's Dot Product - MoSQL's Value Function insert into _test_value_in_mysql values ( 'mosql-2-ascii', {} ) select v from _test_value_in_mysql where k = 'mosql-2-ascii' | 2.447391 | 2 |
test/unit/test_cli.py | jsm84/ScalitySproxydSwift | 4 | 6622178 | <reponame>jsm84/ScalitySproxydSwift
# Copyright (c) 2015 Scality
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Tests for `swift_scality_backend.cli`.'''
import sys
import unittest
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from swift_scality_backend import cli
from swift_scality_backend.policy_configuration import StoragePolicy
import utils
class FakeStream(object):
    """Context manager that swaps ``module.attr`` for an in-memory stream.

    While the ``with`` block is active, anything written through the
    patched attribute (e.g. ``sys.stderr``) lands in ``self.stream``;
    on exit the original attribute is restored.
    """

    def __init__(self, module, attr):
        self._module = module
        self._attr = attr
        self._orig_attr = None
        self.stream = StringIO()

    def __enter__(self):
        # Remember the real attribute so __exit__ can put it back.
        self._orig_attr = getattr(self._module, self._attr)
        setattr(self._module, self._attr, self.stream)
        return self

    def __exit__(self, exc, value, tb):
        setattr(self._module, self._attr, self._orig_attr)
class Namespace(object):
    """Ad-hoc attribute bag: ``Namespace(a=1).a == 1``.

    Stand-in for an ``argparse.Namespace`` in the tests below.
    """

    def __init__(self, **kwargs):
        # dict.items() works on Python 2 and 3; .iteritems() is 2-only.
        for (name, value) in kwargs.items():
            setattr(self, name, value)
class TestStoragePolicyLint(unittest.TestCase):
    """Exercise ``cli.storage_policy_lint`` on malformed, invalid,
    unreadable and valid configuration input."""

    def test_lint_fails_on_malformed_file(self):
        # Not INI syntax at all -> parsing error.
        bad_args = Namespace(config=StringIO('test'))
        with FakeStream(sys, 'stderr') as captured:
            ret = cli.storage_policy_lint(bad_args)
            utils.assertRegexpMatches(captured.stream.getvalue(), 'Parsing error:')
            self.assertNotEqual(0, ret)

    def test_lint_fails_on_invalid_config(self):
        # Syntactically fine, but '[ring:]' lacks a ring name.
        bad_args = Namespace(config=StringIO('[ring:]'))
        with FakeStream(sys, 'stderr') as captured:
            ret = cli.storage_policy_lint(bad_args)
            utils.assertRegexpMatches(captured.stream.getvalue(), 'Configuration error:')
            self.assertNotEqual(0, ret)

    def test_lint_fails_on_exception(self):
        # A stream whose readline() blows up exercises the generic handler.
        class ExplodingStream(object):
            def readline(self):
                raise IOError('Oops')

        bad_args = Namespace(config=ExplodingStream())
        with FakeStream(sys, 'stderr') as captured:
            ret = cli.storage_policy_lint(bad_args)
            utils.assertRegexpMatches(captured.stream.getvalue(), 'Error: Oops')
            self.assertNotEqual(0, ret)

    def test_lint_succeeds_on_valid_config(self):
        empty_args = Namespace(config=StringIO(''))
        self.assertEquals(0, cli.storage_policy_lint(empty_args))
class TestStoragePolicyQuery(unittest.TestCase):
    """Exercise ``cli.storage_policy_query`` endpoint lookups."""

    def test_load_fails(self):
        # '[ring:]' is rejected while loading the configuration.
        query_args = Namespace(config=StringIO('[ring:]'))
        with FakeStream(sys, 'stderr') as captured:
            ret = cli.storage_policy_query(query_args)
            utils.assertRegexpMatches(captured.stream.getvalue(), 'Error: Invalid section name')
            self.assertNotEqual(0, ret)

    def test_lookup_fails(self):
        # An empty config knows nothing about policy index 1.
        query_args = Namespace(config=StringIO(''), policy_index=1)
        with FakeStream(sys, 'stderr') as captured:
            ret = cli.storage_policy_query(query_args)
            utils.assertRegexpMatches(captured.stream.getvalue(), 'Error: Unknown policy index')
            self.assertNotEqual(0, ret)

    def test_success(self):
        config = '\n'.join(s.strip() for s in '''
        [ring:paris]
        location = paris
        sproxyd_endpoints = http://paris1.int/, http://paris2.int
        [ring:sfo]
        location = sfo
        sproxyd_endpoints = http://sfo1.int
        [storage-policy:2]
        read = sfo
        write = paris
        '''.splitlines())
        query_args = Namespace(
            config=StringIO(config),
            policy_index=2,
            action=StoragePolicy.WRITE,
            locations=['paris'])
        with FakeStream(sys, 'stdout') as captured:
            ret = cli.storage_policy_query(query_args)
        self.assertEqual(0, ret)
        printed = captured.stream.getvalue()
        # Only the write endpoints of the 'paris' ring may be reported.
        for endpoint in ('http://paris1.int', 'http://paris2.int'):
            self.assertTrue(endpoint in printed)
        self.assertFalse('sfo' in printed)
class TestMain(unittest.TestCase):
    """Smoke-test the CLI entry point, including the no-argparse path."""

    def test_main(self):
        """--help must exit via SystemExit and print usage (or a 2.7 hint)."""
        # Replace sys.exit so argparse's exit is observable as SystemExit.
        def exit(code):
            raise SystemExit(code)
        orig_exit = sys.exit
        sys.exit = exit
        # Force failure even when `argparse` is installed on Python 2.6 setups
        orig_argparse = cli.argparse
        if sys.version_info < (2, 7):
            cli.argparse = None
        try:
            with FakeStream(sys, 'stdout') as stdout:
                with FakeStream(sys, 'stderr') as stderr:
                    self.assertRaises(
                        SystemExit,
                        cli.main, ['--help'])
            if cli.argparse:
                # Normal path: usage text mentions the lint sub-command.
                utils.assertRegexpMatches(
                    stdout.stream.getvalue(),
                    'storage-policy-lint')
            else:
                # argparse-less path is only expected on Python < 2.7.
                self.assertTrue(sys.version_info < (2, 7))
                utils.assertRegexpMatches(
                    stderr.stream.getvalue(),
                    'Python 2.7')
        finally:
            # Always undo the monkey-patching, even on assertion failure.
            sys.exit = orig_exit
            cli.argparse = orig_argparse
| # Copyright (c) 2015 Scality
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Tests for `swift_scality_backend.cli`.'''
import sys
import unittest
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from swift_scality_backend import cli
from swift_scality_backend.policy_configuration import StoragePolicy
import utils
class FakeStream(object):
def __init__(self, module, attr):
self.stream = StringIO()
self._module = module
self._attr = attr
self._orig_attr = None
def __enter__(self):
self._orig_attr = getattr(self._module, self._attr)
setattr(self._module, self._attr, self.stream)
return self
def __exit__(self, exc, value, tb):
setattr(self._module, self._attr, self._orig_attr)
class Namespace(object):
    """Ad-hoc attribute bag: ``Namespace(a=1).a == 1``.

    Stand-in for an ``argparse.Namespace`` in the tests below.
    """

    def __init__(self, **kwargs):
        # dict.items() works on Python 2 and 3; .iteritems() is 2-only.
        for (name, value) in kwargs.items():
            setattr(self, name, value)
class TestStoragePolicyLint(unittest.TestCase):
def test_lint_fails_on_malformed_file(self):
config = 'test'
args = Namespace(
config=StringIO(config))
with FakeStream(sys, 'stderr') as stderr:
rc = cli.storage_policy_lint(args)
utils.assertRegexpMatches(
stderr.stream.getvalue(),
'Parsing error:')
self.assertNotEqual(0, rc)
def test_lint_fails_on_invalid_config(self):
config = '[ring:]'
args = Namespace(
config=StringIO(config))
with FakeStream(sys, 'stderr') as stderr:
rc = cli.storage_policy_lint(args)
utils.assertRegexpMatches(
stderr.stream.getvalue(),
'Configuration error:')
self.assertNotEqual(0, rc)
def test_lint_fails_on_exception(self):
class Stream(object):
def readline(self):
raise IOError('Oops')
args = Namespace(
config=Stream())
with FakeStream(sys, 'stderr') as stderr:
rc = cli.storage_policy_lint(args)
utils.assertRegexpMatches(
stderr.stream.getvalue(),
'Error: Oops')
self.assertNotEqual(0, rc)
def test_lint_succeeds_on_valid_config(self):
config = ''
args = Namespace(
config=StringIO(config))
rc = cli.storage_policy_lint(args)
self.assertEquals(0, rc)
class TestStoragePolicyQuery(unittest.TestCase):
def test_load_fails(self):
config = '[ring:]'
args = Namespace(
config=StringIO(config))
with FakeStream(sys, 'stderr') as stderr:
rc = cli.storage_policy_query(args)
utils.assertRegexpMatches(
stderr.stream.getvalue(),
'Error: Invalid section name')
self.assertNotEqual(0, rc)
def test_lookup_fails(self):
config = ''
args = Namespace(
config=StringIO(config),
policy_index=1)
with FakeStream(sys, 'stderr') as stderr:
rc = cli.storage_policy_query(args)
utils.assertRegexpMatches(
stderr.stream.getvalue(),
'Error: Unknown policy index')
self.assertNotEqual(0, rc)
def test_success(self):
config = '\n'.join(s.strip() for s in '''
[ring:paris]
location = paris
sproxyd_endpoints = http://paris1.int/, http://paris2.int
[ring:sfo]
location = sfo
sproxyd_endpoints = http://sfo1.int
[storage-policy:2]
read = sfo
write = paris
'''.splitlines())
args = Namespace(
config=StringIO(config),
policy_index=2,
action=StoragePolicy.WRITE,
locations=['paris'])
with FakeStream(sys, 'stdout') as stdout:
rc = cli.storage_policy_query(args)
self.assertEqual(0, rc)
out = stdout.stream.getvalue()
self.assertTrue('http://paris1.int' in out)
self.assertTrue('http://paris2.int' in out)
self.assertFalse('sfo' in out)
class TestMain(unittest.TestCase):
def test_main(self):
def exit(code):
raise SystemExit(code)
orig_exit = sys.exit
sys.exit = exit
# Force failure even when `argparse` is installed on Python 2.6 setups
orig_argparse = cli.argparse
if sys.version_info < (2, 7):
cli.argparse = None
try:
with FakeStream(sys, 'stdout') as stdout:
with FakeStream(sys, 'stderr') as stderr:
self.assertRaises(
SystemExit,
cli.main, ['--help'])
if cli.argparse:
utils.assertRegexpMatches(
stdout.stream.getvalue(),
'storage-policy-lint')
else:
self.assertTrue(sys.version_info < (2, 7))
utils.assertRegexpMatches(
stderr.stream.getvalue(),
'Python 2.7')
finally:
sys.exit = orig_exit
cli.argparse = orig_argparse | en | 0.776395 | # Copyright (c) 2015 Scality # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for `swift_scality_backend.cli`. [ring:paris] location = paris sproxyd_endpoints = http://paris1.int/, http://paris2.int [ring:sfo] location = sfo sproxyd_endpoints = http://sfo1.int [storage-policy:2] read = sfo write = paris # Force failure even when `argparse` is installed on Python 2.6 setups | 1.832617 | 2 |
webgrid_ta/grids.py | sourcery-ai-bot/webgrid | 9 | 6622179 | from __future__ import absolute_import
from webgrid import BaseGrid as BaseGrid
from webgrid import (
Column,
ColumnGroup,
DateColumn,
DateTimeColumn,
EnumColumn,
LinkColumnBase,
NumericColumn,
TimeColumn,
YesNoColumn,
)
from webgrid.filters import (
DateFilter,
DateTimeFilter,
IntFilter,
Operator,
OptionsEnumFilter,
OptionsFilterBase,
TextFilter,
TimeFilter,
ops,
)
from webgrid.renderers import CSV
from webgrid_ta.extensions import lazy_gettext as _
from .app import webgrid
from .model.entities import AccountType, ArrowRecord, Person, Radio, Status, Stopwatch
class Grid(BaseGrid):
    # Base grid for this app: binds every subclass to the shared webgrid manager.
    manager = webgrid
class FirstNameColumn(LinkColumnBase):
    """Link column pointing each row at its person-edit page."""

    def create_url(self, record):
        return '/person-edit/' + str(record.id)
class FullNameColumn(LinkColumnBase):
    """Renders "first last" and links to the person-edit page."""

    def extract_data(self, record):
        return _('{record.firstname} {record.lastname}', record=record)

    def create_url(self, record):
        return '/person-edit/' + str(record.id)
class EmailsColumn(Column):
    """Comma-joined list of the person's e-mail addresses."""

    def extract_data(self, recordset):
        addresses = (e.email for e in recordset.Person.emails)
        return ', '.join(addresses)
class StatusFilter(OptionsFilterBase):
    """Options filter over Status rows, with open/closed shortcut operators."""

    # Declaration order presumably drives the operator drop-down order -- TODO confirm.
    operators = (
        Operator('o', _('open'), None),
        ops.is_,
        ops.not_is,
        Operator('c', _('closed'), None),
        ops.empty,
        ops.not_empty
    )
    # Filter options are sourced from the Status entity's pairs.
    options_from = Status.pairs
class PeopleGrid(Grid):
    """Main demo grid over Person rows.

    Columns are registered in declaration order; presumably that order is
    also the render order -- TODO confirm against webgrid docs.
    """
    session_on = True
    FirstNameColumn(_('First Name'), Person.firstname, TextFilter)
    FullNameColumn(_('Full Name'))
    YesNoColumn(_('Active'), Person.inactive, reverse=True)
    EmailsColumn(_('Emails'))
    Column(_('Status'), Status.label.label('status'), StatusFilter(Status.id))
    DateTimeColumn(_('Created'), Person.createdts, DateTimeFilter)
    DateColumn(_('Due Date'), 'due_date')
    # Export-only columns: rendered solely in the named spreadsheet targets.
    Column(_('Sort Order'), Person.sortorder, render_in='xls')
    Column(_('State'), Person.state, render_in='xlsx')
    NumericColumn(_('Number'), Person.numericcol, has_subtotal=True)
    EnumColumn(_('Account Type'), Person.account_type,
               OptionsEnumFilter(Person.account_type, enum_type=AccountType))
    def query_prep(self, query, has_sort, has_filters):
        """Attach the extra Person columns/entity and apply the default sort."""
        query = query.add_columns(
            Person.id, Person.lastname, Person.due_date, Person.account_type,
        ).add_entity(Person).outerjoin(Person.status)
        # default sort
        if not has_sort:
            query = query.order_by(Person.id)
        return query
class PeopleGridByConfig(PeopleGrid):
    """Same as PeopleGrid, but the outer join and default sort are supplied
    via the declarative class attributes instead of query_prep."""
    query_outer_joins = (Person.status, )
    query_default_sort = (Person.id, )
    def query_prep(self, query, has_sort, has_filters):
        # Join and default sort come from the class attributes above.
        query = query.add_columns(
            Person.id, Person.lastname, Person.due_date, Person.account_type,
        ).add_entity(Person)
        return query
class DefaultOpGrid(Grid):
    """Grid whose first-name filter pre-selects the equality operator."""
    session_on = True
    FirstNameColumn(_('First Name'), Person.firstname,
                    TextFilter(Person.firstname, default_op=ops.eq))
class ArrowGrid(Grid):
    """Single-column grid over ArrowRecord creation timestamps."""
    session_on = True
    DateTimeColumn(_('Created'), ArrowRecord.created_utc, DateTimeFilter)
    def query_prep(self, query, has_sort, has_filters):
        # default sort
        if not has_sort:
            query = query.order_by(ArrowRecord.id)
        return query
class ArrowCSVGrid(Grid):
    """Like ArrowGrid, but restricted to CSV as the only export target."""
    session_on = True
    # Overrides the default export targets; only CSV rendering is offered.
    allowed_export_targets = {'csv': CSV}
    DateTimeColumn(_('Created'), ArrowRecord.created_utc, DateTimeFilter)
    def query_prep(self, query, has_sort, has_filters):
        # default sort
        if not has_sort:
            query = query.order_by(ArrowRecord.id)
        return query
class StopwatchGrid(Grid):
    """Grid demonstrating grouped column headers (one group per lap)."""
    session_on = True
    # Column groups may be declared either as a nested class or as instances.
    class LapGroup1(ColumnGroup):
        label = 'Lap 1'
        class_ = 'lap-1'
    lap_group_2 = ColumnGroup('Lap 2', class_='lap-2')
    lap_group_3 = ColumnGroup('Lap 3', class_='lap-3')
    Column('ID', Stopwatch.id)
    Column('Label', Stopwatch.label, TextFilter)
    DateTimeColumn('Start Time', Stopwatch.start_time_lap1, group=LapGroup1)
    DateTimeColumn('Stop Time', Stopwatch.stop_time_lap1, group=LapGroup1)
    # Ungrouped column deliberately placed between grouped ones.
    Column('Category', Stopwatch.category, TextFilter)
    DateTimeColumn('Start Time', Stopwatch.start_time_lap2, group=lap_group_2)
    DateTimeColumn('Stop Time', Stopwatch.stop_time_lap2, group=lap_group_2)
    DateTimeColumn('Start Time', Stopwatch.start_time_lap3, group=lap_group_3)
    DateTimeColumn('Stop Time', Stopwatch.stop_time_lap3, group=lap_group_3)
    def query_prep(self, query, has_sort, has_filters):
        # default sort
        if not has_sort:
            query = query.order_by(Stopwatch.id)
        return query
class TemporalGrid(Grid):
    """One column per temporal type: datetime, date, and time filters."""
    session_on = True
    DateTimeColumn(_('Created'), Person.createdts, DateTimeFilter)
    DateColumn(_('Due Date'), Person.due_date, DateFilter)
    TimeColumn(_('Start Time'), Person.start_time, TimeFilter)
class RadioGrid(Grid):
    """Simple grid over Radio rows with text and integer filters."""
    session_on = True
    Column('Make', Radio.make, TextFilter)
    Column('Model', Radio.model, TextFilter)
    Column('Year', Radio.year, IntFilter)
| from __future__ import absolute_import
from webgrid import BaseGrid as BaseGrid
from webgrid import (
Column,
ColumnGroup,
DateColumn,
DateTimeColumn,
EnumColumn,
LinkColumnBase,
NumericColumn,
TimeColumn,
YesNoColumn,
)
from webgrid.filters import (
DateFilter,
DateTimeFilter,
IntFilter,
Operator,
OptionsEnumFilter,
OptionsFilterBase,
TextFilter,
TimeFilter,
ops,
)
from webgrid.renderers import CSV
from webgrid_ta.extensions import lazy_gettext as _
from .app import webgrid
from .model.entities import AccountType, ArrowRecord, Person, Radio, Status, Stopwatch
class Grid(BaseGrid):
manager = webgrid
class FirstNameColumn(LinkColumnBase):
def create_url(self, record):
return '/person-edit/{0}'.format(record.id)
class FullNameColumn(LinkColumnBase):
def extract_data(self, record):
return _('{record.firstname} {record.lastname}', record=record)
def create_url(self, record):
return '/person-edit/{0}'.format(record.id)
class EmailsColumn(Column):
def extract_data(self, recordset):
return ', '.join([e.email for e in recordset.Person.emails])
class StatusFilter(OptionsFilterBase):
operators = (
Operator('o', _('open'), None),
ops.is_,
ops.not_is,
Operator('c', _('closed'), None),
ops.empty,
ops.not_empty
)
options_from = Status.pairs
class PeopleGrid(Grid):
session_on = True
FirstNameColumn(_('First Name'), Person.firstname, TextFilter)
FullNameColumn(_('Full Name'))
YesNoColumn(_('Active'), Person.inactive, reverse=True)
EmailsColumn(_('Emails'))
Column(_('Status'), Status.label.label('status'), StatusFilter(Status.id))
DateTimeColumn(_('Created'), Person.createdts, DateTimeFilter)
DateColumn(_('Due Date'), 'due_date')
Column(_('Sort Order'), Person.sortorder, render_in='xls')
Column(_('State'), Person.state, render_in='xlsx')
NumericColumn(_('Number'), Person.numericcol, has_subtotal=True)
EnumColumn(_('Account Type'), Person.account_type,
OptionsEnumFilter(Person.account_type, enum_type=AccountType))
def query_prep(self, query, has_sort, has_filters):
query = query.add_columns(
Person.id, Person.lastname, Person.due_date, Person.account_type,
).add_entity(Person).outerjoin(Person.status)
# default sort
if not has_sort:
query = query.order_by(Person.id)
return query
class PeopleGridByConfig(PeopleGrid):
query_outer_joins = (Person.status, )
query_default_sort = (Person.id, )
def query_prep(self, query, has_sort, has_filters):
query = query.add_columns(
Person.id, Person.lastname, Person.due_date, Person.account_type,
).add_entity(Person)
return query
class DefaultOpGrid(Grid):
session_on = True
FirstNameColumn(_('First Name'), Person.firstname,
TextFilter(Person.firstname, default_op=ops.eq))
class ArrowGrid(Grid):
session_on = True
DateTimeColumn(_('Created'), ArrowRecord.created_utc, DateTimeFilter)
def query_prep(self, query, has_sort, has_filters):
# default sort
if not has_sort:
query = query.order_by(ArrowRecord.id)
return query
class ArrowCSVGrid(Grid):
session_on = True
allowed_export_targets = {'csv': CSV}
DateTimeColumn(_('Created'), ArrowRecord.created_utc, DateTimeFilter)
def query_prep(self, query, has_sort, has_filters):
# default sort
if not has_sort:
query = query.order_by(ArrowRecord.id)
return query
class StopwatchGrid(Grid):
session_on = True
class LapGroup1(ColumnGroup):
label = 'Lap 1'
class_ = 'lap-1'
lap_group_2 = ColumnGroup('Lap 2', class_='lap-2')
lap_group_3 = ColumnGroup('Lap 3', class_='lap-3')
Column('ID', Stopwatch.id)
Column('Label', Stopwatch.label, TextFilter)
DateTimeColumn('Start Time', Stopwatch.start_time_lap1, group=LapGroup1)
DateTimeColumn('Stop Time', Stopwatch.stop_time_lap1, group=LapGroup1)
Column('Category', Stopwatch.category, TextFilter)
DateTimeColumn('Start Time', Stopwatch.start_time_lap2, group=lap_group_2)
DateTimeColumn('Stop Time', Stopwatch.stop_time_lap2, group=lap_group_2)
DateTimeColumn('Start Time', Stopwatch.start_time_lap3, group=lap_group_3)
DateTimeColumn('Stop Time', Stopwatch.stop_time_lap3, group=lap_group_3)
def query_prep(self, query, has_sort, has_filters):
# default sort
if not has_sort:
query = query.order_by(Stopwatch.id)
return query
class TemporalGrid(Grid):
session_on = True
DateTimeColumn(_('Created'), Person.createdts, DateTimeFilter)
DateColumn(_('Due Date'), Person.due_date, DateFilter)
TimeColumn(_('Start Time'), Person.start_time, TimeFilter)
class RadioGrid(Grid):
session_on = True
Column('Make', Radio.make, TextFilter)
Column('Model', Radio.model, TextFilter)
Column('Year', Radio.year, IntFilter)
| en | 0.055744 | # default sort # default sort # default sort # default sort | 2.17835 | 2 |
scripts/prune_inconsistent.py | shouc/corbfuzz | 1 | 6622180 | <reponame>shouc/corbfuzz<gh_stars>1-10
import sys
import os
import z3
def check(arr):
    """Replay a recorded constraint trace through z3 and report satisfiability.

    Each element of *arr* is a comma-separated record, either:
      * ``isset,<sym>,<0|1>`` -- whether <sym> was defined; or
      * ``<op>,<sym>,<decision>,<value>,<direction>,<cvt>`` -- a branch on a
        comparison, where <op> encodes the opcode (15/17 equal, 16/18 not
        equal, 19 smaller, 20 smaller-or-equal), <cvt> the value type
        (2/3 bool, 4 int, 6 string).

    Returns True when the collected constraints are satisfiable.

    Fixes over the original: ``startswith`` typos, the missing ``continue``
    after isset records (IndexError on 3-field lines), keeping the raw value
    as a string (it was cast to int, then used with ``'t' in val``),
    conditional-expression and chained-comparison precedence bugs that added
    plain Python bools/ints to the solver, and returning a real bool
    (``CheckSatResult`` is always truthy, so ``not check(...)`` never fired).
    """
    mappings = {}
    s = z3.Solver()
    for line in arr:
        fields = line.split(",")
        if len(fields) < 2:
            continue
        if fields[0].startswith("isset"):
            sym, is_defined = fields[1], int(fields[2])
            if f"gated_{sym}" not in mappings:
                mappings[f"gated_{sym}"] = z3.Bool(f"gated_{sym}")
            # The symbol is "gated" iff it was reported as not defined.
            s.add(mappings[f"gated_{sym}"] == (is_defined == 0))
            continue  # isset records carry no comparison payload
        if len(fields) < 6:
            continue
        sym = fields[1]
        decision = int(fields[2])
        val = fields[3]  # keep the raw string; convert per cvt below
        direction = int(fields[4])
        cvt = int(fields[5])
        # direction flips the branch outcome.
        decision = decision if direction == 0 else not decision
        if f"gated_{sym}" not in mappings:
            mappings[f"gated_{sym}"] = z3.Bool(f"gated_{sym}")
            s.add(mappings[f"gated_{sym}"] == True)  # noqa: E712 -- z3 overload
        if sym not in mappings:
            if cvt == 4:
                mappings[sym] = z3.Int(f"k_{sym}")
            if cvt == 2 or cvt == 3:
                mappings[sym] = z3.Bool(f"k_{sym}")
            if cvt == 6:
                mappings[sym] = z3.String(f"k_{sym}")
        if not decision:
            # Only taken branches contribute constraints (as in the original).
            continue
        if fields[0].startswith("20"):  # IS_SMALLER_OR_EQUAL
            if cvt == 4:
                s.add(mappings[sym] <= int(val))
            if cvt == 2 or cvt == 3:
                s.add(mappings[sym] <= (1 if 't' in val else 0))
            if cvt == 6:
                assert 0
        elif fields[0].startswith("18") or fields[0].startswith("16"):  # NOT_EQUAL / NOT_IDENTICAL
            if cvt == 4:
                s.add(mappings[sym] != int(val))
            if cvt == 2 or cvt == 3:
                s.add(mappings[sym] != ('t' in val))
            if cvt == 6:
                s.add(mappings[sym] != val)
        elif fields[0].startswith("17") or fields[0].startswith("15"):  # EQUAL / IDENTICAL
            if cvt == 4:
                s.add(mappings[sym] == int(val))
            if cvt == 2 or cvt == 3:
                s.add(mappings[sym] == ('t' in val))
            if cvt == 6:
                s.add(mappings[sym] == val)
        elif fields[0].startswith("19"):  # IS_SMALLER
            if cvt == 4:
                s.add(mappings[sym] < int(val))
            if cvt == 2 or cvt == 3:
                s.add(mappings[sym] < (1 if 't' in val else 0))
            if cvt == 6:
                assert 0
    return s.check() == z3.sat
# Sweep the directory: for every .cons trace whose constraints are
# unsatisfiable, delete the companion file recorded alongside it.
directory = sys.argv[1]
for entry in os.listdir(directory):
    if not entry.endswith(".cons"):
        continue
    try:
        cons_path = os.path.join(directory, entry)
        with open(cons_path) as fh:  # original leaked the file handle
            trace_lines = fh.readlines()
        if not check(trace_lines):
            print(f"Inconsistency identified {entry}")
            # Strip only the trailing '.cons' (str.replace removed every
            # occurrence); os.remove avoids shelling out ('rm -f' was open
            # to injection via crafted filenames).
            target = os.path.join(directory, entry[:-len(".cons")])
            if os.path.exists(target):
                os.remove(target)
    except Exception:
        # Best-effort sweep: one bad trace must not abort the others.
        # (Narrowed from a bare except, which also swallowed KeyboardInterrupt.)
        pass
| import sys
import os
import z3
def check(arr):
    """Replay a recorded constraint trace through z3 and report satisfiability.

    Each element of *arr* is a comma-separated record, either:
      * ``isset,<sym>,<0|1>`` -- whether <sym> was defined; or
      * ``<op>,<sym>,<decision>,<value>,<direction>,<cvt>`` -- a branch on a
        comparison, where <op> encodes the opcode (15/17 equal, 16/18 not
        equal, 19 smaller, 20 smaller-or-equal), <cvt> the value type
        (2/3 bool, 4 int, 6 string).

    Returns True when the collected constraints are satisfiable.

    Fixes over the original: ``startswith`` typos, the missing ``continue``
    after isset records (IndexError on 3-field lines), keeping the raw value
    as a string (it was cast to int, then used with ``'t' in val``),
    conditional-expression and chained-comparison precedence bugs that added
    plain Python bools/ints to the solver, and returning a real bool
    (``CheckSatResult`` is always truthy, so ``not check(...)`` never fired).
    """
    mappings = {}
    s = z3.Solver()
    for line in arr:
        fields = line.split(",")
        if len(fields) < 2:
            continue
        if fields[0].startswith("isset"):
            sym, is_defined = fields[1], int(fields[2])
            if f"gated_{sym}" not in mappings:
                mappings[f"gated_{sym}"] = z3.Bool(f"gated_{sym}")
            # The symbol is "gated" iff it was reported as not defined.
            s.add(mappings[f"gated_{sym}"] == (is_defined == 0))
            continue  # isset records carry no comparison payload
        if len(fields) < 6:
            continue
        sym = fields[1]
        decision = int(fields[2])
        val = fields[3]  # keep the raw string; convert per cvt below
        direction = int(fields[4])
        cvt = int(fields[5])
        # direction flips the branch outcome.
        decision = decision if direction == 0 else not decision
        if f"gated_{sym}" not in mappings:
            mappings[f"gated_{sym}"] = z3.Bool(f"gated_{sym}")
            s.add(mappings[f"gated_{sym}"] == True)  # noqa: E712 -- z3 overload
        if sym not in mappings:
            if cvt == 4:
                mappings[sym] = z3.Int(f"k_{sym}")
            if cvt == 2 or cvt == 3:
                mappings[sym] = z3.Bool(f"k_{sym}")
            if cvt == 6:
                mappings[sym] = z3.String(f"k_{sym}")
        if not decision:
            # Only taken branches contribute constraints (as in the original).
            continue
        if fields[0].startswith("20"):  # IS_SMALLER_OR_EQUAL
            if cvt == 4:
                s.add(mappings[sym] <= int(val))
            if cvt == 2 or cvt == 3:
                s.add(mappings[sym] <= (1 if 't' in val else 0))
            if cvt == 6:
                assert 0
        elif fields[0].startswith("18") or fields[0].startswith("16"):  # NOT_EQUAL / NOT_IDENTICAL
            if cvt == 4:
                s.add(mappings[sym] != int(val))
            if cvt == 2 or cvt == 3:
                s.add(mappings[sym] != ('t' in val))
            if cvt == 6:
                s.add(mappings[sym] != val)
        elif fields[0].startswith("17") or fields[0].startswith("15"):  # EQUAL / IDENTICAL
            if cvt == 4:
                s.add(mappings[sym] == int(val))
            if cvt == 2 or cvt == 3:
                s.add(mappings[sym] == ('t' in val))
            if cvt == 6:
                s.add(mappings[sym] == val)
        elif fields[0].startswith("19"):  # IS_SMALLER
            if cvt == 4:
                s.add(mappings[sym] < int(val))
            if cvt == 2 or cvt == 3:
                s.add(mappings[sym] < (1 if 't' in val else 0))
            if cvt == 6:
                assert 0
    return s.check() == z3.sat
directory = sys.argv[1]
for i in os.listdir(directory):
try:
if i.endswith(".cons"):
if not check(open(directory + "/" + i).readlines()):
print(f"Inconsistency identified {i}")
os.system(f"rm -f {directory + '/' + i.replace('.cons', '')}")
except:
pass | es | 0.20724 | # IS_SMALLER_OR_EQUAL # neq # eq # IS_SMALLER | 2.687891 | 3 |
easy/24.py | pisskidney/leetcode | 0 | 6622181 | <reponame>pisskidney/leetcode
#!/usr/bin/python
class ListNode(object):
    """Singly linked list node holding a value and a next pointer."""

    def __init__(self, x):
        self.val = x
        self.next = None
class Solution(object):
    def swapPairs(self, head):
        """Swap every two adjacent nodes of a linked list (LeetCode 24).

        The original tuple assignment ``p, res, prev = head, p.next, ...``
        read ``p`` before it was bound and raised NameError; this version
        walks the list with an explicit previous pointer instead.

        Args:
            head: first node of the list, or None.

        Returns:
            The new head of the list with each adjacent pair swapped.
        """
        if not head or not head.next:
            return head
        new_head = head.next
        prev = None
        node = head
        while node and node.next:
            partner = node.next
            # Unlink the pair and relink it in reverse order.
            node.next = partner.next
            partner.next = node
            if prev is not None:
                # Attach the swapped pair after the previous pair.
                prev.next = partner
            prev = node
            node = node.next
        return new_head
# Build the list 1 -> 2 -> 3 -> 4, swap adjacent pairs, and print the result.
# (print() with one argument and range() are valid on both Python 2 and 3;
# the original `print x.val` / `xrange` were Python-2-only.)
nodes = [ListNode(i) for i in range(1, 5)]
for first, second in zip(nodes, nodes[1:]):
    first.next = second
result = Solution().swapPairs(nodes[0])
while result:
    print(result.val)
    result = result.next
| #!/usr/bin/python
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
    def swapPairs(self, head):
        """Swap every two adjacent nodes of a linked list (LeetCode 24).

        The original tuple assignment ``p, res, prev = head, p.next, ...``
        read ``p`` before it was bound and raised NameError; this version
        walks the list with an explicit previous pointer instead.
        """
        if not head or not head.next:
            return head
        new_head = head.next
        prev = None
        node = head
        while node and node.next:
            partner = node.next
            # Unlink the pair and relink it in reverse order.
            node.next = partner.next
            partner.next = node
            if prev is not None:
                prev.next = partner
            prev = node
            node = node.next
        return new_head
a = range(1, 5)
b = []
for i in a:
b.append(ListNode(i))
for i in xrange(len(b) - 1):
b[i].next = b[i+1]
s = Solution()
x = s.swapPairs(b[0])
while x:
print x.val
x = x.next | ru | 0.258958 | #!/usr/bin/python | 3.438342 | 3 |
handcam/ltt/datasets/handcam/OniProcessingCpp.py | luketaverne/handcam | 1 | 6622182 | import cv2
import numpy as np
import itertools
import os
# import h5py #put this back in after the h5py package has a new update on pip. See <https://github.com/h5py/h5py/issues/995>
import sys
from handcam.ltt.datasets.handcam.OrbbecCamParams import OrbbecCamParams
# from handcam.ltt.util.Utils import write_progress_bar
import glob
# from subprocess import Popen, PIPE
import read_oni_as_np
class OniSampleReader:
    def __init__(self, sample_path):
        """Load one recorded sample (video + per-frame labels) from *sample_path*.

        Validates the sample directory, then decodes video.oni through the
        native ``read_oni_as_np`` extension into frames and frame labels.
        """
        self.sample_path = sample_path
        # Frame size used for the camera parameters below.
        self.vid_w = 320
        self.vid_h = 240
        # NOTE(review): is_valid_sample() is defined outside this excerpt; it
        # appears to populate self.misc_attrs / self.grasp_label used below -- confirm.
        self.is_valid_sample()
        # grasp_labels = ['grasp_1', 'grasp_2', 'grasp_3', 'grasp_4', 'grasp_5', 'grasp_6', 'grasp_7']
        # Map each grasp label to a zero-based class id.
        grasp_label_to_id = {
            "grasp_1": 0,
            "grasp_2": 1,
            "grasp_3": 2,
            "grasp_4": 3,
            "grasp_5": 4,
            "grasp_6": 5,
            "grasp_7": 6,
        }
        self.grasp_id = grasp_label_to_id[self.grasp_label]
        self.cam_params = OrbbecCamParams(
            int(self.misc_attrs["cameraid"]), (self.vid_w, self.vid_h)
        )
        # Decode the recording via the native extension; the two timestamps
        # delimit the labelled portion of the clip.
        ret_tuple = read_oni_as_np.read_oni_as_np(
            os.path.join(self.sample_path, "video.oni"),
            self.cam_params.mat,
            self.grasp_id,
            self.misc_attrs["armReadyTime_ms"],
            self.misc_attrs["objectTouched_ms"],
        )
        # ret_tuple is (frames, per-frame labels).
        self.vid = ret_tuple[0]
        self.frame_labels = ret_tuple[1]
def __read_misc_txt__(self):
with open(os.path.join(self.sample_path, "misc.txt"), "r") as file:
misc_list = [line.strip() for line in file]
self.misc_attrs = {}
for line in misc_list:
key = line.split(":")[0]
value = line.split(":")[1]
self.misc_attrs[key] = str(value)
required_misc_properties = [
u"armReadyTime_ms",
u"objectTouched_ms",
u"lighting",
u"clutter",
u"greenScreen",
u"handedness",
u"cameraid",
u"subjectid",
]
if set(required_misc_properties) != set(self.misc_attrs.keys()):
raise ValueError("Sample is missing some required information in misc.txt")
self.misc_attrs[u"armReadyTime_ms"] = int(self.misc_attrs[u"armReadyTime_ms"])
self.misc_attrs[u"objectTouched_ms"] = int(self.misc_attrs[u"objectTouched_ms"])
self.misc_attrs[u"cameraid"] = int(self.misc_attrs[u"cameraid"])
if int(self.misc_attrs[u"cameraid"]) not in [1, 2]:
raise ValueError(
"Invalid camera selected. Please choose 1 (Luke's) or 2 (Matteo's)."
)
# Need to convert lightigng, clutter, greenScreen, handedness to boolean
modify_misc_properties = [
u"lighting",
u"clutter",
u"greenScreen",
u"handedness",
]
for prop in modify_misc_properties:
if self.misc_attrs[prop] in [
u"true",
u"True",
u"TRUE",
u"right",
u"Right",
u"RIGHT",
]:
# right handed will be 1
self.misc_attrs[prop] = 1
else:
# false or left handed will be zero
self.misc_attrs[prop] = 0
def __read_accel_txt__(self):
self.accel = np.genfromtxt(
os.path.join(self.sample_path, "accel.txt"), skip_header=1, delimiter=","
)
if self.accel.shape[1] != 4:
raise ValueError(
"accel.txt has the wrong shape. Should be (None, 4) but is "
+ str(self.accel.shape)
)
def __read_gyro_txt__(self):
self.gyro = np.genfromtxt(
os.path.join(self.sample_path, "gyro.txt"), skip_header=1, delimiter=","
)
if self.gyro.shape[1] != 4:
raise ValueError(
"gyro.txt has the wrong shape. Should be (None, 4) but is "
+ str(self.gyro.shape)
)
def __read_pose_txt__(self):
self.pose = np.genfromtxt(
os.path.join(self.sample_path, "pose.txt"), skip_header=1, delimiter=","
)
if self.pose.shape[1] != 5:
raise ValueError(
"pose.txt has the wrong shape. Should be (None, 5) but is "
+ str(self.pose.shape)
)
def __process_Myo_data__(self):
# Align the MyoData.
# It seems that every time ANY of the 3 Myo things get new data, they all write to their buffer.
# # We just need the time since recording started, so subtract the minimum timestamp
min_timestamp = np.min([self.accel[:, 0], self.gyro[:, 0], self.pose[:, 0]])
self.accel[:, 0] = self.accel[:, 0] - min_timestamp
self.gyro[:, 0] = self.gyro[:, 0] - min_timestamp
self.pose[:, 0] = self.pose[:, 0] - min_timestamp
#
# # Now we need to get rid of duplicate datapoints (rows)
# self.accel = np.unique(self.accel, axis=0) # only removes exact duplicates, we still have some duplicate timestamps. Discuss what to do with them later
# self.pose = np.unique(self.pose, axis=0)
# self.gyro = np.unique(self.gyro, axis=0)
def is_valid_sample(self):
"""
Check if data folder contains correct files with the correct properties.
:return:
"""
is_valid = True
# has misc?
self.__read_misc_txt__()
# Has accel?
self.__read_accel_txt__()
# Has pose?
self.__read_pose_txt__()
# Has vel?
self.__read_gyro_txt__()
# Has video?
if not os.path.isfile(os.path.join(self.sample_path, "video.oni")):
IOError("video.oni doesn't exist for this sample")
# Normalize the myo data
self.__process_Myo_data__()
# Make sure the video timestamps are there/are created
# self.__read_timestamps_txt__()
# Set the grasp_label (string)
self.grasp_label = self.sample_path.split("-")[-1].split("/")[0]
return True
def getDepthHistogram(self, src):
size = 256
if src.dtype == np.uint16:
size = 65536
depthHistogram = np.zeros(
(size), dtype=np.float
) # would be 65536 if we kept the 16-bit
depthHist = np.empty((src.shape[0], src.shape[1], 3), dtype=np.uint8)
# depthHist = rgb
number_of_points = 0
for y, x in itertools.product(range(src.shape[1]), range(src.shape[0])):
depth_cell = src[x, y]
if depth_cell != 0:
depthHistogram[depth_cell] += 1
number_of_points += 1
for nIndex in range(1, int(depthHistogram.shape[0])):
depthHistogram[nIndex] += depthHistogram[nIndex - 1]
for nIndex in range(1, int(depthHistogram.shape[0])):
depthHistogram[nIndex] = (
number_of_points - depthHistogram[nIndex]
) / number_of_points
for y, x in itertools.product(range(src.shape[1]), range(src.shape[0])):
depth_cell = src[x, y]
depth_value = depthHistogram[depth_cell] * 255 # converting to uint8
depthHist[x, y, 0] = 0
depthHist[x, y, 1] = depth_value
depthHist[x, y, 2] = depth_value
# cv2.bitwise_or()
return depthHist
def get_depth_overlay(self, reverse_channels=False):
rgb_vid = np.asarray(self.vid[..., 0:3], dtype=np.uint8)
if reverse_channels:
rgb_vid = np.rot90(rgb_vid, axes=(1, 2))
rgb_vid = np.flip(rgb_vid, axis=2)
vid = np.empty(shape=rgb_vid.shape, dtype=np.uint8)
for i in range(self.vid.shape[0]):
img = rgb_vid[i].copy()
depth_hist = self.getDepthHistogram(self.vid[i, ..., 3:])
if reverse_channels:
depth_hist = depth_hist[..., ::-1]
depth_hist = np.rot90(depth_hist)
depth_hist = np.fliplr(depth_hist)
# print(depth_img.dtype)
# print(rgb_img.dtype)
cv2.addWeighted(depth_hist, 0.5, img, 0.5, 0.5, img, dtype=cv2.CV_8UC3)
vid[i] = img.copy()
return vid
| import cv2
import numpy as np
import itertools
import os
# import h5py #put this back in after the h5py package has a new update on pip. See <https://github.com/h5py/h5py/issues/995>
import sys
from handcam.ltt.datasets.handcam.OrbbecCamParams import OrbbecCamParams
# from handcam.ltt.util.Utils import write_progress_bar
import glob
# from subprocess import Popen, PIPE
import read_oni_as_np
class OniSampleReader:
def __init__(self, sample_path):
self.sample_path = sample_path
self.vid_w = 320
self.vid_h = 240
self.is_valid_sample()
# grasp_labels = ['grasp_1', 'grasp_2', 'grasp_3', 'grasp_4', 'grasp_5', 'grasp_6', 'grasp_7']
grasp_label_to_id = {
"grasp_1": 0,
"grasp_2": 1,
"grasp_3": 2,
"grasp_4": 3,
"grasp_5": 4,
"grasp_6": 5,
"grasp_7": 6,
}
self.grasp_id = grasp_label_to_id[self.grasp_label]
self.cam_params = OrbbecCamParams(
int(self.misc_attrs["cameraid"]), (self.vid_w, self.vid_h)
)
ret_tuple = read_oni_as_np.read_oni_as_np(
os.path.join(self.sample_path, "video.oni"),
self.cam_params.mat,
self.grasp_id,
self.misc_attrs["armReadyTime_ms"],
self.misc_attrs["objectTouched_ms"],
)
self.vid = ret_tuple[0]
self.frame_labels = ret_tuple[1]
def __read_misc_txt__(self):
with open(os.path.join(self.sample_path, "misc.txt"), "r") as file:
misc_list = [line.strip() for line in file]
self.misc_attrs = {}
for line in misc_list:
key = line.split(":")[0]
value = line.split(":")[1]
self.misc_attrs[key] = str(value)
required_misc_properties = [
u"armReadyTime_ms",
u"objectTouched_ms",
u"lighting",
u"clutter",
u"greenScreen",
u"handedness",
u"cameraid",
u"subjectid",
]
if set(required_misc_properties) != set(self.misc_attrs.keys()):
raise ValueError("Sample is missing some required information in misc.txt")
self.misc_attrs[u"armReadyTime_ms"] = int(self.misc_attrs[u"armReadyTime_ms"])
self.misc_attrs[u"objectTouched_ms"] = int(self.misc_attrs[u"objectTouched_ms"])
self.misc_attrs[u"cameraid"] = int(self.misc_attrs[u"cameraid"])
if int(self.misc_attrs[u"cameraid"]) not in [1, 2]:
raise ValueError(
"Invalid camera selected. Please choose 1 (Luke's) or 2 (Matteo's)."
)
# Need to convert lightigng, clutter, greenScreen, handedness to boolean
modify_misc_properties = [
u"lighting",
u"clutter",
u"greenScreen",
u"handedness",
]
for prop in modify_misc_properties:
if self.misc_attrs[prop] in [
u"true",
u"True",
u"TRUE",
u"right",
u"Right",
u"RIGHT",
]:
# right handed will be 1
self.misc_attrs[prop] = 1
else:
# false or left handed will be zero
self.misc_attrs[prop] = 0
def __read_accel_txt__(self):
self.accel = np.genfromtxt(
os.path.join(self.sample_path, "accel.txt"), skip_header=1, delimiter=","
)
if self.accel.shape[1] != 4:
raise ValueError(
"accel.txt has the wrong shape. Should be (None, 4) but is "
+ str(self.accel.shape)
)
def __read_gyro_txt__(self):
self.gyro = np.genfromtxt(
os.path.join(self.sample_path, "gyro.txt"), skip_header=1, delimiter=","
)
if self.gyro.shape[1] != 4:
raise ValueError(
"gyro.txt has the wrong shape. Should be (None, 4) but is "
+ str(self.gyro.shape)
)
def __read_pose_txt__(self):
self.pose = np.genfromtxt(
os.path.join(self.sample_path, "pose.txt"), skip_header=1, delimiter=","
)
if self.pose.shape[1] != 5:
raise ValueError(
"pose.txt has the wrong shape. Should be (None, 5) but is "
+ str(self.pose.shape)
)
def __process_Myo_data__(self):
# Align the MyoData.
# It seems that every time ANY of the 3 Myo things get new data, they all write to their buffer.
# # We just need the time since recording started, so subtract the minimum timestamp
min_timestamp = np.min([self.accel[:, 0], self.gyro[:, 0], self.pose[:, 0]])
self.accel[:, 0] = self.accel[:, 0] - min_timestamp
self.gyro[:, 0] = self.gyro[:, 0] - min_timestamp
self.pose[:, 0] = self.pose[:, 0] - min_timestamp
#
# # Now we need to get rid of duplicate datapoints (rows)
# self.accel = np.unique(self.accel, axis=0) # only removes exact duplicates, we still have some duplicate timestamps. Discuss what to do with them later
# self.pose = np.unique(self.pose, axis=0)
# self.gyro = np.unique(self.gyro, axis=0)
def is_valid_sample(self):
"""
Check if data folder contains correct files with the correct properties.
:return:
"""
is_valid = True
# has misc?
self.__read_misc_txt__()
# Has accel?
self.__read_accel_txt__()
# Has pose?
self.__read_pose_txt__()
# Has vel?
self.__read_gyro_txt__()
# Has video?
if not os.path.isfile(os.path.join(self.sample_path, "video.oni")):
IOError("video.oni doesn't exist for this sample")
# Normalize the myo data
self.__process_Myo_data__()
# Make sure the video timestamps are there/are created
# self.__read_timestamps_txt__()
# Set the grasp_label (string)
self.grasp_label = self.sample_path.split("-")[-1].split("/")[0]
return True
def getDepthHistogram(self, src):
size = 256
if src.dtype == np.uint16:
size = 65536
depthHistogram = np.zeros(
(size), dtype=np.float
) # would be 65536 if we kept the 16-bit
depthHist = np.empty((src.shape[0], src.shape[1], 3), dtype=np.uint8)
# depthHist = rgb
number_of_points = 0
for y, x in itertools.product(range(src.shape[1]), range(src.shape[0])):
depth_cell = src[x, y]
if depth_cell != 0:
depthHistogram[depth_cell] += 1
number_of_points += 1
for nIndex in range(1, int(depthHistogram.shape[0])):
depthHistogram[nIndex] += depthHistogram[nIndex - 1]
for nIndex in range(1, int(depthHistogram.shape[0])):
depthHistogram[nIndex] = (
number_of_points - depthHistogram[nIndex]
) / number_of_points
for y, x in itertools.product(range(src.shape[1]), range(src.shape[0])):
depth_cell = src[x, y]
depth_value = depthHistogram[depth_cell] * 255 # converting to uint8
depthHist[x, y, 0] = 0
depthHist[x, y, 1] = depth_value
depthHist[x, y, 2] = depth_value
# cv2.bitwise_or()
return depthHist
def get_depth_overlay(self, reverse_channels=False):
rgb_vid = np.asarray(self.vid[..., 0:3], dtype=np.uint8)
if reverse_channels:
rgb_vid = np.rot90(rgb_vid, axes=(1, 2))
rgb_vid = np.flip(rgb_vid, axis=2)
vid = np.empty(shape=rgb_vid.shape, dtype=np.uint8)
for i in range(self.vid.shape[0]):
img = rgb_vid[i].copy()
depth_hist = self.getDepthHistogram(self.vid[i, ..., 3:])
if reverse_channels:
depth_hist = depth_hist[..., ::-1]
depth_hist = np.rot90(depth_hist)
depth_hist = np.fliplr(depth_hist)
# print(depth_img.dtype)
# print(rgb_img.dtype)
cv2.addWeighted(depth_hist, 0.5, img, 0.5, 0.5, img, dtype=cv2.CV_8UC3)
vid[i] = img.copy()
return vid
| en | 0.776331 | # import h5py #put this back in after the h5py package has a new update on pip. See <https://github.com/h5py/h5py/issues/995> # from handcam.ltt.util.Utils import write_progress_bar # from subprocess import Popen, PIPE # grasp_labels = ['grasp_1', 'grasp_2', 'grasp_3', 'grasp_4', 'grasp_5', 'grasp_6', 'grasp_7'] # Need to convert lightigng, clutter, greenScreen, handedness to boolean # right handed will be 1 # false or left handed will be zero # Align the MyoData. # It seems that every time ANY of the 3 Myo things get new data, they all write to their buffer. # # We just need the time since recording started, so subtract the minimum timestamp # # # Now we need to get rid of duplicate datapoints (rows) # self.accel = np.unique(self.accel, axis=0) # only removes exact duplicates, we still have some duplicate timestamps. Discuss what to do with them later # self.pose = np.unique(self.pose, axis=0) # self.gyro = np.unique(self.gyro, axis=0) Check if data folder contains correct files with the correct properties. :return: # has misc? # Has accel? # Has pose? # Has vel? # Has video? # Normalize the myo data # Make sure the video timestamps are there/are created # self.__read_timestamps_txt__() # Set the grasp_label (string) # would be 65536 if we kept the 16-bit # depthHist = rgb # converting to uint8 # cv2.bitwise_or() # print(depth_img.dtype) # print(rgb_img.dtype) | 2.018207 | 2 |
synthetic_data/metrics/plots.py | rustamzh/synthetic_data | 8 | 6622183 | import os
import sys
import joblib
import numpy as np
import pickle as pkl
import pandas as pd
import seaborn as sns
import scipy.stats as stats
from sklearn import metrics
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from sklearn.utils import shuffle
from sklearn.decomposition import PCA as PCA
from sklearn.manifold import TSNE
from sklearn.neighbors import NearestNeighbors
class LossPlot():
    """
    Uses `matplotlib` and `seaborn` to plot the test loss,
    generator loss, discriminator loss across several epochs.

    Parameters
    ----------
    log_file : string, required
        The pickle file with all the log values generated by
        HealthGAN.
    """

    def __init__(self, log_file):
        # Make sure the output directories for figures exist.
        if not os.path.exists('gen_data'):
            os.makedirs('gen_data')
        if not os.path.exists('gen_data/plots'):
            os.makedirs('gen_data/plots')
        try:
            # `with` closes the handle even on failure; the original
            # `pkl.load(open(...))` leaked the file object.
            with open(log_file, 'rb') as f:
                self.log = pkl.load(f)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            print("Please provide a correct pickle log file")

    def plot(self, savefig=False):
        """
        Plot the loss graph.

        Parameters
        ----------
        savefig: boolean, optional
            If set to True, the plots generated will be saved to disk.

        Outputs
        -------
        Produces a 6x6 figure for each loss/timing curve.
        """
        losses = ['test_loss', 'gen_loss', 'disc_loss', 'time']
        titles = ['Test Loss', 'Generator Loss', 'Discriminator Loss', 'Time per Epoch']
        pylab.rcParams['figure.figsize'] = 6, 6
        try:
            for i, loss in enumerate(losses):
                if isinstance(self.log[loss][0], list):
                    # Per-epoch lists: keep only the final value of each epoch.
                    new_df = pd.DataFrame({titles[i]: [v[-1] for v in self.log[loss]]})
                else:
                    new_df = pd.DataFrame({titles[i]: self.log[loss]})
                sns.lineplot(data=new_df, dashes=False, palette="hls")
                plt.title(titles[i])
                plt.xlabel('Epochs (in thousands)')
                if (savefig):
                    plt.savefig('gen_data/plots/' + loss + '.png')
                plt.show()
                plt.close()
            if (savefig):
                print("Plots saved! Refer to the files 'time.png', test_loss.png', 'disc_loss.png' and 'gen_loss.png' inside 'gen_data/plots' folder.")
        except Exception:
            print("Could not produce plots")
class MemInfPlot():
    """
    Uses `matplotlib` and `seaborn` to plot the membership inference plot:
    the ROC/AUC of distinguishing training rows from held-out test rows by
    their distance to the nearest synthetic record.

    Parameters
    ----------
    train_file : string, required
        The training file to be used for generating the membership inference plot.
    test_file : string, required
        The testing file to be used for generating the membership inference plot.
    synth_file : string, required
        The synthetic data file to be used for generating the membership inference plot.
    name : string, required
        A name for the plot.
    """

    def __init__(self, train_file, test_file, synth_file, name):
        if not os.path.exists('gen_data'):
            os.makedirs('gen_data')
        if not os.path.exists('gen_data/plots'):
            os.makedirs('gen_data/plots')
        data, labels = self.__create_shuffled_data(train_file, test_file)
        self.fpr, self.tpr, self.auc = self.__compute_auc(synth_file, data, labels)
        self.name = name
        print("AUC = {}".format(self.auc))

    def __create_shuffled_data(self, train_file, test_file):
        """Concatenate train (label -1) and test (label +1) rows, shuffled."""
        # Read in train and test
        train_set = pd.read_csv(train_file)
        test_set = pd.read_csv(test_file)

        # Create labels: -1 marks training rows, +1 marks held-out test rows
        label_train = np.full(train_set.shape[0], -1, dtype=int)
        label_test = np.full(test_set.shape[0], 1, dtype=int)

        # Combine
        labels = np.concatenate([label_train, label_test], axis=0)
        data = pd.concat([train_set, test_set], axis=0)
        data['labels'] = labels.tolist()

        # Randomize row order, keeping each row paired with its label
        data = shuffle(data)
        data, labels = (data.drop('labels', axis=1), data['labels'])

        return data, labels

    def __compute_auc(self, synth_file, data, labels):
        """Score each real row by its nearest-synthetic distance; return ROC."""
        synth_data = pd.read_csv(synth_file)
        syn_dists = self.__nearest_neighbors(data, synth_data)
        fpr, tpr, _ = metrics.roc_curve(labels, syn_dists)
        roc_auc = metrics.auc(fpr, tpr)
        return fpr, tpr, roc_auc

    def __nearest_neighbors(self, t, s):
        """
        Find the distance from each row of t to its nearest neighbor in s.
        """
        # Fit to S.  n_neighbors must be passed by keyword: positional
        # estimator arguments are rejected by scikit-learn >= 1.0.
        nn_s = NearestNeighbors(n_neighbors=1, n_jobs=-1).fit(s)

        # Find distances from t to s
        d = nn_s.kneighbors(t)[0]
        return d

    def plot(self, savefig=False):
        """
        The function plots the membership inference ROC curve.

        Parameters
        ----------
        savefig: boolean, optional
            If set to True, the plots generated will be saved to disk.

        Outputs
        -------
        ROC Plot:
            Plots the AUC curve and saves the file as
            `membership_inference_auc_{name}.png` inside gen_data/plots.
        """
        pylab.rcParams['figure.figsize'] = 6, 6
        plt.title('Receiver Operating Characteristic', fontsize = 24)
        plt.plot([0, 1], [0, 1], 'r--')
        plt.plot(self.fpr, self.tpr, label=f'{self.name} AUC = {self.auc:0.2f}')
        plt.xlim([-0.05, 1.05])
        plt.ylim([-0.05, 1.05])
        plt.ylabel('True Positive Rate', fontsize=18)
        plt.xlabel('False Positive Rate', fontsize=18)
        if (savefig):
            # Save under gen_data/plots to match the printed message and the
            # other plot classes (previously written directly to gen_data).
            plt.savefig(f'gen_data/plots/membership_inference_auc_{self.name}.png')
        plt.show()
        if (savefig):
            print(f"The plot has been saved as membership_inference_auc_{self.name}.png inside gen_data/plots.")
class ComponentPlots():
    """
    Uses `matplotlib` and `seaborn` to plot PCA and TSNE plot
    for real and synthetic data files.
    """

    def __init__(self):
        # Make sure the output directories for figures exist.
        if not os.path.exists('gen_data'):
            os.makedirs('gen_data')
        if not os.path.exists('gen_data/plots'):
            os.makedirs('gen_data/plots')

    def _set_style(self):
        """Apply the shared figure style/palette used by every plot."""
        plt.style.use('seaborn-muted')
        pylab.rcParams['figure.figsize'] = 8, 8
        np.random.seed(1234)
        flatui = ["#34495e", "#e74c3c"]
        sns.set_palette(flatui)

    def pca_plot(self,
                 real_data,
                 synthetic_data=None,
                 title="Two Component PCA",
                 savefig=False):
        """
        The function plots PCA between two components for
        real and synthetic data.

        Parameters
        ----------
        real_data : str, required
            The file which contains the real data.
        synthetic_data : str, optional
            The file which contains the synthetic data.
        title: str, optional
            The title of the plot.
        savefig: boolean, optional
            If set to True, the plots generated will be saved to disk.

        Outputs
        -------
        PCA Plot:
            Plots the PCA components for the two datasets and
            save file with the given name followed by '_real_syn'.
        """
        real_data = pd.read_csv(real_data)
        if synthetic_data is not None:
            synthetic_data = pd.read_csv(synthetic_data)
        self._set_style()

        pca_orig = PCA(2)
        pca_orig_data = pca_orig.fit_transform(real_data)
        plt.scatter(*pca_orig_data.T, alpha=.3)
        plt.title(title, fontsize=24)
        plt.xlabel('First Component', fontsize=16)
        plt.ylabel('Second Component', fontsize=16)

        if synthetic_data is not None:
            # Project synthetic rows into the PCA space fitted on the real
            # data so both clouds share the same axes.
            pca_synth_data = pca_orig.transform(synthetic_data)
            plt.scatter(*pca_synth_data.T, alpha=.4)
            plt.legend(labels=['Original Data', 'Synthetic Data'])
            suffix = '_real_syn'
        else:
            plt.legend(labels=['Original Data'])
            suffix = '_real'

        if (savefig):
            plt.savefig(f'gen_data/plots/{title}{suffix}.png')
        plt.show()
        if (savefig):
            print(f"PCA Plot generated as {title}{suffix}.png inside gen_data/plots.")

    def _combined_grid(self, orig_points, synth_points_list, names, out_name, savefig):
        """Draw the shared 2x3 scatter grid used by ``combined_pca`` and
        ``combined_tsne``: every axis shows the real projection, overlaid
        with one synthetic projection per provided dataset (max 6)."""
        fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(
            2, 3, sharey=True, sharex=True)
        axes = [ax1, ax2, ax3, ax4, ax5, ax6]

        # plot orig data on every axis
        for a in axes:
            a.scatter(*orig_points.T, alpha=.3)

        for i, a in enumerate(axes):
            if i < len(synth_points_list):
                a.scatter(*(synth_points_list[i]).T, alpha=.4)
                a.set_title(names[i], fontsize=16)

        # Invisible full-figure axis used only to carry shared axis labels
        fig.add_subplot(111, frameon=False)
        plt.tick_params(labelcolor='none',
                        top='off',
                        bottom='off',
                        left='off',
                        right='off')
        plt.grid(False)
        plt.xlabel("First Component", fontsize=18)
        plt.ylabel("Second Component", fontsize=18)
        if (savefig):
            plt.savefig(f'gen_data/plots/{out_name}.png')
        plt.show()
        if (savefig):
            print(f"PCA Plot generated as {out_name}.png inside gen_data/plots.")

    def combined_pca(self,
                     real_data,
                     synthetic_datas,
                     names,
                     savefig=False):
        """
        The function plots PCA between two components between
        real data and several synthetic datasets.

        Parameters
        ----------
        real_data : str, required
            The file which contains the real data.
        synthetic_datas : list, required
            The list of files that contain synthetic data (max 6).
        names: list, required
            The titles for each plot.
        savefig: boolean, optional
            If set to True, the plots generated will be saved to disk.

        Outputs
        -------
        PCA Plots:
            Plots the PCA components across a set of plots for each
            of the synthetic data files.
        """
        self._set_style()
        real_data = pd.read_csv(real_data)

        pca_orig = PCA(2)
        pca_orig_data = pca_orig.fit_transform(real_data)
        # Project every synthetic set into the PCA space of the real data.
        pca_synth_data = [pca_orig.transform(pd.read_csv(s))
                          for s in synthetic_datas]

        self._combined_grid(pca_orig_data, pca_synth_data, names,
                            'combined_pca', savefig)

    def combined_tsne(self,
                      real_data,
                      synthetic_datas,
                      names,
                      savefig=False):
        """
        The function plots t-distributed Stochastic Neighbor Embedding
        between two components for real and several synthetic datasets.

        Parameters
        ----------
        real_data : str, required
            The file which contains the real data.
        synthetic_datas : list, required
            The list of files that contain synthetic data (max 6).
        names: list, required
            The titles for each plot.
        savefig: boolean, optional
            If set to True, the plots generated will be saved to disk.

        Outputs
        -------
        t-SNE Plots:
            Plots the t-SNE components across a set of plots for each
            of the synthetic data files.
        """
        self._set_style()
        real_data = pd.read_csv(real_data)

        tsne_orig = TSNE(n_components=2)
        tsne_orig_data = tsne_orig.fit_transform(real_data)
        # NOTE: t-SNE has no transform(); each synthetic set is embedded
        # independently (re-fitting the same estimator, as before), so the
        # panels share axes but not a single embedding space.
        tsne_synth_data = [tsne_orig.fit_transform(pd.read_csv(s))
                           for s in synthetic_datas]

        self._combined_grid(tsne_orig_data, tsne_synth_data, names,
                            'combined_tsne', savefig)
import sys
import joblib
import numpy as np
import pickle as pkl
import pandas as pd
import seaborn as sns
import scipy.stats as stats
from sklearn import metrics
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from sklearn.utils import shuffle
from sklearn.decomposition import PCA as PCA
from sklearn.manifold import TSNE
from sklearn.neighbors import NearestNeighbors
class LossPlot():
"""
Uses `matplotlib` and `seaborn` to plot the test loss,
generator loss, discriminator loss across several epochs.
Parameters
----------
log_file : string, required
The pickle file with all the log values generated by
HealthGAN.
"""
def __init__(self, log_file):
if not os.path.exists('gen_data'):
os.makedirs('gen_data')
if not os.path.exists('gen_data/plots'):
os.makedirs('gen_data/plots')
try:
self.log = pkl.load(open(log_file, 'rb'))
except:
print("Please provide a correct pickle log file")
def plot(self, savefig=False):
"""
Plot the loss graph.
Parameters
----------
savefig: boolean, optional
If set to True, the plots generated will be saved to disk.
Outputs
-------
Produces a 8x8 figure for losses
"""
losses = ['test_loss', 'gen_loss', 'disc_loss', 'time']
titles = ['Test Loss', 'Generator Loss', 'Discriminator Loss', 'Time per Epoch']
pylab.rcParams['figure.figsize'] = 6, 6
try:
for i, loss in enumerate(losses):
j = i%2
if isinstance(self.log[loss][0], list):
new_df = pd.DataFrame({titles[i]: [v[-1] for v in self.log[loss]]})
else:
new_df = pd.DataFrame({titles[i]: self.log[loss]})
sns.lineplot(data=new_df, dashes=False, palette="hls")
plt.title(titles[i])
plt.xlabel('Epochs (in thousands)')
if (savefig):
plt.savefig('gen_data/plots/' + loss + '.png')
plt.show()
plt.close()
if (savefig):
print("Plots saved! Refer to the files 'time.png', test_loss.png', 'disc_loss.png' and 'gen_loss.png' inside 'gen_data/plots' folder.")
except:
print("Could not produce plots")
class MemInfPlot():
"""
Uses `matplotlib` and `seaborn` to plot the membership inference plot
Parameters
----------
train_file : string, required
The training file to be used for generating the membership inference plot.
test_file : string, required
The testing file to be used for generating the membership inference plot.
synth_file : string, required
The synthetic data file to be used for generating the membership inference plot.
name : string, required
A name for the plot.
"""
def __init__(self, train_file, test_file, synth_file, name):
if not os.path.exists('gen_data'):
os.makedirs('gen_data')
if not os.path.exists('gen_data/plots'):
os.makedirs('gen_data/plots')
data, labels = self.__create_shuffled_data(train_file, test_file)
self.fpr, self.tpr, self.auc = self.__compute_auc(synth_file, data, labels)
self.name = name
print("AUC = {}".format(self.auc))
def __create_shuffled_data(self, train_file, test_file):
# Read in train and test
train_set = pd.read_csv(train_file)
test_set = pd.read_csv(test_file)
# Create labels
label_train = np.empty(train_set.shape[0], dtype=int)
label_train.fill(-1)
label_test = np.empty(test_set.shape[0], dtype=int)
label_test.fill(1)
# Combine
labels = np.concatenate([label_train, label_test], axis=0)
data = pd.concat([train_set, test_set], axis=0)
data['labels'] = labels.tolist()
# Randomize
data = shuffle(data)
data, labels = (data.drop('labels', axis=1), data['labels'])
return data, labels
def __compute_auc(self, synth_file, data, labels):
synth_data = pd.read_csv(synth_file)
syn_dists = self.__nearest_neighbors(data, synth_data)
fpr, tpr, _ = metrics.roc_curve(labels, syn_dists)
roc_auc = metrics.auc(fpr, tpr)
return fpr, tpr, roc_auc
def __nearest_neighbors(self, t, s):
"""
Find nearest neighbors d_ts and d_ss
"""
# Fit to S
nn_s = NearestNeighbors(1, n_jobs=-1).fit(s)
# Find distances from t to s
d = nn_s.kneighbors(t)[0]
return d
def plot(self, savefig=False):
"""
The function plots the membership inference plot.
Parameters
----------
savefig: boolean, optional
If set to True, the plots generated will be saved to disk.
Outputs
-------
PCA Plot:
Plots the AUC curve and saves the file as
`membership_inference_auc_{name}.png`
"""
pylab.rcParams['figure.figsize'] = 6, 6
plt.title('Receiver Operating Characteristic', fontsize = 24)
plt.plot([0, 1], [0, 1], 'r--')
plt.plot(self.fpr, self.tpr, label=f'{self.name} AUC = {self.auc:0.2f}')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.ylabel('True Positive Rate', fontsize=18)
plt.xlabel('False Positive Rate', fontsize=18)
if (savefig):
plt.savefig(f'gen_data/membership_inference_auc_{self.name}.png')
plt.show()
if (savefig):
print(f"The plot has been saved as membership_inference_auc_{self.name}.png inside gen_data/plots.")
class ComponentPlots():
"""
Uses `matplotlib` and `seaborn` to plot PCA and TSNE plot
for real and synthetic data files.
"""
def __init__(self):
if not os.path.exists('gen_data'):
os.makedirs('gen_data')
if not os.path.exists('gen_data/plots'):
os.makedirs('gen_data/plots')
def pca_plot(self,
real_data,
synthetic_data=None,
title="Two Component PCA",
savefig=False):
"""
The function plots PCA between two components for
real and synthetic data.
Parameters
----------
real_data : str, required
The file which contains the real data.
synthetic_data : str, optional
The file which contains the synthetic data.
title: str, optional
The title of the plot.
savefig: boolean, optional
If set to True, the plots generated will be saved to disk.
Outputs
-------
PCA Plot:
Plots the PCA components for the two datasets and
save file with the given name followed by '_real_syn'.
"""
real_data = pd.read_csv(real_data)
if synthetic_data is not None:
synthetic_data = pd.read_csv(synthetic_data)
plt.style.use('seaborn-muted')
pylab.rcParams['figure.figsize'] = 8, 8
np.random.seed(1234)
flatui = ["#34495e", "#e74c3c"]
sns.set_palette(flatui)
pca_orig = PCA(2)
pca_orig_data = pca_orig.fit_transform(real_data)
plt.scatter(*pca_orig_data.T, alpha=.3)
plt.title(title, fontsize=24)
plt.xlabel('First Component', fontsize=16)
plt.ylabel('Second Component', fontsize=16)
if synthetic_data is not None:
pca_synth_data = pca_orig.transform(synthetic_data)
plt.scatter(*pca_synth_data.T, alpha=.4)
plt.legend(labels=['Original Data', 'Synthetic Data'])
if (savefig):
plt.savefig(f'gen_data/plots/{title}_real_syn.png')
plt.show()
if (savefig):
print(f"PCA Plot generated as {title}_real_syn.png inside gen_data/plots.")
else:
plt.legend(labels=['Original Data'])
if (savefig):
plt.savefig(f'gen_data/plots/{title}_real.png')
plt.show()
if (savefig):
print(f"PCA Plot generated as {title}_real.png inside gen_data/plots.")
def combined_pca(self,
real_data,
synthetic_datas,
names,
savefig=False):
"""
The function plots PCA between two components between
real data and several synthetic datasets.
Parameters
----------
real_data : str, required
The file which contains the real data.
synthetic_datas : list, required
The list of files that contain synthetic data (max 6).
names: list, required
The titles for each plot.
savefig: boolean, optional
If set to True, the plots generated will be saved to disk.
Outputs
-------
PCA Plots:
Plots the PCA components across a set of plots for each
of the synthetic data files.
"""
plt.style.use('seaborn-muted')
pylab.rcParams['figure.figsize'] = 8, 8
np.random.seed(1234)
flatui = ["#34495e", "#e74c3c"]
sns.set_palette(flatui)
real_data = pd.read_csv(real_data)
fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(
2, 3, sharey=True, sharex=True)
pca_orig = PCA(2)
pca_orig_data = pca_orig.fit_transform(real_data)
axes = [ax1, ax2, ax3, ax4, ax5, ax6]
# plot orig data
for a in axes:
a.scatter(*pca_orig_data.T, alpha=.3)
pca_synth_data = []
for s in synthetic_datas:
s = pd.read_csv(s)
pca_synth_data.append(pca_orig.transform(s))
for i, a in enumerate(axes):
if i < len(pca_synth_data):
a.scatter(*(pca_synth_data[i]).T, alpha=.4)
a.set_title(names[i], fontsize=16)
fig.add_subplot(111, frameon=False)
# Hide tick and tick label of the big axes
plt.tick_params(labelcolor='none',
top='off',
bottom='off',
left='off',
right='off')
plt.grid(False)
plt.xlabel("First Component", fontsize=18)
plt.ylabel("Second Component", fontsize=18)
if (savefig):
plt.savefig(f'gen_data/plots/combined_pca.png')
plt.show()
if (savefig):
print(f"PCA Plot generated as combined_pca.png inside gen_data/plots.")
    def combined_tsne(self,
                      real_data,
                      synthetic_datas,
                      names,
                      savefig=False):
        """
        Plot 2-component t-SNE embeddings of the real data against each of
        several synthetic datasets, one subplot per synthetic file.

        NOTE(review): unlike the PCA variant, t-SNE has no ``transform`` —
        each synthetic dataset is re-fit independently below, so the
        synthetic embeddings do NOT share a coordinate space with the real
        embedding; confirm this overlay is the intended comparison.

        Parameters
        ----------
        real_data : str, required
            The file which contains the real data.
        synthetic_datas : list, required
            The list of files that contain synthetic data (max 6).
        names: list, required
            The titles for each plot.
        savefig: boolean, optional
            If set to True, the plots generated will be saved to disk.

        Outputs
        -------
        t-SNE Plots:
            Plots the t-SNE components across a set of plots for each
            of the synthetic data files.
        """
        plt.style.use('seaborn-muted')
        pylab.rcParams['figure.figsize'] = 8, 8
        # fixed seed so repeated runs produce identical scatter layouts
        np.random.seed(1234)
        flatui = ["#34495e", "#e74c3c"]
        sns.set_palette(flatui)
        real_data = pd.read_csv(real_data)
        # 2x3 grid -> at most 6 synthetic datasets can be displayed
        fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(
            2, 3, sharey=True, sharex=True)
        tsne_orig = TSNE(n_components=2)
        tsne_orig_data = tsne_orig.fit_transform(real_data)
        axes = [ax1, ax2, ax3, ax4, ax5, ax6]
        # plot orig data
        for a in axes:
            a.scatter(*tsne_orig_data.T, alpha=.3)
        tsne_synth_data = []
        for s in synthetic_datas:
            s = pd.read_csv(s)
            # re-fits t-SNE per synthetic file (see NOTE in docstring)
            tsne_synth_data.append(tsne_orig.fit_transform(s))
        for i, a in enumerate(axes):
            # axes beyond len(synthetic_datas) keep only the real-data scatter
            if i < len(tsne_synth_data):
                a.scatter(*(tsne_synth_data[i]).T, alpha=.4)
                a.set_title(names[i], fontsize=16)
        # invisible full-figure axes used only to carry the shared labels
        fig.add_subplot(111, frameon=False)
        # Hide tick and tick label of the big axes
        plt.tick_params(labelcolor='none',
                        top='off',
                        bottom='off',
                        left='off',
                        right='off')
        plt.grid(False)
        plt.xlabel("First Component", fontsize=18)
        plt.ylabel("Second Component", fontsize=18)
        if (savefig):
            plt.savefig(f'gen_data/plots/combined_tsne.png')
        plt.show()
        if (savefig):
            print(f"PCA Plot generated as combined_tsne.png inside gen_data/plots.")
The function plots PCA between two components between real data and several synthetic datasets. Parameters ---------- real_data : str, required The file which contains the real data. synthetic_datas : list, required The list of files that contain synthetic data (max 6). names: list, required The titles for each plot. savefig: boolean, optional If set to True, the plots generated will be saved to disk. Outputs ------- PCA Plots: Plots the PCA components across a set of plots for each of the synthetic data files. # plot orig data # Hide tick and tick label of the big axes The function plots t-distributed Stochastic Neighbor Embedding between two components for real and several synthetic datasets. Parameters ---------- real_data : str, required The file which contains the real data. synthetic_datas : list, required The list of files that contain synthetic data (max 6). names: list, required The titles for each plot. savefig: boolean, optional If set to True, the plots generated will be saved to disk. Outputs ------- PCA Plots: Plots the PCA components across a set of plots for each of the synthetic data files. # plot orig data # Hide tick and tick label of the big axes | 2.415501 | 2 |
fcmpy/expert_fcm/defuzz.py | maxiuw/FcmBci | 5 | 6622184 | from abc import ABC, abstractclassmethod
import skfuzzy as fuzz
from fcmpy.expert_fcm.input_validator import type_check
class Defuzzification(ABC):
    """
    Abstract base class for defuzzification methods.

    Subclasses implement ``defuzz`` to map an aggregated fuzzy membership
    function over a universe of discourse onto a single crisp value.
    """
    # NOTE(review): `abstractclassmethod` has been deprecated since
    # Python 3.3; the concrete subclasses actually expose `defuzz` as a
    # @staticmethod, so `@staticmethod` + `@abstractmethod` would be the
    # modern equivalent.
    @abstractclassmethod
    def defuzz() -> float:
        # Concrete subclasses must override this.
        raise NotImplementedError('defuzzification method is not defined!')
class Centroid(Defuzzification):
    """
    Centroid (center-of-gravity) defuzzification.
    """
    @staticmethod
    @type_check
    def defuzz(**kwargs) -> float:
        """
        Defuzzify an aggregated membership function with the centroid rule.

        Other Parameters
        ----------------
        **method: str
            defuzzification method name forwarded to skfuzzy
        **x: numpy.ndarray
            universe of discourse
        **mfx: numpy.ndarray
            "aggregated" membership function values over ``x``

        Return
        -------
        y: float
            the crisp, defuzzified value
        """
        how = kwargs['method']
        universe = kwargs['x']
        membership = kwargs['mfx']
        return fuzz.defuzz(universe, membership, how)
class Bisector(Defuzzification):
    """
    Bisector defuzzification: the value that splits the area under the
    membership function into two equal halves.
    """
    @staticmethod
    @type_check
    def defuzz(**kwargs) -> float:
        """
        Defuzzify an aggregated membership function with the bisector rule.

        Other Parameters
        ----------------
        **method: str
            defuzzification method name forwarded to skfuzzy
        **x: numpy.ndarray
            universe of discourse
        **mfx: numpy.ndarray
            "aggregated" membership function values over ``x``

        Return
        -------
        y: float
            the crisp, defuzzified value
        """
        how = kwargs['method']
        universe = kwargs['x']
        membership = kwargs['mfx']
        return fuzz.defuzz(universe, membership, how)
class MeanOfMax(Defuzzification):
    """
    Mean-of-maxima defuzzification.
    """
    @staticmethod
    @type_check
    def defuzz(**kwargs) -> float:
        """
        Defuzzify an aggregated membership function with the mean-of-maxima rule.

        Other Parameters
        ----------------
        **method: str
            defuzzification method name forwarded to skfuzzy
        **x: numpy.ndarray
            universe of discourse
        **mfx: numpy.ndarray
            "aggregated" membership function values over ``x``

        Return
        -------
        y: float
            the crisp, defuzzified value
        """
        how = kwargs['method']
        universe = kwargs['x']
        membership = kwargs['mfx']
        return fuzz.defuzz(universe, membership, how)
class MinOfMax(Defuzzification):
    """
    Minimum-of-maxima defuzzification.
    """
    @staticmethod
    @type_check
    def defuzz(**kwargs) -> float:
        """
        Defuzzify an aggregated membership function with the min-of-maxima rule.

        Other Parameters
        ----------------
        **method: str
            defuzzification method name forwarded to skfuzzy
        **x: numpy.ndarray
            universe of discourse
        **mfx: numpy.ndarray
            "aggregated" membership function values over ``x``

        Return
        -------
        y: float
            the crisp, defuzzified value
        """
        how = kwargs['method']
        universe = kwargs['x']
        membership = kwargs['mfx']
        return fuzz.defuzz(universe, membership, how)
class MaxOfMax(Defuzzification):
    """
    Maximum-of-maxima defuzzification.
    """
    @staticmethod
    @type_check
    def defuzz(**kwargs) -> float:
        """
        Defuzzify an aggregated membership function with the max-of-maxima rule.

        Other Parameters
        ----------------
        **method: str
            defuzzification method name forwarded to skfuzzy
        **x: numpy.ndarray
            universe of discourse
        **mfx: numpy.ndarray
            "aggregated" membership function values over ``x``

        Return
        -------
        y: float
            the crisp, defuzzified value
        """
        how = kwargs['method']
        universe = kwargs['x']
        membership = kwargs['mfx']
        return fuzz.defuzz(universe, membership, how)
import skfuzzy as fuzz
from fcmpy.expert_fcm.input_validator import type_check
class Defuzzification(ABC):
    """
    Abstract base class for defuzzification methods.

    Subclasses implement ``defuzz`` to map an aggregated fuzzy membership
    function over a universe of discourse onto a single crisp value.
    """
    # NOTE(review): `abstractclassmethod` has been deprecated since
    # Python 3.3; the concrete subclasses actually expose `defuzz` as a
    # @staticmethod, so `@staticmethod` + `@abstractmethod` would be the
    # modern equivalent.
    @abstractclassmethod
    def defuzz() -> float:
        # Concrete subclasses must override this.
        raise NotImplementedError('defuzzification method is not defined!')
class Centroid(Defuzzification):
    """
    Centroid (center-of-gravity) defuzzification.
    """
    @staticmethod
    @type_check
    def defuzz(**kwargs) -> float:
        """
        Defuzzify an aggregated membership function with the centroid rule.

        Other Parameters
        ----------------
        **method: str
            defuzzification method name forwarded to skfuzzy
        **x: numpy.ndarray
            universe of discourse
        **mfx: numpy.ndarray
            "aggregated" membership function values over ``x``

        Return
        -------
        y: float
            the crisp, defuzzified value
        """
        how = kwargs['method']
        universe = kwargs['x']
        membership = kwargs['mfx']
        return fuzz.defuzz(universe, membership, how)
class Bisector(Defuzzification):
    """
    Bisector defuzzification: the value that splits the area under the
    membership function into two equal halves.
    """
    @staticmethod
    @type_check
    def defuzz(**kwargs) -> float:
        """
        Defuzzify an aggregated membership function with the bisector rule.

        Other Parameters
        ----------------
        **method: str
            defuzzification method name forwarded to skfuzzy
        **x: numpy.ndarray
            universe of discourse
        **mfx: numpy.ndarray
            "aggregated" membership function values over ``x``

        Return
        -------
        y: float
            the crisp, defuzzified value
        """
        how = kwargs['method']
        universe = kwargs['x']
        membership = kwargs['mfx']
        return fuzz.defuzz(universe, membership, how)
class MeanOfMax(Defuzzification):
    """
    Mean-of-maxima defuzzification.
    """
    @staticmethod
    @type_check
    def defuzz(**kwargs) -> float:
        """
        Defuzzify an aggregated membership function with the mean-of-maxima rule.

        Other Parameters
        ----------------
        **method: str
            defuzzification method name forwarded to skfuzzy
        **x: numpy.ndarray
            universe of discourse
        **mfx: numpy.ndarray
            "aggregated" membership function values over ``x``

        Return
        -------
        y: float
            the crisp, defuzzified value
        """
        how = kwargs['method']
        universe = kwargs['x']
        membership = kwargs['mfx']
        return fuzz.defuzz(universe, membership, how)
class MinOfMax(Defuzzification):
    """
    Minimum-of-maxima defuzzification.
    """
    @staticmethod
    @type_check
    def defuzz(**kwargs) -> float:
        """
        Defuzzify an aggregated membership function with the min-of-maxima rule.

        Other Parameters
        ----------------
        **method: str
            defuzzification method name forwarded to skfuzzy
        **x: numpy.ndarray
            universe of discourse
        **mfx: numpy.ndarray
            "aggregated" membership function values over ``x``

        Return
        -------
        y: float
            the crisp, defuzzified value
        """
        how = kwargs['method']
        universe = kwargs['x']
        membership = kwargs['mfx']
        return fuzz.defuzz(universe, membership, how)
class MaxOfMax(Defuzzification):
    """
    Maximum-of-maxima defuzzification.
    """
    @staticmethod
    @type_check
    def defuzz(**kwargs) -> float:
        """
        Defuzzify an aggregated membership function with the max-of-maxima rule.

        Other Parameters
        ----------------
        **method: str
            defuzzification method name forwarded to skfuzzy
        **x: numpy.ndarray
            universe of discourse
        **mfx: numpy.ndarray
            "aggregated" membership function values over ``x``

        Return
        -------
        y: float
            the crisp, defuzzified value
        """
        how = kwargs['method']
        universe = kwargs['x']
        membership = kwargs['mfx']
        return fuzz.defuzz(universe, membership, how)
models/ivae/mnist.py | lim0606/pytorch-ardae-vae | 11 | 6622185 | import math
import numpy as np
from scipy import stats
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd
from torch.distributions import MultivariateNormal
from models.layers import Identity, MLP, WNMLP, ContextConcatMLP, ContextScaleMLP, ContextWNScaleMLP, ContextSPScaleMLP, ContextSPWNScaleMLP, ContextBilinearMLP, ContextWNBilinearMLP, ContextSWNBilinearMLP, ContextResMLP
from models.reparam import BernoulliDistributionLinear
from utils import loss_recon_bernoulli_with_logit, normal_energy_func
from utils import logprob_gaussian, get_covmat
from utils import get_nonlinear_func
from utils import expand_tensor
from utils import cond_jac_clamping_loss
def weight_init(m):
    """
    Xavier-uniform initialisation for Conv2d/Linear weights; zero biases.

    Intended for use with ``module.apply(weight_init)``; all other module
    types are left untouched.
    """
    if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
        torch.nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            torch.nn.init.zeros_(m.bias)
def sample_gaussian(mu, logvar):
    """
    Reparameterised sample from N(mu, diag(exp(logvar))).

    Draws eps ~ N(0, I) and returns mu + exp(0.5 * logvar) * eps, keeping
    the sample differentiable w.r.t. mu and logvar.
    """
    sigma = (0.5 * logvar).exp()
    noise = torch.randn_like(sigma)
    return mu + sigma * noise
def convert_2d_3d_tensor(input, sample_size):
    """
    Replicate a 2-d tensor ``sample_size`` times along a new middle axis.

    Delegates to the project helper ``expand_tensor``; presumably the result
    has shape (batch, sample_size, feat) — confirm against `expand_tensor`.
    """
    assert input.dim() == 2
    expanded, _ = expand_tensor(input, sample_size, do_unsqueeze=True)
    return expanded
class Encoder(nn.Module):
    """
    Base class for implicit-posterior encoders: maps an observation ``x``
    together with a Gaussian noise source to latent samples z ~ q(z|x).

    Subclasses must provide the submodules ``inp_encode`` / ``nos_encode``
    and implement ``_forward_all`` (see ``ConcatEncoder``).
    """
    def __init__(self,
                 input_dim=2,
                 noise_dim=2,
                 h_dim=64,
                 z_dim=2,
                 nonlinearity='softplus',
                 num_hidden_layers=1,
                 std=1.,
                 init='none',  # or 'gaussian'
                 enc_noise=False,
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.noise_dim = noise_dim
        self.h_dim = h_dim
        self.z_dim = z_dim
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.std = std            # default noise scale for sample_noise
        self.init = init
        self.enc_noise = enc_noise

    def reset_parameters(self):
        raise NotImplementedError

    def sample_noise(self, batch_size, std=None, device=None):
        """Draw ``batch_size`` noise vectors eps ~ N(0, std^2 I)."""
        std = std if std is not None else self.std
        # BUG FIX: `self.parameters` is a bound method and must be CALLED
        # before being handed to `next()`; the original
        # `next(self.parameters).device` raised TypeError whenever no
        # explicit device was supplied.
        device = device if device is not None else next(self.parameters()).device
        eps = torch.randn(batch_size, self.noise_dim).to(device)
        return std * eps

    def _forward_inp(self, x):
        """Encode the observation; rescales [0, 1] inputs to [-1, 1] first."""
        batch_size = x.size(0)
        x = x.view(batch_size, self.input_dim)
        # rescale
        x = 2*x - 1
        return self.inp_encode(x)

    def _forward_nos(self, batch_size=None, noise=None, std=None, device=None):
        """Encode the noise source, sampling it first when not provided."""
        assert batch_size is not None or noise is not None
        if noise is None:
            noise = self.sample_noise(batch_size, std=std, device=device)
        return self.nos_encode(noise)

    def _forward_all(self, inp, nos):
        """Combine encoded input and noise into z; subclass responsibility."""
        raise NotImplementedError

    def forward(self, x, noise=None, std=None, nz=1):
        """
        Sample ``nz`` latent codes per input.

        Returns a tensor of shape (batch, nz, z_dim).
        """
        batch_size = x.size(0)
        if noise is None:
            noise = self.sample_noise(batch_size*nz, std=std, device=x.device)
        else:
            assert noise.size(0) == batch_size*nz
            assert noise.size(1) == self.noise_dim
        # enc
        nos = self._forward_nos(noise=noise, std=std, device=x.device)
        inp = self._forward_inp(x)
        # replicate each encoded input nz times so it pairs with its nz
        # independent noise draws
        inp = inp.unsqueeze(1).expand(-1, nz, -1).contiguous()
        inp = inp.view(batch_size*nz, -1)
        # forward
        z = self._forward_all(inp, nos)
        return z.view(batch_size, nz, -1)
class ConcatEncoder(Encoder):
    """
    Encoder that fuses input features and noise by concatenation before a
    final MLP head that outputs z.
    """
    def __init__(self,
                 input_dim=2,
                 noise_dim=2,
                 h_dim=64,
                 z_dim=2,
                 nonlinearity='softplus',
                 num_hidden_layers=1,
                 std=1.,
                 init='none',  # or 'gaussian'
                 enc_noise=False,
                 ):
        super().__init__(
            input_dim=input_dim,
            noise_dim=noise_dim,
            h_dim=h_dim,
            z_dim=z_dim,
            nonlinearity=nonlinearity,
            num_hidden_layers=num_hidden_layers,
            std=std,
            init=init,
            enc_noise=enc_noise,
        )
        # when raw noise is concatenated directly, the concat width is
        # noise_dim; otherwise the noise is first embedded into h_dim
        nos_dim = h_dim if enc_noise else noise_dim
        self.inp_encode = MLP(input_dim=input_dim, hidden_dim=h_dim, output_dim=h_dim,
                              nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers,
                              use_nonlinearity_output=True)
        if enc_noise:
            self.nos_encode = MLP(input_dim=noise_dim, hidden_dim=h_dim, output_dim=h_dim,
                                  nonlinearity=nonlinearity, num_hidden_layers=0,
                                  use_nonlinearity_output=True)
        else:
            self.nos_encode = Identity()
        self.fc = MLP(input_dim=h_dim+nos_dim, hidden_dim=h_dim, output_dim=z_dim,
                      nonlinearity=nonlinearity, num_hidden_layers=1,
                      use_nonlinearity_output=False)
        if self.init == 'gaussian':
            self.reset_parameters()

    def reset_parameters(self):
        """Re-draw the final layer's weights from a standard normal."""
        nn.init.normal_(self.fc.fc.weight)

    def _forward_all(self, inp, nos):
        """Concatenate input/noise features and project to latent space."""
        return self.fc(torch.cat([inp, nos], dim=1))
class Decoder(nn.Module):
    """
    Bernoulli MLP decoder: maps latent codes z to per-pixel logits of
    p(x|z) and a (relaxed) sample from that distribution.
    """
    def __init__(self,
                 input_dim=784,
                 h_dim=300,
                 z_dim=32,
                 nonlinearity='softplus',
                 num_hidden_layers=1,
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.h_dim = h_dim
        self.z_dim = z_dim
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.main = MLP(input_dim=z_dim, hidden_dim=h_dim, output_dim=h_dim,
                        nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers,
                        use_nonlinearity_output=True)
        self.reparam = BernoulliDistributionLinear(h_dim, input_dim)

    def sample(self, logit):
        """Draw a logistic-sigmoid (relaxed Bernoulli) sample from logits."""
        return self.reparam.sample_logistic_sigmoid(logit)

    def forward(self, z):
        """Return ``(sampled_x, logits)`` for a batch of latent codes."""
        flat = z.view(z.size(0), -1)
        logit = self.reparam(self.main(flat))
        return self.sample(logit), logit
class ImplicitPosteriorVAE(nn.Module):
    """
    VAE whose approximate posterior q(z|x) is *implicit*: the encoder maps
    (x, noise) -> z, so q(z|x) has no closed-form density.

    Because q has no density, the ``logprob*`` methods estimate log p(x)
    by importance sampling with a surrogate posterior fitted to encoder
    samples (KDE, full-covariance Gaussian, or diagonal Gaussian), or
    directly from the prior.
    """
    def __init__(self,
                 energy_func=normal_energy_func,
                 input_dim=784,
                 noise_dim=100,
                 h_dim=300,
                 z_dim=32,
                 nonlinearity='softplus',
                 num_hidden_layers=1,
                 init='gaussian',
                 enc_type='concat',
                 ):
        super().__init__()
        self.energy_func = energy_func   # prior energy, defaults to unit normal
        self.input_dim = input_dim
        self.noise_dim = noise_dim
        self.h_dim = h_dim
        self.z_dim = z_dim
        self.latent_dim = z_dim # for ais
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.init = init
        self.enc_type = enc_type
        # only the concat encoder is implemented
        assert enc_type in ['concat']
        if enc_type == 'concat':
            self.encode = ConcatEncoder(input_dim, noise_dim, h_dim, z_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers+1, init=init)
        else:
            raise NotImplementedError
        self.decode = Decoder(input_dim, h_dim, z_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers)
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-init the decoder; optionally Gaussian-init the encoder head."""
        #self.apply(weight_init)
        self.decode.apply(weight_init)
        #torch.nn.init.constant_(self.decode.reparam.logit_fn.bias, -5)
        if self.init == 'gaussian':
            self.encode.reset_parameters()

    def loss(self, z, logit_x, target_x, beta=1.0):
        """
        Per-sample training loss: recon + beta * prior energy (mean-reduced).

        Returns ``(loss, recon_loss, prior_loss)``, each a scalar mean.
        """
        # loss from energy func
        prior_loss = self.energy_func(z.view(-1, self.z_dim))
        # recon loss (neg likelihood): -log p(x|z)
        recon_loss = loss_recon_bernoulli_with_logit(logit_x, target_x.view(-1, self.input_dim), do_sum=False)
        # add loss
        loss = recon_loss + beta*prior_loss
        return loss.mean(), recon_loss.mean(), prior_loss.mean()

    def jac_clamping_loss(self, input, z, eps, std, nz, eta_min, p=2, EPS=1.):
        # Jacobian clamping regularizer: intentionally unimplemented here
        # (forward() raises before reaching it when lmbd > 0).
        raise NotImplementedError

    def forward_hidden(self, input, std=None, nz=1):
        """Sample nz latent codes per input; returns z of shape (bsz, nz, zdim)."""
        # init
        batch_size = input.size(0)
        input = input.view(batch_size, self.input_dim)
        # gen noise source
        eps = self.encode.sample_noise(batch_size*nz, std=std, device=input.device)
        # sample z
        z = self.encode(input, noise=eps, std=std, nz=nz)
        return z

    def forward(self, input, beta=1.0, eta=0.0, lmbd=0.0, std=None, nz=1):
        """
        Full training pass: encode, decode, and compute the loss.

        Returns ``(x_sample, sigmoid(logits), z, loss, recon_loss, prior_loss)``.
        """
        # init
        batch_size = input.size(0)
        input = input.view(batch_size, self.input_dim)
        # each input is replicated nz times to match its nz latent samples
        input_expanded = convert_2d_3d_tensor(input, sample_size=nz)
        input_expanded_flattened = input_expanded.view(batch_size*nz, -1)
        #target = input.unsqueeze(1).expand(-1, nz, -1).contiguous().view(batch_size*nz, -1)
        # gen noise source
        eps = self.encode.sample_noise(batch_size*nz, std=std, device=input.device)
        # sample z
        z = self.encode(input, noise=eps, std=std, nz=nz)
        # z flattten
        z_flattened = z.view(batch_size*nz, -1)
        # decode
        x, logit_x = self.decode(z_flattened)
        # loss
        if lmbd > 0:
            # Jacobian clamping is not implemented for this model
            raise NotImplementedError
            jaclmp_loss = lmbd*self.jac_clamping_loss(input, z, eps, std=std, nz=nz, eta_min=eta)
        else:
            jaclmp_loss = 0
        loss, recon_loss, prior_loss = self.loss(
            z_flattened,
            logit_x, input_expanded_flattened,
            beta=beta,
        )
        loss += jaclmp_loss
        # return
        return x, torch.sigmoid(logit_x), z, loss, recon_loss.detach(), prior_loss.detach()

    def generate(self, batch_size=1):
        """Sample z from the unit-normal prior and decode it."""
        # init mu_z and logvar_z (as unit normal dist)
        weight = next(self.parameters())
        mu_z = weight.new_zeros(batch_size, self.z_dim)
        logvar_z = weight.new_zeros(batch_size, self.z_dim)
        # sample z (from unit normal dist)
        z = sample_gaussian(mu_z, logvar_z) # sample z
        # decode
        output, logit_x = self.decode(z)
        # return
        return output, torch.sigmoid(logit_x), z

    def logprob(self, input, sample_size=128, z=None, std=None):
        """Default log p(x) estimator: full-covariance Gaussian surrogate."""
        return self.logprob_w_cov_gaussian_posterior(input, sample_size, z, std)

    def logprob_w_kde_posterior(self, input, sample_size=128, z=None, std=None):
        """
        Estimate log p(x) by importance sampling with a per-input Gaussian
        KDE fitted to ``sample_size`` encoder samples as the surrogate q.
        """
        # init
        batch_size = input.size(0)
        input = input.view(batch_size, self.input_dim)
        # KDE needs comfortably more samples than dimensions
        assert sample_size >= 2*self.z_dim
        ''' get z and pseudo log q(newz|x) '''
        z, newz = [], []
        logposterior = []
        inp = self.encode._forward_inp(input).detach()
        for i in range(batch_size):
            _inp = inp[i:i+1, :].expand(sample_size, inp.size(1))
            _nos = self.encode._forward_nos(sample_size, std=std, device=input.device).detach()
            _z = self.encode._forward_all(_inp, _nos) # ssz x zdim
            z += [_z.detach().unsqueeze(0)]
        z = torch.cat(z, dim=0) # bsz x ssz x zdim
        for i in range(batch_size):
            _z = z[i, :, :].cpu().numpy().T # zdim x ssz
            kernel = stats.gaussian_kde(_z)
            _newz = kernel.resample(sample_size) # zdim x ssz
            _logposterior = kernel.logpdf(_newz) # ssz
            _newz = torch.from_numpy(_newz.T).float().to(input.device) # ssz x zdim
            _logposterior = torch.from_numpy(_logposterior).float().to(input.device) # ssz
            newz += [_newz.unsqueeze(0)]
            logposterior += [_logposterior.unsqueeze(0)]
        newz = torch.cat(newz, dim=0) # bsz x ssz x zdim
        logposterior = torch.cat(logposterior, dim=0) # bsz x ssz
        ''' get log p(z) '''
        # get prior (as unit normal dist)
        mu_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
        logvar_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
        logprior = logprob_gaussian(mu_pz, logvar_pz, newz, do_unsqueeze=False, do_mean=False)
        logprior = torch.sum(logprior.view(batch_size, sample_size, self.z_dim), dim=2) # bsz x ssz
        ''' get log p(x|z) '''
        # decode
        logit_x = []
        #for i in range(sample_size):
        for i in range(batch_size):
            _, _logit_x = self.decode(newz[i, :, :]) # ssz x zdim
            logit_x += [_logit_x.detach().unsqueeze(0)]
        logit_x = torch.cat(logit_x, dim=0) # bsz x ssz x input_dim
        _input = input.unsqueeze(1).expand(batch_size, sample_size, self.input_dim) # bsz x ssz x input_dim
        loglikelihood = -F.binary_cross_entropy_with_logits(logit_x, _input, reduction='none')
        loglikelihood = torch.sum(loglikelihood, dim=2) # bsz x ssz
        ''' get log p(x|z)p(z)/q(z|x) '''
        # log-mean-exp of the importance weights (numerically stabilized)
        logprob = loglikelihood + logprior - logposterior # bsz x ssz
        logprob_max, _ = torch.max(logprob, dim=1, keepdim=True)
        rprob = (logprob - logprob_max).exp() # relative prob
        logprob = torch.log(torch.mean(rprob, dim=1, keepdim=True) + 1e-10) + logprob_max # bsz x 1
        # return
        return logprob.mean()

    def logprob_w_cov_gaussian_posterior(self, input, sample_size=128, z=None, std=None):
        """
        Estimate log p(x) by importance sampling with a per-input
        full-covariance Gaussian fitted to encoder samples as surrogate q.
        """
        # init
        batch_size = input.size(0)
        input = input.view(batch_size, self.input_dim)
        # need enough samples for a well-conditioned covariance estimate
        assert sample_size >= 2*self.z_dim
        ''' get z and pseudo log q(newz|x) '''
        z, newz = [], []
        #cov_qz, rv_z = [], []
        logposterior = []
        inp = self.encode._forward_inp(input).detach()
        #for i in range(sample_size):
        for i in range(batch_size):
            _inp = inp[i:i+1, :].expand(sample_size, inp.size(1))
            _nos = self.encode._forward_nos(batch_size=sample_size, std=std, device=input.device).detach()
            _z = self.encode._forward_all(_inp, _nos) # ssz x zdim
            z += [_z.detach().unsqueeze(0)]
        z = torch.cat(z, dim=0) # bsz x ssz x zdim
        mu_qz = torch.mean(z, dim=1) # bsz x zdim
        for i in range(batch_size):
            _cov_qz = get_covmat(z[i, :, :])
            _rv_z = MultivariateNormal(mu_qz[i], _cov_qz)
            _newz = _rv_z.rsample(torch.Size([1, sample_size]))
            _logposterior = _rv_z.log_prob(_newz)
            #cov_qz += [_cov_qz.unsqueeze(0)]
            #rv_z += [_rv_z]
            newz += [_newz]
            logposterior += [_logposterior]
        #cov_qz = torch.cat(cov_qz, dim=0) # bsz x zdim x zdim
        newz = torch.cat(newz, dim=0) # bsz x ssz x zdim
        logposterior = torch.cat(logposterior, dim=0) # bsz x ssz
        ''' get log p(z) '''
        # get prior (as unit normal dist)
        mu_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
        logvar_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
        logprior = logprob_gaussian(mu_pz, logvar_pz, newz, do_unsqueeze=False, do_mean=False)
        logprior = torch.sum(logprior.view(batch_size, sample_size, self.z_dim), dim=2) # bsz x ssz
        ''' get log p(x|z) '''
        # decode
        logit_x = []
        #for i in range(sample_size):
        for i in range(batch_size):
            _, _logit_x = self.decode(newz[i, :, :]) # ssz x zdim
            logit_x += [_logit_x.detach().unsqueeze(0)]
        logit_x = torch.cat(logit_x, dim=0) # bsz x ssz x input_dim
        _input = input.unsqueeze(1).expand(batch_size, sample_size, self.input_dim) # bsz x ssz x input_dim
        loglikelihood = -F.binary_cross_entropy_with_logits(logit_x, _input, reduction='none')
        loglikelihood = torch.sum(loglikelihood, dim=2) # bsz x ssz
        ''' get log p(x|z)p(z)/q(z|x) '''
        # log-mean-exp of the importance weights (numerically stabilized)
        logprob = loglikelihood + logprior - logposterior # bsz x ssz
        logprob_max, _ = torch.max(logprob, dim=1, keepdim=True)
        rprob = (logprob - logprob_max).exp() # relative prob
        logprob = torch.log(torch.mean(rprob, dim=1, keepdim=True) + 1e-10) + logprob_max # bsz x 1
        # return
        return logprob.mean()

    def logprob_w_diag_gaussian_posterior(self, input, sample_size=128, z=None, std=None):
        """
        Estimate log p(x) by importance sampling with a per-input diagonal
        Gaussian (moment-matched to encoder samples) as surrogate q.
        """
        # init
        batch_size = input.size(0)
        input = input.view(batch_size, self.input_dim)
        ''' get z '''
        z = []
        for i in range(sample_size):
            _z = self.encode(input, std=std)
            _z_flattened = _z.view(_z.size(1)*_z.size(2), -1)
            z += [_z_flattened.detach().unsqueeze(1)]
        z = torch.cat(z, dim=1) # bsz x ssz x zdim
        mu_qz = torch.mean(z, dim=1)
        # epsilon keeps the log finite for near-degenerate dimensions
        logvar_qz = torch.log(torch.var(z, dim=1) + 1e-10)
        ''' get pseudo log q(z|x) '''
        mu_qz = mu_qz.detach().repeat(1, sample_size).view(batch_size, sample_size, self.z_dim)
        logvar_qz = logvar_qz.detach().repeat(1, sample_size).view(batch_size, sample_size, self.z_dim)
        newz = sample_gaussian(mu_qz, logvar_qz)
        logposterior = logprob_gaussian(mu_qz, logvar_qz, newz, do_unsqueeze=False, do_mean=False)
        logposterior = torch.sum(logposterior.view(batch_size, sample_size, self.z_dim), dim=2) # bsz x ssz
        ''' get log p(z) '''
        # get prior (as unit normal dist)
        mu_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
        logvar_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
        logprior = logprob_gaussian(mu_pz, logvar_pz, newz, do_unsqueeze=False, do_mean=False)
        logprior = torch.sum(logprior.view(batch_size, sample_size, self.z_dim), dim=2) # bsz x ssz
        ''' get log p(x|z) '''
        # decode
        logit_x = []
        for i in range(sample_size):
            _, _logit_x = self.decode(newz[:, i, :])
            logit_x += [_logit_x.detach().unsqueeze(1)]
        logit_x = torch.cat(logit_x, dim=1) # bsz x ssz x input_dim
        _input = input.unsqueeze(1).expand(batch_size, sample_size, self.input_dim) # bsz x ssz x input_dim
        loglikelihood = -F.binary_cross_entropy_with_logits(logit_x, _input, reduction='none')
        loglikelihood = torch.sum(loglikelihood, dim=2) # bsz x ssz
        ''' get log p(x|z)p(z)/q(z|x) '''
        # log-mean-exp of the importance weights (numerically stabilized)
        logprob = loglikelihood + logprior - logposterior # bsz x ssz
        logprob_max, _ = torch.max(logprob, dim=1, keepdim=True)
        rprob = (logprob - logprob_max).exp() # relative prob
        logprob = torch.log(torch.mean(rprob, dim=1, keepdim=True) + 1e-10) + logprob_max # bsz x 1
        # return
        return logprob.mean()

    def logprob_w_prior(self, input, sample_size=128, z=None):
        """
        Estimate log p(x) by Monte-Carlo over prior samples:
        log p(x) ~= log mean_z p(x|z), z ~ N(0, I) (or the given z).
        """
        # init
        batch_size = input.size(0)
        input = input.view(batch_size, self.input_dim)
        ''' get z samples from p(z) '''
        # get prior (as unit normal dist)
        if z is None:
            mu_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
            logvar_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
            z = sample_gaussian(mu_pz, logvar_pz) # sample z
        ''' get log p(x|z) '''
        # decode
        logit_x = []
        for i in range(sample_size):
            _, _logit_x = self.decode(z[:, i, :])
            logit_x += [_logit_x.detach().unsqueeze(1)]
        logit_x = torch.cat(logit_x, dim=1) # bsz x ssz x input_dim
        _input = input.unsqueeze(1).expand(batch_size, sample_size, self.input_dim) # bsz x ssz x input_dim
        loglikelihood = -F.binary_cross_entropy_with_logits(logit_x, _input, reduction='none')
        loglikelihood = torch.sum(loglikelihood, dim=2) # bsz x ssz
        ''' get log p(x) '''
        # log-mean-exp of the likelihoods (numerically stabilized)
        logprob = loglikelihood # bsz x ssz
        logprob_max, _ = torch.max(logprob, dim=1, keepdim=True)
        rprob = (logprob-logprob_max).exp() # relative prob
        logprob = torch.log(torch.mean(rprob, dim=1, keepdim=True) + 1e-10) + logprob_max # bsz x 1
        # return
        return logprob.mean()
| import math
import numpy as np
from scipy import stats
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd
from torch.distributions import MultivariateNormal
from models.layers import Identity, MLP, WNMLP, ContextConcatMLP, ContextScaleMLP, ContextWNScaleMLP, ContextSPScaleMLP, ContextSPWNScaleMLP, ContextBilinearMLP, ContextWNBilinearMLP, ContextSWNBilinearMLP, ContextResMLP
from models.reparam import BernoulliDistributionLinear
from utils import loss_recon_bernoulli_with_logit, normal_energy_func
from utils import logprob_gaussian, get_covmat
from utils import get_nonlinear_func
from utils import expand_tensor
from utils import cond_jac_clamping_loss
def weight_init(m):
    """
    Xavier-uniform initialisation for Conv2d/Linear weights; zero biases.

    Intended for use with ``module.apply(weight_init)``; all other module
    types are left untouched.
    """
    if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
        torch.nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            torch.nn.init.zeros_(m.bias)
def sample_gaussian(mu, logvar):
    """
    Reparameterised sample from N(mu, diag(exp(logvar))).

    Draws eps ~ N(0, I) and returns mu + exp(0.5 * logvar) * eps, keeping
    the sample differentiable w.r.t. mu and logvar.
    """
    sigma = (0.5 * logvar).exp()
    noise = torch.randn_like(sigma)
    return mu + sigma * noise
def convert_2d_3d_tensor(input, sample_size):
    """
    Replicate a 2-d tensor ``sample_size`` times along a new middle axis.

    Delegates to the project helper ``expand_tensor``; presumably the result
    has shape (batch, sample_size, feat) — confirm against `expand_tensor`.
    """
    assert input.dim() == 2
    expanded, _ = expand_tensor(input, sample_size, do_unsqueeze=True)
    return expanded
class Encoder(nn.Module):
    """
    Base class for implicit-posterior encoders: maps an observation ``x``
    together with a Gaussian noise source to latent samples z ~ q(z|x).

    Subclasses must provide the submodules ``inp_encode`` / ``nos_encode``
    and implement ``_forward_all`` (see ``ConcatEncoder``).
    """
    def __init__(self,
                 input_dim=2,
                 noise_dim=2,
                 h_dim=64,
                 z_dim=2,
                 nonlinearity='softplus',
                 num_hidden_layers=1,
                 std=1.,
                 init='none',  # or 'gaussian'
                 enc_noise=False,
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.noise_dim = noise_dim
        self.h_dim = h_dim
        self.z_dim = z_dim
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.std = std            # default noise scale for sample_noise
        self.init = init
        self.enc_noise = enc_noise

    def reset_parameters(self):
        raise NotImplementedError

    def sample_noise(self, batch_size, std=None, device=None):
        """Draw ``batch_size`` noise vectors eps ~ N(0, std^2 I)."""
        std = std if std is not None else self.std
        # BUG FIX: `self.parameters` is a bound method and must be CALLED
        # before being handed to `next()`; the original
        # `next(self.parameters).device` raised TypeError whenever no
        # explicit device was supplied.
        device = device if device is not None else next(self.parameters()).device
        eps = torch.randn(batch_size, self.noise_dim).to(device)
        return std * eps

    def _forward_inp(self, x):
        """Encode the observation; rescales [0, 1] inputs to [-1, 1] first."""
        batch_size = x.size(0)
        x = x.view(batch_size, self.input_dim)
        # rescale
        x = 2*x - 1
        return self.inp_encode(x)

    def _forward_nos(self, batch_size=None, noise=None, std=None, device=None):
        """Encode the noise source, sampling it first when not provided."""
        assert batch_size is not None or noise is not None
        if noise is None:
            noise = self.sample_noise(batch_size, std=std, device=device)
        return self.nos_encode(noise)

    def _forward_all(self, inp, nos):
        """Combine encoded input and noise into z; subclass responsibility."""
        raise NotImplementedError

    def forward(self, x, noise=None, std=None, nz=1):
        """
        Sample ``nz`` latent codes per input.

        Returns a tensor of shape (batch, nz, z_dim).
        """
        batch_size = x.size(0)
        if noise is None:
            noise = self.sample_noise(batch_size*nz, std=std, device=x.device)
        else:
            assert noise.size(0) == batch_size*nz
            assert noise.size(1) == self.noise_dim
        # enc
        nos = self._forward_nos(noise=noise, std=std, device=x.device)
        inp = self._forward_inp(x)
        # replicate each encoded input nz times so it pairs with its nz
        # independent noise draws
        inp = inp.unsqueeze(1).expand(-1, nz, -1).contiguous()
        inp = inp.view(batch_size*nz, -1)
        # forward
        z = self._forward_all(inp, nos)
        return z.view(batch_size, nz, -1)
class ConcatEncoder(Encoder):
    """
    Encoder that fuses input features and noise by concatenation before a
    final MLP head that outputs z.
    """
    def __init__(self,
                 input_dim=2,
                 noise_dim=2,
                 h_dim=64,
                 z_dim=2,
                 nonlinearity='softplus',
                 num_hidden_layers=1,
                 std=1.,
                 init='none',  # or 'gaussian'
                 enc_noise=False,
                 ):
        super().__init__(
            input_dim=input_dim,
            noise_dim=noise_dim,
            h_dim=h_dim,
            z_dim=z_dim,
            nonlinearity=nonlinearity,
            num_hidden_layers=num_hidden_layers,
            std=std,
            init=init,
            enc_noise=enc_noise,
        )
        # when raw noise is concatenated directly, the concat width is
        # noise_dim; otherwise the noise is first embedded into h_dim
        nos_dim = h_dim if enc_noise else noise_dim
        self.inp_encode = MLP(input_dim=input_dim, hidden_dim=h_dim, output_dim=h_dim,
                              nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers,
                              use_nonlinearity_output=True)
        if enc_noise:
            self.nos_encode = MLP(input_dim=noise_dim, hidden_dim=h_dim, output_dim=h_dim,
                                  nonlinearity=nonlinearity, num_hidden_layers=0,
                                  use_nonlinearity_output=True)
        else:
            self.nos_encode = Identity()
        self.fc = MLP(input_dim=h_dim+nos_dim, hidden_dim=h_dim, output_dim=z_dim,
                      nonlinearity=nonlinearity, num_hidden_layers=1,
                      use_nonlinearity_output=False)
        if self.init == 'gaussian':
            self.reset_parameters()

    def reset_parameters(self):
        """Re-draw the final layer's weights from a standard normal."""
        nn.init.normal_(self.fc.fc.weight)

    def _forward_all(self, inp, nos):
        """Concatenate input/noise features and project to latent space."""
        return self.fc(torch.cat([inp, nos], dim=1))
class Decoder(nn.Module):
    """MLP decoder mapping a latent z to Bernoulli logits over input pixels."""
    def __init__(self,
                 input_dim=784,
                 h_dim=300,
                 z_dim=32,
                 nonlinearity='softplus',
                 num_hidden_layers=1,
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.h_dim = h_dim
        self.z_dim = z_dim
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.main = MLP(input_dim=z_dim, hidden_dim=h_dim, output_dim=h_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers, use_nonlinearity_output=True)
        self.reparam = BernoulliDistributionLinear(h_dim, input_dim)
    def sample(self, logit):
        """Draw a sample from the given Bernoulli logits.

        NOTE(review): the method name suggests a logistic-sigmoid relaxation
        (reparameterized sampling) — confirm in BernoulliDistributionLinear.
        """
        return self.reparam.sample_logistic_sigmoid(logit)
    def forward(self, z):
        """Decode z; returns (sampled x, Bernoulli logits), each (batch, input_dim)."""
        batch_size = z.size(0)
        z = z.view(batch_size, -1)
        # forward
        h = self.main(z)
        logit = self.reparam(h)
        # sample
        x = self.sample(logit)
        return x, logit
class ImplicitPosteriorVAE(nn.Module):
    """VAE with an implicit (sampling-only) approximate posterior q(z|x).

    The encoder maps (x, noise) -> z, so q(z|x) has no tractable density and
    the prior term of the training loss is evaluated by ``energy_func`` on the
    sampled z instead of an analytic KL.  The ``logprob_*`` methods estimate
    the marginal log-likelihood by importance sampling with different
    surrogate posteriors (KDE, full-covariance Gaussian, diagonal Gaussian,
    or the prior itself).
    """
    def __init__(self,
                 energy_func=normal_energy_func,
                 input_dim=784,
                 noise_dim=100,
                 h_dim=300,
                 z_dim=32,
                 nonlinearity='softplus',
                 num_hidden_layers=1,
                 init='gaussian',
                 enc_type='concat',
                 ):
        super().__init__()
        self.energy_func = energy_func
        self.input_dim = input_dim
        self.noise_dim = noise_dim
        self.h_dim = h_dim
        self.z_dim = z_dim
        self.latent_dim = z_dim # for ais
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.init = init
        self.enc_type = enc_type
        # only the concatenation encoder is implemented
        assert enc_type in ['concat']
        if enc_type == 'concat':
            self.encode = ConcatEncoder(input_dim, noise_dim, h_dim, z_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers+1, init=init)
        else:
            raise NotImplementedError
        self.decode = Decoder(input_dim, h_dim, z_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers)
        self.reset_parameters()
    def reset_parameters(self):
        """Re-initialize decoder weights and, for 'gaussian' init, the encoder."""
        #self.apply(weight_init)
        self.decode.apply(weight_init)
        #torch.nn.init.constant_(self.decode.reparam.logit_fn.bias, -5)
        if self.init == 'gaussian':
            self.encode.reset_parameters()
    def loss(self, z, logit_x, target_x, beta=1.0):
        """Return mean (total, recon, prior) losses; ``beta`` weights the prior term."""
        # loss from energy func
        prior_loss = self.energy_func(z.view(-1, self.z_dim))
        # recon loss (neg likelihood): -log p(x|z)
        recon_loss = loss_recon_bernoulli_with_logit(logit_x, target_x.view(-1, self.input_dim), do_sum=False)
        # add loss
        loss = recon_loss + beta*prior_loss
        return loss.mean(), recon_loss.mean(), prior_loss.mean()
    def jac_clamping_loss(self, input, z, eps, std, nz, eta_min, p=2, EPS=1.):
        """Jacobian-clamping regularizer; not implemented for this model."""
        raise NotImplementedError
    def forward_hidden(self, input, std=None, nz=1):
        """Encode ``input`` into ``nz`` posterior samples z without decoding."""
        # init
        batch_size = input.size(0)
        input = input.view(batch_size, self.input_dim)
        # gen noise source
        eps = self.encode.sample_noise(batch_size*nz, std=std, device=input.device)
        # sample z
        z = self.encode(input, noise=eps, std=std, nz=nz)
        return z
    def forward(self, input, beta=1.0, eta=0.0, lmbd=0.0, std=None, nz=1):
        """Encode, decode and score ``input``.

        Returns (x, sigmoid(logits), z, loss, recon_loss, prior_loss); the two
        component losses are detached.  ``lmbd > 0`` (Jacobian clamping) is not
        implemented.
        """
        # init
        batch_size = input.size(0)
        input = input.view(batch_size, self.input_dim)
        input_expanded = convert_2d_3d_tensor(input, sample_size=nz)
        input_expanded_flattened = input_expanded.view(batch_size*nz, -1)
        #target = input.unsqueeze(1).expand(-1, nz, -1).contiguous().view(batch_size*nz, -1)
        # gen noise source
        eps = self.encode.sample_noise(batch_size*nz, std=std, device=input.device)
        # sample z
        z = self.encode(input, noise=eps, std=std, nz=nz)
        # z flatten
        z_flattened = z.view(batch_size*nz, -1)
        # decode
        x, logit_x = self.decode(z_flattened)
        # loss
        if lmbd > 0:
            raise NotImplementedError
            jaclmp_loss = lmbd*self.jac_clamping_loss(input, z, eps, std=std, nz=nz, eta_min=eta)
        else:
            jaclmp_loss = 0
        loss, recon_loss, prior_loss = self.loss(
            z_flattened,
            logit_x, input_expanded_flattened,
            beta=beta,
            )
        loss += jaclmp_loss
        # return
        return x, torch.sigmoid(logit_x), z, loss, recon_loss.detach(), prior_loss.detach()
    def generate(self, batch_size=1):
        """Sample z from the unit-normal prior and decode; returns (x, p(x), z)."""
        # init mu_z and logvar_z (as unit normal dist)
        weight = next(self.parameters())
        mu_z = weight.new_zeros(batch_size, self.z_dim)
        logvar_z = weight.new_zeros(batch_size, self.z_dim)
        # sample z (from unit normal dist)
        z = sample_gaussian(mu_z, logvar_z) # sample z
        # decode
        output, logit_x = self.decode(z)
        # return
        return output, torch.sigmoid(logit_x), z
    def logprob(self, input, sample_size=128, z=None, std=None):
        """Estimate log p(x); delegates to the full-covariance Gaussian surrogate."""
        return self.logprob_w_cov_gaussian_posterior(input, sample_size, z, std)
    def logprob_w_kde_posterior(self, input, sample_size=128, z=None, std=None):
        """Importance-sampling estimate of log p(x) using a per-example KDE
        (scipy ``gaussian_kde`` fitted to encoder samples) as the pseudo
        posterior density.
        """
        # init
        batch_size = input.size(0)
        input = input.view(batch_size, self.input_dim)
        # KDE needs more samples than twice the latent dimensionality
        assert sample_size >= 2*self.z_dim
        ''' get z and pseudo log q(newz|x) '''
        z, newz = [], []
        logposterior = []
        inp = self.encode._forward_inp(input).detach()
        for i in range(batch_size):
            _inp = inp[i:i+1, :].expand(sample_size, inp.size(1))
            _nos = self.encode._forward_nos(sample_size, std=std, device=input.device).detach()
            _z = self.encode._forward_all(_inp, _nos) # ssz x zdim
            z += [_z.detach().unsqueeze(0)]
        z = torch.cat(z, dim=0) # bsz x ssz x zdim
        for i in range(batch_size):
            _z = z[i, :, :].cpu().numpy().T # zdim x ssz
            kernel = stats.gaussian_kde(_z)
            _newz = kernel.resample(sample_size) # zdim x ssz
            _logposterior = kernel.logpdf(_newz) # ssz
            _newz = torch.from_numpy(_newz.T).float().to(input.device) # ssz x zdim
            _logposterior = torch.from_numpy(_logposterior).float().to(input.device) # ssz
            newz += [_newz.unsqueeze(0)]
            logposterior += [_logposterior.unsqueeze(0)]
        newz = torch.cat(newz, dim=0) # bsz x ssz x zdim
        logposterior = torch.cat(logposterior, dim=0) # bsz x ssz
        ''' get log p(z) '''
        # get prior (as unit normal dist)
        mu_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
        logvar_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
        logprior = logprob_gaussian(mu_pz, logvar_pz, newz, do_unsqueeze=False, do_mean=False)
        logprior = torch.sum(logprior.view(batch_size, sample_size, self.z_dim), dim=2) # bsz x ssz
        ''' get log p(x|z) '''
        # decode
        logit_x = []
        #for i in range(sample_size):
        for i in range(batch_size):
            _, _logit_x = self.decode(newz[i, :, :]) # ssz x zdim
            logit_x += [_logit_x.detach().unsqueeze(0)]
        logit_x = torch.cat(logit_x, dim=0) # bsz x ssz x input_dim
        _input = input.unsqueeze(1).expand(batch_size, sample_size, self.input_dim) # bsz x ssz x input_dim
        loglikelihood = -F.binary_cross_entropy_with_logits(logit_x, _input, reduction='none')
        loglikelihood = torch.sum(loglikelihood, dim=2) # bsz x ssz
        ''' get log p(x|z)p(z)/q(z|x) '''
        # log-mean-exp over samples, stabilized by the per-row maximum
        logprob = loglikelihood + logprior - logposterior # bsz x ssz
        logprob_max, _ = torch.max(logprob, dim=1, keepdim=True)
        rprob = (logprob - logprob_max).exp() # relative prob
        logprob = torch.log(torch.mean(rprob, dim=1, keepdim=True) + 1e-10) + logprob_max # bsz x 1
        # return
        return logprob.mean()
    def logprob_w_cov_gaussian_posterior(self, input, sample_size=128, z=None, std=None):
        """Importance-sampling estimate of log p(x) using a per-example
        full-covariance Gaussian (moment-matched to encoder samples) as the
        surrogate posterior.
        """
        # init
        batch_size = input.size(0)
        input = input.view(batch_size, self.input_dim)
        # need enough samples for a well-conditioned covariance estimate
        assert sample_size >= 2*self.z_dim
        ''' get z and pseudo log q(newz|x) '''
        z, newz = [], []
        #cov_qz, rv_z = [], []
        logposterior = []
        inp = self.encode._forward_inp(input).detach()
        #for i in range(sample_size):
        for i in range(batch_size):
            _inp = inp[i:i+1, :].expand(sample_size, inp.size(1))
            _nos = self.encode._forward_nos(batch_size=sample_size, std=std, device=input.device).detach()
            _z = self.encode._forward_all(_inp, _nos) # ssz x zdim
            z += [_z.detach().unsqueeze(0)]
        z = torch.cat(z, dim=0) # bsz x ssz x zdim
        mu_qz = torch.mean(z, dim=1) # bsz x zdim
        for i in range(batch_size):
            _cov_qz = get_covmat(z[i, :, :])
            _rv_z = MultivariateNormal(mu_qz[i], _cov_qz)
            _newz = _rv_z.rsample(torch.Size([1, sample_size]))
            _logposterior = _rv_z.log_prob(_newz)
            #cov_qz += [_cov_qz.unsqueeze(0)]
            #rv_z += [_rv_z]
            newz += [_newz]
            logposterior += [_logposterior]
        #cov_qz = torch.cat(cov_qz, dim=0) # bsz x zdim x zdim
        newz = torch.cat(newz, dim=0) # bsz x ssz x zdim
        logposterior = torch.cat(logposterior, dim=0) # bsz x ssz
        ''' get log p(z) '''
        # get prior (as unit normal dist)
        mu_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
        logvar_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
        logprior = logprob_gaussian(mu_pz, logvar_pz, newz, do_unsqueeze=False, do_mean=False)
        logprior = torch.sum(logprior.view(batch_size, sample_size, self.z_dim), dim=2) # bsz x ssz
        ''' get log p(x|z) '''
        # decode
        logit_x = []
        #for i in range(sample_size):
        for i in range(batch_size):
            _, _logit_x = self.decode(newz[i, :, :]) # ssz x zdim
            logit_x += [_logit_x.detach().unsqueeze(0)]
        logit_x = torch.cat(logit_x, dim=0) # bsz x ssz x input_dim
        _input = input.unsqueeze(1).expand(batch_size, sample_size, self.input_dim) # bsz x ssz x input_dim
        loglikelihood = -F.binary_cross_entropy_with_logits(logit_x, _input, reduction='none')
        loglikelihood = torch.sum(loglikelihood, dim=2) # bsz x ssz
        ''' get log p(x|z)p(z)/q(z|x) '''
        # log-mean-exp over samples, stabilized by the per-row maximum
        logprob = loglikelihood + logprior - logposterior # bsz x ssz
        logprob_max, _ = torch.max(logprob, dim=1, keepdim=True)
        rprob = (logprob - logprob_max).exp() # relative prob
        logprob = torch.log(torch.mean(rprob, dim=1, keepdim=True) + 1e-10) + logprob_max # bsz x 1
        # return
        return logprob.mean()
    def logprob_w_diag_gaussian_posterior(self, input, sample_size=128, z=None, std=None):
        """Importance-sampling estimate of log p(x) using a per-example
        diagonal Gaussian (moment-matched to encoder samples) as the
        surrogate posterior.
        """
        # init
        batch_size = input.size(0)
        input = input.view(batch_size, self.input_dim)
        ''' get z '''
        z = []
        for i in range(sample_size):
            _z = self.encode(input, std=std)
            # NOTE(review): flattening uses _z.size(1)*_z.size(2); for
            # _z of shape (bsz, nz=1, zdim) this is (zdim, ...), which looks
            # batch-size dependent — verify the intended shape for bsz > 1.
            _z_flattened = _z.view(_z.size(1)*_z.size(2), -1)
            z += [_z_flattened.detach().unsqueeze(1)]
        z = torch.cat(z, dim=1) # bsz x ssz x zdim
        mu_qz = torch.mean(z, dim=1)
        logvar_qz = torch.log(torch.var(z, dim=1) + 1e-10)
        ''' get pseudo log q(z|x) '''
        mu_qz = mu_qz.detach().repeat(1, sample_size).view(batch_size, sample_size, self.z_dim)
        logvar_qz = logvar_qz.detach().repeat(1, sample_size).view(batch_size, sample_size, self.z_dim)
        newz = sample_gaussian(mu_qz, logvar_qz)
        logposterior = logprob_gaussian(mu_qz, logvar_qz, newz, do_unsqueeze=False, do_mean=False)
        logposterior = torch.sum(logposterior.view(batch_size, sample_size, self.z_dim), dim=2) # bsz x ssz
        ''' get log p(z) '''
        # get prior (as unit normal dist)
        mu_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
        logvar_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
        logprior = logprob_gaussian(mu_pz, logvar_pz, newz, do_unsqueeze=False, do_mean=False)
        logprior = torch.sum(logprior.view(batch_size, sample_size, self.z_dim), dim=2) # bsz x ssz
        ''' get log p(x|z) '''
        # decode
        logit_x = []
        for i in range(sample_size):
            _, _logit_x = self.decode(newz[:, i, :])
            logit_x += [_logit_x.detach().unsqueeze(1)]
        logit_x = torch.cat(logit_x, dim=1) # bsz x ssz x input_dim
        _input = input.unsqueeze(1).expand(batch_size, sample_size, self.input_dim) # bsz x ssz x input_dim
        loglikelihood = -F.binary_cross_entropy_with_logits(logit_x, _input, reduction='none')
        loglikelihood = torch.sum(loglikelihood, dim=2) # bsz x ssz
        ''' get log p(x|z)p(z)/q(z|x) '''
        # log-mean-exp over samples, stabilized by the per-row maximum
        logprob = loglikelihood + logprior - logposterior # bsz x ssz
        logprob_max, _ = torch.max(logprob, dim=1, keepdim=True)
        rprob = (logprob - logprob_max).exp() # relative prob
        logprob = torch.log(torch.mean(rprob, dim=1, keepdim=True) + 1e-10) + logprob_max # bsz x 1
        # return
        return logprob.mean()
    def logprob_w_prior(self, input, sample_size=128, z=None):
        """Monte-Carlo estimate of log p(x) with z drawn from the prior
        (or a caller-supplied z of shape (bsz, ssz, zdim)).
        """
        # init
        batch_size = input.size(0)
        input = input.view(batch_size, self.input_dim)
        ''' get z samples from p(z) '''
        # get prior (as unit normal dist)
        if z is None:
            mu_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
            logvar_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
            z = sample_gaussian(mu_pz, logvar_pz) # sample z
        ''' get log p(x|z) '''
        # decode
        logit_x = []
        for i in range(sample_size):
            _, _logit_x = self.decode(z[:, i, :])
            logit_x += [_logit_x.detach().unsqueeze(1)]
        logit_x = torch.cat(logit_x, dim=1) # bsz x ssz x input_dim
        _input = input.unsqueeze(1).expand(batch_size, sample_size, self.input_dim) # bsz x ssz x input_dim
        loglikelihood = -F.binary_cross_entropy_with_logits(logit_x, _input, reduction='none')
        loglikelihood = torch.sum(loglikelihood, dim=2) # bsz x ssz
        ''' get log p(x) '''
        # log-mean-exp over samples, stabilized by the per-row maximum
        logprob = loglikelihood # bsz x ssz
        logprob_max, _ = torch.max(logprob, dim=1, keepdim=True)
        rprob = (logprob-logprob_max).exp() # relative prob
        logprob = torch.log(torch.mean(rprob, dim=1, keepdim=True) + 1e-10) + logprob_max # bsz x 1
        # return
        return logprob.mean()
| en | 0.379649 | #torch.nn.init.xavier_normal_(m.weight) #'gaussian', #True, #ctx_dim = noise_dim if not enc_noise else h_dim #self.inp_encode = MLP(input_dim=input_dim, hidden_dim=h_dim, output_dim=h_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers-1, use_nonlinearity_output=True) #self.nos_encode = Identity() if not enc_noise \ # else MLP(input_dim=noise_dim, hidden_dim=h_dim, output_dim=h_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers-1, use_nonlinearity_output=True) #self.fc = ContextConcatMLP(input_dim=h_dim, context_dim=ctx_dim, hidden_dim=h_dim, output_dim=z_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers, use_nonlinearity_output=False) # rescale # enc # enc # enc # view # forward #'gaussian', #z = self.fc(inp, nos) # forward # sample # for ais #self.apply(weight_init) #torch.nn.init.constant_(self.decode.reparam.logit_fn.bias, -5) # loss from energy func # recon loss (neg likelihood): -log p(x|z) # add loss # init # gen noise source # sample z # init #target = input.unsqueeze(1).expand(-1, nz, -1).contiguous().view(batch_size*nz, -1) # gen noise source # sample z # z flattten # decode # loss # return # init mu_z and logvar_z (as unit normal dist) # sample z (from unit normal dist) # sample z # decode # return # init get z and pseudo log q(newz|x) # ssz x zdim # bsz x ssz x zdim # zdim x ssz # zdim x ssz # ssz # ssz x zdim # ssz # bsz x ssz x zdim # bsz x ssz get log p(z) # get prior (as unit normal dist) # bsz x ssz get log p(x|z) # decode #for i in range(sample_size): # ssz x zdim # bsz x ssz x input_dim # bsz x ssz x input_dim # bsz x ssz get log p(x|z)p(z)/q(z|x) # bsz x ssz # relative prob # bsz x 1 # return # init get z and pseudo log q(newz|x) #cov_qz, rv_z = [], [] #for i in range(sample_size): # ssz x zdim # bsz x ssz x zdim # bsz x zdim #cov_qz += [_cov_qz.unsqueeze(0)] #rv_z += [_rv_z] #cov_qz = torch.cat(cov_qz, dim=0) # bsz x zdim x zdim # bsz x ssz x zdim # bsz x ssz get log p(z) # 
get prior (as unit normal dist) # bsz x ssz get log p(x|z) # decode #for i in range(sample_size): # ssz x zdim # bsz x ssz x input_dim # bsz x ssz x input_dim # bsz x ssz get log p(x|z)p(z)/q(z|x) # bsz x ssz # relative prob # bsz x 1 # return # init get z # bsz x ssz x zdim get pseudo log q(z|x) # bsz x ssz get log p(z) # get prior (as unit normal dist) # bsz x ssz get log p(x|z) # decode # bsz x ssz x input_dim # bsz x ssz x input_dim # bsz x ssz get log p(x|z)p(z)/q(z|x) # bsz x ssz # relative prob # bsz x 1 # return # init get z samples from p(z) # get prior (as unit normal dist) # sample z get log p(x|z) # decode # bsz x ssz x input_dim # bsz x ssz x input_dim # bsz x ssz get log p(x) # bsz x ssz # relative prob # bsz x 1 # return | 2.059899 | 2 |
napari_imsmicrolink/_tests/test_imsmicrolink.py | NHPatterson/napari-imsmicrolink | 3 | 6622186 | <reponame>NHPatterson/napari-imsmicrolink
# import os
# from pathlib import Path
# from napari_imsmicrolink._dock_widget import IMSMicroLink
#
#
# def test_ims_data_read(make_napari_viewer):
# HERE = os.path.dirname(__file__)
# data_fp = Path(HERE) / "data_tests" / "_test_data" / "bruker_spotlist.txt"
# viewer = make_napari_viewer()
# imsml = IMSMicroLink(viewer)
# imsml.read_ims_data(data_fp)
#
# assert imsml.ims_pixel_map
# assert imsml.viewer.layers["IMS Pixel Map"]
# assert imsml.viewer.layers["IMS Fiducials"]
# assert imsml.viewer.layers["IMS ROIs"]
| # import os
# from pathlib import Path
# from napari_imsmicrolink._dock_widget import IMSMicroLink
#
#
# def test_ims_data_read(make_napari_viewer):
# HERE = os.path.dirname(__file__)
# data_fp = Path(HERE) / "data_tests" / "_test_data" / "bruker_spotlist.txt"
# viewer = make_napari_viewer()
# imsml = IMSMicroLink(viewer)
# imsml.read_ims_data(data_fp)
#
# assert imsml.ims_pixel_map
# assert imsml.viewer.layers["IMS Pixel Map"]
# assert imsml.viewer.layers["IMS Fiducials"]
# assert imsml.viewer.layers["IMS ROIs"] | en | 0.400634 | # import os # from pathlib import Path # from napari_imsmicrolink._dock_widget import IMSMicroLink # # # def test_ims_data_read(make_napari_viewer): # HERE = os.path.dirname(__file__) # data_fp = Path(HERE) / "data_tests" / "_test_data" / "bruker_spotlist.txt" # viewer = make_napari_viewer() # imsml = IMSMicroLink(viewer) # imsml.read_ims_data(data_fp) # # assert imsml.ims_pixel_map # assert imsml.viewer.layers["IMS Pixel Map"] # assert imsml.viewer.layers["IMS Fiducials"] # assert imsml.viewer.layers["IMS ROIs"] | 2.135181 | 2 |
fab_deploy_tests/test_project3/test_project3/geo_app/urls.py | erlaveri/django-fab-deploy | 0 | 6622187 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.conf.urls import patterns, include, url
urlpatterns = patterns('test_project3.geo_app.views',
url(r'^distance/$', 'distance'),
)
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.conf.urls import patterns, include, url
urlpatterns = patterns('test_project3.geo_app.views',
url(r'^distance/$', 'distance'),
)
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.494726 | 1 |
test_project/test_export.py | admariner/django-sql-dashboard | 293 | 6622188 | <reponame>admariner/django-sql-dashboard
def test_export_requires_setting(admin_client, dashboard_db):
for key in ("export_csv_0", "export_tsv_0"):
response = admin_client.post(
"/dashboard/",
{
"sql": "SELECT 'hello' as label, * FROM generate_series(0, 10000)",
key: "1",
},
)
assert response.status_code == 403
def test_no_export_on_saved_dashboard(
admin_client, dashboard_db, settings, saved_dashboard
):
settings.DASHBOARD_ENABLE_FULL_EXPORT = True
response = admin_client.get("/dashboard/test/")
assert response.status_code == 200
assert b'<pre class="sql">select 22 + 55</pre>' in response.content
assert b"Export all as CSV" not in response.content
def test_export_csv(admin_client, dashboard_db, settings):
settings.DASHBOARD_ENABLE_FULL_EXPORT = True
response = admin_client.post(
"/dashboard/",
{
"sql": "SELECT 'hello' as label, * FROM generate_series(0, 10000)",
"export_csv_0": "1",
},
)
body = b"".join(response.streaming_content)
assert body.startswith(
b"label,generate_series\r\nhello,0\r\nhello,1\r\nhello,2\r\n"
)
assert body.endswith(b"hello,9998\r\nhello,9999\r\nhello,10000\r\n")
assert response["Content-Type"] == "text/csv"
content_disposition = response["Content-Disposition"]
assert content_disposition.startswith(
'attachment; filename="select--hello--as-label'
)
assert content_disposition.endswith('.csv"')
def test_export_tsv(admin_client, dashboard_db, settings):
settings.DASHBOARD_ENABLE_FULL_EXPORT = True
response = admin_client.post(
"/dashboard/",
{
"sql": "SELECT 'hello' as label, * FROM generate_series(0, 10000)",
"export_tsv_0": "1",
},
)
body = b"".join(response.streaming_content)
assert body.startswith(
b"label\tgenerate_series\r\nhello\t0\r\nhello\t1\r\nhello\t2\r\n"
)
assert body.endswith(b"hello\t9998\r\nhello\t9999\r\nhello\t10000\r\n")
assert response["Content-Type"] == "text/tab-separated-values"
content_disposition = response["Content-Disposition"]
assert content_disposition.startswith(
'attachment; filename="select--hello--as-label'
)
assert content_disposition.endswith('.tsv"')
| def test_export_requires_setting(admin_client, dashboard_db):
for key in ("export_csv_0", "export_tsv_0"):
response = admin_client.post(
"/dashboard/",
{
"sql": "SELECT 'hello' as label, * FROM generate_series(0, 10000)",
key: "1",
},
)
assert response.status_code == 403
def test_no_export_on_saved_dashboard(
admin_client, dashboard_db, settings, saved_dashboard
):
settings.DASHBOARD_ENABLE_FULL_EXPORT = True
response = admin_client.get("/dashboard/test/")
assert response.status_code == 200
assert b'<pre class="sql">select 22 + 55</pre>' in response.content
assert b"Export all as CSV" not in response.content
def test_export_csv(admin_client, dashboard_db, settings):
settings.DASHBOARD_ENABLE_FULL_EXPORT = True
response = admin_client.post(
"/dashboard/",
{
"sql": "SELECT 'hello' as label, * FROM generate_series(0, 10000)",
"export_csv_0": "1",
},
)
body = b"".join(response.streaming_content)
assert body.startswith(
b"label,generate_series\r\nhello,0\r\nhello,1\r\nhello,2\r\n"
)
assert body.endswith(b"hello,9998\r\nhello,9999\r\nhello,10000\r\n")
assert response["Content-Type"] == "text/csv"
content_disposition = response["Content-Disposition"]
assert content_disposition.startswith(
'attachment; filename="select--hello--as-label'
)
assert content_disposition.endswith('.csv"')
def test_export_tsv(admin_client, dashboard_db, settings):
settings.DASHBOARD_ENABLE_FULL_EXPORT = True
response = admin_client.post(
"/dashboard/",
{
"sql": "SELECT 'hello' as label, * FROM generate_series(0, 10000)",
"export_tsv_0": "1",
},
)
body = b"".join(response.streaming_content)
assert body.startswith(
b"label\tgenerate_series\r\nhello\t0\r\nhello\t1\r\nhello\t2\r\n"
)
assert body.endswith(b"hello\t9998\r\nhello\t9999\r\nhello\t10000\r\n")
assert response["Content-Type"] == "text/tab-separated-values"
content_disposition = response["Content-Disposition"]
assert content_disposition.startswith(
'attachment; filename="select--hello--as-label'
)
assert content_disposition.endswith('.tsv"') | none | 1 | 2.134319 | 2 | |
venv/lib/python3.8/site-packages/numpy/lib/tests/test__datasource.py | Retraces/UkraineBot | 2 | 6622189 | /home/runner/.cache/pip/pool/1c/b8/43/a6a237eaa2165dd2e663da4f5e965265d45c70c299fa1d94e6397ada01 | /home/runner/.cache/pip/pool/1c/b8/43/a6a237eaa2165dd2e663da4f5e965265d45c70c299fa1d94e6397ada01 | none | 1 | 0.836136 | 1 | |
geocoder/regex_library.py | taurenk/Py-Geocoder | 0 | 6622190 | <filename>geocoder/regex_library.py
"""
12/24/2014
Compile ALL reusable regex in one location, for centralized use.
"""
import re
import standards
class RegexLib:
number_regex = re.compile( r'^\d+[-]?(\w+)?')
po_regex = re.compile( r'(?:(PO BOX|P O BOX)\s(\d*[- ]?\d*))' )
intersection_test = re.compile(r'(?:\s(AT|@|AND|&)\s)')
street_regex = re.compile(r'(?:([A-Z0-9\'\-]+)\s?)+')
apt_regex = re.compile(r'[#][A-Z0-9]*')
city_regex = re.compile(r'(?:[A-Z\-]+\s*)+')
state_regex = None
zip_regex = re.compile(r'(?:(\d+)|(\d*[- ]?\d*))?$')
secondary_str_regex = None
street_prefix_regex = None
def __init__(self):
print 'Initiating RegexLib'
self.state_regex = re.compile(r'(?:\b' + self.import_state_regex() + r')')
self.street_prefix_regex = re.compile(r'^(' + self.import_prefix_regex() + r')' )
self.secondary_str_regex = re.compile(r'(?:\s(' + self.import_secondary_regex() + r') \w+?)' )
def import_state_regex(self):
"""Generate the US States regex string """
list = []
for key in standards.standards().states:
list.append(key + r'\s?$')
list.append(standards.standards().states[key]+ r'\s?$')
return r'|'.join(list)
def import_secondary_regex(self):
list = []
for key in standards.standards().units:
list.append(key)
list.append(standards.standards().units[key])
return r'|'.join(list)
def import_prefix_regex(self):
list = []
for key in standards.standards().tiger_prefix_types:
list.append(key + r'\s?')
list.append(standards.standards().tiger_prefix_types[key]+ r'\s?')
return r'|'.join(list)
| <filename>geocoder/regex_library.py
"""
12/24/2014
Compile ALL reusable regex in one location, for centralized use.
"""
import re
import standards
class RegexLib:
number_regex = re.compile( r'^\d+[-]?(\w+)?')
po_regex = re.compile( r'(?:(PO BOX|P O BOX)\s(\d*[- ]?\d*))' )
intersection_test = re.compile(r'(?:\s(AT|@|AND|&)\s)')
street_regex = re.compile(r'(?:([A-Z0-9\'\-]+)\s?)+')
apt_regex = re.compile(r'[#][A-Z0-9]*')
city_regex = re.compile(r'(?:[A-Z\-]+\s*)+')
state_regex = None
zip_regex = re.compile(r'(?:(\d+)|(\d*[- ]?\d*))?$')
secondary_str_regex = None
street_prefix_regex = None
def __init__(self):
print 'Initiating RegexLib'
self.state_regex = re.compile(r'(?:\b' + self.import_state_regex() + r')')
self.street_prefix_regex = re.compile(r'^(' + self.import_prefix_regex() + r')' )
self.secondary_str_regex = re.compile(r'(?:\s(' + self.import_secondary_regex() + r') \w+?)' )
def import_state_regex(self):
"""Generate the US States regex string """
list = []
for key in standards.standards().states:
list.append(key + r'\s?$')
list.append(standards.standards().states[key]+ r'\s?$')
return r'|'.join(list)
def import_secondary_regex(self):
list = []
for key in standards.standards().units:
list.append(key)
list.append(standards.standards().units[key])
return r'|'.join(list)
def import_prefix_regex(self):
list = []
for key in standards.standards().tiger_prefix_types:
list.append(key + r'\s?')
list.append(standards.standards().tiger_prefix_types[key]+ r'\s?')
return r'|'.join(list)
| en | 0.49639 | 12/24/2014 Compile ALL reusable regex in one location, for centralized use. #][A-Z0-9]*') Generate the US States regex string | 2.530848 | 3 |
JiYouMCC/0017/0017.py | hooting/show-me-the-code-python | 0 | 6622191 | # -*- coding: utf-8 -*-
import xlrd
from xml.dom.minidom import Document
from xml.etree.ElementTree import Comment, Element
import json
infos = []
info_file = xlrd.open_workbook('students.xls')
info_table = info_file.sheets()[0]
row_count = info_table.nrows
doc = Document()
root = doc.createElement('root')
doc.appendChild(root)
students = doc.createElement('students')
for row in range(row_count):
student = doc.createElement('student')
student.setAttribute('name', info_table.cell(row, 1).value.encode('utf-8'))
scores = doc.createElement('scores')
score = doc.createElement('score')
score.setAttribute('subject', '数学')
score.appendChild(doc.createTextNode('%d' % info_table.cell(row, 2).value))
scores.appendChild(score)
score1 = doc.createElement('score')
score1.setAttribute('subject', '语文')
score1.appendChild(doc.createTextNode('%d' % info_table.cell(row, 3).value))
scores.appendChild(score1)
score2 = doc.createElement('score')
score2.setAttribute('subject', '英文')
score2.appendChild(doc.createTextNode('%d' % info_table.cell(row, 4).value))
scores.appendChild(score2)
student.appendChild(scores)
students.appendChild(student)
root.appendChild(students)
file = open('students.xml','w')
file.write(doc.toprettyxml(indent = ''))
file.close()
| # -*- coding: utf-8 -*-
import xlrd
from xml.dom.minidom import Document
from xml.etree.ElementTree import Comment, Element
import json
infos = []
info_file = xlrd.open_workbook('students.xls')
info_table = info_file.sheets()[0]
row_count = info_table.nrows
doc = Document()
root = doc.createElement('root')
doc.appendChild(root)
students = doc.createElement('students')
for row in range(row_count):
student = doc.createElement('student')
student.setAttribute('name', info_table.cell(row, 1).value.encode('utf-8'))
scores = doc.createElement('scores')
score = doc.createElement('score')
score.setAttribute('subject', '数学')
score.appendChild(doc.createTextNode('%d' % info_table.cell(row, 2).value))
scores.appendChild(score)
score1 = doc.createElement('score')
score1.setAttribute('subject', '语文')
score1.appendChild(doc.createTextNode('%d' % info_table.cell(row, 3).value))
scores.appendChild(score1)
score2 = doc.createElement('score')
score2.setAttribute('subject', '英文')
score2.appendChild(doc.createTextNode('%d' % info_table.cell(row, 4).value))
scores.appendChild(score2)
student.appendChild(scores)
students.appendChild(student)
root.appendChild(students)
file = open('students.xml','w')
file.write(doc.toprettyxml(indent = ''))
file.close()
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.886786 | 3 |
eqstats/catalogs.py | egdaub/eqstats | 1 | 6622192 | <reponame>egdaub/eqstats
import numpy as np
def omori_times(ncat, nevents, tmin, tmax, b, p=1., detectprob = None):
"""
creates ncat synthetic realizations of an Omori decay in seismicity
parameters are nevents (number of events), tmin (minimum catalog time, main shock is t=0)
tmax (maximum catalog time), b (Omori time offset, R \propto 1/(b+t)^p)
Inputs:
ncat = number of realizations
nevents = number of events per realization
tmin = catalog start time (t=0 is main shock)
tmax = catalog end time (t=0 is main shock)
b, p = Omori parameters
detectprob = function mapping event time to detection probability
returns numpy array with shape (ncat, nevents)
"""
assert(ncat > 0)
assert(nevents > 0)
assert(tmin > 0.)
assert(tmax > tmin)
assert(b > 0.)
assert(p > 0.)
if detectprob is None:
detectprob = lambda x: 1.
acceptedtimes = []
for i in range(nevents*ncat):
while True:
times = np.random.random()
if p == 1.:
times = tmin + (b+tmin)*(((b+tmax)/(b+tmin))**times - 1.)
else:
times = -b + ((1.-times)/(b+tmin)**(p-1.)+times/(b+tmax)**(p-1.))**(-1./(p-1.))
detect = detectprob(times)
if detect >= np.random.random():
acceptedtimes.append(times)
break
times = np.reshape(np.array(acceptedtimes), (ncat, nevents))
times = np.sort(times)
return times
def random_times(nevents, tmin = 0., tmax = 100.):
"generates a random sequence of nevents events"
times = tmin + (tmax-tmin)*np.random.random(nevents)
times = np.sort(times)
return times
def random_magnitudes(nevents, mmin, mmax, b = 1.):
"generates array of length nevents of magnitude values for a GR distribution given min and max magnitudes and b"
return mmin-1./b*np.log(1.-np.random.random(nevents)*(1.-10.**(-b*(mmax-mmin))))/np.log(10.)
| import numpy as np
def omori_times(ncat, nevents, tmin, tmax, b, p=1., detectprob = None):
"""
creates ncat synthetic realizations of an Omori decay in seismicity
parameters are nevents (number of events), tmin (minimum catalog time, main shock is t=0)
tmax (maximum catalog time), b (Omori time offset, R \propto 1/(b+t)^p)
Inputs:
ncat = number of realizations
nevents = number of events per realization
tmin = catalog start time (t=0 is main shock)
tmax = catalog end time (t=0 is main shock)
b, p = Omori parameters
detectprob = function mapping event time to detection probability
returns numpy array with shape (ncat, nevents)
"""
assert(ncat > 0)
assert(nevents > 0)
assert(tmin > 0.)
assert(tmax > tmin)
assert(b > 0.)
assert(p > 0.)
if detectprob is None:
detectprob = lambda x: 1.
acceptedtimes = []
for i in range(nevents*ncat):
while True:
times = np.random.random()
if p == 1.:
times = tmin + (b+tmin)*(((b+tmax)/(b+tmin))**times - 1.)
else:
times = -b + ((1.-times)/(b+tmin)**(p-1.)+times/(b+tmax)**(p-1.))**(-1./(p-1.))
detect = detectprob(times)
if detect >= np.random.random():
acceptedtimes.append(times)
break
times = np.reshape(np.array(acceptedtimes), (ncat, nevents))
times = np.sort(times)
return times
def random_times(nevents, tmin = 0., tmax = 100.):
"generates a random sequence of nevents events"
times = tmin + (tmax-tmin)*np.random.random(nevents)
times = np.sort(times)
return times
def random_magnitudes(nevents, mmin, mmax, b = 1.):
"generates array of length nevents of magnitude values for a GR distribution given min and max magnitudes and b"
return mmin-1./b*np.log(1.-np.random.random(nevents)*(1.-10.**(-b*(mmax-mmin))))/np.log(10.) | en | 0.671957 | creates ncat synthetic realizations of an Omori decay in seismicity parameters are nevents (number of events), tmin (minimum catalog time, main shock is t=0) tmax (maximum catalog time), b (Omori time offset, R \propto 1/(b+t)^p) Inputs: ncat = number of realizations nevents = number of events per realization tmin = catalog start time (t=0 is main shock) tmax = catalog end time (t=0 is main shock) b, p = Omori parameters detectprob = function mapping event time to detection probability returns numpy array with shape (ncat, nevents) | 2.831908 | 3 |
WhileLoop/EasterGuests.py | Rohitm619/Softuni-Python-Basic | 1 | 6622193 | import math
from math import ceil
number_of_guests = int(input())
budget = float(input())
number_of_kozunak = number_of_guests / 3 #ceil
number_of_eggs_needed = number_of_guests * 2
kozunak_price = ceil(number_of_kozunak) * 4
egg_price = number_of_eggs_needed * 0.45
total = kozunak_price + egg_price
diff = total - budget
if budget >= total:
print(f"Lyubo bought {ceil(number_of_kozunak)} Easter bread and {number_of_eggs_needed} eggs.")
print(f"He has {abs(diff):.2f} lv. left.")
else:
print(f"Lyubo doesn't have enough money.")
print(f"He needs {abs(diff):.2f} lv. more.") | import math
from math import ceil
number_of_guests = int(input())
budget = float(input())
number_of_kozunak = number_of_guests / 3 #ceil
number_of_eggs_needed = number_of_guests * 2
kozunak_price = ceil(number_of_kozunak) * 4
egg_price = number_of_eggs_needed * 0.45
total = kozunak_price + egg_price
diff = total - budget
if budget >= total:
print(f"Lyubo bought {ceil(number_of_kozunak)} Easter bread and {number_of_eggs_needed} eggs.")
print(f"He has {abs(diff):.2f} lv. left.")
else:
print(f"Lyubo doesn't have enough money.")
print(f"He needs {abs(diff):.2f} lv. more.") | none | 1 | 3.500434 | 4 | |
Q617.py | Linchin/python_leetcode_git | 0 | 6622194 | """
Q617
Merge Two Binary Trees
Easy
Given two binary trees and imagine that when you put one
of them to cover the other, some nodes of the two trees
are overlapped while the others are not.
You need to merge them into a new binary tree. The merge
rule is that if two nodes overlap, then sum node values
up as the new value of the merged node. Otherwise, the
NOT null node will be used as the node of new tree.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def mergeTrees(self, t1: TreeNode, t2: TreeNode) -> TreeNode:
def merge(t1: TreeNode, t2: TreeNode) -> TreeNode:
if t1 is None and t2 is None:
return None
sum_ = 0
left1 = None
left2 = None
right1 = None
right2 = None
if t1 is not None:
sum_ += t1.val
left1 = t1.left
right1 = t1.right
if t2 is not None:
sum_ += t2.val
left2 = t2.left
right2 = t2.right
new_node = TreeNode(sum_)
new_node.left = merge(left1, left2)
new_node.right = merge(right1, right2)
return new_node
return merge(t1, t2)
a1 = TreeNode(1)
a2 = TreeNode(2)
a3 = TreeNode(3)
b1 = TreeNode(5)
b2 = TreeNode(5)
b3 = TreeNode(5)
#a1.left = a2
a1.right = a3
b1.left = b2
b1.right = b3
sol = Solution()
tree = sol.mergeTrees(a1, b1)
def preorder(tree):
if tree is not None:
print(tree.val)
preorder(tree.left)
preorder(tree.right)
preorder(tree)
| """
Q617
Merge Two Binary Trees
Easy
Given two binary trees and imagine that when you put one
of them to cover the other, some nodes of the two trees
are overlapped while the others are not.
You need to merge them into a new binary tree. The merge
rule is that if two nodes overlap, then sum node values
up as the new value of the merged node. Otherwise, the
NOT null node will be used as the node of new tree.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def mergeTrees(self, t1: TreeNode, t2: TreeNode) -> TreeNode:
def merge(t1: TreeNode, t2: TreeNode) -> TreeNode:
if t1 is None and t2 is None:
return None
sum_ = 0
left1 = None
left2 = None
right1 = None
right2 = None
if t1 is not None:
sum_ += t1.val
left1 = t1.left
right1 = t1.right
if t2 is not None:
sum_ += t2.val
left2 = t2.left
right2 = t2.right
new_node = TreeNode(sum_)
new_node.left = merge(left1, left2)
new_node.right = merge(right1, right2)
return new_node
return merge(t1, t2)
a1 = TreeNode(1)
a2 = TreeNode(2)
a3 = TreeNode(3)
b1 = TreeNode(5)
b2 = TreeNode(5)
b3 = TreeNode(5)
#a1.left = a2
a1.right = a3
b1.left = b2
b1.right = b3
sol = Solution()
tree = sol.mergeTrees(a1, b1)
def preorder(tree):
if tree is not None:
print(tree.val)
preorder(tree.left)
preorder(tree.right)
preorder(tree)
| en | 0.952031 | Q617 Merge Two Binary Trees Easy Given two binary trees and imagine that when you put one of them to cover the other, some nodes of the two trees are overlapped while the others are not. You need to merge them into a new binary tree. The merge rule is that if two nodes overlap, then sum node values up as the new value of the merged node. Otherwise, the NOT null node will be used as the node of new tree. # Definition for a binary tree node. #a1.left = a2 | 3.827987 | 4 |
tests/stubreferencetest.py | netcharm/ironclad | 0 | 6622195 | <reponame>netcharm/ironclad
import os
from tests.utils.runtest import makesuite, run
from tests.utils.gc import gcwait
from tests.utils.testcase import TestCase
from Ironclad import dgt_getfuncptr, dgt_registerdata, Unmanaged, StubReference
from System import IntPtr
class StubReferenceTest(TestCase):
def testMapInitUnmapLibrary(self):
self.assertEquals(Unmanaged.GetModuleHandle("python26.dll"), IntPtr.Zero,
"library already mapped")
sr = StubReference(os.path.join("build", "ironclad", "python26.dll"))
self.assertNotEquals(Unmanaged.GetModuleHandle("python26.dll"), IntPtr.Zero,
"library not mapped by construction")
fpCalls = []
@dgt_getfuncptr
def GetFuncPtr(name):
fpCalls.append(name)
return IntPtr.Zero
dataCalls = []
@dgt_registerdata
def RegisterData(name, _):
dataCalls.append(name)
sr.Init(GetFuncPtr, RegisterData)
self.assertEquals(len(fpCalls) > 0, True, "did not get any addresses")
self.assertEquals(len(dataCalls) > 0, True, "did not set any data")
sr.Dispose()
self.assertEquals(Unmanaged.GetModuleHandle("python26.dll"), IntPtr.Zero,
"library not unmapped on dispose")
sr.Dispose()
# safe to call Dispose twice
def testUnmapsAutomagically(self):
sr = StubReference(os.path.join("build", "ironclad", "python26.dll"))
self.assertNotEquals(Unmanaged.GetModuleHandle("python26.dll"), IntPtr.Zero,
"library not mapped by construction")
del sr
gcwait()
self.assertEquals(Unmanaged.GetModuleHandle("python26.dll"), IntPtr.Zero,
"library not unmapped on finalize")
def testLoadBuiltinModule(self):
sr = StubReference(os.path.join("tests", "data", "fakepython.dll"))
sr.LoadBuiltinModule('somecrazymodule') # if func not found and callable, error
sr.Dispose()
suite = makesuite(StubReferenceTest)
if __name__ == '__main__':
run(suite)
| import os
from tests.utils.runtest import makesuite, run
from tests.utils.gc import gcwait
from tests.utils.testcase import TestCase
from Ironclad import dgt_getfuncptr, dgt_registerdata, Unmanaged, StubReference
from System import IntPtr
class StubReferenceTest(TestCase):
def testMapInitUnmapLibrary(self):
self.assertEquals(Unmanaged.GetModuleHandle("python26.dll"), IntPtr.Zero,
"library already mapped")
sr = StubReference(os.path.join("build", "ironclad", "python26.dll"))
self.assertNotEquals(Unmanaged.GetModuleHandle("python26.dll"), IntPtr.Zero,
"library not mapped by construction")
fpCalls = []
@dgt_getfuncptr
def GetFuncPtr(name):
fpCalls.append(name)
return IntPtr.Zero
dataCalls = []
@dgt_registerdata
def RegisterData(name, _):
dataCalls.append(name)
sr.Init(GetFuncPtr, RegisterData)
self.assertEquals(len(fpCalls) > 0, True, "did not get any addresses")
self.assertEquals(len(dataCalls) > 0, True, "did not set any data")
sr.Dispose()
self.assertEquals(Unmanaged.GetModuleHandle("python26.dll"), IntPtr.Zero,
"library not unmapped on dispose")
sr.Dispose()
# safe to call Dispose twice
def testUnmapsAutomagically(self):
sr = StubReference(os.path.join("build", "ironclad", "python26.dll"))
self.assertNotEquals(Unmanaged.GetModuleHandle("python26.dll"), IntPtr.Zero,
"library not mapped by construction")
del sr
gcwait()
self.assertEquals(Unmanaged.GetModuleHandle("python26.dll"), IntPtr.Zero,
"library not unmapped on finalize")
def testLoadBuiltinModule(self):
sr = StubReference(os.path.join("tests", "data", "fakepython.dll"))
sr.LoadBuiltinModule('somecrazymodule') # if func not found and callable, error
sr.Dispose()
suite = makesuite(StubReferenceTest)
if __name__ == '__main__':
run(suite) | en | 0.823827 | # safe to call Dispose twice # if func not found and callable, error | 2.083995 | 2 |
thai2transformers/auto.py | modem888/thai2transformers | 64 | 6622196 | from collections import OrderedDict
from transformers import (
AutoConfig,
PretrainedConfig
)
from transformers.modeling_bert import BertConfig
from transformers.modeling_roberta import RobertaConfig
from transformers.modeling_xlm_roberta import XLMRobertaConfig
from .models import (
XLMRobertaForMultiLabelSequenceClassification,
BertForMultiLabelSequenceClassification,
RobertaForMultiLabelSequenceClassification
)
MODEL_FOR_MULTI_LABEL_SEQUENCE_CLASSIFICATION_MAPPING = OrderedDict(
[
(XLMRobertaConfig, XLMRobertaForMultiLabelSequenceClassification),
(BertConfig, BertForMultiLabelSequenceClassification),
(RobertaConfig, RobertaForMultiLabelSequenceClassification),
]
)
class AutoModelForMultiLabelSequenceClassification:
def __init__(self):
raise EnvironmentError(
"AutoModelForMultiLabelSequenceClassification is designed to be instantiated "
"using the `AutoModelForMultiLabelSequenceClassification.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForMultiLabelSequenceClassification.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a sequence classification head---from a
configuration.
Note:
Loading a model from its configuration file does **not** load the model weights.
It only affects the model's configuration. Use
:meth:`~transformers.AutoModelForMultiLabelSequenceClassification.from_pretrained` to load the model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForMultiLabelSequenceClassification
>>> # Download configuration from S3 and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelForMultiLabelSequenceClassification.from_config(config)
"""
if type(config) in MODEL_FOR_MULTI_LABEL_SEQUENCE_CLASSIFICATION_MAPPING.keys():
return MODEL_FOR_MULTI_LABEL_SEQUENCE_CLASSIFICATION_MAPPING[type(config)](config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_MULTI_LABEL_SEQUENCE_CLASSIFICATION_MAPPING.keys()),
)
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForMultiLabelSequenceClassification
>>> # Download model and configuration from S3 and cache.
>>> model = AutoModelForMultiLabelSequenceClassification.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelForMultiLabelSequenceClassification.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForMultiLabelSequenceClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
if type(config) in MODEL_FOR_MULTI_LABEL_SEQUENCE_CLASSIFICATION_MAPPING.keys():
return MODEL_FOR_MULTI_LABEL_SEQUENCE_CLASSIFICATION_MAPPING[type(config)].from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_MULTI_LABEL_SEQUENCE_CLASSIFICATION_MAPPING.keys()),
)
)
| from collections import OrderedDict
from transformers import (
AutoConfig,
PretrainedConfig
)
from transformers.modeling_bert import BertConfig
from transformers.modeling_roberta import RobertaConfig
from transformers.modeling_xlm_roberta import XLMRobertaConfig
from .models import (
XLMRobertaForMultiLabelSequenceClassification,
BertForMultiLabelSequenceClassification,
RobertaForMultiLabelSequenceClassification
)
MODEL_FOR_MULTI_LABEL_SEQUENCE_CLASSIFICATION_MAPPING = OrderedDict(
[
(XLMRobertaConfig, XLMRobertaForMultiLabelSequenceClassification),
(BertConfig, BertForMultiLabelSequenceClassification),
(RobertaConfig, RobertaForMultiLabelSequenceClassification),
]
)
class AutoModelForMultiLabelSequenceClassification:
def __init__(self):
raise EnvironmentError(
"AutoModelForMultiLabelSequenceClassification is designed to be instantiated "
"using the `AutoModelForMultiLabelSequenceClassification.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForMultiLabelSequenceClassification.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a sequence classification head---from a
configuration.
Note:
Loading a model from its configuration file does **not** load the model weights.
It only affects the model's configuration. Use
:meth:`~transformers.AutoModelForMultiLabelSequenceClassification.from_pretrained` to load the model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForMultiLabelSequenceClassification
>>> # Download configuration from S3 and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelForMultiLabelSequenceClassification.from_config(config)
"""
if type(config) in MODEL_FOR_MULTI_LABEL_SEQUENCE_CLASSIFICATION_MAPPING.keys():
return MODEL_FOR_MULTI_LABEL_SEQUENCE_CLASSIFICATION_MAPPING[type(config)](config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_MULTI_LABEL_SEQUENCE_CLASSIFICATION_MAPPING.keys()),
)
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForMultiLabelSequenceClassification
>>> # Download model and configuration from S3 and cache.
>>> model = AutoModelForMultiLabelSequenceClassification.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelForMultiLabelSequenceClassification.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForMultiLabelSequenceClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
if type(config) in MODEL_FOR_MULTI_LABEL_SEQUENCE_CLASSIFICATION_MAPPING.keys():
return MODEL_FOR_MULTI_LABEL_SEQUENCE_CLASSIFICATION_MAPPING[type(config)].from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_MULTI_LABEL_SEQUENCE_CLASSIFICATION_MAPPING.keys()),
)
)
| en | 0.600123 | Instantiates one of the model classes of the library---with a sequence classification head---from a configuration. Note: Loading a model from its configuration file does **not** load the model weights. It only affects the model's configuration. Use :meth:`~transformers.AutoModelForMultiLabelSequenceClassification.from_pretrained` to load the model weights. Args: config (:class:`~transformers.PretrainedConfig`): The model class to instantiate is selected based on the configuration class: List options Examples:: >>> from transformers import AutoConfig, AutoModelForMultiLabelSequenceClassification >>> # Download configuration from S3 and cache. >>> config = AutoConfig.from_pretrained('bert-base-uncased') >>> model = AutoModelForMultiLabelSequenceClassification.from_config(config) Examples:: >>> from transformers import AutoConfig, AutoModelForMultiLabelSequenceClassification >>> # Download model and configuration from S3 and cache. >>> model = AutoModelForMultiLabelSequenceClassification.from_pretrained('bert-base-uncased') >>> # Update configuration during loading >>> model = AutoModelForMultiLabelSequenceClassification.from_pretrained('bert-base-uncased', output_attentions=True) >>> model.config.output_attentions True >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') >>> model = AutoModelForMultiLabelSequenceClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) | 2.594154 | 3 |
renderable_core/services/autoscaler.py | therenderable/renderable-core | 0 | 6622197 | <gh_stars>0
import time
from threading import Thread, Lock
import docker
class Autoscaler:
def __init__(self, hostname, port, certificate_path, cleanup_period, cooldown_period):
self.hostname = hostname
self.port = port
self.certificate_path = certificate_path
self.cleanup_period = cleanup_period
self.cooldown_period = cooldown_period
public_certificate_path = str(self.certificate_path / 'cert.pem')
private_certificate_path = str(self.certificate_path / 'key.pem')
tls_config = docker.tls.TLSConfig(
client_cert = (public_certificate_path, private_certificate_path))
self.client = docker.DockerClient(f'https://{self.hostname}:{self.port}', tls = tls_config)
self.requests = {}
self.requests_lock = Lock()
cleanup_thread = Thread(target = self._cleanup_nodes, daemon = True)
cleanup_thread.start()
scaling_thread = Thread(target = self._scale_services, daemon = True)
scaling_thread.start()
def _cleanup_nodes(self):
def filter_by_status(node):
return node.attrs['Status']['State'] == 'down'
while True:
try:
nodes = list(filter(filter_by_status, self.client.nodes.list()))
for node in nodes:
node.remove(force = True)
except:
pass
time.sleep(self.cleanup_period)
def _scale_services(self):
while True:
self.requests_lock.acquire()
for container_name, delta in self.requests.items():
if delta != 0:
try:
self._update_service(container_name, delta)
self.requests[container_name] = 0
except:
pass
self.requests_lock.release()
time.sleep(self.cooldown_period)
def _update_service(self, container_name, delta):
service = self.client.services.get(container_name)
replicas = service.attrs['Spec']['Mode']['Replicated']['Replicas']
target_replicas = int(max(replicas + delta, 0))
service.scale(target_replicas)
def scale(self, container_name, task_count, upscaling):
self.requests_lock.acquire()
if container_name not in self.requests.keys():
self.requests[container_name] = 0
delta = task_count if upscaling else -task_count
self.requests[container_name] += delta
self.requests_lock.release()
| import time
from threading import Thread, Lock
import docker
class Autoscaler:
def __init__(self, hostname, port, certificate_path, cleanup_period, cooldown_period):
self.hostname = hostname
self.port = port
self.certificate_path = certificate_path
self.cleanup_period = cleanup_period
self.cooldown_period = cooldown_period
public_certificate_path = str(self.certificate_path / 'cert.pem')
private_certificate_path = str(self.certificate_path / 'key.pem')
tls_config = docker.tls.TLSConfig(
client_cert = (public_certificate_path, private_certificate_path))
self.client = docker.DockerClient(f'https://{self.hostname}:{self.port}', tls = tls_config)
self.requests = {}
self.requests_lock = Lock()
cleanup_thread = Thread(target = self._cleanup_nodes, daemon = True)
cleanup_thread.start()
scaling_thread = Thread(target = self._scale_services, daemon = True)
scaling_thread.start()
def _cleanup_nodes(self):
def filter_by_status(node):
return node.attrs['Status']['State'] == 'down'
while True:
try:
nodes = list(filter(filter_by_status, self.client.nodes.list()))
for node in nodes:
node.remove(force = True)
except:
pass
time.sleep(self.cleanup_period)
def _scale_services(self):
while True:
self.requests_lock.acquire()
for container_name, delta in self.requests.items():
if delta != 0:
try:
self._update_service(container_name, delta)
self.requests[container_name] = 0
except:
pass
self.requests_lock.release()
time.sleep(self.cooldown_period)
def _update_service(self, container_name, delta):
service = self.client.services.get(container_name)
replicas = service.attrs['Spec']['Mode']['Replicated']['Replicas']
target_replicas = int(max(replicas + delta, 0))
service.scale(target_replicas)
def scale(self, container_name, task_count, upscaling):
self.requests_lock.acquire()
if container_name not in self.requests.keys():
self.requests[container_name] = 0
delta = task_count if upscaling else -task_count
self.requests[container_name] += delta
self.requests_lock.release() | none | 1 | 2.453247 | 2 | |
app/models.py | kasamsharif/tdd-flask | 1 | 6622198 | <reponame>kasamsharif/tdd-flask<gh_stars>1-10
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class ChoiceList(db.Model):
"""This class represents choice list"""
__tableaname__ = "choicelists"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
created_on = db.Column(db.DateTime, default=db.func.current_timestamp())
updated_on = db.Column(
db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp()
)
def __init__(self, name):
"""initialize with name."""
self.name = name
def save(self):
db.session.add(self)
db.session.commit()
@staticmethod
def get_all():
return ChoiceList.query.all()
def delete(self):
self.session.delete(self)
self.session.commit()
def __repr__(self):
return "<ChoiceList: {}>".format(self.name)
| from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class ChoiceList(db.Model):
"""This class represents choice list"""
__tableaname__ = "choicelists"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
created_on = db.Column(db.DateTime, default=db.func.current_timestamp())
updated_on = db.Column(
db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp()
)
def __init__(self, name):
"""initialize with name."""
self.name = name
def save(self):
db.session.add(self)
db.session.commit()
@staticmethod
def get_all():
return ChoiceList.query.all()
def delete(self):
self.session.delete(self)
self.session.commit()
def __repr__(self):
return "<ChoiceList: {}>".format(self.name) | en | 0.782571 | This class represents choice list initialize with name. | 3.046776 | 3 |
src/primaires/scripting/extensions/nombre.py | vlegoff/tsunami | 14 | 6622199 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module contenant la classe Nombre, détaillée plus bas."""
from textwrap import dedent
from primaires.interpreteur.editeur.entier import Entier
from primaires.interpreteur.editeur.flag import Flag
from primaires.interpreteur.editeur.flottant import Flottant
from primaires.interpreteur.editeur.uniligne import Uniligne
from primaires.scripting.extensions.base import Extension
class Nombre(Extension):
"""Classe représentant le type éditable 'nombre'.
Ce type utilise soit l'éditeur Entier, soit l'éditeur Flottant.
Les limites inférieures et supérieures sont également supportées.
"""
extension = "nombre"
aide = "un nombre, à virgule ou pas"
def __init__(self, structure, nom):
Extension.__init__(self, structure, nom)
self.a_virgule = False
self.limite_inf = None
self.limite_sup = None
@property
def editeur(self):
"""Retourne le type d'éditeur."""
if self.a_virgule:
return Flottant
else:
return Entier
@property
def arguments(self):
"""Retourne les arguments de l'éditeur."""
return (self.limite_inf, self.limite_sup)
def etendre_editeur(self, presentation):
"""Ëtend l'éditeur en fonction du type de l'extension."""
# Nombre à virgule
a_virgule = presentation.ajouter_choix("nombre à virgule", "v", Flag,
self, "a_virgule")
a_virgule.parent = presentation
# Limite inférieure
inf = presentation.ajouter_choix("limite inférieure", "f", Entier,
self, "limite_inf")
inf.parent = presentation
inf.prompt = "Entrez la limite inférieure : "
inf.apercu = "{valeur}"
inf.aide_courte = dedent("""
Entrez la limite inférieure autorisée ou |ent|/|ff| pour
revenir à la fenêtre parente.
Si une limite inférieure est précisée, le personnage édiant
ce menu ne pourra pas entrer un nombre inférieur.
Limite inférieure actuelle : {valeur}""".strip("\n"))
# Limite supérieure
sup = presentation.ajouter_choix("limite supérieure", "s", Entier,
self, "limite_sup")
sup.parent = presentation
sup.prompt = "Entrez la limite supérieure : "
sup.apercu = "{valeur}"
sup.aide_courte = dedent("""
Entrez la limite supérieure autorisée ou |cmd|/|ff| pour
revenir à la fenêtre parente.
Si une limite supérieure est précisée, le personnage édiant ce
menu ne pourra pas entrer un nombre supérieur.
Limite supérieure actuelle : {valeur}""".strip("\n"))
| # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module contenant la classe Nombre, détaillée plus bas."""
from textwrap import dedent
from primaires.interpreteur.editeur.entier import Entier
from primaires.interpreteur.editeur.flag import Flag
from primaires.interpreteur.editeur.flottant import Flottant
from primaires.interpreteur.editeur.uniligne import Uniligne
from primaires.scripting.extensions.base import Extension
class Nombre(Extension):
"""Classe représentant le type éditable 'nombre'.
Ce type utilise soit l'éditeur Entier, soit l'éditeur Flottant.
Les limites inférieures et supérieures sont également supportées.
"""
extension = "nombre"
aide = "un nombre, à virgule ou pas"
def __init__(self, structure, nom):
Extension.__init__(self, structure, nom)
self.a_virgule = False
self.limite_inf = None
self.limite_sup = None
@property
def editeur(self):
"""Retourne le type d'éditeur."""
if self.a_virgule:
return Flottant
else:
return Entier
@property
def arguments(self):
"""Retourne les arguments de l'éditeur."""
return (self.limite_inf, self.limite_sup)
def etendre_editeur(self, presentation):
"""Ëtend l'éditeur en fonction du type de l'extension."""
# Nombre à virgule
a_virgule = presentation.ajouter_choix("nombre à virgule", "v", Flag,
self, "a_virgule")
a_virgule.parent = presentation
# Limite inférieure
inf = presentation.ajouter_choix("limite inférieure", "f", Entier,
self, "limite_inf")
inf.parent = presentation
inf.prompt = "Entrez la limite inférieure : "
inf.apercu = "{valeur}"
inf.aide_courte = dedent("""
Entrez la limite inférieure autorisée ou |ent|/|ff| pour
revenir à la fenêtre parente.
Si une limite inférieure est précisée, le personnage édiant
ce menu ne pourra pas entrer un nombre inférieur.
Limite inférieure actuelle : {valeur}""".strip("\n"))
# Limite supérieure
sup = presentation.ajouter_choix("limite supérieure", "s", Entier,
self, "limite_sup")
sup.parent = presentation
sup.prompt = "Entrez la limite supérieure : "
sup.apercu = "{valeur}"
sup.aide_courte = dedent("""
Entrez la limite supérieure autorisée ou |cmd|/|ff| pour
revenir à la fenêtre parente.
Si une limite supérieure est précisée, le personnage édiant ce
menu ne pourra pas entrer un nombre supérieur.
Limite supérieure actuelle : {valeur}""".strip("\n"))
| fr | 0.540872 | # -*-coding:Utf-8 -* # Copyright (c) 2010-2017 <NAME> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT # OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. Module contenant la classe Nombre, détaillée plus bas. Classe représentant le type éditable 'nombre'. Ce type utilise soit l'éditeur Entier, soit l'éditeur Flottant. Les limites inférieures et supérieures sont également supportées. Retourne le type d'éditeur. Retourne les arguments de l'éditeur. Ëtend l'éditeur en fonction du type de l'extension. 
# Nombre à virgule # Limite inférieure Entrez la limite inférieure autorisée ou |ent|/|ff| pour revenir à la fenêtre parente. Si une limite inférieure est précisée, le personnage édiant ce menu ne pourra pas entrer un nombre inférieur. Limite inférieure actuelle : {valeur} # Limite supérieure Entrez la limite supérieure autorisée ou |cmd|/|ff| pour revenir à la fenêtre parente. Si une limite supérieure est précisée, le personnage édiant ce menu ne pourra pas entrer un nombre supérieur. Limite supérieure actuelle : {valeur} | 1.308947 | 1 |
codes/GP-obtain-2D-LLS.py | AbhilashMathews/gp_extras_applications | 1 | 6622200 | <filename>codes/GP-obtain-2D-LLS.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 27 15:56:39 2019
@author: mathewsa
This script is used for plotting the length scales learned by the GP across the
2D (i.e. radial and temporal) domain specified by the user. This script is to
be run only after first running and saving the GP after it has been trained
upon the experimental data. Note that certain trained GPs may have trouble during
training to find good estimates of length scales across the domain, nevertheless
the fits to the original data may still be mostly all right, but checking for
'good times' which are stored in the array 'inputs_t_array_good' should be
performed as described in the script 'GP-obtain-2D-profiles.py'.
"""
import sys
sys.path.append('C:/Users/mathewsa/') #provides path to gp_extras
import gp_extras
import pickle
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, Matern, ConstantKernel as C
from gp_extras.kernels import HeteroscedasticKernel, LocalLengthScalesKernel
from scipy.optimize import differential_evolution
from scipy import stats
from mpl_toolkits.mplot3d import Axes3D
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams['font.size'] = 18
# --- User inputs: evaluation window and grid resolution -----------------
psi_min = 0.85 #lower limit of the psi range (plotted on the x-axis below)
psi_max = 1.05 #upper limit of the psi range (plotted on the x-axis below)
T_min = 0.0 #in keV, lower limit you want for plotting y-axis
T_max = 2.0 #in keV, upper limit you want for plotting y-axis
# NOTE(review): T_min/T_max and n_sampling are not referenced anywhere in
# this script -- presumably kept for symmetry with the companion
# 'GP-obtain-2D-profiles.py' script; confirm before removing.
dpsi = 0.01 #normalized poloidal flux coordinate spacing you specify
dt = 0.001 #seconds; this is the grid spacing you specify
t_min = 0.4 #in seconds, lower limit of the time range (plotted on the y-axis below)
t_max = 1.58 #in seconds, upper limit of the time range (plotted on the y-axis below)
n_sampling = 1000 #provides higher sampling count for profile statistics
file_path = '.../trainedGPs/saved_GP_1091016033/' #path to saved GP contents
#file_path is where the gp and its variables have been saved
# --------------------------------------------------------------
# End of user inputs
# --------------------------------------------------------------
def de_optimizer(obj_func, initial_theta, bounds):
    """Gradient-free GP hyperparameter optimizer (differential evolution).

    Minimizes ``obj_func`` over ``bounds`` and returns the best parameter
    vector together with its objective value.  ``initial_theta`` is accepted
    for API compatibility with scikit-learn's optimizer hook but is unused,
    since differential evolution seeds its own population.
    """
    def objective(theta):
        # DE is derivative-free, so always evaluate without gradients.
        return obj_func(theta, eval_gradient=False)

    result = differential_evolution(objective, bounds, maxiter=n_max_iter,
                                    disp=False, polish=True)
    return result.x, objective(result.x)
number_of_samples = 1  # NOTE(review): not referenced later in this script

# Load the training data and the trained GP saved by the training script.
X_n = np.load(str(file_path)+'X_n.npy')
y_n_TS = np.load(str(file_path)+'y_n_TS.npy')
y_n_TS_err = np.load(str(file_path)+'y_n_TS_err.npy')
n_max_iter = np.load(str(file_path)+'n_max_iter.npy')
with open(str(file_path)+"gp.dump", "rb") as gp_file:  # was a bare open(); close the handle
    gp = pickle.load(gp_file)

x1 = np.arange(psi_min, psi_max, dpsi)  # radial coordinate
x2 = np.arange(t_min, t_max, dt)  # temporal coordinate

# Dense (psi, t) evaluation grid; row ordering (all times for the first
# radius, then the next radius, ...) matches the original nested loops.
inputs_x_array = np.array([[r, t] for r in x1 for t in x2])

# Learned local length scale (LLS) at each grid point, per the
# LocalLengthScalesKernel parameterization: theta_gp * 10**gp_l(x).
# The kernel attribute chain is hoisted out of the loop (loop-invariant).
lls_kernel = gp.kernel_.k1.k2
lls_len_scale = np.array([
    lls_kernel.theta_gp * 10**lls_kernel.gp_l.predict(point.reshape(1, -1))[0]
    for point in inputs_x_array
])
# 3-D scatter of the learned local length scales over the (psi, t) domain.
fig = plt.figure(figsize=(16,6))
cm = plt.cm.get_cmap('RdYlGn')
ax = fig.add_subplot(111, projection='3d')
# Color each point by the first component of its length scale; low alpha
# keeps the dense grid readable.
c = ax.scatter(inputs_x_array[:,0],inputs_x_array[:,1],lls_len_scale,c=lls_len_scale[:,0],cmap=cm,alpha=0.3)
ax.set_xlabel(r"$\psi$",labelpad=20)
ax.set_ylabel('Time (s)',labelpad=27.5)
ax.zaxis.set_rotate_label(False)  # keep the manual z-label rotation below
ax.set_zlabel('GP LLS',labelpad=5,rotation=90)
ax.set_xlim(0.8,1.1)
ax.set_ylim(0.4,1.55)
fig.colorbar(c, ax=ax)
# Fixed camera angles for a consistent view of the radial/temporal structure.
ax.azim = 25
ax.elev = 20
plt.show()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 27 15:56:39 2019
@author: mathewsa
This script is used for plotting the length scales learned by the GP across the
2D (i.e. radial and temporal) domain specified by the user. This script is to
be run only after first running and saving the GP after it has been trained
upon the experimental data. Note that certain trained GPs may have trouble during
training to find good estimates of length scales across the domain, nevertheless
the fits to the original data may still be mostly all right, but checking for
'good times' which are stored in the array 'inputs_t_array_good' should be
performed as described in the script 'GP-obtain-2D-profiles.py'.
"""
import sys
sys.path.append('C:/Users/mathewsa/') #provides path to gp_extras
import gp_extras
import pickle
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, Matern, ConstantKernel as C
from gp_extras.kernels import HeteroscedasticKernel, LocalLengthScalesKernel
from scipy.optimize import differential_evolution
from scipy import stats
from mpl_toolkits.mplot3d import Axes3D
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams['font.size'] = 18
# --- User inputs: evaluation window and grid resolution -----------------
psi_min = 0.85 #lower limit of the psi range (plotted on the x-axis below)
psi_max = 1.05 #upper limit of the psi range (plotted on the x-axis below)
T_min = 0.0 #in keV, lower limit you want for plotting y-axis
T_max = 2.0 #in keV, upper limit you want for plotting y-axis
# NOTE(review): T_min/T_max and n_sampling are not referenced anywhere in
# this script -- presumably kept for symmetry with the companion
# 'GP-obtain-2D-profiles.py' script; confirm before removing.
dpsi = 0.01 #normalized poloidal flux coordinate spacing you specify
dt = 0.001 #seconds; this is the grid spacing you specify
t_min = 0.4 #in seconds, lower limit of the time range (plotted on the y-axis below)
t_max = 1.58 #in seconds, upper limit of the time range (plotted on the y-axis below)
n_sampling = 1000 #provides higher sampling count for profile statistics
file_path = '.../trainedGPs/saved_GP_1091016033/' #path to saved GP contents
#file_path is where the gp and its variables have been saved
# --------------------------------------------------------------
# End of user inputs
# --------------------------------------------------------------
def de_optimizer(obj_func, initial_theta, bounds):
    """Gradient-free GP hyperparameter optimizer (differential evolution).

    Minimizes ``obj_func`` over ``bounds`` and returns the best parameter
    vector together with its objective value.  ``initial_theta`` is accepted
    for API compatibility with scikit-learn's optimizer hook but is unused,
    since differential evolution seeds its own population.
    """
    def objective(theta):
        # DE is derivative-free, so always evaluate without gradients.
        return obj_func(theta, eval_gradient=False)

    result = differential_evolution(objective, bounds, maxiter=n_max_iter,
                                    disp=False, polish=True)
    return result.x, objective(result.x)
number_of_samples = 1  # NOTE(review): not referenced later in this script

# Load the training data and the trained GP saved by the training script.
X_n = np.load(str(file_path)+'X_n.npy')
y_n_TS = np.load(str(file_path)+'y_n_TS.npy')
y_n_TS_err = np.load(str(file_path)+'y_n_TS_err.npy')
n_max_iter = np.load(str(file_path)+'n_max_iter.npy')
with open(str(file_path)+"gp.dump", "rb") as gp_file:  # was a bare open(); close the handle
    gp = pickle.load(gp_file)

x1 = np.arange(psi_min, psi_max, dpsi)  # radial coordinate
x2 = np.arange(t_min, t_max, dt)  # temporal coordinate

# Dense (psi, t) evaluation grid; row ordering (all times for the first
# radius, then the next radius, ...) matches the original nested loops.
inputs_x_array = np.array([[r, t] for r in x1 for t in x2])

# Learned local length scale (LLS) at each grid point, per the
# LocalLengthScalesKernel parameterization: theta_gp * 10**gp_l(x).
# The kernel attribute chain is hoisted out of the loop (loop-invariant).
lls_kernel = gp.kernel_.k1.k2
lls_len_scale = np.array([
    lls_kernel.theta_gp * 10**lls_kernel.gp_l.predict(point.reshape(1, -1))[0]
    for point in inputs_x_array
])
# 3-D scatter of the learned local length scales over the (psi, t) domain.
fig = plt.figure(figsize=(16,6))
cm = plt.cm.get_cmap('RdYlGn')
ax = fig.add_subplot(111, projection='3d')
# Color each point by the first component of its length scale; low alpha
# keeps the dense grid readable.
c = ax.scatter(inputs_x_array[:,0],inputs_x_array[:,1],lls_len_scale,c=lls_len_scale[:,0],cmap=cm,alpha=0.3)
ax.set_xlabel(r"$\psi$",labelpad=20)
ax.set_ylabel('Time (s)',labelpad=27.5)
ax.zaxis.set_rotate_label(False)  # keep the manual z-label rotation below
ax.set_zlabel('GP LLS',labelpad=5,rotation=90)
ax.set_xlim(0.8,1.1)
ax.set_ylim(0.4,1.55)
fig.colorbar(c, ax=ax)
# Fixed camera angles for a consistent view of the radial/temporal structure.
ax.azim = 25
ax.elev = 20
plt.show()
lib/GuiMain.py | KorvinSilver/proverbial_hangman | 0 | 6622201 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'GuiMain.ui'
#
# Created: Sun Nov 26 20:51:18 2017
# by: pyside-uic 0.2.15 running on PySide 1.2.4
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_MainWindow(object):
    """Auto-generated UI layout for the application's main window.

    Produced by pyside-uic from 'GuiMain.ui' (see the header warning): edit
    the .ui file and regenerate rather than changing this class by hand.
    Widget tree built by setupUi: a grid holding the image label (left cell)
    and a tool-button / guess-display column (right cell), followed by the
    proverb caption and text, a centered input row (line edit + OK button),
    and a bottom row with New Game and Exit buttons.
    """
    def setupUi(self, MainWindow):
        """Instantiate all widgets/layouts and attach them to *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(680, 400)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
        MainWindow.setSizePolicy(sizePolicy)
        MainWindow.setMinimumSize(QtCore.QSize(400, 400))
        MainWindow.setMaximumSize(QtCore.QSize(800, 600))
        self.CentralWidget = QtGui.QWidget(MainWindow)
        self.CentralWidget.setMinimumSize(QtCore.QSize(400, 400))
        self.CentralWidget.setObjectName("CentralWidget")
        self.verticalLayout = QtGui.QVBoxLayout(self.CentralWidget)
        self.verticalLayout.setObjectName("verticalLayout")
        self.GridLayout = QtGui.QGridLayout()
        self.GridLayout.setSizeConstraint(QtGui.QLayout.SetMinimumSize)
        self.GridLayout.setSpacing(8)
        self.GridLayout.setObjectName("GridLayout")
        # Left grid cell (0,0): the main image label, centered.
        self.ImageLabel = QtGui.QLabel(self.CentralWidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.ImageLabel.sizePolicy().hasHeightForWidth())
        self.ImageLabel.setSizePolicy(sizePolicy)
        self.ImageLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.ImageLabel.setObjectName("ImageLabel")
        self.GridLayout.addWidget(self.ImageLabel, 0, 0, 1, 1)
        # Right grid cell (0,1): fixed-size tool button above the guess
        # caption and guess text labels.
        self.verticalLayout_2 = QtGui.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.horizontalLayout_3 = QtGui.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem)
        self.ToolButton = QtGui.QPushButton(self.CentralWidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.ToolButton.sizePolicy().hasHeightForWidth())
        self.ToolButton.setSizePolicy(sizePolicy)
        self.ToolButton.setMinimumSize(QtCore.QSize(24, 24))
        self.ToolButton.setMaximumSize(QtCore.QSize(24, 24))
        self.ToolButton.setText("")
        self.ToolButton.setObjectName("ToolButton")
        self.horizontalLayout_3.addWidget(self.ToolButton)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem1)
        self.verticalLayout_2.addLayout(self.horizontalLayout_3)
        self.GuessLabel = QtGui.QLabel(self.CentralWidget)
        self.GuessLabel.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
        self.GuessLabel.setObjectName("GuessLabel")
        self.verticalLayout_2.addWidget(self.GuessLabel)
        self.GuessText = QtGui.QLabel(self.CentralWidget)
        self.GuessText.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
        self.GuessText.setObjectName("GuessText")
        self.verticalLayout_2.addWidget(self.GuessText)
        self.GridLayout.addLayout(self.verticalLayout_2, 0, 1, 1, 1)
        self.verticalLayout.addLayout(self.GridLayout)
        # Proverb caption and text, stacked below the grid.
        self.verticalLayout_3 = QtGui.QVBoxLayout()
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.ProverbLabel = QtGui.QLabel(self.CentralWidget)
        self.ProverbLabel.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
        self.ProverbLabel.setMargin(0)
        self.ProverbLabel.setIndent(-1)
        self.ProverbLabel.setObjectName("ProverbLabel")
        self.verticalLayout_3.addWidget(self.ProverbLabel)
        self.ProverbText = QtGui.QLabel(self.CentralWidget)
        self.ProverbText.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
        self.ProverbText.setObjectName("ProverbText")
        self.verticalLayout_3.addWidget(self.ProverbText)
        # Centered input row: line edit plus OK button, flanked by spacers.
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem2)
        self.horizontalLayout_4 = QtGui.QHBoxLayout()
        self.horizontalLayout_4.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        self.PlayerInput = QtGui.QLineEdit(self.CentralWidget)
        self.PlayerInput.setInputMask("")
        self.PlayerInput.setObjectName("PlayerInput")
        self.horizontalLayout_4.addWidget(self.PlayerInput)
        self.OkButton = QtGui.QPushButton(self.CentralWidget)
        self.OkButton.setObjectName("OkButton")
        self.horizontalLayout_4.addWidget(self.OkButton)
        self.horizontalLayout_2.addLayout(self.horizontalLayout_4)
        spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem3)
        self.verticalLayout_3.addLayout(self.horizontalLayout_2)
        spacerItem4 = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        self.verticalLayout_3.addItem(spacerItem4)
        # Bottom row: New Game and Exit buttons, evenly spaced.
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem5)
        self.NewGameButton = QtGui.QPushButton(self.CentralWidget)
        self.NewGameButton.setObjectName("NewGameButton")
        self.horizontalLayout.addWidget(self.NewGameButton)
        spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem6)
        self.ExitButton = QtGui.QPushButton(self.CentralWidget)
        self.ExitButton.setObjectName("ExitButton")
        self.horizontalLayout.addWidget(self.ExitButton)
        spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem7)
        self.verticalLayout_3.addLayout(self.horizontalLayout)
        self.verticalLayout.addLayout(self.verticalLayout_3)
        spacerItem8 = QtGui.QSpacerItem(20, 4, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        self.verticalLayout.addItem(spacerItem8)
        MainWindow.setCentralWidget(self.CentralWidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Assign (re-)translated text to every user-visible widget."""
        MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
        self.ImageLabel.setText(QtGui.QApplication.translate("MainWindow", "ImageLabel", None, QtGui.QApplication.UnicodeUTF8))
        self.GuessLabel.setText(QtGui.QApplication.translate("MainWindow", "GuessLabel", None, QtGui.QApplication.UnicodeUTF8))
        self.GuessText.setText(QtGui.QApplication.translate("MainWindow", "GuessText", None, QtGui.QApplication.UnicodeUTF8))
        self.ProverbLabel.setText(QtGui.QApplication.translate("MainWindow", "ProverbLabel", None, QtGui.QApplication.UnicodeUTF8))
        self.ProverbText.setText(QtGui.QApplication.translate("MainWindow", "ProverbText", None, QtGui.QApplication.UnicodeUTF8))
        self.OkButton.setText(QtGui.QApplication.translate("MainWindow", "OK", None, QtGui.QApplication.UnicodeUTF8))
        self.NewGameButton.setText(QtGui.QApplication.translate("MainWindow", "New Game", None, QtGui.QApplication.UnicodeUTF8))
        self.ExitButton.setText(QtGui.QApplication.translate("MainWindow", "Exit", None, QtGui.QApplication.UnicodeUTF8))
| # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'GuiMain.ui'
#
# Created: Sun Nov 26 20:51:18 2017
# by: pyside-uic 0.2.15 running on PySide 1.2.4
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_MainWindow(object):
    """Auto-generated UI layout for the application's main window.

    Produced by pyside-uic from 'GuiMain.ui' (see the header warning): edit
    the .ui file and regenerate rather than changing this class by hand.
    Widget tree built by setupUi: a grid holding the image label (left cell)
    and a tool-button / guess-display column (right cell), followed by the
    proverb caption and text, a centered input row (line edit + OK button),
    and a bottom row with New Game and Exit buttons.
    """
    def setupUi(self, MainWindow):
        """Instantiate all widgets/layouts and attach them to *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(680, 400)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
        MainWindow.setSizePolicy(sizePolicy)
        MainWindow.setMinimumSize(QtCore.QSize(400, 400))
        MainWindow.setMaximumSize(QtCore.QSize(800, 600))
        self.CentralWidget = QtGui.QWidget(MainWindow)
        self.CentralWidget.setMinimumSize(QtCore.QSize(400, 400))
        self.CentralWidget.setObjectName("CentralWidget")
        self.verticalLayout = QtGui.QVBoxLayout(self.CentralWidget)
        self.verticalLayout.setObjectName("verticalLayout")
        self.GridLayout = QtGui.QGridLayout()
        self.GridLayout.setSizeConstraint(QtGui.QLayout.SetMinimumSize)
        self.GridLayout.setSpacing(8)
        self.GridLayout.setObjectName("GridLayout")
        # Left grid cell (0,0): the main image label, centered.
        self.ImageLabel = QtGui.QLabel(self.CentralWidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.ImageLabel.sizePolicy().hasHeightForWidth())
        self.ImageLabel.setSizePolicy(sizePolicy)
        self.ImageLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.ImageLabel.setObjectName("ImageLabel")
        self.GridLayout.addWidget(self.ImageLabel, 0, 0, 1, 1)
        # Right grid cell (0,1): fixed-size tool button above the guess
        # caption and guess text labels.
        self.verticalLayout_2 = QtGui.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.horizontalLayout_3 = QtGui.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem)
        self.ToolButton = QtGui.QPushButton(self.CentralWidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.ToolButton.sizePolicy().hasHeightForWidth())
        self.ToolButton.setSizePolicy(sizePolicy)
        self.ToolButton.setMinimumSize(QtCore.QSize(24, 24))
        self.ToolButton.setMaximumSize(QtCore.QSize(24, 24))
        self.ToolButton.setText("")
        self.ToolButton.setObjectName("ToolButton")
        self.horizontalLayout_3.addWidget(self.ToolButton)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem1)
        self.verticalLayout_2.addLayout(self.horizontalLayout_3)
        self.GuessLabel = QtGui.QLabel(self.CentralWidget)
        self.GuessLabel.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
        self.GuessLabel.setObjectName("GuessLabel")
        self.verticalLayout_2.addWidget(self.GuessLabel)
        self.GuessText = QtGui.QLabel(self.CentralWidget)
        self.GuessText.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
        self.GuessText.setObjectName("GuessText")
        self.verticalLayout_2.addWidget(self.GuessText)
        self.GridLayout.addLayout(self.verticalLayout_2, 0, 1, 1, 1)
        self.verticalLayout.addLayout(self.GridLayout)
        # Proverb caption and text, stacked below the grid.
        self.verticalLayout_3 = QtGui.QVBoxLayout()
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.ProverbLabel = QtGui.QLabel(self.CentralWidget)
        self.ProverbLabel.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
        self.ProverbLabel.setMargin(0)
        self.ProverbLabel.setIndent(-1)
        self.ProverbLabel.setObjectName("ProverbLabel")
        self.verticalLayout_3.addWidget(self.ProverbLabel)
        self.ProverbText = QtGui.QLabel(self.CentralWidget)
        self.ProverbText.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
        self.ProverbText.setObjectName("ProverbText")
        self.verticalLayout_3.addWidget(self.ProverbText)
        # Centered input row: line edit plus OK button, flanked by spacers.
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem2)
        self.horizontalLayout_4 = QtGui.QHBoxLayout()
        self.horizontalLayout_4.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        self.PlayerInput = QtGui.QLineEdit(self.CentralWidget)
        self.PlayerInput.setInputMask("")
        self.PlayerInput.setObjectName("PlayerInput")
        self.horizontalLayout_4.addWidget(self.PlayerInput)
        self.OkButton = QtGui.QPushButton(self.CentralWidget)
        self.OkButton.setObjectName("OkButton")
        self.horizontalLayout_4.addWidget(self.OkButton)
        self.horizontalLayout_2.addLayout(self.horizontalLayout_4)
        spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem3)
        self.verticalLayout_3.addLayout(self.horizontalLayout_2)
        spacerItem4 = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        self.verticalLayout_3.addItem(spacerItem4)
        # Bottom row: New Game and Exit buttons, evenly spaced.
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem5)
        self.NewGameButton = QtGui.QPushButton(self.CentralWidget)
        self.NewGameButton.setObjectName("NewGameButton")
        self.horizontalLayout.addWidget(self.NewGameButton)
        spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem6)
        self.ExitButton = QtGui.QPushButton(self.CentralWidget)
        self.ExitButton.setObjectName("ExitButton")
        self.horizontalLayout.addWidget(self.ExitButton)
        spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem7)
        self.verticalLayout_3.addLayout(self.horizontalLayout)
        self.verticalLayout.addLayout(self.verticalLayout_3)
        spacerItem8 = QtGui.QSpacerItem(20, 4, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        self.verticalLayout.addItem(spacerItem8)
        MainWindow.setCentralWidget(self.CentralWidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Assign (re-)translated text to every user-visible widget."""
        MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
        self.ImageLabel.setText(QtGui.QApplication.translate("MainWindow", "ImageLabel", None, QtGui.QApplication.UnicodeUTF8))
        self.GuessLabel.setText(QtGui.QApplication.translate("MainWindow", "GuessLabel", None, QtGui.QApplication.UnicodeUTF8))
        self.GuessText.setText(QtGui.QApplication.translate("MainWindow", "GuessText", None, QtGui.QApplication.UnicodeUTF8))
        self.ProverbLabel.setText(QtGui.QApplication.translate("MainWindow", "ProverbLabel", None, QtGui.QApplication.UnicodeUTF8))
        self.ProverbText.setText(QtGui.QApplication.translate("MainWindow", "ProverbText", None, QtGui.QApplication.UnicodeUTF8))
        self.OkButton.setText(QtGui.QApplication.translate("MainWindow", "OK", None, QtGui.QApplication.UnicodeUTF8))
        self.NewGameButton.setText(QtGui.QApplication.translate("MainWindow", "New Game", None, QtGui.QApplication.UnicodeUTF8))
        self.ExitButton.setText(QtGui.QApplication.translate("MainWindow", "Exit", None, QtGui.QApplication.UnicodeUTF8))
| en | 0.79589 | # -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'GuiMain.ui' # # Created: Sun Nov 26 20:51:18 2017 # by: pyside-uic 0.2.15 running on PySide 1.2.4 # # WARNING! All changes made in this file will be lost! | 1.713277 | 2 |
12.Highly divisible triangular number.py | iFun/Project-Euler | 0 | 6622202 | from math import sqrt
def main():
    """Scan triangular numbers T(10001)..T(20000), printing each new record
    divisor count followed by the triangular number that achieved it.

    Output matches the original while-loop version; the manual counter was
    replaced by a for/range loop and the dead commented-out prints removed.

    NOTE: Project Euler #12 asks for the *first* triangular number with more
    than 500 divisors; this scan only reports running maxima in the range.
    """
    count = 0  # best divisor count seen so far
    for tmp in range(10001, 20001):
        total = getSum(tmp)          # tmp-th triangular number
        result = findDivisor(total)  # full divisor set of that number
        if len(result) > count:
            count = len(result)
            print(count)
            print(total)
def getSum(num):
    """Return the num-th triangular number, 1 + 2 + ... + num.

    Uses Gauss's closed form with floor division so the result is an exact
    int; the previous ``end/2`` true division returned a float, which made
    downstream divisor arithmetic operate on floats.
    """
    return num * (num + 1) // 2
def findDivisor(n):
    """Return the set of all positive divisors of n.

    Trial-divides only up to sqrt(n); each small divisor i also contributes
    its complement n // i, so the full divisor set is found in O(sqrt(n))
    steps.
    """
    divisors = set()
    for i in range(1, int(n ** 0.5) + 1):
        if n % i == 0:
            divisors.add(i)
            divisors.add(n // i)
    return divisors
# Entry-point guard: run the search only when executed as a script.
if __name__ == '__main__':
    main()
| from math import sqrt
def main():
count = 0
totalNumber = 0
tmp = 10001
result = 0
total = 0
while tmp != 20001:
total = getSum(tmp)
result = findDivisor(total)
if len(result) > count:
count = len(result)
totalNumber = total
print (count)
print(totalNumber)
tmp = tmp + 1
# print(count)
# print(totalNumber)
def getSum(num):
start = 1 + num
end = start * num
return end/2
def findDivisor(n):
return set(x for tup in ([i, n//i]
for i in range(1, int(n**0.5)+1) if n % i == 0) for x in tup)
if __name__ == '__main__':
main()
| en | 0.658862 | # print(count) # print(totalNumber) | 3.625025 | 4 |
src/main/python/app/workers/CategorySaverWorker.py | karlpet/WadLauncher | 2 | 6622203 | import sys, json, os, uuid
from configparser import ConfigParser
from dataclasses import asdict
from PyQt5.QtCore import QThread, pyqtSignal
from app.config import Config
from app.workers.WorkerPool import *
def category_saver_worker_wrapper(items, progress_handlers=None, done_handlers=None):
    """Create a CategorySaverWorker for *items*, wire up its signals, and
    submit it to the shared WorkerPool.

    progress_handlers / done_handlers are optional iterables of callables
    connected to the worker's ``progress`` / ``done`` signals.  The defaults
    are ``None`` rather than mutable ``[]`` (avoids the shared-mutable-default
    pitfall); passing an empty list still behaves identically.
    """
    worker = CategorySaverWorker(items)
    for handler in progress_handlers or ():
        worker.progress.connect(handler)
    for handler in done_handlers or ():
        worker.done.connect(handler)
    WorkerPool.Instance().start(worker)
class CategorySaverWorker(QThread):
    """Background worker that serializes category dataclasses to an INI file.

    Writes one section per category (keys: id, is_root, name, children) to
    ``user_categories.ini`` under the configured base path, then emits
    ``done`` with ``None``.
    """
    done = pyqtSignal(object)      # emitted with None once the file is written
    progress = pyqtSignal(object)  # never emitted by this worker; connected by the wrapper

    def __init__(self, items):
        """Snapshot *items* (dataclass instances) as plain dicts and resolve
        the output path from the application config."""
        super(CategorySaverWorker, self).__init__()
        config = Config.Instance()
        base_path = os.path.expanduser(config['PATHS']['BASE_PATH'])
        self.path = os.path.join(base_path, 'user_categories.ini')
        # Convert eagerly so run() never touches the live dataclass objects.
        self.items = [asdict(item) for item in items]

    def run(self):
        """Write every category as its own INI section, then signal done."""
        cfg = ConfigParser(allow_no_value=True)
        for item in self.items:
            section_id = item['id']  # renamed from 'id' to avoid shadowing the builtin
            if not cfg.has_section(section_id):
                cfg.add_section(section_id)
            cfg.set(section_id, 'id', section_id)
            is_root = 'yes' if item.get('is_root', False) else 'no'
            cfg.set(section_id, 'is_root', is_root)
            cfg.set(section_id, 'name', item['name'])
            # Children are stored as a JSON-encoded list inside the INI value.
            cfg.set(section_id, 'children', json.dumps(item['children']))
        with open(os.path.abspath(self.path), 'w+') as config_file:
            cfg.write(config_file)
        self.done.emit(None)
| import sys, json, os, uuid
from configparser import ConfigParser
from dataclasses import asdict
from PyQt5.QtCore import QThread, pyqtSignal
from app.config import Config
from app.workers.WorkerPool import *
def category_saver_worker_wrapper(items, progress_handlers=[], done_handlers=[]):
worker = CategorySaverWorker(items)
for handler in progress_handlers:
worker.progress.connect(handler)
for handler in done_handlers:
worker.done.connect(handler)
WorkerPool.Instance().start(worker)
class CategorySaverWorker(QThread):
done = pyqtSignal(object)
progress = pyqtSignal(object)
def __init__(self, items):
super(CategorySaverWorker, self).__init__()
config = Config.Instance()
base_path = os.path.expanduser(config['PATHS']['BASE_PATH'])
self.path = os.path.join(base_path, 'user_categories.ini')
self.items = [asdict(item) for item in items]
def run(self):
cfg = ConfigParser(allow_no_value=True)
for item in self.items:
id = item['id']
if not cfg.has_section(id):
cfg.add_section(id)
cfg.set(id, 'id', id)
is_root = 'yes' if item.get('is_root', False) else 'no'
cfg.set(id, 'is_root', is_root)
cfg.set(id, 'name', item['name'])
cfg.set(id, 'children', json.dumps(item['children']))
with open(os.path.abspath(self.path), 'w+') as config_file:
cfg.write(config_file)
self.done.emit(None)
| none | 1 | 2.072729 | 2 | |
reports/jasa/transcet_map.py | nedlrichards/tau_decomp | 0 | 6622204 | <filename>reports/jasa/transcet_map.py<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import matplotlib.tri as tri
from scipy.ndimage import gaussian_filter
from src import Config
# Interactive mode so the figure window appears immediately when run.
plt.ion()
cf=Config()
# WOA18 decadal-average climatology CSV: columns 0/1 are used below as
# latitude/longitude; column 12 is the contoured value (temperature, per
# the WOA filename -- confirm the depth level against the WOA docs).
woa_file = np.genfromtxt('data/external/woa18_decav81B0_t14mn04.csv',
                         delimiter=',', missing_values='',
                         filling_values=np.nan,
                         usecols=(0,1,12), invalid_raise=False).T
# Regular (lon, lat) plotting grid over the map window.
xi = np.linspace(-160, -115, 100)
yi = np.linspace(15, 50, 101)
# End points of the experimental transect (latitudes and longitudes).
lat_exp = [33.42, 34.88]
lon_exp = [-137.7, -148.32]
# Keep only stations inside the map window that have a valid value.
ind1 = (woa_file[0] > 15) & (woa_file[0] < 50)
ind2 = (woa_file[1] > -160) & (woa_file[1] < -115)
ind = ind1 & ind2
nan_i = ~np.isnan(woa_file[2])
ind &= nan_i
# Linearly interpolate the scattered station data onto the regular grid.
triang = tri.Triangulation(woa_file[1, ind], woa_file[0, ind])
interpolator = tri.LinearTriInterpolator(triang, woa_file[2, ind])
fig = plt.figure(figsize=(cf.jasa_1clm, 2.5))
ax = fig.add_subplot(1,1,1,projection=ccrs.PlateCarree())
ax.set_extent([-160, -115, 15, 50],crs=ccrs.PlateCarree())
ax.coastlines()
# Overlay the transect line in red (C3).
ax.plot(lon_exp, lat_exp, color='C3')
#m.fillcontinents(color="#FFDDCC", lake_color='#DDEEFF')
Xi, Yi = np.meshgrid(xi, yi)
zi = interpolator(Xi, Yi)
# Light Gaussian smoothing so the contours are not jagged.
data = gaussian_filter(zi, 1)
cs = ax.contour(xi, yi, data, linewidths=0.5, colors='k', levels=np.arange(6, 26, 2))
# Hand-picked positions for the inline contour labels.
locs = [(-155.4545454545455, 47.200126321991945),
        (-149.54545454545456, 44.78292593479905),
        (-135.45454545454547, 43.84985763955887),
        (-129.09090909090912, 39.75988868530165),
        (-135, 36.89),
        (-132.1, 33.66),
        (-135., 30.5),
        (-139.5, 27.35),
        (-146.4, 24.35),
        (-151.5509215051357, 20.25)]
lbls = ax.clabel(cs, cs.levels, manual=locs)
ax.stock_img()
gl = ax.gridlines(draw_labels=True)
gl.top_labels = False
gl.right_labels = False
# The commented-out calls below are leftovers from an earlier Basemap
# version of this figure (replaced by the cartopy gridlines above).
#parallels = np.linspace(20, 50, 5)
#m.drawparallels(parallels,labels=[False,True,True,False])
#meridians = np.linspace(-115, -155, 5)
#m.drawmeridians(meridians,labels=[True,False,False,True])
fig.savefig('reports/jasa/figures/transcet.png', dpi=300)
| <filename>reports/jasa/transcet_map.py<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import matplotlib.tri as tri
from scipy.ndimage import gaussian_filter
from src import Config
# Interactive mode so the figure window appears immediately when run.
plt.ion()
cf=Config()
# WOA18 decadal-average climatology CSV: columns 0/1 are used below as
# latitude/longitude; column 12 is the contoured value (temperature, per
# the WOA filename -- confirm the depth level against the WOA docs).
woa_file = np.genfromtxt('data/external/woa18_decav81B0_t14mn04.csv',
                         delimiter=',', missing_values='',
                         filling_values=np.nan,
                         usecols=(0,1,12), invalid_raise=False).T
# Regular (lon, lat) plotting grid over the map window.
xi = np.linspace(-160, -115, 100)
yi = np.linspace(15, 50, 101)
# End points of the experimental transect (latitudes and longitudes).
lat_exp = [33.42, 34.88]
lon_exp = [-137.7, -148.32]
# Keep only stations inside the map window that have a valid value.
ind1 = (woa_file[0] > 15) & (woa_file[0] < 50)
ind2 = (woa_file[1] > -160) & (woa_file[1] < -115)
ind = ind1 & ind2
nan_i = ~np.isnan(woa_file[2])
ind &= nan_i
# Linearly interpolate the scattered station data onto the regular grid.
triang = tri.Triangulation(woa_file[1, ind], woa_file[0, ind])
interpolator = tri.LinearTriInterpolator(triang, woa_file[2, ind])
fig = plt.figure(figsize=(cf.jasa_1clm, 2.5))
ax = fig.add_subplot(1,1,1,projection=ccrs.PlateCarree())
ax.set_extent([-160, -115, 15, 50],crs=ccrs.PlateCarree())
ax.coastlines()
# Overlay the transect line in red (C3).
ax.plot(lon_exp, lat_exp, color='C3')
#m.fillcontinents(color="#FFDDCC", lake_color='#DDEEFF')
Xi, Yi = np.meshgrid(xi, yi)
zi = interpolator(Xi, Yi)
# Light Gaussian smoothing so the contours are not jagged.
data = gaussian_filter(zi, 1)
cs = ax.contour(xi, yi, data, linewidths=0.5, colors='k', levels=np.arange(6, 26, 2))
# Hand-picked positions for the inline contour labels.
locs = [(-155.4545454545455, 47.200126321991945),
        (-149.54545454545456, 44.78292593479905),
        (-135.45454545454547, 43.84985763955887),
        (-129.09090909090912, 39.75988868530165),
        (-135, 36.89),
        (-132.1, 33.66),
        (-135., 30.5),
        (-139.5, 27.35),
        (-146.4, 24.35),
        (-151.5509215051357, 20.25)]
lbls = ax.clabel(cs, cs.levels, manual=locs)
ax.stock_img()
gl = ax.gridlines(draw_labels=True)
gl.top_labels = False
gl.right_labels = False
# The commented-out calls below are leftovers from an earlier Basemap
# version of this figure (replaced by the cartopy gridlines above).
#parallels = np.linspace(20, 50, 5)
#m.drawparallels(parallels,labels=[False,True,True,False])
#meridians = np.linspace(-115, -155, 5)
#m.drawmeridians(meridians,labels=[True,False,False,True])
fig.savefig('reports/jasa/figures/transcet.png', dpi=300)
| en | 0.331613 | #m.fillcontinents(color="#FFDDCC", lake_color='#DDEEFF') #parallels = np.linspace(20, 50, 5) #m.drawparallels(parallels,labels=[False,True,True,False]) #meridians = np.linspace(-115, -155, 5) #m.drawmeridians(meridians,labels=[True,False,False,True]) | 1.903875 | 2 |
module1-introduction-to-sql/rpg_queries.py | lucaspetrus/DS-Unit-3-Sprint-2-SQL-and-Databases | 0 | 6622205 | <filename>module1-introduction-to-sql/rpg_queries.py
# Questions for Today
"""How many total Characters are there?
How many of each specific subclass?
How many Items are there?
How many of the Items are weapons? How many are not?
How many Items does each character have? (Return first 20 rows)
How many Weapons does each character have? (Return first 20 rows)
On average, how many Items does each Character have?
On average, how many Weapons does each character have?"""
import sqlite3
conn = sqlite3.connect('../module3-nosql-and-document-oriented-databases/rpg_db.sqlite3')
curs = conn.cursor()
"""How many total Characters are there"""
character_query = 'SELECT COUNT(*) FROM charactercreator_character'
curs.execute(character_query)
results = curs.fetchone() # fetchall() is somewhat interchangeable
print(f"Total Characters: {results[0]}")
"""
How Many of Each Specific subclass
"""
cleric_query = 'SELECT COUNT(*) FROM charactercreator_cleric'
curs.execute(cleric_query)
cleric_results = curs.fetchone()
print(f"Number of Cleric Subclass: {cleric_results[0]}")
fighter_query = 'SELECT COUNT(*) FROM charactercreator_fighter'
curs.execute(fighter_query)
fighter_results = curs.fetchone()
print(f"Number of Fighter Subclass: {fighter_results[0]}")
mage_query = 'SELECT COUNT(*) FROM charactercreator_mage'
curs.execute(mage_query)
mage_results = curs.fetchone()
print(f"Number of Mage Subclass: {mage_results[0]}")
necromancer_query = 'SELECT COUNT(*) FROM charactercreator_necromancer'
curs.execute(necromancer_query)
necromancer_results = curs.fetchone()
print(f"Number of Necromancer Subclass: {necromancer_results[0]}")
thief_query = 'SELECT COUNT(*) FROM charactercreator_thief'
curs.execute(thief_query)
thief_results = curs.fetchone()
print(f"Number of Thief Subclass: {thief_results[0]}")
"""
How many Items are there?
"""
armor_query = 'SELECT COUNT(*) FROM armory_item'
curs.execute(armor_query)
items_results = curs.fetchone()
print(f"Number of Items in Armory: {items_results[0]}")
"""
How many of the Items are weapons? How many are not?
"""
weapon_query = 'SELECT COUNT(*) FROM armory_weapon'
curs.execute(weapon_query)
weapon_results = curs.fetchone()
print(f"Number of Weapons in Armory: {weapon_results[0]}")
print(f"Not Weapons: {items_results[0] - weapon_results[0]}")
"""
How many Items does each character have? (Return first 20 rows)
"""
item_per_character = """SELECT character_id, COUNT(DISTINCT item_id)
#Distinct means Unqiue or Value_counts
#subquery below
FROM(SELECT cc.character_id, cc.name AS character_name, ai.item_id, ai.name AS item_name
FROM charactercreator_character AS cc,armory_item AS ai, charactercreator_character_inventory AS cci
WHERE cc.character_id = cci.character_id AND ai.item_id = cci.item_id) ###This WHERE function is the implicit join
GROUP BY 1 ORDER BY 2 DESC #Group by column 1, and Column 2 becomes descending column with items
LIMIT 20;"""
curs.execute(item_per_character)
item_per_character_results = curs.fetchall()
print(f"Total Items for Each Character: {item_per_character_results}")
"""
How many Weapons does each character have? (Return first 20 rows)
"""
weapon_per_character = """SELECT name, COUNT(DISTINCT item_ptr_id) FROM
(SELECT cc.character_id, cc.name, aw.item_ptr_id, aw.power
FROM charactercreator_character AS cc,
armory_weapon AS aw,
charactercreator_character_inventory AS cci
WHERE cc.character_id = cci.character_id
AND aw.item_ptr_id = cci.item_id)
GROUP BY 1 ORDER BY 2 DESC
LIMIT 20;"""
curs.execute(weapon_per_character)
weapon_per_character_result = curs.fetchall()
print(f"Total Weapons for Characters: {weapon_per_character_result}")
| <filename>module1-introduction-to-sql/rpg_queries.py
# Questions for Today
"""How many total Characters are there?
How many of each specific subclass?
How many Items are there?
How many of the Items are weapons? How many are not?
How many Items does each character have? (Return first 20 rows)
How many Weapons does each character have? (Return first 20 rows)
On average, how many Items does each Character have?
On average, how many Weapons does each character have?"""
import sqlite3
conn = sqlite3.connect('../module3-nosql-and-document-oriented-databases/rpg_db.sqlite3')
curs = conn.cursor()
"""How many total Characters are there"""
character_query = 'SELECT COUNT(*) FROM charactercreator_character'
curs.execute(character_query)
results = curs.fetchone() # fetchall() is somewhat interchangeable
print(f"Total Characters: {results[0]}")
"""
How Many of Each Specific subclass
"""
cleric_query = 'SELECT COUNT(*) FROM charactercreator_cleric'
curs.execute(cleric_query)
cleric_results = curs.fetchone()
print(f"Number of Cleric Subclass: {cleric_results[0]}")
fighter_query = 'SELECT COUNT(*) FROM charactercreator_fighter'
curs.execute(fighter_query)
fighter_results = curs.fetchone()
print(f"Number of Fighter Subclass: {fighter_results[0]}")
mage_query = 'SELECT COUNT(*) FROM charactercreator_mage'
curs.execute(mage_query)
mage_results = curs.fetchone()
print(f"Number of Mage Subclass: {mage_results[0]}")
necromancer_query = 'SELECT COUNT(*) FROM charactercreator_necromancer'
curs.execute(necromancer_query)
necromancer_results = curs.fetchone()
print(f"Number of Necromancer Subclass: {necromancer_results[0]}")
thief_query = 'SELECT COUNT(*) FROM charactercreator_thief'
curs.execute(thief_query)
thief_results = curs.fetchone()
print(f"Number of Thief Subclass: {thief_results[0]}")
"""
How many Items are there?
"""
armor_query = 'SELECT COUNT(*) FROM armory_item'
curs.execute(armor_query)
items_results = curs.fetchone()
print(f"Number of Items in Armory: {items_results[0]}")
"""
How many of the Items are weapons? How many are not?
"""
weapon_query = 'SELECT COUNT(*) FROM armory_weapon'
curs.execute(weapon_query)
weapon_results = curs.fetchone()
print(f"Number of Weapons in Armory: {weapon_results[0]}")
print(f"Not Weapons: {items_results[0] - weapon_results[0]}")
"""
How many Items does each character have? (Return first 20 rows)
"""
item_per_character = """SELECT character_id, COUNT(DISTINCT item_id)
#Distinct means Unqiue or Value_counts
#subquery below
FROM(SELECT cc.character_id, cc.name AS character_name, ai.item_id, ai.name AS item_name
FROM charactercreator_character AS cc,armory_item AS ai, charactercreator_character_inventory AS cci
WHERE cc.character_id = cci.character_id AND ai.item_id = cci.item_id) ###This WHERE function is the implicit join
GROUP BY 1 ORDER BY 2 DESC #Group by column 1, and Column 2 becomes descending column with items
LIMIT 20;"""
curs.execute(item_per_character)
item_per_character_results = curs.fetchall()
print(f"Total Items for Each Character: {item_per_character_results}")
"""
How many Weapons does each character have? (Return first 20 rows)
"""
weapon_per_character = """SELECT name, COUNT(DISTINCT item_ptr_id) FROM
(SELECT cc.character_id, cc.name, aw.item_ptr_id, aw.power
FROM charactercreator_character AS cc,
armory_weapon AS aw,
charactercreator_character_inventory AS cci
WHERE cc.character_id = cci.character_id
AND aw.item_ptr_id = cci.item_id)
GROUP BY 1 ORDER BY 2 DESC
LIMIT 20;"""
curs.execute(weapon_per_character)
weapon_per_character_result = curs.fetchall()
print(f"Total Weapons for Characters: {weapon_per_character_result}")
| en | 0.853223 | # Questions for Today How many total Characters are there? How many of each specific subclass? How many Items are there? How many of the Items are weapons? How many are not? How many Items does each character have? (Return first 20 rows) How many Weapons does each character have? (Return first 20 rows) On average, how many Items does each Character have? On average, how many Weapons does each character have? How many total Characters are there # fetchall() is somewhat interchangeable How Many of Each Specific subclass How many Items are there? How many of the Items are weapons? How many are not? How many Items does each character have? (Return first 20 rows) SELECT character_id, COUNT(DISTINCT item_id) #Distinct means Unqiue or Value_counts #subquery below FROM(SELECT cc.character_id, cc.name AS character_name, ai.item_id, ai.name AS item_name FROM charactercreator_character AS cc,armory_item AS ai, charactercreator_character_inventory AS cci WHERE cc.character_id = cci.character_id AND ai.item_id = cci.item_id) ###This WHERE function is the implicit join GROUP BY 1 ORDER BY 2 DESC #Group by column 1, and Column 2 becomes descending column with items LIMIT 20; How many Weapons does each character have? (Return first 20 rows) SELECT name, COUNT(DISTINCT item_ptr_id) FROM (SELECT cc.character_id, cc.name, aw.item_ptr_id, aw.power FROM charactercreator_character AS cc, armory_weapon AS aw, charactercreator_character_inventory AS cci WHERE cc.character_id = cci.character_id AND aw.item_ptr_id = cci.item_id) GROUP BY 1 ORDER BY 2 DESC LIMIT 20; | 3.266214 | 3 |
util/env_util/wrappers/action_wrappers.py | joelouismarino/variational_rl | 15 | 6622206 | import gym
from gym.spaces import Box
import numpy as np
class NormalizeAction(gym.ActionWrapper):
"""
Normalizes the reward to [-1, 1].
"""
def __init__(self, env):
gym.ActionWrapper.__init__(self, env)
self._wrapped_env = env
ub = np.ones(env.action_space.shape)
self.action_space = Box(-1 * ub, ub)
def action(self, action):
lb = self._wrapped_env.action_space.low
ub = self._wrapped_env.action_space.high
scaled_action = lb + (action + 1.) * 0.5 * (ub - lb)
scaled_action = np.clip(scaled_action, lb, ub)
return scaled_action
| import gym
from gym.spaces import Box
import numpy as np
class NormalizeAction(gym.ActionWrapper):
"""
Normalizes the reward to [-1, 1].
"""
def __init__(self, env):
gym.ActionWrapper.__init__(self, env)
self._wrapped_env = env
ub = np.ones(env.action_space.shape)
self.action_space = Box(-1 * ub, ub)
def action(self, action):
lb = self._wrapped_env.action_space.low
ub = self._wrapped_env.action_space.high
scaled_action = lb + (action + 1.) * 0.5 * (ub - lb)
scaled_action = np.clip(scaled_action, lb, ub)
return scaled_action
| en | 0.777374 | Normalizes the reward to [-1, 1]. | 2.851144 | 3 |
PreFRBLE/PreFRBLE/parameter.py | FRBs/PreFRBLE | 5 | 6622207 | <gh_stars>1-10
redshift_accuracy = 4 # number decimals for redshift accuracy (to prevent numerical misidentification of redshifts)
## regions along LoS
regions = ['MW', 'IGM', 'Inter', 'Host', 'Local']
linestyle_region = {'MW':'--', 'IGM':'-', 'Inter':":", 'Host':"-.", 'Local':"-."}
N_sample = { ## !!! hardcoded, find a better solution
# 'MW' : 1,
'IGM' : 49152,
'Host' : 10**7,
'Inter' : 10**7,
'inter' : 10**7,
'Local' : 10**6,
'population' : 10**7
}
N_sample['Full'] = min( list( N_sample.values() ) )
N_population = { ## number of events in sample to estimate likelihood of host redshift
'SFR': { 'None': 10**7, 'ASKAP_incoh' : 9176 , 'CHIME' : 118822, 'Parkes': 134915 },
'coV': { 'None': 10**7, 'ASKAP_incoh' : 23757, 'CHIME' : 112447, 'Parkes': 122008 },
'SMD': { 'None': 10**7, 'ASKAP_incoh' : 32976, 'CHIME' : 401226, 'Parkes': 396802 },
}
## available models for all regions
models_MW = ['JF12']
models_IGM = ['primordial', 'astrophysical_mean', 'astrophysical_median', 'alpha1-3rd', 'alpha2-3rd', 'alpha3-3rd', 'alpha4-3rd', 'alpha5-3rd', 'alpha6-3rd', 'alpha7-3rd', 'alpha8-3rd', 'alpha9-3rd']
models_Host = ['Rodrigues18']
models_Inter = ['Rodrigues18']
models_Local = [ 'Piro18_wind', 'Piro18_wind+SNR']
## telescopes and cosmic population scenarios
telescopes = [ 'ASKAP', 'ASKAP_incoh', 'CHIME', 'Parkes' ][1:] ## names used in PreFRBLE
populations = [ 'SFR', 'coV', 'SMD' ]
colors_telescope = ['orange','y','c']
linestyles_population = [':','-','--']
## names used in FRBpoppy
telescopes_FRBpoppy = { 'ASKAP':'askap-fly', 'ASKAP_incoh':'askap-incoh', 'CHIME':'chime', 'Parkes':'parkes' }
populations_FRBpoppy = { 'SFR':'sfr', 'SMD':'smd', 'coV':'vol_co' }
## names used in FRBcat
telescopes_FRBcat = { 'ASKAP':'ASKAP', 'ASKAP_incoh':'ASKAP', 'CHIME':'CHIME/FRB', 'Parkes':'parkes' }
telescopes_FRBcat_inv = {v: k for k, v in telescopes_FRBcat.items()}
telescopes_FRBcat_inv['ASKAP'] = 'ASKAP_incoh' ## has to be forced
| redshift_accuracy = 4 # number decimals for redshift accuracy (to prevent numerical misidentification of redshifts)
## regions along LoS
regions = ['MW', 'IGM', 'Inter', 'Host', 'Local']
linestyle_region = {'MW':'--', 'IGM':'-', 'Inter':":", 'Host':"-.", 'Local':"-."}
N_sample = { ## !!! hardcoded, find a better solution
# 'MW' : 1,
'IGM' : 49152,
'Host' : 10**7,
'Inter' : 10**7,
'inter' : 10**7,
'Local' : 10**6,
'population' : 10**7
}
N_sample['Full'] = min( list( N_sample.values() ) )
N_population = { ## number of events in sample to estimate likelihood of host redshift
'SFR': { 'None': 10**7, 'ASKAP_incoh' : 9176 , 'CHIME' : 118822, 'Parkes': 134915 },
'coV': { 'None': 10**7, 'ASKAP_incoh' : 23757, 'CHIME' : 112447, 'Parkes': 122008 },
'SMD': { 'None': 10**7, 'ASKAP_incoh' : 32976, 'CHIME' : 401226, 'Parkes': 396802 },
}
## available models for all regions
models_MW = ['JF12']
models_IGM = ['primordial', 'astrophysical_mean', 'astrophysical_median', 'alpha1-3rd', 'alpha2-3rd', 'alpha3-3rd', 'alpha4-3rd', 'alpha5-3rd', 'alpha6-3rd', 'alpha7-3rd', 'alpha8-3rd', 'alpha9-3rd']
models_Host = ['Rodrigues18']
models_Inter = ['Rodrigues18']
models_Local = [ 'Piro18_wind', 'Piro18_wind+SNR']
## telescopes and cosmic population scenarios
telescopes = [ 'ASKAP', 'ASKAP_incoh', 'CHIME', 'Parkes' ][1:] ## names used in PreFRBLE
populations = [ 'SFR', 'coV', 'SMD' ]
colors_telescope = ['orange','y','c']
linestyles_population = [':','-','--']
## names used in FRBpoppy
telescopes_FRBpoppy = { 'ASKAP':'askap-fly', 'ASKAP_incoh':'askap-incoh', 'CHIME':'chime', 'Parkes':'parkes' }
populations_FRBpoppy = { 'SFR':'sfr', 'SMD':'smd', 'coV':'vol_co' }
## names used in FRBcat
telescopes_FRBcat = { 'ASKAP':'ASKAP', 'ASKAP_incoh':'ASKAP', 'CHIME':'CHIME/FRB', 'Parkes':'parkes' }
telescopes_FRBcat_inv = {v: k for k, v in telescopes_FRBcat.items()}
telescopes_FRBcat_inv['ASKAP'] = 'ASKAP_incoh' ## has to be forced | en | 0.758201 | # number decimals for redshift accuracy (to prevent numerical misidentification of redshifts) ## regions along LoS ## !!! hardcoded, find a better solution # 'MW' : 1, ## number of events in sample to estimate likelihood of host redshift ## available models for all regions ## telescopes and cosmic population scenarios ## names used in PreFRBLE ## names used in FRBpoppy ## names used in FRBcat ## has to be forced | 1.884232 | 2 |
python/flask_tox_pytest_helloworld/helloworld/main.py | mir-dhaka/coding_playground | 2 | 6622208 | <reponame>mir-dhaka/coding_playground
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 damian <damian@C-DZ-E5500>
#
# Distributed under terms of the MIT license.
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
'''hello_world'''
return 'Hello World'
if __name__ == "__main__":
app.run()
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 damian <damian@C-DZ-E5500>
#
# Distributed under terms of the MIT license.
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
'''hello_world'''
return 'Hello World'
if __name__ == "__main__":
app.run() | en | 0.642924 | #! /usr/bin/env python # -*- coding: utf-8 -*- # vim:fenc=utf-8 # # Copyright © 2019 damian <damian@C-DZ-E5500> # # Distributed under terms of the MIT license. hello_world | 2.386165 | 2 |
backend/bin/test/test_traffic_analyzer.py | anjo-ba/PCAP-Analyzer | 4 | 6622209 | import unittest
from io import StringIO
from unittest.mock import patch
import main.traffic_analyzer as traffic_analyzer
class TestTrafficAnalyzerMethods(unittest.TestCase):
@patch("sys.stdout", new_callable=StringIO)
@patch("sys.argv", "test")
def test_main_function(self, mock_stdout) -> None:
error_text = "Usage traffic_analyzer.py [option]\n\n" \
"Option:\n" \
" download: Download information from IANA and IEEE.\n" \
" convert: Converts pcap(ng) files to csv\n" \
" enrich: Enriches csvs with additional information\n" \
" run: Runs convert and enrich\n" \
" run-all: Runs download, convert and enrich\n"
traffic_analyzer.main()
self.assertEqual(mock_stdout.getvalue(), error_text)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestTrafficAnalyzerMethods)
unittest.TextTestRunner(verbosity=2).run(suite)
| import unittest
from io import StringIO
from unittest.mock import patch
import main.traffic_analyzer as traffic_analyzer
class TestTrafficAnalyzerMethods(unittest.TestCase):
@patch("sys.stdout", new_callable=StringIO)
@patch("sys.argv", "test")
def test_main_function(self, mock_stdout) -> None:
error_text = "Usage traffic_analyzer.py [option]\n\n" \
"Option:\n" \
" download: Download information from IANA and IEEE.\n" \
" convert: Converts pcap(ng) files to csv\n" \
" enrich: Enriches csvs with additional information\n" \
" run: Runs convert and enrich\n" \
" run-all: Runs download, convert and enrich\n"
traffic_analyzer.main()
self.assertEqual(mock_stdout.getvalue(), error_text)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestTrafficAnalyzerMethods)
unittest.TextTestRunner(verbosity=2).run(suite)
| none | 1 | 2.727658 | 3 | |
perform.py | amolenaar/roles | 16 | 6622210 | """Test performance between roles and zope3 implementations."""
from timeit import timeit
setup_role = """
from roles import RoleType
class A:
pass
class Role(metaclass=RoleType):
def func(self): pass
a = A()
"""
setup_rolefactory = """
from roles import RoleType
from roles.factory import assignto
class A:
pass
class Role(metaclass=RoleType):
def func(self): pass
@assignto(A)
class Subrole(Role):
pass
a = A()
"""
setup_zope = """
from zope import interface, component
class A:
pass
class Iface(interface.Interface):
pass
class Adapter:
interface.implements(Iface)
component.adapts(A)
def __init__(self, ctx): self.ctx = ctx
def func(self): pass
component.provideAdapter(Adapter)"""
print("Construction of object %2.3fs" % timeit("a=A()", setup=setup_role))
print(
"Construction of roles %2.3fs" % timeit("a=A();Role(a).func()", setup=setup_role)
)
print(
"Construction of roles in context %2.3fs"
% timeit("a=A()\nwith Role.played_by(a): a.func()", setup=setup_role)
)
print(
"Construction of roles from factory %2.3fs"
% timeit("a=A();Role(a).func()", setup=setup_rolefactory)
)
print(
"Construction of roles from factory in context %.3fs"
% timeit("a=A()\nwith Role.played_by(a): a.func()", setup=setup_rolefactory)
)
print(
"Construction of zope adapters %.3fs"
% timeit("a=A();b=Iface(a);b.func()", setup=setup_zope)
)
def profile():
import cProfile
import pstats
from roles import RoleType
class A:
def func(self):
pass
class Role(metaclass=RoleType):
def func(self):
pass
cProfile.run("for x in xrange(10000): Role(a)", "profile.prof")
p = pstats.Stats("profile.prof")
p.strip_dirs().sort_stats("time").print_stats(40)
| """Test performance between roles and zope3 implementations."""
from timeit import timeit
setup_role = """
from roles import RoleType
class A:
pass
class Role(metaclass=RoleType):
def func(self): pass
a = A()
"""
setup_rolefactory = """
from roles import RoleType
from roles.factory import assignto
class A:
pass
class Role(metaclass=RoleType):
def func(self): pass
@assignto(A)
class Subrole(Role):
pass
a = A()
"""
setup_zope = """
from zope import interface, component
class A:
pass
class Iface(interface.Interface):
pass
class Adapter:
interface.implements(Iface)
component.adapts(A)
def __init__(self, ctx): self.ctx = ctx
def func(self): pass
component.provideAdapter(Adapter)"""
print("Construction of object %2.3fs" % timeit("a=A()", setup=setup_role))
print(
"Construction of roles %2.3fs" % timeit("a=A();Role(a).func()", setup=setup_role)
)
print(
"Construction of roles in context %2.3fs"
% timeit("a=A()\nwith Role.played_by(a): a.func()", setup=setup_role)
)
print(
"Construction of roles from factory %2.3fs"
% timeit("a=A();Role(a).func()", setup=setup_rolefactory)
)
print(
"Construction of roles from factory in context %.3fs"
% timeit("a=A()\nwith Role.played_by(a): a.func()", setup=setup_rolefactory)
)
print(
"Construction of zope adapters %.3fs"
% timeit("a=A();b=Iface(a);b.func()", setup=setup_zope)
)
def profile():
import cProfile
import pstats
from roles import RoleType
class A:
def func(self):
pass
class Role(metaclass=RoleType):
def func(self):
pass
cProfile.run("for x in xrange(10000): Role(a)", "profile.prof")
p = pstats.Stats("profile.prof")
p.strip_dirs().sort_stats("time").print_stats(40)
| en | 0.572622 | Test performance between roles and zope3 implementations. from roles import RoleType class A: pass class Role(metaclass=RoleType): def func(self): pass a = A() from roles import RoleType from roles.factory import assignto class A: pass class Role(metaclass=RoleType): def func(self): pass @assignto(A) class Subrole(Role): pass a = A() from zope import interface, component class A: pass class Iface(interface.Interface): pass class Adapter: interface.implements(Iface) component.adapts(A) def __init__(self, ctx): self.ctx = ctx def func(self): pass component.provideAdapter(Adapter) | 2.582439 | 3 |
archiv/models.py | acdh-oeaw/gtrans | 1 | 6622211 | import re
import reversion
import lxml.etree as ET
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
from idprovider.models import IdProvider
from vocabs.models import SkosConcept
from entities.models import Person, Place, Institution
from browsing.browsing_utils import model_to_dict
from tei.archiv_utils import MakeTeiDoc
from transkribus.models import TrpBaseModel
@reversion.register()
class ArchResource(IdProvider, TrpBaseModel):
""" Beschreibt eine (archivalische) Resource """
title = models.CharField(
max_length=500, blank=True, verbose_name="Titel des Dokuments",
help_text="Titel des Dokuments"
)
archiv = models.ForeignKey(
Institution, null=True, blank=True,
verbose_name="Archiv",
help_text="Archiv in dem das Dokument aufbewahrt wird",
related_name="has_docs_archived",
on_delete=models.SET_NULL
)
signature = models.TextField(
blank=True, verbose_name="(Archiv)Signatur",
help_text="(Archiv)Signatur"
)
pid = models.CharField(
blank=True, null=True, max_length=250,
verbose_name="Handle-PID",
help_text="Handle-PID"
)
written_date = models.CharField(
max_length=250, blank=True, verbose_name="Datum original",
help_text="Datum original"
)
not_before = models.DateField(
auto_now=False, auto_now_add=False, blank=True, null=True,
verbose_name="Nicht vor normalisiert",
help_text="YYYY-MM-DD"
)
not_after = models.DateField(
auto_now=False, auto_now_add=False, blank=True, null=True,
verbose_name="Nicht nach normalisiert",
help_text="YYYY-MM-DD"
)
res_type = models.ForeignKey(
SkosConcept, null=True, blank=True, verbose_name="Typ des Dokuments",
help_text="Typ des Dokuments.",
related_name="doc_type",
on_delete=models.SET_NULL
)
subject_norm = models.ManyToManyField(
SkosConcept, blank=True,
help_text="Schlagwörter normalisiert",
verbose_name="Schlagwörter normalisiert",
related_name="subject_norm_of"
)
subject_free = models.TextField(
blank=True, null=True, verbose_name="Schlagwörter original",
help_text="Schlagwörter original"
)
abstract = models.TextField(
blank=True, null=True, verbose_name="Zusammenfassung",
help_text="Zusammenfassung"
)
notes = models.TextField(
blank=True, null=True, verbose_name="Anmerkungen",
help_text="Anmerkungen"
)
creator_person = models.ManyToManyField(
Person, blank=True,
help_text="Erzeuger des Dokuments",
verbose_name="Erzeuger des Dokuments(Person)",
related_name="created_by_person"
)
creator_inst = models.ManyToManyField(
Institution, blank=True,
help_text="Erzeuger des Dokuments(Institution)",
verbose_name="Erzeuger des Dokuments(Institution)",
related_name="created_by_inst"
)
mentioned_person = models.ManyToManyField(
Person, blank=True,
help_text="Im Dokument erwähnte Person",
verbose_name="Im Dokument erwähnte Person",
related_name="pers_mentioned_in_res"
)
mentioned_inst = models.ManyToManyField(
Institution, blank=True,
help_text="Im Dokument erwähnte Institution",
verbose_name="Im Dokument erwähnte Institution",
related_name="inst_mentioned_in_res"
)
mentioned_place = models.ManyToManyField(
Place, blank=True,
help_text="Im Dokument erwähnte Orte",
verbose_name="Im Dokument erwähnte Orte",
related_name="place_mentioned_in_res"
)
rel_res = models.ManyToManyField(
'ArchResource', blank=True,
help_text="In Verbindung stehende Dokumente",
verbose_name="In Verbindung stehende Dokumente",
related_name="related_res"
)
permalink = models.CharField(
max_length=500, blank=True, null=True, verbose_name="Permalink",
help_text="Stabiler Link zu einem Digitalisat dieser Resource"
)
creators = models.ManyToManyField(
User, blank=True,
verbose_name="Verantwortlich",
help_text="Verantwortlich für die Erzeugung dieses Datensatzes",
related_name="created_archres"
)
def __str__(self):
if self.title:
return "Titel: {}".format(self.title)[:250]
elif self.signature:
return "Signatur: {}".format(self.signature)
else:
return "ID: {}".format(self.id)
def get_arche_url(self):
return reverse('archiv:arche_res', kwargs={'pk': self.id})
def as_tei_node(self):
my_node = MakeTeiDoc(self)
return my_node.export_full_doc()
def as_tei(self):
return ET.tostring(self.as_tei_node(), pretty_print=True, encoding='UTF-8')
def save_tei(self, file=None):
my_node = MakeTeiDoc(self)
if file is not None:
pass
else:
file = f"{self.id}.xml"
my_node.export_full_doc_str(file)
return file
@classmethod
def get_listview_url(self):
return reverse('archiv:archresource_browse')
@classmethod
def get_createview_url(self):
return reverse('archiv:archresource_create')
def get_tei_url(self):
return reverse('archiv:archresource_xml', kwargs={'pk': self.id})
def get_absolute_url(self):
return reverse('archiv:archresource_detail', kwargs={'pk': self.id})
def get_delete_url(self):
return reverse('archiv:archresource_delete', kwargs={'pk': self.id})
def get_edit_url(self):
return reverse('archiv:archresource_edit', kwargs={'pk': self.id})
def get_next(self):
next = self.get_next_id()
if next:
return reverse(
'archiv:archresource_detail',
kwargs={'pk': next}
)
return False
def get_prev(self):
prev = self.get_prev_id()
if prev:
return reverse(
'archiv:archresource_detail',
kwargs={'pk': prev}
)
return False
def get_next_id(self):
next = self.__class__.objects.filter(id__gt=self.id)
if next:
return next.first().id
return False
def get_prev_id(self):
prev = self.__class__.objects.filter(id__lt=self.id).order_by('-id')
if prev:
return prev.first().id
return False
class Meta:
verbose_name = "Archivalie"
def copy_instance(self):
"""Saves a copy of the current object and returns it"""
obj = self
obj.id = None
obj.save()
return obj
def field_dict(self):
return model_to_dict(self)
| import re
import reversion
import lxml.etree as ET
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
from idprovider.models import IdProvider
from vocabs.models import SkosConcept
from entities.models import Person, Place, Institution
from browsing.browsing_utils import model_to_dict
from tei.archiv_utils import MakeTeiDoc
from transkribus.models import TrpBaseModel
@reversion.register()
class ArchResource(IdProvider, TrpBaseModel):
""" Beschreibt eine (archivalische) Resource """
title = models.CharField(
max_length=500, blank=True, verbose_name="Titel des Dokuments",
help_text="Titel des Dokuments"
)
archiv = models.ForeignKey(
Institution, null=True, blank=True,
verbose_name="Archiv",
help_text="Archiv in dem das Dokument aufbewahrt wird",
related_name="has_docs_archived",
on_delete=models.SET_NULL
)
signature = models.TextField(
blank=True, verbose_name="(Archiv)Signatur",
help_text="(Archiv)Signatur"
)
pid = models.CharField(
blank=True, null=True, max_length=250,
verbose_name="Handle-PID",
help_text="Handle-PID"
)
written_date = models.CharField(
max_length=250, blank=True, verbose_name="Datum original",
help_text="Datum original"
)
not_before = models.DateField(
auto_now=False, auto_now_add=False, blank=True, null=True,
verbose_name="Nicht vor normalisiert",
help_text="YYYY-MM-DD"
)
not_after = models.DateField(
auto_now=False, auto_now_add=False, blank=True, null=True,
verbose_name="Nicht nach normalisiert",
help_text="YYYY-MM-DD"
)
res_type = models.ForeignKey(
SkosConcept, null=True, blank=True, verbose_name="Typ des Dokuments",
help_text="Typ des Dokuments.",
related_name="doc_type",
on_delete=models.SET_NULL
)
subject_norm = models.ManyToManyField(
SkosConcept, blank=True,
help_text="Schlagwörter normalisiert",
verbose_name="Schlagwörter normalisiert",
related_name="subject_norm_of"
)
subject_free = models.TextField(
blank=True, null=True, verbose_name="Schlagwörter original",
help_text="Schlagwörter original"
)
abstract = models.TextField(
blank=True, null=True, verbose_name="Zusammenfassung",
help_text="Zusammenfassung"
)
notes = models.TextField(
blank=True, null=True, verbose_name="Anmerkungen",
help_text="Anmerkungen"
)
creator_person = models.ManyToManyField(
Person, blank=True,
help_text="Erzeuger des Dokuments",
verbose_name="Erzeuger des Dokuments(Person)",
related_name="created_by_person"
)
creator_inst = models.ManyToManyField(
Institution, blank=True,
help_text="Erzeuger des Dokuments(Institution)",
verbose_name="Erzeuger des Dokuments(Institution)",
related_name="created_by_inst"
)
mentioned_person = models.ManyToManyField(
Person, blank=True,
help_text="Im Dokument erwähnte Person",
verbose_name="Im Dokument erwähnte Person",
related_name="pers_mentioned_in_res"
)
mentioned_inst = models.ManyToManyField(
Institution, blank=True,
help_text="Im Dokument erwähnte Institution",
verbose_name="Im Dokument erwähnte Institution",
related_name="inst_mentioned_in_res"
)
mentioned_place = models.ManyToManyField(
Place, blank=True,
help_text="Im Dokument erwähnte Orte",
verbose_name="Im Dokument erwähnte Orte",
related_name="place_mentioned_in_res"
)
rel_res = models.ManyToManyField(
'ArchResource', blank=True,
help_text="In Verbindung stehende Dokumente",
verbose_name="In Verbindung stehende Dokumente",
related_name="related_res"
)
permalink = models.CharField(
max_length=500, blank=True, null=True, verbose_name="Permalink",
help_text="Stabiler Link zu einem Digitalisat dieser Resource"
)
creators = models.ManyToManyField(
User, blank=True,
verbose_name="Verantwortlich",
help_text="Verantwortlich für die Erzeugung dieses Datensatzes",
related_name="created_archres"
)
def __str__(self):
if self.title:
return "Titel: {}".format(self.title)[:250]
elif self.signature:
return "Signatur: {}".format(self.signature)
else:
return "ID: {}".format(self.id)
def get_arche_url(self):
return reverse('archiv:arche_res', kwargs={'pk': self.id})
def as_tei_node(self):
my_node = MakeTeiDoc(self)
return my_node.export_full_doc()
def as_tei(self):
return ET.tostring(self.as_tei_node(), pretty_print=True, encoding='UTF-8')
def save_tei(self, file=None):
my_node = MakeTeiDoc(self)
if file is not None:
pass
else:
file = f"{self.id}.xml"
my_node.export_full_doc_str(file)
return file
@classmethod
def get_listview_url(self):
return reverse('archiv:archresource_browse')
@classmethod
def get_createview_url(self):
return reverse('archiv:archresource_create')
def get_tei_url(self):
return reverse('archiv:archresource_xml', kwargs={'pk': self.id})
def get_absolute_url(self):
return reverse('archiv:archresource_detail', kwargs={'pk': self.id})
def get_delete_url(self):
    """Return the URL of the delete view for this instance."""
    return reverse('archiv:archresource_delete', kwargs={'pk': self.id})
def get_edit_url(self):
    """Return the URL of the edit view for this instance."""
    return reverse('archiv:archresource_edit', kwargs={'pk': self.id})
def get_next(self):
    """Return the detail URL of the next object (by pk), or False."""
    # Renamed local: the original shadowed the builtin `next`.
    next_id = self.get_next_id()
    if next_id:
        return reverse(
            'archiv:archresource_detail',
            kwargs={'pk': next_id}
        )
    return False
def get_prev(self):
    """Return the detail URL of the previous object (by pk), or False."""
    prev_id = self.get_prev_id()
    if prev_id:
        return reverse(
            'archiv:archresource_detail',
            kwargs={'pk': prev_id}
        )
    return False
def get_next_id(self):
    """Return the pk of the next object in ascending pk order, or False."""
    # Order explicitly by id so the result does not depend on the model's
    # default ordering; mirrors get_prev_id, which orders by '-id'.
    # Also avoids shadowing the builtin `next`.
    following = self.__class__.objects.filter(id__gt=self.id).order_by('id')
    if following:
        return following.first().id
    return False
def get_prev_id(self):
    """Return the pk of the previous object in descending pk order, or False."""
    preceding = self.__class__.objects.filter(id__lt=self.id).order_by('-id')
    if preceding:
        return preceding.first().id
    return False
class Meta:
    # Display name used by the Django admin for this model.
    verbose_name = "Archivalie"
def copy_instance(self):
    """Saves a copy of the current object and returns it"""
    # Standard Django duplication trick: clearing the pk and saving
    # INSERTs a new row. NOTE(review): `obj` is the same object as
    # `self`, so after this call `self.id` is the new copy's pk —
    # confirm callers do not rely on keeping the original instance.
    obj = self
    obj.id = None
    obj.save()
    return obj
def field_dict(self):
    """Return this instance's fields as a plain dict (via model_to_dict)."""
    return model_to_dict(self)
| de | 0.749971 | Beschreibt eine (archivalische) Resource Saves a copy of the current object and returns it | 1.868942 | 2 |
hadar/optimizer/lp/mapper.py | hadar-solver/hadar | 1 | 6622212 | <filename>hadar/optimizer/lp/mapper.py
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Apache License, version 2.0.
# If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0.
# SPDX-License-Identifier: Apache-2.0
# This file is part of hadar-simulator, a python adequacy library for everyone.
import numpy as np
from ortools.linear_solver.pywraplp import Solver
from hadar.optimizer.domain.input import Study, InputNetwork
from hadar.optimizer.lp.domain import (
LPLink,
LPConsumption,
LPNode,
LPProduction,
LPStorage,
LPConverter,
)
from hadar.optimizer.domain.output import (
OutputNode,
Result,
OutputNetwork,
OutputConverter,
)
class InputMapper:
    """
    Input mapper from global domain to linear programming specific domain
    """

    def __init__(self, solver: Solver, study: Study):
        """
        Instantiate mapper.

        :param solver: ortools solver used to create variables
        :param study: study data
        """
        self.solver = solver
        self.study = study

    def get_node_var(self, network: str, node: str, t: int, scn: int) -> LPNode:
        """
        Map InputNode to LPNode.

        Builds one bounded ortools NumVar per consumption, production,
        storage (capacity and both flows) and link, with bounds taken at
        the given scenario and time step.

        :param network: network name
        :param node: node name
        :param t: time step
        :param scn: scenario index
        :return: LPNode according to node name at t in study
        """
        # Suffix makes every solver variable name unique per
        # network / node / time step / scenario.
        suffix = "inside network=%s on node=%s at t=%d for scn=%d" % (
            network,
            node,
            t,
            scn,
        )
        in_node = self.study.networks[network].nodes[node]
        consumptions = [
            LPConsumption(
                name=c.name,
                cost=c.cost[scn, t],
                quantity=c.quantity[scn, t],
                # "lol" = loss of load: variable bounded by the asked quantity.
                variable=self.solver.NumVar(
                    0, float(c.quantity[scn, t]), name="lol=%s %s" % (c.name, suffix)
                ),
            )
            for c in in_node.consumptions
        ]
        productions = [
            LPProduction(
                name=p.name,
                cost=p.cost[scn, t],
                quantity=p.quantity[scn, t],
                # Production variable bounded by the available quantity.
                variable=self.solver.NumVar(
                    0, float(p.quantity[scn, t]), "prod=%s %s" % (p.name, suffix)
                ),
            )
            for p in in_node.productions
        ]
        storages = [
            LPStorage(
                name=s.name,
                flow_in=s.flow_in[scn, t],
                flow_out=s.flow_out[scn, t],
                eff=s.eff[scn, t],
                capacity=s.capacity[scn, t],
                init_capacity=s.init_capacity,
                cost=s.cost[scn, t],
                # Three variables per storage: held capacity plus both flows,
                # each bounded by its input limit at (scn, t).
                var_capacity=self.solver.NumVar(
                    0,
                    float(s.capacity[scn, t]),
                    "storage_capacity=%s %s" % (s.name, suffix),
                ),
                var_flow_in=self.solver.NumVar(
                    0,
                    float(s.flow_in[scn, t]),
                    "storage_flow_in=%s %s" % (s.name, suffix),
                ),
                var_flow_out=self.solver.NumVar(
                    0,
                    float(s.flow_out[scn, t]),
                    "storage_flow_out=%s %s" % (s.name, suffix),
                ),
            )
            for s in in_node.storages
        ]
        links = [
            LPLink(
                dest=l.dest,
                cost=l.cost[scn, t],
                src=node,
                quantity=l.quantity[scn, t],
                # Link flow variable bounded by the link's capacity.
                variable=self.solver.NumVar(
                    0, float(l.quantity[scn, t]), "link=%s %s" % (l.dest, suffix)
                ),
            )
            for l in in_node.links
        ]
        return LPNode(
            consumptions=consumptions,
            productions=productions,
            links=links,
            storages=storages,
        )

    def get_conv_var(self, name: str, t: int, scn: int) -> LPConverter:
        """
        Map Converter to LPConverter.

        :param name: converter name
        :param t: time step
        :param scn: scenario index
        :return: LPConverter
        """
        suffix = "at t=%d for scn=%d" % (t, scn)
        v = self.study.converters[name]
        # NOTE: the loop variable `v` shadows the converter `v` inside the
        # comprehension; the outer `v` is only read for the .items() source,
        # which Python evaluates in the enclosing scope, so this is correct
        # (if confusing). Ratios are sampled at the given (scn, t).
        src_ratios = {k: v[scn, t] for k, v in v.src_ratios.items()}
        return LPConverter(
            name=v.name,
            src_ratios=src_ratios,
            dest_network=v.dest_network,
            dest_node=v.dest_node,
            cost=v.cost[scn, t],
            max=v.max[scn, t],
            # One flow variable per source; its bound is the converter max
            # scaled by the inverse of the source ratio.
            var_flow_src={
                src: self.solver.NumVar(
                    0,
                    float(v.max[scn, t] / r),
                    "flow_src %s %s %s" % (v.name, ":".join(src), suffix),
                )
                for src, r in src_ratios.items()
            },
            var_flow_dest=self.solver.NumVar(
                0, float(v.max[scn, t]), "flow_dest %s %s" % (v.name, suffix)
            ),
        )
class OutputMapper:
    """
    Output mapper from specific linear programming domain to global domain.
    """

    def __init__(self, study: Study):
        """
        Instantiate mapper.

        :param study: input study whose structure is reproduced in the output
        """
        # Pre-allocate result arrays; they are filled later by
        # set_node_var / set_converter_var.
        zeros = np.zeros((study.nb_scn, study.horizon))

        # NOTE(review): the same `zeros` array object is handed to every
        # node and converter; confirm build_like_input / OutputConverter
        # copy it, otherwise the output slots alias one another.
        def build_nodes(network: InputNetwork):
            return {
                name: OutputNode.build_like_input(input, fill=zeros)
                for name, input in network.nodes.items()
            }

        self.networks = {
            name: OutputNetwork(nodes=build_nodes(network))
            for name, network in study.networks.items()
        }
        self.converters = {
            name: OutputConverter(
                name=name,
                flow_src={src: zeros for src in conv.src_ratios},
                flow_dest=zeros,
            )
            for name, conv in study.converters.items()
        }

    def set_node_var(self, network: str, node: str, t: int, scn: int, vars: LPNode):
        """
        Map linear programming node to global node (set inside intern attribute).

        :param network: network name
        :param node: node name
        :param t: timestamp index
        :param scn: scenario index
        :param vars: linear programming node with solved values inside
        :return: None (use get_result)
        """
        out_node = self.networks[network].nodes[node]
        # zip() over paired output/LP elements instead of index loops
        # (`for i in range(len(...))` anti-idiom in the original).
        # Consumption output is the served amount: asked quantity minus
        # the unserved ("loss of load") LP variable.
        for out_cons, lp_cons in zip(out_node.consumptions, vars.consumptions):
            out_cons.quantity[scn, t] = lp_cons.quantity - lp_cons.variable
        for out_prod, lp_prod in zip(out_node.productions, vars.productions):
            out_prod.quantity[scn, t] = lp_prod.variable
        for out_stor, lp_stor in zip(out_node.storages, vars.storages):
            out_stor.capacity[scn, t] = lp_stor.var_capacity
            out_stor.flow_in[scn, t] = lp_stor.var_flow_in
            out_stor.flow_out[scn, t] = lp_stor.var_flow_out
        for out_link, lp_link in zip(out_node.links, vars.links):
            out_link.quantity[scn, t] = lp_link.variable

    def set_converter_var(self, name: str, t: int, scn: int, vars: LPConverter):
        """Store solved converter flows into the output structure."""
        for src, var in vars.var_flow_src.items():
            self.converters[name].flow_src[src][scn, t] = var
        self.converters[name].flow_dest[scn, t] = vars.var_flow_dest

    def get_result(self) -> Result:
        """
        Get result.

        :return: final result after all nodes have been mapped
        """
        return Result(networks=self.networks, converters=self.converters)
| <filename>hadar/optimizer/lp/mapper.py
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Apache License, version 2.0.
# If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0.
# SPDX-License-Identifier: Apache-2.0
# This file is part of hadar-simulator, a python adequacy library for everyone.
import numpy as np
from ortools.linear_solver.pywraplp import Solver
from hadar.optimizer.domain.input import Study, InputNetwork
from hadar.optimizer.lp.domain import (
LPLink,
LPConsumption,
LPNode,
LPProduction,
LPStorage,
LPConverter,
)
from hadar.optimizer.domain.output import (
OutputNode,
Result,
OutputNetwork,
OutputConverter,
)
class InputMapper:
"""
Input mapper from global domain to linear programming specific domain
"""
def __init__(self, solver: Solver, study: Study):
"""
Instantiate mapper.
:param solver: ortools solver to used to create variables
:param study: study data
"""
self.solver = solver
self.study = study
def get_node_var(self, network: str, node: str, t: int, scn: int) -> LPNode:
"""
Map InputNode to LPNode.
:param network: network name
:param node: node name
:param t: time step
:param scn: scenario index
:return: LPNode according to node name at t in study
"""
suffix = "inside network=%s on node=%s at t=%d for scn=%d" % (
network,
node,
t,
scn,
)
in_node = self.study.networks[network].nodes[node]
consumptions = [
LPConsumption(
name=c.name,
cost=c.cost[scn, t],
quantity=c.quantity[scn, t],
variable=self.solver.NumVar(
0, float(c.quantity[scn, t]), name="lol=%s %s" % (c.name, suffix)
),
)
for c in in_node.consumptions
]
productions = [
LPProduction(
name=p.name,
cost=p.cost[scn, t],
quantity=p.quantity[scn, t],
variable=self.solver.NumVar(
0, float(p.quantity[scn, t]), "prod=%s %s" % (p.name, suffix)
),
)
for p in in_node.productions
]
storages = [
LPStorage(
name=s.name,
flow_in=s.flow_in[scn, t],
flow_out=s.flow_out[scn, t],
eff=s.eff[scn, t],
capacity=s.capacity[scn, t],
init_capacity=s.init_capacity,
cost=s.cost[scn, t],
var_capacity=self.solver.NumVar(
0,
float(s.capacity[scn, t]),
"storage_capacity=%s %s" % (s.name, suffix),
),
var_flow_in=self.solver.NumVar(
0,
float(s.flow_in[scn, t]),
"storage_flow_in=%s %s" % (s.name, suffix),
),
var_flow_out=self.solver.NumVar(
0,
float(s.flow_out[scn, t]),
"storage_flow_out=%s %s" % (s.name, suffix),
),
)
for s in in_node.storages
]
links = [
LPLink(
dest=l.dest,
cost=l.cost[scn, t],
src=node,
quantity=l.quantity[scn, t],
variable=self.solver.NumVar(
0, float(l.quantity[scn, t]), "link=%s %s" % (l.dest, suffix)
),
)
for l in in_node.links
]
return LPNode(
consumptions=consumptions,
productions=productions,
links=links,
storages=storages,
)
def get_conv_var(self, name: str, t: int, scn: int) -> LPConverter:
"""
Map Converter to LPConverter.
:param name: converter name
:param t: time step
:param scn: scenario index
:return: LPConverter
"""
suffix = "at t=%d for scn=%d" % (t, scn)
v = self.study.converters[name]
src_ratios = {k: v[scn, t] for k, v in v.src_ratios.items()}
return LPConverter(
name=v.name,
src_ratios=src_ratios,
dest_network=v.dest_network,
dest_node=v.dest_node,
cost=v.cost[scn, t],
max=v.max[scn, t],
var_flow_src={
src: self.solver.NumVar(
0,
float(v.max[scn, t] / r),
"flow_src %s %s %s" % (v.name, ":".join(src), suffix),
)
for src, r in src_ratios.items()
},
var_flow_dest=self.solver.NumVar(
0, float(v.max[scn, t]), "flow_dest %s %s" % (v.name, suffix)
),
)
class OutputMapper:
"""
Output mapper from specific linear programming domain to global domain.
"""
def __init__(self, study: Study):
"""
Instantiate mapper.
:param solver: ortools solver to use to fetch variable value
:param study: input study to reproduce structure
"""
zeros = np.zeros((study.nb_scn, study.horizon))
def build_nodes(network: InputNetwork):
return {
name: OutputNode.build_like_input(input, fill=zeros)
for name, input in network.nodes.items()
}
self.networks = {
name: OutputNetwork(nodes=build_nodes(network))
for name, network in study.networks.items()
}
self.converters = {
name: OutputConverter(
name=name,
flow_src={src: zeros for src in conv.src_ratios},
flow_dest=zeros,
)
for name, conv in study.converters.items()
}
def set_node_var(self, network: str, node: str, t: int, scn: int, vars: LPNode):
"""
Map linear programming node to global node (set inside intern attribute).
:param network: network name
:param node: node name
:param t: timestamp index
:param scn: scenario index
:param vars: linear programming node with ortools variables inside
:return: None (use get_result)
"""
out_node = self.networks[network].nodes[node]
for i in range(len(vars.consumptions)):
out_node.consumptions[i].quantity[scn, t] = (
vars.consumptions[i].quantity - vars.consumptions[i].variable
)
for i in range(len(vars.productions)):
out_node.productions[i].quantity[scn, t] = vars.productions[i].variable
for i in range(len(vars.storages)):
out_node.storages[i].capacity[scn, t] = vars.storages[i].var_capacity
out_node.storages[i].flow_in[scn, t] = vars.storages[i].var_flow_in
out_node.storages[i].flow_out[scn, t] = vars.storages[i].var_flow_out
for i in range(len(vars.links)):
self.networks[network].nodes[node].links[i].quantity[scn, t] = vars.links[
i
].variable
def set_converter_var(self, name: str, t: int, scn: int, vars: LPConverter):
for src, var in vars.var_flow_src.items():
self.converters[name].flow_src[src][scn, t] = var
self.converters[name].flow_dest[scn, t] = vars.var_flow_dest
def get_result(self) -> Result:
"""
Get result.
:return: final result after map all nodes
"""
return Result(networks=self.networks, converters=self.converters)
| en | 0.672865 | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com) # See AUTHORS.txt # This Source Code Form is subject to the terms of the Apache License, version 2.0. # If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0. # SPDX-License-Identifier: Apache-2.0 # This file is part of hadar-simulator, a python adequacy library for everyone. Input mapper from global domain to linear programming specific domain Instantiate mapper. :param solver: ortools solver to used to create variables :param study: study data Map InputNode to LPNode. :param network: network name :param node: node name :param t: time step :param scn: scenario index :return: LPNode according to node name at t in study Map Converter to LPConverter. :param name: converter name :param t: time step :param scn: scenario index :return: LPConverter Output mapper from specific linear programming domain to global domain. Instantiate mapper. :param solver: ortools solver to use to fetch variable value :param study: input study to reproduce structure Map linear programming node to global node (set inside intern attribute). :param network: network name :param node: node name :param t: timestamp index :param scn: scenario index :param vars: linear programming node with ortools variables inside :return: None (use get_result) Get result. :return: final result after map all nodes | 2.44755 | 2 |
union/admin.py | HASSAN1A/Student-Union | 2 | 6622213 | from django.contrib import admin
from .models import StudentUnion,Business,Post,EmergencyService
# Register your models here.
# Expose the app's models in the Django admin with default ModelAdmin options.
admin.site.register(StudentUnion)
admin.site.register(Business)
admin.site.register(Post)
admin.site.register(EmergencyService)
| from django.contrib import admin
from .models import StudentUnion,Business,Post,EmergencyService
# Register your models here.
admin.site.register(StudentUnion)
admin.site.register(Business)
admin.site.register(Post)
admin.site.register(EmergencyService)
| en | 0.968259 | # Register your models here. | 1.36081 | 1 |
mycnn/data/__init__.py | jacky10001/tf2-mycnn | 0 | 6622214 | <reponame>jacky10001/tf2-mycnn<filename>mycnn/data/__init__.py
# -*- coding: utf-8 -*-
from .cats_vs_dogs import cats_vs_dogs_from_MSCenter
from .cats_vs_dogs import cats_vs_dogs_by_kaggle_zipfile
from .voc_dataset import download_pascal_voc_dataset
from .voc_segment import make_voc_segment_dataset
from .classification import generate_classification_dataset
from .segmentation import generate_segmentation_dataset
__all__ = [
'cats_vs_dogs_from_MSCenter',
'cats_vs_dogs_by_kaggle_zipfile',
'download_pascal_voc_dataset',
'make_voc_segment_dataset',
'generate_classification_dataset',
'generate_segmentation_dataset'
] | # -*- coding: utf-8 -*-
from .cats_vs_dogs import cats_vs_dogs_from_MSCenter
from .cats_vs_dogs import cats_vs_dogs_by_kaggle_zipfile
from .voc_dataset import download_pascal_voc_dataset
from .voc_segment import make_voc_segment_dataset
from .classification import generate_classification_dataset
from .segmentation import generate_segmentation_dataset
__all__ = [
'cats_vs_dogs_from_MSCenter',
'cats_vs_dogs_by_kaggle_zipfile',
'download_pascal_voc_dataset',
'make_voc_segment_dataset',
'generate_classification_dataset',
'generate_segmentation_dataset'
] | en | 0.769321 | # -*- coding: utf-8 -*- | 1.394727 | 1 |
pyChocolate/Chocolate.py | kaankarakoc42/pyChocolate | 1 | 6622215 | <filename>pyChocolate/Chocolate.py
from inspect import stack, getframeinfo,getsource
from colorama import Fore,init
from datetime import datetime
# Before reading code u should know
# -> getframeinfo(stack()[1][0]) function getting data about used code line and
# -> that why we can get debug of a code part from program
white=Fore.LIGHTWHITE_EX
green=Fore.GREEN
red=Fore.RED
reset=Fore.RESET
init()
class pyChocolate:
    """Colorized debug logger.

    Pretty-prints values together with the calling source line and file,
    using colorama colors and frame info supplied by the module facades.
    """

    def File(self, frame, kwargs):
        # Append the source file name only when called as Log(..., file=True).
        return f"{white} file :{frame.filename}" if ifEqual(kwargs,("file",True)) else ""

    def Code(self, frame, color, kwargs):
        # Append the calling source line only when called as Log(..., code=True).
        return f"{white} code: {color}{frame.code_context[0].strip()}{reset}" if ifEqual(kwargs,("code",True)) else ""

    def Info(self, frame, output, kwargs):
        # Green "info" line: [Line-N] ->(logged expression) value [code] [file]
        return f"{white}[Line-{frame.lineno}] ->({green+firstValue(frame.code_context[0].strip())+white}) { green}{pretifyOutput(output)} {self.Code(frame,green,kwargs)} {self.File(frame,kwargs)}"

    def Warn(self, frame, output, kwargs):
        # Red "warn" line, without the evaluated-expression part.
        return f"{white}[Line-{frame.lineno}] { red}{pretifyOutput(output)} {self.Code(frame,red,kwargs)} {self.File(frame,kwargs)}"

    def LOG(self, frame, output: "debuging content", kwargs) -> "return given output":
        """Print *output* in warn or info style and return it unchanged,
        so Log(...) can be used inline inside expressions."""
        print(self.Warn(frame,output,kwargs) if ifEqual(kwargs,("mode","warn")) else self.Info(frame,output,kwargs),reset)
        return output

    def Catch(self, frame, tryFunc, runFunc):
        """Run tryFunc, falling back to runFunc on any exception.

        Both arguments are (callable, args) pairs. Returns the successful
        call's result; when both fail, prints both errors with their
        source code and returns [err1, err2].
        """
        arg1,arg2=tryFunc[1],runFunc[1]
        # Crude name extraction from repr(): "<function NAME at 0x...>".
        name1,name2=str(tryFunc[0]).split(" ")[1],str(runFunc[0]).split(" ")[1]
        string=f"{white}[Line-{frame.lineno}]->(Catch) Func:{ green}{{0}} {white}args:({{1}}{white}){ green} {white} return:{ green}{{2}} {reset}"
        try:
            rv=tryFunc[0](*arg1)
            args=colorfulArgs(arg1)
            print(string.format(name1,args,pretifyOutput(rv)))
        except Exception as func1err:
            try:
                # First function failed: try the fallback, then report the
                # original failure together with its source.
                rv=runFunc[0](*arg2)
                args=colorfulArgs(arg2)
                print(string.format(name2,args,pretifyOutput(rv)))
                print(white+f"[Catched]->({green+name1+white})({colorfulArgs(arg1)+white}) "+str(func1err)+reset)
                print(getsource(tryFunc[0]))
            except Exception as func2err:
                # Both functions failed: report each error with its source.
                print(f"{white}[Line-{frame.lineno}]->({ Fore.LIGHTRED_EX}Catch{white}) { red}'error on both functions' {white}[{ red}{name1}{white},{ red}{name2}{white}]{ reset}")
                print(white+f"[Catched]->({green+name1+white})({colorfulArgs(arg1)+white}) "+str(func1err)+reset)
                print(getsource(tryFunc[0]))
                print(white+f"[Catched]->({green+name2+white})({colorfulArgs(arg2)+white}) "+str(func2err)+reset)
                print(getsource(runFunc[0]))
                return [func1err,func2err]
        return rv

    def put(self, text):
        """Print *text* prefixed with the current HH:MM:SS timestamp."""
        date=datetime.now().strftime("%H:%M:%S")
        print(white+f"[{date}] "+text+reset)
#-----------ChocolateFuncs----------
def ifEqual(kwargs, tuple_):
    """Return True if *tuple_* is one of the (key, value) pairs in *kwargs*."""
    # Membership works directly on the items view: no list() copy and no
    # `True if ... else False` round-trip needed.
    return tuple_ in kwargs.items()
def multiSplit(string, args):
    """Split *string* on every delimiter in *args*.

    All delimiters are first normalized to the first one, then a single
    split is performed.
    """
    canonical = args[0]
    for delimiter in args:
        string = string.replace(delimiter, canonical)
    return string.split(canonical)
def getLog(code):
    """Return the argument tokens of the Log(...) call found in *code*.

    The source line is split on parentheses; the tokens between the "Log"
    token and its mirrored position at the end of the list are returned.
    """
    x = multiSplit(code, ["(", ")"])
    try:
        i = x.index("Log")
    except ValueError:  # narrow except: only "not found", not a bare except
        # No exact "Log" token (e.g. "module.Log"): fall back to the last
        # token that contains "Log". Initialize i so it can no longer be
        # unbound when nothing matches (original raised UnboundLocalError).
        i = 0
        for s in x:
            if "Log" in s:
                i = x.index(s)
    return x[i + 1:len(x) - i - 1]
def firstValue(code):
    """Render the first argument of a Log(...) call for display.

    Container or string literals that are cut off at the first comma get a
    trailing "...X" marker with the matching closing character.
    """
    code = getLog(code)
    end = ""
    if len(code) > 1:
        # Nested call such as Log(f(x)): re-join the pieces with colors.
        return code[0] + white + ")(" + green + "".join(code[1])
    # Keep only the first comma-separated argument.
    rv = " ".join(code).split(",")[0]
    if rv[0] == "[" or rv[0] == "{" or rv[0] == "(" or rv[0] == '"':
        # Opening bracket/quote was truncated: append "...<closer>".
        p = {"[": "]", "{": "}", "(": ")", '"': '"'}
        end = "..." + p[rv[0]]
    # If the literal is actually complete, drop the ellipsis marker again.
    if rv[0] == '"' and rv.endswith('"'):
        end = ""
    if rv[0] == '{' and rv.endswith('}'):
        end = ""
    if rv[0] == '[' and rv.endswith(']'):
        end = ""
    return rv + end
def colorfulArgs(arg):
    """Join *arg* into a comma-separated, green-colored display string;
    string values are shown re-quoted."""
    # isinstance() replaces the `type(i) != str` anti-pattern; also covers
    # str subclasses now.
    return ','.join([green + str(i) + reset if not isinstance(i, str)
                     else green + '"' + str(i) + '"' + reset for i in arg])
def colorfulDicts(output, indent, innerIndent=False):
    """Pretty-print a (possibly nested) dict with colorama colors.

    :param output: dict to render
    :param indent: current indentation width in spaces
    :param innerIndent: True when called recursively for a nested dict;
        controls the closing brace indent and trailing comma
    """
    innerIndent = indent if innerIndent == True else 0
    def colorize():
        rv = white + "{\n"
        for i in list(output.items()):
            rv += f'{indent*" "} {green}"{i[0]}"{white}:'
            if isinstance(i[1], dict):
                # Recurse with a deeper indent for nested dicts.
                rv += colorfulDicts(i[1], indent+2, True) + (indent+2)*" " + "\n"
            elif isinstance(i[1], str):
                rv += f'{green}"{i[1]}"{reset},\n'
            elif isinstance(i[1], list):
                rv += f"{white}[{colorfulArgs(i[1])}{white}]\n"
            else:
                rv += f'{i[1]},\n'
        return rv
    # Nested dicts need a trailing comma after their closing brace.
    comma = "," if innerIndent else ""
    return f"{green}" + colorize() + white + (innerIndent*" ") + "}" + comma
def pretifyOutput(output):
    """Format a logged value for display: quote strings, colorize dicts
    and lists, pass everything else through unchanged."""
    # isinstance() replaces the `type(x) == T` anti-pattern; behavior is
    # unchanged for exact types and now also covers subclasses.
    if isinstance(output, str):
        return f'"{output}"'
    elif isinstance(output, dict):
        return f"{white}rv={green}Dict\n" + colorfulDicts(output, 4) + "\n"
    elif isinstance(output, list):
        return white + "[" + colorfulArgs(output) + white + "]"
    else:
        return output
#-----------exporting---------------
# Module-level singleton used by the Log/Catch/put facades below.
Chocolate=pyChocolate()
def Log(output: "debuging content", **kwargs) -> "return given output":
    """Facade: log *output* with the caller's frame info and return it."""
    return Chocolate.LOG(getframeinfo(stack()[1][0]), output, kwargs)
def Catch(tryFunc: "function", runFunc: "function") -> "return given output":
    """Facade: run tryFunc with fallback runFunc, reporting via Chocolate."""
    return Chocolate.Catch(getframeinfo(stack()[1][0]), tryFunc, runFunc)
def put(text):
    """Facade: timestamped print via the Chocolate singleton."""
    Chocolate.put(text)
#-------------Done------------------
| <filename>pyChocolate/Chocolate.py
from inspect import stack, getframeinfo,getsource
from colorama import Fore,init
from datetime import datetime
# Before reading code u should know
# -> getframeinfo(stack()[1][0]) function getting data about used code line and
# -> that why we can get debug of a code part from program
white=Fore.LIGHTWHITE_EX
green=Fore.GREEN
red=Fore.RED
reset=Fore.RESET
init()
class pyChocolate:
def File(self,frame,kwargs):
return f"{white} file :{frame.filename}" if ifEqual(kwargs,("file",True)) else ""
def Code(self,frame,color,kwargs):
return f"{white} code: {color}{frame.code_context[0].strip()}{reset}" if ifEqual(kwargs,("code",True)) else ""
def Info(self,frame,output,kwargs):
return f"{white}[Line-{frame.lineno}] ->({green+firstValue(frame.code_context[0].strip())+white}) { green}{pretifyOutput(output)} {self.Code(frame,green,kwargs)} {self.File(frame,kwargs)}"
def Warn(self,frame,output,kwargs):
return f"{white}[Line-{frame.lineno}] { red}{pretifyOutput(output)} {self.Code(frame,red,kwargs)} {self.File(frame,kwargs)}"
def LOG(self,frame,output:"debuging content",kwargs)->"return given output":
print(self.Warn(frame,output,kwargs) if ifEqual(kwargs,("mode","warn")) else self.Info(frame,output,kwargs),reset)
return output
def Catch(self,frame,tryFunc,runFunc):
arg1,arg2=tryFunc[1],runFunc[1]
name1,name2=str(tryFunc[0]).split(" ")[1],str(runFunc[0]).split(" ")[1]
string=f"{white}[Line-{frame.lineno}]->(Catch) Func:{ green}{{0}} {white}args:({{1}}{white}){ green} {white} return:{ green}{{2}} {reset}"
try:
rv=tryFunc[0](*arg1)
args=colorfulArgs(arg1)
print(string.format(name1,args,pretifyOutput(rv)))
except Exception as func1err:
try:
rv=runFunc[0](*arg2)
args=colorfulArgs(arg2)
print(string.format(name2,args,pretifyOutput(rv)))
print(white+f"[Catched]->({green+name1+white})({colorfulArgs(arg1)+white}) "+str(func1err)+reset)
print(getsource(tryFunc[0]))
except Exception as func2err:
print(f"{white}[Line-{frame.lineno}]->({ Fore.LIGHTRED_EX}Catch{white}) { red}'error on both functions' {white}[{ red}{name1}{white},{ red}{name2}{white}]{ reset}")
print(white+f"[Catched]->({green+name1+white})({colorfulArgs(arg1)+white}) "+str(func1err)+reset)
print(getsource(tryFunc[0]))
print(white+f"[Catched]->({green+name2+white})({colorfulArgs(arg2)+white}) "+str(func2err)+reset)
print(getsource(runFunc[0]))
return [func1err,func2err]
return rv
def put(self,text):
date=datetime.now().strftime("%H:%M:%S")
print(white+f"[{date}] "+text+reset)
#-----------ChocolateFuncs----------
def ifEqual(kwargs,tuple_):
return True if tuple_ in list(kwargs.items()) else False
def multiSplit(string,args):
for arg in args:
string=string.replace(arg,args[0])
return string.split(args[0])
def getLog(code):
x=multiSplit(code,["(",")"])
try:
i=x.index("Log")
except:
for s in x:
if "Log" in s:
i=x.index(s)
return x[i+1:len(x)-i-1]
def firstValue(code):
code=getLog(code)
end=""
if len(code)>1:
return code[0]+white+")("+green+"".join(code[1])
rv=" ".join(code).split(",")[0]
if rv[0]=="[" or rv[0]=="{" or rv[0]=="(" or rv[0]=='"':
p={"[":"]","{":"}","(":")",'"':'"'}
end="..."+p[rv[0]]
if rv[0]=='"' and rv.endswith('"'):
end=""
if rv[0]=='{' and rv.endswith('}'):
end=""
if rv[0]=='[' and rv.endswith(']'):
end=""
return rv+end
def colorfulArgs(arg):
return ','.join([ green+str(i)+reset if type(i)!=str else green+'"'+str(i)+'"'+reset for i in arg])
def colorfulDicts(output,indent,innerIndent=False):
innerIndent=indent if innerIndent==True else 0
def colorize():
rv=white+"{\n"
for i in list(output.items()):
rv+=f'{indent*" "} {green}"{i[0]}"{white}:'
if isinstance(i[1], dict):
rv+=colorfulDicts(i[1],indent+2,True)+(indent+2)*" "+"\n"
elif isinstance(i[1], str):
rv+=f'{green}"{i[1]}"{reset},\n'
elif isinstance(i[1],list):
rv+=f"{white}[{colorfulArgs(i[1])}{white}]\n"
else:
rv+=f'{i[1]},\n'
return rv
comma="," if innerIndent else ""
return f"{green}"+colorize()+white+(innerIndent*" ")+"}"+comma
def pretifyOutput(output):
if type(output)==str:
return f'"{output}"'
elif type(output)==dict:
return f"{white}rv={green}Dict\n"+colorfulDicts(output,4)+"\n"
elif type(output)==list:
return white+"["+colorfulArgs(output)+white+"]"
else:
return output
#-----------exporting---------------
Chocolate=pyChocolate()
def Log(output:"debuging content",**kwargs)->"return given output":
return Chocolate.LOG(getframeinfo(stack()[1][0]),output,kwargs)
def Catch(tryFunc:"function",runFunc:"function")->"return given output":
return Chocolate.Catch(getframeinfo(stack()[1][0]),tryFunc,runFunc)
def put(text):
Chocolate.put(text)
#-------------Done------------------
| en | 0.482206 | # Before reading code u should know # -> getframeinfo(stack()[1][0]) function getting data about used code line and # -> that why we can get debug of a code part from program #-----------ChocolateFuncs---------- #-----------exporting--------------- #-------------Done------------------ | 2.747956 | 3 |
Neural Collaborative Filtering/CleanData.py | IanSullivan/Recommendation | 0 | 6622216 | <reponame>IanSullivan/Recommendation<gh_stars>0
import pandas as pd
import numpy as np
import random
import time
# Input transactions CSV (H&M Kaggle dataset) and output file name.
src = "D:\\data\\h&m images\\h-and-m-personalized-fashion-recommendations\\transactions_train.csv"
# src = "dummy.csv"
final_df_name = 'indexCustomersLabeled20.csv'
# Only the first df_size transactions are used.
df_size = 20000
df = pd.read_csv(src)
df = df[:df_size]
# Looking for all unique values to map them to an index, embedding layer requires indexes
customerSet = set()
custormer2Idx = dict()
itemSet = set()
item2Idx = dict()
print(df_size)
print(df.columns)
# NOTE(review): list comprehensions used purely for side effects; a plain
# set(df['customer_id']) would be the idiomatic equivalent.
[customerSet.add(i) for i in df['customer_id']]
print(len(customerSet), ' customer set size')
[itemSet.add(i) for i in df['article_id']]
print(len(itemSet), ' item set size')
# Assign a dense integer index to every customer and every article.
for i, customer in enumerate(customerSet):
    custormer2Idx[customer] = i
for i, item in enumerate(itemSet):
    item2Idx[item] = i
# Number of negative (fake) samples generated per positive sample.
n_negatives = 2
time1 = time.time()
print("real values")
# loop through the data frame with relevant columns, label of 1 indicates it is a real customer item pair
result = np.array([(custormer2Idx[x], item2Idx[y], z, 1.0) for x, y, z in zip(df['customer_id'], df['article_id'],
                                                                              df['price'])])
print(abs(time1 - time.time()))
print("fake values")
time1 = time.time()
setList = list(customerSet)
# label of 0 indicates it is a fake customer item pair ie; the customer never purchased the item in the data set
# (negative sampling: a random customer is paired with each purchased article)
for i in range(n_negatives):
    fake_results = np.array([(custormer2Idx[random.choice(setList)], item2Idx[y], z, 0.0) for y, z in
                             zip(df['article_id'], df['price'])])
    result = np.vstack((result, fake_results))
print(abs(time1 - time.time()))
# Save to csv
df = pd.DataFrame(data=result, columns=['customer_id', 'article_id', 'price', 'label'], index=None)
df.to_csv(final_df_name)
| import pandas as pd
import numpy as np
import random
import time
src = "D:\\data\\h&m images\\h-and-m-personalized-fashion-recommendations\\transactions_train.csv"
# src = "dummy.csv"
final_df_name = 'indexCustomersLabeled20.csv'
df_size = 20000
df = pd.read_csv(src)
df = df[:df_size]
# Looking for all unique values to map them to an index, embedding layer requires indexes
customerSet = set()
custormer2Idx = dict()
itemSet = set()
item2Idx = dict()
print(df_size)
print(df.columns)
[customerSet.add(i) for i in df['customer_id']]
print(len(customerSet), ' customer set size')
[itemSet.add(i) for i in df['article_id']]
print(len(itemSet), ' item set size')
for i, customer in enumerate(customerSet):
custormer2Idx[customer] = i
for i, item in enumerate(itemSet):
item2Idx[item] = i
n_negatives = 2
time1 = time.time()
print("real values")
# loop through the data frame with relevant columns, label of 1 indicates it is a real customer item pair
result = np.array([(custormer2Idx[x], item2Idx[y], z, 1.0) for x, y, z in zip(df['customer_id'], df['article_id'],
df['price'])])
print(abs(time1 - time.time()))
print("fake values")
time1 = time.time()
setList = list(customerSet)
# label of 0 indicates it is a fake customer item pair ie; the customer never purchased the item in the data set
for i in range(n_negatives):
fake_results = np.array([(custormer2Idx[random.choice(setList)], item2Idx[y], z, 0.0) for y, z in
zip(df['article_id'], df['price'])])
result = np.vstack((result, fake_results))
print(abs(time1 - time.time()))
# Save to csv
df = pd.DataFrame(data=result, columns=['customer_id', 'article_id', 'price', 'label'], index=None)
df.to_csv(final_df_name) | en | 0.733328 | # src = "dummy.csv" # Looking for all unique values to map them to an index, embedding layer requires indexes # loop through the data frame with relevant columns, label of 1 indicates it is a real customer item pair # label of 0 indicates it is a fake customer item pair ie; the customer never purchased the item in the data set # Save to csv | 3.053117 | 3 |
config.py | Alexwell/flask-task-manager | 0 | 6622217 | <filename>config.py
# -*- coding: utf-8 -*-
import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
class Config(object):
    """Flask configuration loaded from the environment with dev fallbacks."""
    # Fallback key is for local development only; always set SECRET_KEY in
    # production environments.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'test-secret-key-12ew5fesa7azo14cWfgQfghNccf55'
    # NOTE(review): CSRF protection is globally disabled — confirm this is
    # intentional before deploying.
    WTF_CSRF_ENABLED = False
    # SQLAlchemy settings
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or 'sqlite:///' + os.path.join(basedir, 'app.db')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
| <filename>config.py
# -*- coding: utf-8 -*-
import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or 'test-secret-key-12ew5fesa7azo14cWfgQfghNccf55'
WTF_CSRF_ENABLED = False
# SQLAlchemy settings
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or 'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
| en | 0.625722 | # -*- coding: utf-8 -*- # SQLAlchemy settings | 1.857419 | 2 |
aug_runner.py | pabloduque0/cnn_deconv_viz | 0 | 6622218 | <reponame>pabloduque0/cnn_deconv_viz
# Builds a combined two-channel (T1 + FLAIR) slice tensor from the three
# sites (Utrecht, Singapore, GE3T/Amsterdam) and trains a Wasserstein GAN on
# it for data augmentation.
from augmentation.combineds import wassersteingan
import numpy as np
from preprocessing.imageparser import ImageParser
from constants import *
import gc
import os
import cv2
parser = ImageParser(path_utrech='../Utrecht/subjects',
                     path_singapore='../Singapore/subjects',
                     path_amsterdam='../GE3T/subjects')
utrech_dataset, singapore_dataset, amsterdam_dataset = parser.get_all_images_and_labels()
t1_utrecht, flair_utrecht, labels_utrecht, white_mask_utrecht, distance_utrecht = parser.get_all_sets_paths(utrech_dataset)
t1_singapore, flair_singapore, labels_singapore, white_mask_singapore, distance_singapore = parser.get_all_sets_paths(singapore_dataset)
t1_amsterdam, flair_amsterdam, labels_amsterdam, white_mask_amsterdam, distance_amsterdam = parser.get_all_sets_paths(amsterdam_dataset)
slice_shape = SLICE_SHAPE
print('Utrecht: ', len(t1_utrecht), len(flair_utrecht), len(labels_utrecht))
print('Singapore: ', len(t1_singapore), len(flair_singapore), len(labels_singapore))
print('Amsterdam: ', len(t1_amsterdam), len(flair_amsterdam), len(labels_amsterdam))
"""
LABELS DATA
"""
# Extra slices trimmed from the volume top/bottom on top of the global
# REMOVE_TOP/REMOVE_BOT constants; Amsterdam gets additional trimming.
rm_extra_top = 14
rm_extra_bot = 17
rm_extra_amsterdam_bot = 21
rm_extra_amsterdam_top = 14
final_label_imgs = parser.preprocess_all_labels([labels_utrecht,
                                                 labels_singapore,
                                                 labels_amsterdam], slice_shape, [UTRECH_N_SLICES,
                                                                                  SINGAPORE_N_SLICES,
                                                                                  AMSTERDAM_N_SLICES],
                                                REMOVE_TOP + rm_extra_top,
                                                REMOVE_BOT + rm_extra_bot,
                                                (rm_extra_amsterdam_top, rm_extra_amsterdam_bot))
'''
T1 DATA
'''
# Standardise each T1 volume ("stand"), then rescale to [-1, 1]
# (normalize_neg_pos_one).
rm_total = (REMOVE_TOP + REMOVE_BOT) + rm_extra_top + rm_extra_bot
utrecht_normalized_t1 = parser.preprocess_dataset_t1(t1_utrecht, slice_shape, UTRECH_N_SLICES,
                                                     REMOVE_TOP + rm_extra_top, REMOVE_BOT + rm_extra_bot, norm_type="stand")
utrecht_normalized_t1 = parser.normalize_neg_pos_one(utrecht_normalized_t1, UTRECH_N_SLICES - rm_total)
singapore_normalized_t1 = parser.preprocess_dataset_t1(t1_singapore, slice_shape, SINGAPORE_N_SLICES,
                                                       REMOVE_TOP + rm_extra_top, REMOVE_BOT + rm_extra_bot, norm_type="stand")
singapore_normalized_t1 = parser.normalize_neg_pos_one(singapore_normalized_t1, SINGAPORE_N_SLICES - rm_total)
amsterdam_normalized_t1 = parser.preprocess_dataset_t1(t1_amsterdam, slice_shape, AMSTERDAM_N_SLICES,
                                                       REMOVE_TOP + rm_extra_top + rm_extra_amsterdam_top,
                                                       REMOVE_BOT + rm_extra_bot + rm_extra_amsterdam_bot, norm_type="stand")
amsterdam_normalized_t1 = parser.normalize_neg_pos_one(amsterdam_normalized_t1,
                                                       AMSTERDAM_N_SLICES - rm_total - rm_extra_amsterdam_bot - rm_extra_amsterdam_top)
del t1_utrecht, t1_singapore, t1_amsterdam
'''
FLAIR DATA
'''
utrecht_stand_flairs = parser.preprocess_dataset_flair(flair_utrecht, slice_shape, UTRECH_N_SLICES,
                                                       REMOVE_TOP + rm_extra_top, REMOVE_BOT + rm_extra_bot, norm_type="stand")
utrecht_stand_flairs = parser.normalize_neg_pos_one(utrecht_stand_flairs, UTRECH_N_SLICES - rm_total)
singapore_stand_flairs = parser.preprocess_dataset_flair(flair_singapore, slice_shape, SINGAPORE_N_SLICES,
                                                         REMOVE_TOP + rm_extra_top, REMOVE_BOT + rm_extra_bot, norm_type="stand")
singapore_stand_flairs = parser.normalize_neg_pos_one(singapore_stand_flairs, SINGAPORE_N_SLICES - rm_total)
amsterdam_stand_flairs = parser.preprocess_dataset_flair(flair_amsterdam, slice_shape, AMSTERDAM_N_SLICES,
                                                         REMOVE_TOP + rm_extra_top + rm_extra_amsterdam_top,
                                                         REMOVE_BOT + rm_extra_bot + rm_extra_amsterdam_bot, norm_type="stand")
amsterdam_stand_flairs = parser.normalize_neg_pos_one(amsterdam_stand_flairs,
                                                      AMSTERDAM_N_SLICES - rm_total - rm_extra_amsterdam_bot - rm_extra_amsterdam_top)
del flair_utrecht, flair_singapore, flair_amsterdam
'''
DATA CONCAT
'''
# Stack all sites along the slice axis, then join T1 and FLAIR as two channels.
normalized_t1 = np.concatenate([utrecht_normalized_t1,
                                singapore_normalized_t1,
                                amsterdam_normalized_t1], axis=0)
normalized_flairs = np.concatenate([utrecht_stand_flairs,
                                    singapore_stand_flairs,
                                    amsterdam_stand_flairs], axis=0)
del utrecht_normalized_t1, singapore_normalized_t1, amsterdam_normalized_t1
del utrecht_stand_flairs, singapore_stand_flairs, amsterdam_stand_flairs
data_t1 = np.expand_dims(np.asanyarray(normalized_t1), axis=3)
data_flair = np.expand_dims(np.asanyarray(normalized_flairs), axis=3)
all_data = np.concatenate([data_t1, data_flair], axis=3)
del data_t1, data_flair
gc.collect()
# NOTE(review): interactive sanity check -- blocks on a key press for every
# slice, so the script cannot run unattended while this loop is present.
for img in all_data:
    cv2.imshow("hi", np.concatenate([img[..., 0], img[..., 1]], axis=1))
    cv2.waitKey(0)
training_name = "wasserstein_gan_test1_v1"
base_path = os.getcwd()
print("HEREEEE ", (*all_data.shape[1:-1], all_data.shape[-1]))  # debug print
GAN = wassersteingan.WassersteinGAN(img_shape=(*all_data.shape[1:-1], all_data.shape[-1]), noise_shape=(128,))
GAN.train(all_data, base_path=base_path, training_name=training_name,
          epochs=5000, batch_size=16, save_interval=50)
| from augmentation.combineds import wassersteingan
# Builds a combined two-channel (T1 + FLAIR) slice tensor from the three
# sites (Utrecht, Singapore, GE3T/Amsterdam) and trains a Wasserstein GAN on
# it for data augmentation.
import numpy as np
from preprocessing.imageparser import ImageParser
from constants import *
import gc
import os
import cv2
parser = ImageParser(path_utrech='../Utrecht/subjects',
                     path_singapore='../Singapore/subjects',
                     path_amsterdam='../GE3T/subjects')
utrech_dataset, singapore_dataset, amsterdam_dataset = parser.get_all_images_and_labels()
t1_utrecht, flair_utrecht, labels_utrecht, white_mask_utrecht, distance_utrecht = parser.get_all_sets_paths(utrech_dataset)
t1_singapore, flair_singapore, labels_singapore, white_mask_singapore, distance_singapore = parser.get_all_sets_paths(singapore_dataset)
t1_amsterdam, flair_amsterdam, labels_amsterdam, white_mask_amsterdam, distance_amsterdam = parser.get_all_sets_paths(amsterdam_dataset)
slice_shape = SLICE_SHAPE
print('Utrecht: ', len(t1_utrecht), len(flair_utrecht), len(labels_utrecht))
print('Singapore: ', len(t1_singapore), len(flair_singapore), len(labels_singapore))
print('Amsterdam: ', len(t1_amsterdam), len(flair_amsterdam), len(labels_amsterdam))
"""
LABELS DATA
"""
# Extra slices trimmed from the volume top/bottom on top of the global
# REMOVE_TOP/REMOVE_BOT constants; Amsterdam gets additional trimming.
rm_extra_top = 14
rm_extra_bot = 17
rm_extra_amsterdam_bot = 21
rm_extra_amsterdam_top = 14
final_label_imgs = parser.preprocess_all_labels([labels_utrecht,
                                                 labels_singapore,
                                                 labels_amsterdam], slice_shape, [UTRECH_N_SLICES,
                                                                                  SINGAPORE_N_SLICES,
                                                                                  AMSTERDAM_N_SLICES],
                                                REMOVE_TOP + rm_extra_top,
                                                REMOVE_BOT + rm_extra_bot,
                                                (rm_extra_amsterdam_top, rm_extra_amsterdam_bot))
'''
T1 DATA
'''
# Standardise each T1 volume ("stand"), then rescale to [-1, 1]
# (normalize_neg_pos_one).
rm_total = (REMOVE_TOP + REMOVE_BOT) + rm_extra_top + rm_extra_bot
utrecht_normalized_t1 = parser.preprocess_dataset_t1(t1_utrecht, slice_shape, UTRECH_N_SLICES,
                                                     REMOVE_TOP + rm_extra_top, REMOVE_BOT + rm_extra_bot, norm_type="stand")
utrecht_normalized_t1 = parser.normalize_neg_pos_one(utrecht_normalized_t1, UTRECH_N_SLICES - rm_total)
singapore_normalized_t1 = parser.preprocess_dataset_t1(t1_singapore, slice_shape, SINGAPORE_N_SLICES,
                                                       REMOVE_TOP + rm_extra_top, REMOVE_BOT + rm_extra_bot, norm_type="stand")
singapore_normalized_t1 = parser.normalize_neg_pos_one(singapore_normalized_t1, SINGAPORE_N_SLICES - rm_total)
amsterdam_normalized_t1 = parser.preprocess_dataset_t1(t1_amsterdam, slice_shape, AMSTERDAM_N_SLICES,
                                                       REMOVE_TOP + rm_extra_top + rm_extra_amsterdam_top,
                                                       REMOVE_BOT + rm_extra_bot + rm_extra_amsterdam_bot, norm_type="stand")
amsterdam_normalized_t1 = parser.normalize_neg_pos_one(amsterdam_normalized_t1,
                                                       AMSTERDAM_N_SLICES - rm_total - rm_extra_amsterdam_bot - rm_extra_amsterdam_top)
del t1_utrecht, t1_singapore, t1_amsterdam
'''
FLAIR DATA
'''
utrecht_stand_flairs = parser.preprocess_dataset_flair(flair_utrecht, slice_shape, UTRECH_N_SLICES,
                                                       REMOVE_TOP + rm_extra_top, REMOVE_BOT + rm_extra_bot, norm_type="stand")
utrecht_stand_flairs = parser.normalize_neg_pos_one(utrecht_stand_flairs, UTRECH_N_SLICES - rm_total)
singapore_stand_flairs = parser.preprocess_dataset_flair(flair_singapore, slice_shape, SINGAPORE_N_SLICES,
                                                         REMOVE_TOP + rm_extra_top, REMOVE_BOT + rm_extra_bot, norm_type="stand")
singapore_stand_flairs = parser.normalize_neg_pos_one(singapore_stand_flairs, SINGAPORE_N_SLICES - rm_total)
amsterdam_stand_flairs = parser.preprocess_dataset_flair(flair_amsterdam, slice_shape, AMSTERDAM_N_SLICES,
                                                         REMOVE_TOP + rm_extra_top + rm_extra_amsterdam_top,
                                                         REMOVE_BOT + rm_extra_bot + rm_extra_amsterdam_bot, norm_type="stand")
amsterdam_stand_flairs = parser.normalize_neg_pos_one(amsterdam_stand_flairs,
                                                      AMSTERDAM_N_SLICES - rm_total - rm_extra_amsterdam_bot - rm_extra_amsterdam_top)
del flair_utrecht, flair_singapore, flair_amsterdam
'''
DATA CONCAT
'''
# Stack all sites along the slice axis, then join T1 and FLAIR as two channels.
normalized_t1 = np.concatenate([utrecht_normalized_t1,
                                singapore_normalized_t1,
                                amsterdam_normalized_t1], axis=0)
normalized_flairs = np.concatenate([utrecht_stand_flairs,
                                    singapore_stand_flairs,
                                    amsterdam_stand_flairs], axis=0)
del utrecht_normalized_t1, singapore_normalized_t1, amsterdam_normalized_t1
del utrecht_stand_flairs, singapore_stand_flairs, amsterdam_stand_flairs
data_t1 = np.expand_dims(np.asanyarray(normalized_t1), axis=3)
data_flair = np.expand_dims(np.asanyarray(normalized_flairs), axis=3)
all_data = np.concatenate([data_t1, data_flair], axis=3)
del data_t1, data_flair
gc.collect()
# NOTE(review): interactive sanity check -- blocks on a key press for every
# slice, so the script cannot run unattended while this loop is present.
for img in all_data:
    cv2.imshow("hi", np.concatenate([img[..., 0], img[..., 1]], axis=1))
    cv2.waitKey(0)
training_name = "wasserstein_gan_test1_v1"
base_path = os.getcwd()
print("HEREEEE ", (*all_data.shape[1:-1], all_data.shape[-1]))  # debug print
GAN = wassersteingan.WassersteinGAN(img_shape=(*all_data.shape[1:-1], all_data.shape[-1]), noise_shape=(128,))
GAN.train(all_data, base_path=base_path, training_name=training_name,
epochs=5000, batch_size=16, save_interval=50) | en | 0.359067 | LABELS DATA T1 DATA FLAIR DATA DATA CONCAT | 2.23494 | 2 |
loot_generator/__init__.py | Tengro/lootgenerator | 1 | 6622219 | <filename>loot_generator/__init__.py<gh_stars>1-10
# Package version metadata exposed as conventional dunder attributes.
__version__ = '0.1.0'
__author__ = '<NAME>'  # placeholder left by dataset anonymization
# Version synonym
VERSION = __version__ | <filename>loot_generator/__init__.py<gh_stars>1-10
# Package version metadata exposed as conventional dunder attributes.
__version__ = '0.1.0'
__author__ = '<NAME>'  # placeholder left by dataset anonymization
# Version synonym
VERSION = __version__ | en | 0.963459 | # Version synonym | 1.024621 | 1 |
ex-mundo1/ex025.py | PedroPegado/ex-cursoemvideo | 0 | 6622220 | print('\033[1;35m===== EX 025 =====\033[m')
# Read the user's full name and test (case-insensitively) whether it
# contains the surname "Silva".
x = input('Ponha seu nome completo: ')
y = x.lower()  # lowercase copy so the membership test ignores case
z = 'silva' in y  # True if "silva" appears anywhere in the name
print(f'Seu nome contém o sobrenome Silva? {z}') | print('\033[1;35m===== EX 025 =====\033[m')
# Read the user's full name and test (case-insensitively) whether it
# contains the surname "Silva".
x = input('Ponha seu nome completo: ')
y = x.lower()  # lowercase copy so the membership test ignores case
z = 'silva' in y  # True if "silva" appears anywhere in the name
print(f'Seu nome contém o sobrenome Silva? {z}') | none | 1 | 3.609927 | 4 | |
archivematica/fetchDip.py | kngreaves/scripts | 15 | 6622221 | #! usr/bin/env python
# fetch_dip.py
# This script is designed to be run at regular intervals, for example from a crontab.
#
# Downloads a DIP from Archivematica to the TMP_DIR and extracts the tarball.
# Derivatives are created for each file in its objects directory, and they are moved,
# along with the original file, to the DESTINATION_DIR.
#
# Tested on Python 3.7.0. Requires Python requests library (http://docs.python-requests.org/en/master/)
# and Imagemagick with Ghostscript ()
import glob
import json
import logging
import os
import requests
import shutil
import subprocess
import tarfile
# Logging
LOG_FILE = 'fetch-dip-log.txt'
LOG_LEVEL = 'INFO'  # any level name understood by the logging module
# System locations
DESTINATION_DIR = '/am/dest/'  # where objects + derivatives are filed
TMP_DIR = '/am/tmp/'  # scratch space; emptied by DIPFetcher.cleanup()
# File to store UUIDs of already-downloaded DIPs
DOWNLOADED_DIPS_FILE = '/am/downloads.json'
# Archivematica configs
# NOTE(review): placeholder credentials/URL -- replace before deployment.
ARCHIVEMATICA_USERNAME = 'user'
ARCHIVEMATICA_API_KEY = 'apikey'
ARCHIVEMATICA_HEADERS = {"Authorization": "ApiKey {}:{}".format(ARCHIVEMATICA_USERNAME, ARCHIVEMATICA_API_KEY)}
ARCHIVEMATICA_BASEURL = 'http://archivematica-storage-service-url:port/api/v2/'
ARCHIVEMATICA_PIPELINE_UUID = 'pipeline-uuid'
logging.basicConfig(filename=LOG_FILE, format='%(asctime)s %(message)s', level=getattr(logging, LOG_LEVEL))
class ArchivematicaClientError(Exception):
    """Raised when the storage-service API yields an unusable response."""
class DIPFetcherError(Exception):
    """Raised for local setup/processing failures in DIPFetcher."""
class DIPFetcher():
    """Downloads not-yet-seen DIPs from the Archivematica storage service,
    builds image derivatives for their objects and files everything under
    DESTINATION_DIR.

    The UUIDs of already-processed DIPs are persisted in
    DOWNLOADED_DIPS_FILE so repeated runs (e.g. from cron) are idempotent.
    """
    def __init__(self):
        self.tmp = TMP_DIR
        self.dest = DESTINATION_DIR
        self.client = ArchivematicaClient()
        self.downloads = DOWNLOADED_DIPS_FILE
        # Fail fast if the working directories are missing; they are not
        # created automatically (permissions/mounts are site-specific).
        for required_dir in [self.tmp, self.dest]:  # renamed from `dir` (shadowed builtin)
            if not os.path.isdir(required_dir):
                raise DIPFetcherError("{} must be created".format(required_dir))
        if not os.path.isfile(self.downloads):
            raise DIPFetcherError("{} must be created".format(self.downloads))
        try:
            # Actually parse the tracking file: the original only open()ed it
            # (so JSONDecodeError could never fire and the handle leaked);
            # a corrupt file is now rejected here instead of inside run().
            with open(self.downloads, 'r') as f:
                json.load(f)
        except json.decoder.JSONDecodeError:
            raise DIPFetcherError("{} is not valid JSON".format(self.downloads))
    def run(self):
        """Download, derive and file every new DIP from our pipeline."""
        logging.info('*** Starting routine ***')
        package_count = 0
        # Load list of previously downloaded DIPs from external file
        with open(self.downloads, 'r') as f:
            downloaded_list = json.load(f)
        for package in self.client.retrieve_paged('file/', params={'package_type': 'DIP'}):
            if (package['origin_pipeline'].split('/')[-2] == ARCHIVEMATICA_PIPELINE_UUID) and (package['uuid'] not in downloaded_list):
                self.uuid = package['uuid']
                try:
                    self.download_package(package)
                    self.extract_objects(os.path.join(self.tmp, "{}.tar".format(self.uuid)), self.tmp)
                    downloaded_list.append(self.uuid)
                    self.make_derivatives()
                    self.move_files()
                    self.cleanup()
                    package_count += 1
                except Exception as e:
                    # Top-level boundary: one bad package must not abort the
                    # whole batch; it is logged and retried on the next run.
                    logging.error(e)
                    continue
        # Dump updated list of downloaded packages to external file
        with open(self.downloads, 'w') as f:
            json.dump(downloaded_list, f)
        logging.info('*** Routine complete. {} DIPs downloaded and processed ***'.format(package_count))
    def make_derivatives(self):
        """Run ImageMagick `convert` to build thumbnail derivatives for every
        extracted object.

        NOTE(review): commands run through the shell with file names
        interpolated, so paths containing spaces or shell metacharacters
        will break (or worse).  Left as-is because the backtick `echo`
        trick in the command strings relies on shell evaluation; confirm
        object names are trusted before reusing this elsewhere.
        """
        logging.debug("Creating derivatives for {}".format(self.uuid))
        for obj in self.objects:  # renamed from `object` (shadowed builtin)
            commands = (
                ('Thumbnail with a height of 100px', "convert {}[0] -thumbnail 'x100' `echo {}`".format(obj, "{}_thumb.jpg".format(os.path.splitext(obj)[0]))),
                ('Square thumbnail 75x75 px', "convert {}[0] -thumbnail '75x75^' -gravity 'Center' -crop '75x75+0+0' `echo {}`".format(obj, "{}_thumb75.jpg".format(os.path.splitext(obj)[0]))),
                ('Square thumbnail 300x300 px', "convert {}[0] -thumbnail '300x300^' -gravity 'Center' -crop '300x300+0+0' `echo {}`".format(obj, "{}_thumb300.jpg".format(os.path.splitext(obj)[0]))),
                ('File with proportions of 1.9w to 1h', "convert {}[0] -gravity 'North' -crop '100%x53%+0+0' `echo {}`".format(obj, "{}_thumbfb.jpg".format(os.path.splitext(obj)[0]))),
            )
            for cmd in commands:
                logging.debug(cmd[0])
                proc = subprocess.Popen(cmd[1], shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
                # Drain stdout line by line so the child cannot block on a
                # full pipe; each line is logged at debug level.
                while True:
                    next_line = proc.stdout.readline().decode("utf-8")
                    if not next_line:
                        break
                    logging.debug(next_line)
                ecode = proc.wait()
                if ecode != 0:
                    # The original `continue` here was a no-op (last statement
                    # of the loop body); surface the failure instead.
                    logging.warning("derivative command failed (exit %s): %s", ecode, cmd[0])
    def move_files(self):
        """Move each object and all of its derivatives into DESTINATION_DIR."""
        for obj in self.objects:
            # The glob picks up the original file plus every *_thumb* variant.
            for f in glob.glob("{}*".format(os.path.splitext(obj)[0])):
                logging.debug("Moving {} to {}".format(f, self.dest))
                os.rename(f, os.path.join(self.dest, os.path.basename(f)))
    def download_package(self, package_json):
        """Stream the DIP identified by self.uuid into TMP_DIR.

        The archive keeps the extension from `current_path`, defaulting to
        `.tar`.  Returns the (already closed) file object of the download.
        """
        logging.debug("Downloading {}".format(self.uuid))
        response = self.client.retrieve('/file/{}/download/'.format(self.uuid), stream=True)
        extension = os.path.splitext(package_json['current_path'])[1]
        if not extension:
            extension = '.tar'
        with open(os.path.join(self.tmp, '{}{}'.format(self.uuid, extension)), "wb") as package:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:  # skip keep-alive chunks
                    package.write(chunk)
        return package
    def extract_objects(self, archive, dest):
        """Extract `archive` into `dest`, flattening every member under an
        `objects/` directory directly into `dest`.

        Populates self.objects with the flattened paths and returns `dest`.
        Raises DIPFetcherError for unsupported archive extensions.
        """
        logging.debug("Extracting {}".format(self.uuid))
        self.objects = []
        ext = os.path.splitext(archive)[1]
        if ext == '.tar':
            # Context manager guarantees the tarball is closed even if a
            # rename below fails (the original leaked the handle on error).
            with tarfile.open(archive, 'r') as tf:
                tf.extractall(dest)
                for member in tf.members:
                    if 'objects/' in member.name:
                        os.rename(os.path.join(dest, member.name), os.path.join(dest, os.path.basename(member.name)))
                        self.objects.append(os.path.join(dest, os.path.basename(member.name)))
        else:
            raise DIPFetcherError("Unrecognized archive extension", ext)
        return dest
    def cleanup(self):
        """Empty TMP_DIR (files and directories) after a package is filed."""
        logging.debug("Cleaning up {}".format(self.tmp))
        for d in os.listdir(self.tmp):
            file_path = os.path.join(self.tmp, d)
            if os.path.isfile(file_path):
                os.remove(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
class ArchivematicaClient(object):
    """Thin wrapper around the Archivematica storage-service REST API.

    Credentials and the base URL are snapshotted from the module-level
    configuration constants at construction time.
    """
    def __init__(self):
        self.username = ARCHIVEMATICA_USERNAME
        self.api_key = ARCHIVEMATICA_API_KEY
        self.headers = ARCHIVEMATICA_HEADERS
        self.baseurl = ARCHIVEMATICA_BASEURL
    def _full_url(self, uri):
        # Join base URL and resource path without doubling the slash.
        return "/".join([self.baseurl.rstrip("/"), uri.lstrip("/")])
    def retrieve(self, uri, *args, **kwargs):
        """GET a single resource and return the response, raising
        ArchivematicaClientError on any non-OK status."""
        full_url = self._full_url(uri)
        response = requests.get(full_url, headers=self.headers, *args, **kwargs)
        if not response:
            raise ArchivematicaClientError("Could not return a valid response for {}".format(full_url))
        return response
    def retrieve_paged(self, uri, *args, limit=10, **kwargs):
        """Yield every object from a paginated endpoint, fetching `limit`
        items per request until the API reports no next page."""
        full_url = self._full_url(uri)
        query = {"limit": limit, "offset": 0}
        # Fold caller-supplied query parameters into ours.
        if "params" in kwargs:
            query.update(**kwargs.pop("params"))
        page = requests.get(full_url, params=query, headers=self.headers, **kwargs)
        if not page:
            raise ArchivematicaClientError("Authentication error while retrieving {}".format(full_url))
        payload = page.json()
        if not payload.get('meta'):
            raise ArchivematicaClientError("retrieve_paged doesn't know how to handle {}".format(full_url))
        while payload['meta']['offset'] <= payload['meta']['total_count']:
            yield from payload['objects']
            if not payload['meta']['next']:
                break
            query['offset'] += limit
            page = requests.get(full_url, params=query, headers=self.headers, **kwargs)
            payload = page.json()
# Entry point: run one fetch/derive/file cycle (the script is intended to be
# executed directly, e.g. from a crontab -- see header comment).
DIPFetcher().run()
| #! usr/bin/env python
# fetch_dip.py
# This script is designed to be run at regular intervals, for example from a crontab.
#
# Downloads a DIP from Archivematica to the TMP_DIR and extracts the tarball.
# Derivatives are created for each file in its objects directory, and they are moved,
# along with the original file, to the DESTINATION_DIR.
#
# Tested on Python 3.7.0. Requires Python requests library (http://docs.python-requests.org/en/master/)
# and Imagemagick with Ghostscript ()
import glob
import json
import logging
import os
import requests
import shutil
import subprocess
import tarfile
# Logging
LOG_FILE = 'fetch-dip-log.txt'
LOG_LEVEL = 'INFO'  # any level name understood by the logging module
# System locations
DESTINATION_DIR = '/am/dest/'  # where objects + derivatives are filed
TMP_DIR = '/am/tmp/'  # scratch space; emptied by DIPFetcher.cleanup()
# File to store UUIDs of already-downloaded DIPs
DOWNLOADED_DIPS_FILE = '/am/downloads.json'
# Archivematica configs
# NOTE(review): placeholder credentials/URL -- replace before deployment.
ARCHIVEMATICA_USERNAME = 'user'
ARCHIVEMATICA_API_KEY = 'apikey'
ARCHIVEMATICA_HEADERS = {"Authorization": "ApiKey {}:{}".format(ARCHIVEMATICA_USERNAME, ARCHIVEMATICA_API_KEY)}
ARCHIVEMATICA_BASEURL = 'http://archivematica-storage-service-url:port/api/v2/'
ARCHIVEMATICA_PIPELINE_UUID = 'pipeline-uuid'
logging.basicConfig(filename=LOG_FILE, format='%(asctime)s %(message)s', level=getattr(logging, LOG_LEVEL))
class ArchivematicaClientError(Exception):
    """Raised when the storage-service API yields an unusable response."""
class DIPFetcherError(Exception):
    """Raised for local setup/processing failures in DIPFetcher."""
class DIPFetcher():
    """Downloads not-yet-seen DIPs from the Archivematica storage service,
    builds image derivatives for their objects and files everything under
    DESTINATION_DIR.

    The UUIDs of already-processed DIPs are persisted in
    DOWNLOADED_DIPS_FILE so repeated runs (e.g. from cron) are idempotent.
    """
    def __init__(self):
        self.tmp = TMP_DIR
        self.dest = DESTINATION_DIR
        self.client = ArchivematicaClient()
        self.downloads = DOWNLOADED_DIPS_FILE
        # Fail fast if the working directories are missing; they are not
        # created automatically (permissions/mounts are site-specific).
        for required_dir in [self.tmp, self.dest]:  # renamed from `dir` (shadowed builtin)
            if not os.path.isdir(required_dir):
                raise DIPFetcherError("{} must be created".format(required_dir))
        if not os.path.isfile(self.downloads):
            raise DIPFetcherError("{} must be created".format(self.downloads))
        try:
            # Actually parse the tracking file: the original only open()ed it
            # (so JSONDecodeError could never fire and the handle leaked);
            # a corrupt file is now rejected here instead of inside run().
            with open(self.downloads, 'r') as f:
                json.load(f)
        except json.decoder.JSONDecodeError:
            raise DIPFetcherError("{} is not valid JSON".format(self.downloads))
    def run(self):
        """Download, derive and file every new DIP from our pipeline."""
        logging.info('*** Starting routine ***')
        package_count = 0
        # Load list of previously downloaded DIPs from external file
        with open(self.downloads, 'r') as f:
            downloaded_list = json.load(f)
        for package in self.client.retrieve_paged('file/', params={'package_type': 'DIP'}):
            if (package['origin_pipeline'].split('/')[-2] == ARCHIVEMATICA_PIPELINE_UUID) and (package['uuid'] not in downloaded_list):
                self.uuid = package['uuid']
                try:
                    self.download_package(package)
                    self.extract_objects(os.path.join(self.tmp, "{}.tar".format(self.uuid)), self.tmp)
                    downloaded_list.append(self.uuid)
                    self.make_derivatives()
                    self.move_files()
                    self.cleanup()
                    package_count += 1
                except Exception as e:
                    # Top-level boundary: one bad package must not abort the
                    # whole batch; it is logged and retried on the next run.
                    logging.error(e)
                    continue
        # Dump updated list of downloaded packages to external file
        with open(self.downloads, 'w') as f:
            json.dump(downloaded_list, f)
        logging.info('*** Routine complete. {} DIPs downloaded and processed ***'.format(package_count))
    def make_derivatives(self):
        """Run ImageMagick `convert` to build thumbnail derivatives for every
        extracted object.

        NOTE(review): commands run through the shell with file names
        interpolated, so paths containing spaces or shell metacharacters
        will break (or worse).  Left as-is because the backtick `echo`
        trick in the command strings relies on shell evaluation; confirm
        object names are trusted before reusing this elsewhere.
        """
        logging.debug("Creating derivatives for {}".format(self.uuid))
        for obj in self.objects:  # renamed from `object` (shadowed builtin)
            commands = (
                ('Thumbnail with a height of 100px', "convert {}[0] -thumbnail 'x100' `echo {}`".format(obj, "{}_thumb.jpg".format(os.path.splitext(obj)[0]))),
                ('Square thumbnail 75x75 px', "convert {}[0] -thumbnail '75x75^' -gravity 'Center' -crop '75x75+0+0' `echo {}`".format(obj, "{}_thumb75.jpg".format(os.path.splitext(obj)[0]))),
                ('Square thumbnail 300x300 px', "convert {}[0] -thumbnail '300x300^' -gravity 'Center' -crop '300x300+0+0' `echo {}`".format(obj, "{}_thumb300.jpg".format(os.path.splitext(obj)[0]))),
                ('File with proportions of 1.9w to 1h', "convert {}[0] -gravity 'North' -crop '100%x53%+0+0' `echo {}`".format(obj, "{}_thumbfb.jpg".format(os.path.splitext(obj)[0]))),
            )
            for cmd in commands:
                logging.debug(cmd[0])
                proc = subprocess.Popen(cmd[1], shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
                # Drain stdout line by line so the child cannot block on a
                # full pipe; each line is logged at debug level.
                while True:
                    next_line = proc.stdout.readline().decode("utf-8")
                    if not next_line:
                        break
                    logging.debug(next_line)
                ecode = proc.wait()
                if ecode != 0:
                    # The original `continue` here was a no-op (last statement
                    # of the loop body); surface the failure instead.
                    logging.warning("derivative command failed (exit %s): %s", ecode, cmd[0])
    def move_files(self):
        """Move each object and all of its derivatives into DESTINATION_DIR."""
        for obj in self.objects:
            # The glob picks up the original file plus every *_thumb* variant.
            for f in glob.glob("{}*".format(os.path.splitext(obj)[0])):
                logging.debug("Moving {} to {}".format(f, self.dest))
                os.rename(f, os.path.join(self.dest, os.path.basename(f)))
    def download_package(self, package_json):
        """Stream the DIP identified by self.uuid into TMP_DIR.

        The archive keeps the extension from `current_path`, defaulting to
        `.tar`.  Returns the (already closed) file object of the download.
        """
        logging.debug("Downloading {}".format(self.uuid))
        response = self.client.retrieve('/file/{}/download/'.format(self.uuid), stream=True)
        extension = os.path.splitext(package_json['current_path'])[1]
        if not extension:
            extension = '.tar'
        with open(os.path.join(self.tmp, '{}{}'.format(self.uuid, extension)), "wb") as package:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:  # skip keep-alive chunks
                    package.write(chunk)
        return package
    def extract_objects(self, archive, dest):
        """Extract `archive` into `dest`, flattening every member under an
        `objects/` directory directly into `dest`.

        Populates self.objects with the flattened paths and returns `dest`.
        Raises DIPFetcherError for unsupported archive extensions.
        """
        logging.debug("Extracting {}".format(self.uuid))
        self.objects = []
        ext = os.path.splitext(archive)[1]
        if ext == '.tar':
            # Context manager guarantees the tarball is closed even if a
            # rename below fails (the original leaked the handle on error).
            with tarfile.open(archive, 'r') as tf:
                tf.extractall(dest)
                for member in tf.members:
                    if 'objects/' in member.name:
                        os.rename(os.path.join(dest, member.name), os.path.join(dest, os.path.basename(member.name)))
                        self.objects.append(os.path.join(dest, os.path.basename(member.name)))
        else:
            raise DIPFetcherError("Unrecognized archive extension", ext)
        return dest
    def cleanup(self):
        """Empty TMP_DIR (files and directories) after a package is filed."""
        logging.debug("Cleaning up {}".format(self.tmp))
        for d in os.listdir(self.tmp):
            file_path = os.path.join(self.tmp, d)
            if os.path.isfile(file_path):
                os.remove(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
class ArchivematicaClient(object):
    """Thin wrapper around the Archivematica storage-service REST API.

    Credentials and the base URL are snapshotted from the module-level
    configuration constants at construction time.
    """
    def __init__(self):
        self.username = ARCHIVEMATICA_USERNAME
        self.api_key = ARCHIVEMATICA_API_KEY
        self.headers = ARCHIVEMATICA_HEADERS
        self.baseurl = ARCHIVEMATICA_BASEURL
    def _full_url(self, uri):
        # Join base URL and resource path without doubling the slash.
        return "/".join([self.baseurl.rstrip("/"), uri.lstrip("/")])
    def retrieve(self, uri, *args, **kwargs):
        """GET a single resource and return the response, raising
        ArchivematicaClientError on any non-OK status."""
        full_url = self._full_url(uri)
        response = requests.get(full_url, headers=self.headers, *args, **kwargs)
        if not response:
            raise ArchivematicaClientError("Could not return a valid response for {}".format(full_url))
        return response
    def retrieve_paged(self, uri, *args, limit=10, **kwargs):
        """Yield every object from a paginated endpoint, fetching `limit`
        items per request until the API reports no next page."""
        full_url = self._full_url(uri)
        query = {"limit": limit, "offset": 0}
        # Fold caller-supplied query parameters into ours.
        if "params" in kwargs:
            query.update(**kwargs.pop("params"))
        page = requests.get(full_url, params=query, headers=self.headers, **kwargs)
        if not page:
            raise ArchivematicaClientError("Authentication error while retrieving {}".format(full_url))
        payload = page.json()
        if not payload.get('meta'):
            raise ArchivematicaClientError("retrieve_paged doesn't know how to handle {}".format(full_url))
        while payload['meta']['offset'] <= payload['meta']['total_count']:
            yield from payload['objects']
            if not payload['meta']['next']:
                break
            query['offset'] += limit
            page = requests.get(full_url, params=query, headers=self.headers, **kwargs)
            payload = page.json()
# Entry point: run one fetch/derive/file cycle (the script is intended to be
# executed directly, e.g. from a crontab -- see header comment).
DIPFetcher().run()
| en | 0.871718 | #! usr/bin/env python # fetch_dip.py # This script is designed to be run at regular intervals, for example from a crontab. # # Downloads a DIP from Archivematica to the TMP_DIR and extracts the tarball. # Derivatives are created for each file in its objects directory, and they are moved, # along with the original file, to the DESTINATION_DIR. # # Tested on Python 3.7.0. Requires Python requests library (http://docs.python-requests.org/en/master/) # and Imagemagick with Ghostscript () # Logging # System locations # File to store UUIDs of already-downloaded DIPs # Archivematica configs # Load list of previously downloaded DIPs from external file # Dump updated list of downloaded packages to external file | 2.461854 | 2 |
stlearn/spatials/trajectory/set_root.py | duypham2108/stLearn | 67 | 6622222 | from anndata import AnnData
from typing import Optional, Union
import numpy as np
from stlearn.spatials.trajectory.utils import _correlation_test_helper
def set_root(adata: AnnData, use_label: str, cluster: str, use_raw: bool = False):
    """\
    Automatically set the root index.

    The observation with the maximal CytoTRACE-style score within the
    chosen cluster is selected as root.

    Parameters
    ----------
    adata
        Annotated data matrix.
    use_label
        Use label result of clustering method.
    cluster
        Choose cluster to use as root
    use_raw
        Use the raw layer
    Returns
    -------
    Root index (position of the root observation in ``adata.obs_names``).
    """
    # Work on a copy so the scoring below never mutates the caller's object.
    tmp_adata = adata.copy()
    # Subset the data based on the chosen cluster
    tmp_adata = tmp_adata[
        tmp_adata.obs[tmp_adata.obs[use_label] == str(cluster)].index, :
    ]
    if use_raw:  # idiomatic truth test (was `use_raw == True`)
        tmp_adata = tmp_adata.raw.to_adata()
    # Borrow from Cellrank to calculate CytoTrace score
    num_exp_genes = np.array((tmp_adata.X > 0).sum(axis=1)).reshape(-1)
    gene_corr, _, _, _ = _correlation_test_helper(tmp_adata.X.T, num_exp_genes[:, None])
    tmp_adata.var["gene_corr"] = gene_corr
    # Use top 1000 genes rather than top 200 genes
    top_1000 = tmp_adata.var.sort_values(by="gene_corr", ascending=False).index[:1000]
    tmp_adata.var["correlates"] = False
    tmp_adata.var.loc[top_1000, "correlates"] = True
    corr_mask = tmp_adata.var["correlates"]
    imputed_exp = tmp_adata[:, corr_mask].X
    # Scale ct score: mean expression over correlated genes, rescaled to [0, 1]
    cytotrace_score = np.mean(imputed_exp, axis=1)
    cytotrace_score -= np.min(cytotrace_score)
    cytotrace_score /= np.max(cytotrace_score)
    # Get the root index: locate the top-scoring observation of the cluster
    # subset back in the ORIGINAL (unsubset) AnnData via its obs name.
    local_index = np.argmax(cytotrace_score)
    obs_name = tmp_adata.obs.iloc[local_index].name
    return np.where(adata.obs_names == obs_name)[0][0]
| from anndata import AnnData
from typing import Optional, Union
import numpy as np
from stlearn.spatials.trajectory.utils import _correlation_test_helper
def set_root(adata: AnnData, use_label: str, cluster: str, use_raw: bool = False):
    """\
    Automatically set the root index.

    The observation with the maximal CytoTRACE-style score within the
    chosen cluster is selected as root.

    Parameters
    ----------
    adata
        Annotated data matrix.
    use_label
        Use label result of clustering method.
    cluster
        Choose cluster to use as root
    use_raw
        Use the raw layer
    Returns
    -------
    Root index (position of the root observation in ``adata.obs_names``).
    """
    # Work on a copy so the scoring below never mutates the caller's object.
    tmp_adata = adata.copy()
    # Subset the data based on the chosen cluster
    tmp_adata = tmp_adata[
        tmp_adata.obs[tmp_adata.obs[use_label] == str(cluster)].index, :
    ]
    if use_raw:  # idiomatic truth test (was `use_raw == True`)
        tmp_adata = tmp_adata.raw.to_adata()
    # Borrow from Cellrank to calculate CytoTrace score
    num_exp_genes = np.array((tmp_adata.X > 0).sum(axis=1)).reshape(-1)
    gene_corr, _, _, _ = _correlation_test_helper(tmp_adata.X.T, num_exp_genes[:, None])
    tmp_adata.var["gene_corr"] = gene_corr
    # Use top 1000 genes rather than top 200 genes
    top_1000 = tmp_adata.var.sort_values(by="gene_corr", ascending=False).index[:1000]
    tmp_adata.var["correlates"] = False
    tmp_adata.var.loc[top_1000, "correlates"] = True
    corr_mask = tmp_adata.var["correlates"]
    imputed_exp = tmp_adata[:, corr_mask].X
    # Scale ct score: mean expression over correlated genes, rescaled to [0, 1]
    cytotrace_score = np.mean(imputed_exp, axis=1)
    cytotrace_score -= np.min(cytotrace_score)
    cytotrace_score /= np.max(cytotrace_score)
    # Get the root index: locate the top-scoring observation of the cluster
    # subset back in the ORIGINAL (unsubset) AnnData via its obs name.
    local_index = np.argmax(cytotrace_score)
    obs_name = tmp_adata.obs.iloc[local_index].name
    return np.where(adata.obs_names == obs_name)[0][0]
| en | 0.549744 | \ Automatically set the root index. Parameters ---------- adata Annotated data matrix. use_label Use label result of clustering method. cluster Choose cluster to use as root use_raw Use the raw layer Returns ------- Root index # Subset the data based on the chosen cluster # Borrow from Cellrank to calculate CytoTrace score # Use top 1000 genes rather than top 200 genes # Scale ct score # Get the root index | 2.520762 | 3 |
test_repo/a/a.py | antoniopugliese/module-structure | 0 | 6622223 | """
This is module a.
"""
from ..b import b_func
a = "this is file 'a'"
def a_func():
inside_a_func = 'inside a_func()'
b_func()
print("a")
| """
This is module a.
"""
from ..b import b_func
a = "this is file 'a'"
def a_func():
inside_a_func = 'inside a_func()'
b_func()
print("a")
| en | 0.221108 | This is module a. | 3.218747 | 3 |
particletracking/statistics/order_6.py | JamesDownsLab/particletracking | 0 | 6622224 | <filename>particletracking/statistics/order_6.py
import numpy as np
from scipy import spatial
def order_process(features):
    """Append complex psi_6 order-parameter columns to *features*.

    *features* is expected to be a DataFrame-like object with 'x', 'y' and
    'r' columns; two float32 columns with the real and imaginary parts of
    the six-fold order parameter are added in place and the (mutated)
    object is returned.
    """
    coords = features[['x', 'y', 'r']].values
    psi6 = order_and_neighbors(coords[:, :2])
    features['order_r_nearest_6'] = np.real(psi6).astype('float32')
    features['order_i_nearest_6'] = np.imag(psi6).astype('float32')
    return features
def order_and_neighbors(points):
    """Six-fold bond-orientational order parameter for each 2-D point.

    For every point the six nearest neighbours are found with a k-d tree
    and psi_6 = (1/6) * sum_k exp(6j * theta_k) is computed over the bond
    angles theta_k.  Returns a complex array with one entry per point.
    """
    kdtree = spatial.cKDTree(points)
    # k=7 because the closest hit of a self-query is the point itself;
    # column 0 is therefore dropped below.
    _, idx = kdtree.query(points, 7)
    rel = points[idx[:, 1:], :] - points[:, np.newaxis, :]
    theta = np.angle(rel[:, :, 0] + 1j * rel[:, :, 1])
    return np.exp(6j * theta).sum(axis=1) / 6
| <filename>particletracking/statistics/order_6.py
import numpy as np
from scipy import spatial
def order_process(features):
    """Append complex psi_6 order-parameter columns to *features*.

    *features* is expected to be a DataFrame-like object with 'x', 'y' and
    'r' columns; two float32 columns with the real and imaginary parts of
    the six-fold order parameter are added in place and the (mutated)
    object is returned.
    """
    coords = features[['x', 'y', 'r']].values
    psi6 = order_and_neighbors(coords[:, :2])
    features['order_r_nearest_6'] = np.real(psi6).astype('float32')
    features['order_i_nearest_6'] = np.imag(psi6).astype('float32')
    return features
def order_and_neighbors(points):
    """Six-fold bond-orientational order parameter for each 2-D point.

    For every point the six nearest neighbours are found with a k-d tree
    and psi_6 = (1/6) * sum_k exp(6j * theta_k) is computed over the bond
    angles theta_k.  Returns a complex array with one entry per point.
    """
    kdtree = spatial.cKDTree(points)
    # k=7 because the closest hit of a self-query is the point itself;
    # column 0 is therefore dropped below.
    _, idx = kdtree.query(points, 7)
    rel = points[idx[:, 1:], :] - points[:, np.newaxis, :]
    theta = np.angle(rel[:, :, 0] + 1j * rel[:, :, 1])
    return np.exp(6j * theta).sum(axis=1) / 6
| none | 1 | 2.228335 | 2 | |
notebooks/plot_3freq.py | nedlrichards/canope_gw_scatter | 0 | 6622225 | import numpy as np
import scipy.signal as sig
import scipy.io as load_mat
from math import pi
import matplotlib.pyplot as plt
from src import xponder
#plt.ion()
# For every hour of day 253, plot the three transponder channels against each
# other (labels 11, 115 and 12 in the variable names -- presumably 11, 11.5
# and 12 kHz; confirm against src.xponder) and save one figure per hour.
xp = xponder()
for hr in range(24):
    load_file = 'nav_253' + f'{hr:02}' + '5458.nc'
    try:
        p_raw, p_raw_ft = xp.load_raw(load_file)
    except Exception:
        # Data file for this hour is missing/unreadable: skip it.  Narrowed
        # from a bare `except:` so Ctrl-C / SystemExit are not swallowed.
        continue
    p_filt_11 = xp.filter_raw(0, p_raw_ft)
    p_win_11 = xp.window_sb(p_filt_11)   # NOTE(review): p_win_* are never
    p_filt_115 = xp.filter_raw(1, p_raw_ft)   # used below; kept in case
    p_win_115 = xp.window_sb(p_filt_115)      # window_sb has side effects --
    p_filt_12 = xp.filter_raw(2, p_raw_ft)    # confirm before removing.
    p_win_12 = xp.window_sb(p_filt_12)
    fig, ax = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(6.5, 6))
    # One panel per channel: the active channel in colour, the other two in
    # grey and offset by -24 dB for visual separation.
    ax[0].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_11)).T, 'C0')
    ax[0].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_115)).T - 24, '0.4')
    ax[0].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_12)).T - 24, '0.4')
    ax[1].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_11)).T - 24, '0.4')
    ax[1].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_115)).T, 'C1')
    ax[1].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_12)).T - 24, '0.4')
    ax[2].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_11)).T - 24, '0.4')
    ax[2].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_115)).T - 24, '0.4')
    ax[2].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_12)).T, 'C2')
    ax[0].set_ylim(110, 160)
    ax[0].set_xlim(7.5, 9.0)
    fig.savefig('notebooks/figures/' + load_file.split('.')[0] + '.png', dpi=300)
    plt.close(fig)  # free the figure so 24 iterations don't accumulate
| import numpy as np
import scipy.signal as sig
import scipy.io as load_mat
from math import pi
import matplotlib.pyplot as plt
from src import xponder
#plt.ion()
# For every hour of day 253, plot the three transponder channels against each
# other (labels 11, 115 and 12 in the variable names -- presumably 11, 11.5
# and 12 kHz; confirm against src.xponder) and save one figure per hour.
xp = xponder()
for hr in range(24):
    load_file = 'nav_253' + f'{hr:02}' + '5458.nc'
    try:
        p_raw, p_raw_ft = xp.load_raw(load_file)
    except Exception:
        # Data file for this hour is missing/unreadable: skip it.  Narrowed
        # from a bare `except:` so Ctrl-C / SystemExit are not swallowed.
        continue
    p_filt_11 = xp.filter_raw(0, p_raw_ft)
    p_win_11 = xp.window_sb(p_filt_11)   # NOTE(review): p_win_* are never
    p_filt_115 = xp.filter_raw(1, p_raw_ft)   # used below; kept in case
    p_win_115 = xp.window_sb(p_filt_115)      # window_sb has side effects --
    p_filt_12 = xp.filter_raw(2, p_raw_ft)    # confirm before removing.
    p_win_12 = xp.window_sb(p_filt_12)
    fig, ax = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(6.5, 6))
    # One panel per channel: the active channel in colour, the other two in
    # grey and offset by -24 dB for visual separation.
    ax[0].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_11)).T, 'C0')
    ax[0].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_115)).T - 24, '0.4')
    ax[0].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_12)).T - 24, '0.4')
    ax[1].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_11)).T - 24, '0.4')
    ax[1].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_115)).T, 'C1')
    ax[1].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_12)).T - 24, '0.4')
    ax[2].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_11)).T - 24, '0.4')
    ax[2].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_115)).T - 24, '0.4')
    ax[2].plot(xp.t_a_filt, 20 * np.log10(np.abs(p_filt_12)).T, 'C2')
    ax[0].set_ylim(110, 160)
    ax[0].set_xlim(7.5, 9.0)
    fig.savefig('notebooks/figures/' + load_file.split('.')[0] + '.png', dpi=300)
    plt.close(fig)  # free the figure so 24 iterations don't accumulate
| ru | 0.419184 | #plt.ion() | 2.002217 | 2 |
backend/server/tests/wrapper_test/mock_fixture.py | FlickerSoul/Graphery | 5 | 6622226 | <filename>backend/server/tests/wrapper_test/mock_fixture.py
import os
import pathlib
from uuid import UUID
import pytest
from django.conf import settings
from django.core.files import File
from backend.model.TutorialRelatedModel import Category, Tutorial, Graph, GraphPriority, Code, ExecResultJson, Uploads
from backend.model.UserModel import User, ROLES
@pytest.fixture(scope='session')
def stored_mock_user(django_db_setup, django_db_blocker):
    """Session-scoped AUTHOR user; created once and kept for the whole run."""
    with django_db_blocker.unblock():
        u = User.objects.create(**{
            'id': UUID('96e65d54-8daa-4ba0-bf3a-1169acc81b59'),
            'username': 'mock_user',
            'email': '<EMAIL>',
            'password': 'password', # plain value; the real password field stores an encrypted version of it
            'first_name': 'mo',
            'last_name': 'ck',
            'role': ROLES.AUTHOR,
        })
    return u
@pytest.fixture()
def one_time_mock_user(django_db_setup, django_db_blocker):
    """Function-scoped VISITOR user; deleted again after the test finishes."""
    with django_db_blocker.unblock():
        u = User.objects.create(**{
            'id': UUID('c3ab4052-4188-404b-a1a5-1dc7ce5112f7'),
            'username': 'one_time_user',
            'email': '<EMAIL>',
            'password': 'password', # plain value; the real password field stores an encrypted version of it
            'first_name': 'one',
            'last_name': 'time',
            'role': ROLES.VISITOR,
        })
    yield u
    # Teardown: remove the row so each test starts from a clean table.
    with django_db_blocker.unblock():
        u.delete()
@pytest.fixture()
def temp_mock_user():
    """Unsaved in-memory User instance (no DB access).

    NOTE(review): reuses the same UUID as ``stored_mock_user`` — confirm this
    duplication is intentional (e.g. for equality comparisons in tests).
    """
    return User(**{
        'id': UUID('96e65d54-8daa-4ba0-bf3a-1169acc81b59'),
        'username': 'mock_user',
        'email': '<EMAIL>',
        'password': 'password', # plain value; the real password field stores an encrypted version of it
        'first_name': 'mo',
        'last_name': 'ck',
        'role': ROLES.AUTHOR,
    })
@pytest.fixture(scope='session')
def stored_mock_category(django_db_setup, django_db_blocker):
    """Session-scoped published Category shared by the tutorial/graph fixtures."""
    with django_db_blocker.unblock():
        c = Category.objects.create(**{
            'id': UUID('a58912ae-0343-4827-9dc1-b8518faf13ff'),
            'category': 'mock_category',
            'is_published': True
        })
    return c
@pytest.fixture()
def one_time_mock_category(django_db_setup, django_db_blocker):
    """Function-scoped published Category; deleted after the test."""
    with django_db_blocker.unblock():
        c = Category.objects.create(**{
            'id': UUID('c7b36800-f84f-4b3b-9077-6b8d389445af'),
            'category': 'one_time_mock_category',
            'is_published': True
        })
    yield c
    with django_db_blocker.unblock():
        c.delete()
@pytest.fixture()
def temp_mock_category():
    """Unsaved in-memory Category (no DB access).

    NOTE(review): reuses the same UUID as ``stored_mock_category`` — confirm
    the duplication is intentional.
    """
    return Category(**{
        'id': UUID('a58912ae-0343-4827-9dc1-b8518faf13ff'),
        'category': 'mock_category',
        'is_published': True
    })
@pytest.fixture(scope='session')
def stored_mock_tutorial_anchor(django_db_setup, django_db_blocker, stored_mock_category):
    """Session-scoped Tutorial anchor linked to the stored mock category."""
    with django_db_blocker.unblock():
        t = Tutorial.objects.create(**{
            'id': UUID('b0015ac8-5376-4b99-b649-6f25771dbd91'),
            'url': 'mock-test-tutorial',
            'name': 'mock test tutorial',
            'section': 1,
            'level': 210,
            'is_published': True
        })
        t.categories.add(stored_mock_category)
    return t
@pytest.fixture()
def one_time_mock_tutorial_anchor(django_db_setup, django_db_blocker, stored_mock_category):
    """Function-scoped Tutorial anchor; deleted after the test."""
    with django_db_blocker.unblock():
        t = Tutorial.objects.create(**{
            'id': UUID('98158c8f-9e57-4222-bd22-834863cfbeb6'),
            'url': 'one-time-mock-test-tutorial',
            'name': 'one time mock test tutorial',
            'section': 1,
            'level': 212,
            'is_published': True
        })
        t.categories.add(stored_mock_category)
    yield t
    with django_db_blocker.unblock():
        t.delete()
@pytest.fixture(scope='session')
def stored_mock_graph(django_db_setup, django_db_blocker,
                      stored_mock_user, stored_mock_category, stored_mock_tutorial_anchor):
    """Session-scoped MAIN-priority Graph wired to the stored user/category/tutorial."""
    with django_db_blocker.unblock():
        g = Graph.objects.create(**{
            'id': UUID('6a831c16-903d-47d8-94ac-61d8bd419bd3'),
            'url': 'make-new-model-test-graph',
            'name': 'make nem model test graph',
            'priority': GraphPriority.MAIN,
            'cyjs': {'json': 'hello'},
            'is_published': True,
        })
        # set() replaces the full m2m relation with exactly these rows.
        g.categories.set([stored_mock_category])
        g.authors.set([stored_mock_user])
        g.tutorials.set([stored_mock_tutorial_anchor])
    return g
@pytest.fixture()
def one_time_mock_graph(django_db_setup, django_db_blocker,
                        stored_mock_user, stored_mock_category, stored_mock_tutorial_anchor):
    """Function-scoped MAIN-priority Graph; deleted after the test."""
    with django_db_blocker.unblock():
        g = Graph.objects.create(**{
            'id': UUID('e4fa4bdc-6189-4cbc-bc7a-ab6767100cfa'),
            'url': 'make-one-time-model-test-graph',
            'name': 'make one time model test graph',
            'priority': GraphPriority.MAIN,
            'cyjs': {'json': 'hello'},
            'is_published': True,
        })
        g.categories.set([stored_mock_category])
        g.authors.set([stored_mock_user])
        g.tutorials.set([stored_mock_tutorial_anchor])
    yield g
    with django_db_blocker.unblock():
        g.delete()
@pytest.fixture(scope='session')
def stored_mock_code(django_db_setup, django_db_blocker, stored_mock_tutorial_anchor):
    """Session-scoped Code snippet attached to the stored tutorial anchor."""
    with django_db_blocker.unblock():
        return Code.objects.create(**{
            'id': UUID('24d137dc-5cc2-4ace-b71c-e5b9386a2281'),
            'name': 'stored mock code',
            'tutorial': stored_mock_tutorial_anchor,
            'code': 'def hello(): \tprint("hello world!")'
        })
@pytest.fixture()
def one_time_mock_code(django_db_setup, django_db_blocker, one_time_mock_tutorial_anchor):
    """Function-scoped Code snippet on the one-time tutorial; deleted after the test."""
    with django_db_blocker.unblock():
        c = Code.objects.create(**{
            'id': UUID('8ceb0d01-cd29-4fe9-a37b-758b8e6d943c'),
            'name': 'one time mock code',
            'tutorial': one_time_mock_tutorial_anchor,
            'code': 'def hello(): \tprint("hello world!!!")'
        })
    yield c
    with django_db_blocker.unblock():
        c.delete()
@pytest.fixture(scope='session')
def stored_mock_execution_result(django_db_setup, django_db_blocker,
                                 stored_mock_code, stored_mock_graph):
    """Session-scoped ExecResultJson linking the stored code and graph."""
    with django_db_blocker.unblock():
        return ExecResultJson.objects.create(
            **{
                'id': UUID('1b9952bf-fd26-4189-b657-8a2a982e9c23'),
                'code': stored_mock_code,
                'graph': stored_mock_graph,
                'json': {'object': 'hello world'},
                # breakpoints field deliberately left unset for now
            }
        )
@pytest.fixture()
def one_time_mock_execution_result(django_db_setup, django_db_blocker,
                                   one_time_mock_code, one_time_mock_graph):
    """Function-scoped ExecResultJson on the one-time code/graph; deleted after the test."""
    with django_db_blocker.unblock():
        e = ExecResultJson.objects.create(
            **{
                'id': UUID('fd82bcc0-0886-4a56-88b8-1d3c1d110bd4'),
                'code': one_time_mock_code,
                'graph': one_time_mock_graph,
                'json': {'object': 'hello world!'},
                # breakpoints field deliberately left unset for now
            }
        )
    yield e
    with django_db_blocker.unblock():
        e.delete()
_FILE_PATH_ROOT = pathlib.Path(settings.MEDIA_ROOT)
_STORED_TEST_FILE = _FILE_PATH_ROOT / 'stored_temp'
_ONE_TIME_TEST_FILE = _FILE_PATH_ROOT / 'one_time_temp'
_ADD_ON_TEST_FILE = _FILE_PATH_ROOT / 'add_on_temp'
@pytest.fixture(scope='session')
def stored_test_file():
    """Session-scoped on-disk temp file wrapped in a Django ``File``.

    The handle stays open while tests use it and the file is removed at
    session end.
    NOTE(review): after ``write`` the handle is positioned at EOF — consumers
    that read the content probably need to ``seek(0)`` first; confirm.
    """
    with open(_STORED_TEST_FILE, 'w+') as test_file:
        test_file.write('temp')
        yield File(test_file)
    os.remove(_STORED_TEST_FILE)
@pytest.fixture()
def one_time_test_file():
    """Function-scoped on-disk temp file wrapped in a Django ``File``.

    NOTE(review): handle is at EOF after the write — readers may need
    ``seek(0)``; confirm against consumers.
    """
    with open(_ONE_TIME_TEST_FILE, 'w+') as test_file:
        test_file.write('temp')
        yield File(test_file)
    os.remove(_ONE_TIME_TEST_FILE)
@pytest.fixture()
def add_on_test_file():
    """Function-scoped secondary temp file, for tests needing a second upload."""
    with open(_ADD_ON_TEST_FILE, 'w+') as test_file:
        test_file.write('temp')
        yield File(test_file)
    os.remove(_ADD_ON_TEST_FILE)
@pytest.fixture(scope='session')
def stored_uploads(django_db_setup, django_db_blocker, stored_test_file):
    """Session-scoped Uploads row backed by the stored test file."""
    with django_db_blocker.unblock():
        return Uploads.objects.create(
            id=UUID('d4c31662-2de5-44c3-af1d-bad04360dab1'),
            file=stored_test_file,
            alias='uploads store testing'
        )
@pytest.fixture()
def one_time_uploads(django_db_setup, django_db_blocker, one_time_test_file):
    """Function-scoped Uploads row backed by the one-time test file; deleted after."""
    with django_db_blocker.unblock():
        u = Uploads.objects.create(
            id=UUID('b6150a4e-f8bb-4f46-932d-9cb80b1279b5'),
            file=one_time_test_file,
            alias='uploads temp testing'
        )
    yield u
    with django_db_blocker.unblock():
        u.delete()
| <filename>backend/server/tests/wrapper_test/mock_fixture.py
import os
import pathlib
from uuid import UUID
import pytest
from django.conf import settings
from django.core.files import File
from backend.model.TutorialRelatedModel import Category, Tutorial, Graph, GraphPriority, Code, ExecResultJson, Uploads
from backend.model.UserModel import User, ROLES
@pytest.fixture(scope='session')
def stored_mock_user(django_db_setup, django_db_blocker):
with django_db_blocker.unblock():
u = User.objects.create(**{
'id': UUID('96e65d54-8daa-4ba0-bf3a-1169acc81b59'),
'username': 'mock_user',
'email': '<EMAIL>',
'password': 'password', # omitted since the password field is a encrypted version of it
'first_name': 'mo',
'last_name': 'ck',
'role': ROLES.AUTHOR,
})
return u
@pytest.fixture()
def one_time_mock_user(django_db_setup, django_db_blocker):
with django_db_blocker.unblock():
u = User.objects.create(**{
'id': UUID('c3ab4052-4188-404b-a1a5-1dc7ce5112f7'),
'username': 'one_time_user',
'email': '<EMAIL>',
'password': 'password', # omitted since the password field is a encrypted version of it
'first_name': 'one',
'last_name': 'time',
'role': ROLES.VISITOR,
})
yield u
with django_db_blocker.unblock():
u.delete()
@pytest.fixture()
def temp_mock_user():
return User(**{
'id': UUID('96e65d54-8daa-4ba0-bf3a-1169acc81b59'),
'username': 'mock_user',
'email': '<EMAIL>',
'password': 'password', # omitted since the password field is a encrypted version of it
'first_name': 'mo',
'last_name': 'ck',
'role': ROLES.AUTHOR,
})
@pytest.fixture(scope='session')
def stored_mock_category(django_db_setup, django_db_blocker):
with django_db_blocker.unblock():
c = Category.objects.create(**{
'id': UUID('a58912ae-0343-4827-9dc1-b8518faf13ff'),
'category': 'mock_category',
'is_published': True
})
return c
@pytest.fixture()
def one_time_mock_category(django_db_setup, django_db_blocker):
with django_db_blocker.unblock():
c = Category.objects.create(**{
'id': UUID('c7b36800-f84f-4b3b-9077-6b8d389445af'),
'category': 'one_time_mock_category',
'is_published': True
})
yield c
with django_db_blocker.unblock():
c.delete()
@pytest.fixture()
def temp_mock_category():
return Category(**{
'id': UUID('a58912ae-0343-4827-9dc1-b8518faf13ff'),
'category': 'mock_category',
'is_published': True
})
@pytest.fixture(scope='session')
def stored_mock_tutorial_anchor(django_db_setup, django_db_blocker, stored_mock_category):
with django_db_blocker.unblock():
t = Tutorial.objects.create(**{
'id': UUID('b0015ac8-5376-4b99-b649-6f25771dbd91'),
'url': 'mock-test-tutorial',
'name': 'mock test tutorial',
'section': 1,
'level': 210,
'is_published': True
})
t.categories.add(stored_mock_category)
return t
@pytest.fixture()
def one_time_mock_tutorial_anchor(django_db_setup, django_db_blocker, stored_mock_category):
with django_db_blocker.unblock():
t = Tutorial.objects.create(**{
'id': UUID('98158c8f-9e57-4222-bd22-834863cfbeb6'),
'url': 'one-time-mock-test-tutorial',
'name': 'one time mock test tutorial',
'section': 1,
'level': 212,
'is_published': True
})
t.categories.add(stored_mock_category)
yield t
with django_db_blocker.unblock():
t.delete()
@pytest.fixture(scope='session')
def stored_mock_graph(django_db_setup, django_db_blocker,
stored_mock_user, stored_mock_category, stored_mock_tutorial_anchor):
with django_db_blocker.unblock():
g = Graph.objects.create(**{
'id': UUID('6a831c16-903d-47d8-94ac-61d8bd419bd3'),
'url': 'make-new-model-test-graph',
'name': 'make nem model test graph',
'priority': GraphPriority.MAIN,
'cyjs': {'json': 'hello'},
'is_published': True,
})
g.categories.set([stored_mock_category])
g.authors.set([stored_mock_user])
g.tutorials.set([stored_mock_tutorial_anchor])
return g
@pytest.fixture()
def one_time_mock_graph(django_db_setup, django_db_blocker,
stored_mock_user, stored_mock_category, stored_mock_tutorial_anchor):
with django_db_blocker.unblock():
g = Graph.objects.create(**{
'id': UUID('e4fa4bdc-6189-4cbc-bc7a-ab6767100cfa'),
'url': 'make-one-time-model-test-graph',
'name': 'make one time model test graph',
'priority': GraphPriority.MAIN,
'cyjs': {'json': 'hello'},
'is_published': True,
})
g.categories.set([stored_mock_category])
g.authors.set([stored_mock_user])
g.tutorials.set([stored_mock_tutorial_anchor])
yield g
with django_db_blocker.unblock():
g.delete()
@pytest.fixture(scope='session')
def stored_mock_code(django_db_setup, django_db_blocker, stored_mock_tutorial_anchor):
with django_db_blocker.unblock():
return Code.objects.create(**{
'id': UUID('24d137dc-5cc2-4ace-b71c-e5b9386a2281'),
'name': 'stored mock code',
'tutorial': stored_mock_tutorial_anchor,
'code': 'def hello(): \tprint("hello world!")'
})
@pytest.fixture()
def one_time_mock_code(django_db_setup, django_db_blocker, one_time_mock_tutorial_anchor):
with django_db_blocker.unblock():
c = Code.objects.create(**{
'id': UUID('8ceb0d01-cd29-4fe9-a37b-758b8e6d943c'),
'name': 'one time mock code',
'tutorial': one_time_mock_tutorial_anchor,
'code': 'def hello(): \tprint("hello world!!!")'
})
yield c
with django_db_blocker.unblock():
c.delete()
@pytest.fixture(scope='session')
def stored_mock_execution_result(django_db_setup, django_db_blocker,
stored_mock_code, stored_mock_graph):
with django_db_blocker.unblock():
return ExecResultJson.objects.create(
**{
'id': UUID('1b9952bf-fd26-4189-b657-8a2a982e9c23'),
'code': stored_mock_code,
'graph': stored_mock_graph,
'json': {'object': 'hello world'},
# no breakpoints for now
}
)
@pytest.fixture()
def one_time_mock_execution_result(django_db_setup, django_db_blocker,
one_time_mock_code, one_time_mock_graph):
with django_db_blocker.unblock():
e = ExecResultJson.objects.create(
**{
'id': UUID('fd82bcc0-0886-4a56-88b8-1d3c1d110bd4'),
'code': one_time_mock_code,
'graph': one_time_mock_graph,
'json': {'object': 'hello world!'},
# no breakpoints for now
}
)
yield e
with django_db_blocker.unblock():
e.delete()
_FILE_PATH_ROOT = pathlib.Path(settings.MEDIA_ROOT)
_STORED_TEST_FILE = _FILE_PATH_ROOT / 'stored_temp'
_ONE_TIME_TEST_FILE = _FILE_PATH_ROOT / 'one_time_temp'
_ADD_ON_TEST_FILE = _FILE_PATH_ROOT / 'add_on_temp'
@pytest.fixture(scope='session')
def stored_test_file():
with open(_STORED_TEST_FILE, 'w+') as test_file:
test_file.write('temp')
yield File(test_file)
os.remove(_STORED_TEST_FILE)
@pytest.fixture()
def one_time_test_file():
with open(_ONE_TIME_TEST_FILE, 'w+') as test_file:
test_file.write('temp')
yield File(test_file)
os.remove(_ONE_TIME_TEST_FILE)
@pytest.fixture()
def add_on_test_file():
with open(_ADD_ON_TEST_FILE, 'w+') as test_file:
test_file.write('temp')
yield File(test_file)
os.remove(_ADD_ON_TEST_FILE)
@pytest.fixture(scope='session')
def stored_uploads(django_db_setup, django_db_blocker, stored_test_file):
with django_db_blocker.unblock():
return Uploads.objects.create(
id=UUID('d4c31662-2de5-44c3-af1d-bad04360dab1'),
file=stored_test_file,
alias='uploads store testing'
)
@pytest.fixture()
def one_time_uploads(django_db_setup, django_db_blocker, one_time_test_file):
with django_db_blocker.unblock():
u = Uploads.objects.create(
id=UUID('b6150a4e-f8bb-4f46-932d-9cb80b1279b5'),
file=one_time_test_file,
alias='uploads temp testing'
)
yield u
with django_db_blocker.unblock():
u.delete()
| en | 0.955627 | # omitted since the password field is a encrypted version of it # omitted since the password field is a encrypted version of it # omitted since the password field is a encrypted version of it # no breakpoints for now # no breakpoints for now | 2.111997 | 2 |
hlrobot_gazebo/scripts/publisher.py | liuxiao916/HLRobot_gazebo | 8 | 6622227 | <filename>hlrobot_gazebo/scripts/publisher.py
#!/usr/bin/env python
import rospy
from std_msgs.msg import Float64MultiArray
#path = '/home/liuxiao/catkin_ws/src/HLRobot_gazebo/cubicTrajectoryPlanning/data/PPB/littlestar.txt'
parent_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
path = os.path.join(parent_path,"cubicTrajectoryPlanning/data/PPB/littlestar.txt")
def publish():
PI = 3.1415926
pub = rospy.Publisher('/HL_controller/command', Float64MultiArray, queue_size=10)
rospy.init_node('commander', anonymous=True)
rate = rospy.Rate(1000)
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if not rospy.is_shutdown():
txtdata = line.split()
angle = []
angle.append(float(txtdata[0])/180*PI)
angle.append(float(txtdata[1])/180*PI)
angle.append(float(txtdata[2])/180*PI)
angle.append(float(txtdata[3])/180*PI)
angle.append(float(txtdata[4])/180*PI)
angle.append(float(txtdata[5])/180*PI)
command = Float64MultiArray(data = angle)
pub.publish(command)
rate.sleep()
if __name__ == '__main__':
try:
publish()
except rospy.ROSInterruptException:
pass | <filename>hlrobot_gazebo/scripts/publisher.py
#!/usr/bin/env python
import rospy
from std_msgs.msg import Float64MultiArray
#path = '/home/liuxiao/catkin_ws/src/HLRobot_gazebo/cubicTrajectoryPlanning/data/PPB/littlestar.txt'
parent_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
path = os.path.join(parent_path,"cubicTrajectoryPlanning/data/PPB/littlestar.txt")
def publish():
PI = 3.1415926
pub = rospy.Publisher('/HL_controller/command', Float64MultiArray, queue_size=10)
rospy.init_node('commander', anonymous=True)
rate = rospy.Rate(1000)
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
if not rospy.is_shutdown():
txtdata = line.split()
angle = []
angle.append(float(txtdata[0])/180*PI)
angle.append(float(txtdata[1])/180*PI)
angle.append(float(txtdata[2])/180*PI)
angle.append(float(txtdata[3])/180*PI)
angle.append(float(txtdata[4])/180*PI)
angle.append(float(txtdata[5])/180*PI)
command = Float64MultiArray(data = angle)
pub.publish(command)
rate.sleep()
if __name__ == '__main__':
try:
publish()
except rospy.ROSInterruptException:
pass | en | 0.383555 | #!/usr/bin/env python #path = '/home/liuxiao/catkin_ws/src/HLRobot_gazebo/cubicTrajectoryPlanning/data/PPB/littlestar.txt' | 2.178475 | 2 |
screens/mobility.py | IvyHan2013/Mobile-Visualization | 0 | 6622228 | from . import HomeScreen
from kivy.lang import Builder
Builder.load_file('screens/mobility.kv')
class MobilityScreen(HomeScreen):
    """Mobility screen; its layout is loaded from screens/mobility.kv above,
    so no Python-side behaviour is needed."""
    pass
| from . import HomeScreen
from kivy.lang import Builder
Builder.load_file('screens/mobility.kv')
class MobilityScreen(HomeScreen):
pass
| none | 1 | 1.175871 | 1 | |
python/raft/timer_thread.py | chenzhaoplus/vraft | 23 | 6622229 | import sys
import threading
from random import randrange
import logging
from monitor import send_state_update
logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s', datefmt='%H:%M:%S', level=logging.INFO)
from Candidate import Candidate, VoteRequest
from Follower import Follower
from Leader import Leader
from cluster import Cluster, ELECTION_TIMEOUT_MAX
cluster = Cluster()
class TimerThread(threading.Thread):
    """Drives one Raft node: owns the randomized election timer and the
    follower -> candidate -> leader state transitions."""

    def __init__(self, node_id):
        threading.Thread.__init__(self)
        self.node = cluster[node_id]
        self.node_state = Follower(self.node)
        # Randomized timeout in [MAX/2, MAX) — presumably to keep nodes from
        # timing out simultaneously (standard Raft practice); confirm.
        # NOTE(review): randrange() expects integer bounds, but
        # ELECTION_TIMEOUT_MAX / 2 is a float — verify on the target Python.
        self.election_timeout = float(randrange(ELECTION_TIMEOUT_MAX / 2, ELECTION_TIMEOUT_MAX))
        self.election_timer = threading.Timer(self.election_timeout, self.become_candidate)

    def become_leader(self):
        """Promote to Leader and start sending heartbeats."""
        logging.info(f'{self} become leader and start to send heartbeat ... ')
        send_state_update(self.node_state, self.election_timeout)
        self.node_state = Leader(self.node_state)
        self.node_state.heartbeat()

    def become_candidate(self):
        """Election-timer callback: run an election, then become leader or follower."""
        logging.warning(f'heartbeat is timeout: {int(self.election_timeout)} s')
        logging.info(f'{self} become candidate and start to request vote ... ')
        send_state_update(self.node_state, self.election_timeout)
        self.node_state = Candidate(self.node_state)
        self.node_state.elect()
        if self.node_state.win():
            self.become_leader()
        else:
            self.become_follower()

    # input: candidate (id, term, lastLogIndex, lastLogTerm)
    # output: (term, vote_granted)
    # rule:
    #   1. return false if candidate.term < current_term
    #   2. return true if (voteFor is None or voteFor == candidate.id) and the
    #      candidate's log is at least as up to date as the receiver's
    def vote(self, vote_request: VoteRequest):
        """Handle a vote request by delegating to the current state object.

        NOTE(review): per the comment above, vote_result[0] is the *term*, yet
        a truthy term (not vote_granted) triggers become_follower() — confirm
        this matches the state objects' vote() return convention.
        """
        logging.info(f'{self} got vote request: {vote_request} ')
        vote_result = self.node_state.vote(vote_request)
        if vote_result[0]:
            self.become_follower()
        logging.info(f'{self} return vote result: {vote_result} ')
        return vote_result

    def become_follower(self):
        """Drop to Follower (if not already) and restart the election timer."""
        timeout = float(randrange(ELECTION_TIMEOUT_MAX / 2, ELECTION_TIMEOUT_MAX))
        if type(self.node_state) != Follower:
            logging.info(f'{self} become follower ... ')
            self.node_state = Follower(self.node)
        logging.info(f'{self} reset election timer {timeout} s ... ')
        send_state_update(self.node_state, timeout)
        # Cancel the old timer before arming a fresh one.
        self.election_timer.cancel()
        self.election_timer = threading.Timer(timeout, self.become_candidate)
        self.election_timer.start()

    def run(self):
        # Thread entry point: every node starts life as a follower.
        self.become_follower()

    def __repr__(self):
        # NOTE(review): this formats a tuple, producing e.g.
        # "('TimerThread', <state>)" — likely meant f'{type(self).__name__}, {self.node_state}'.
        return f'{type(self).__name__, self.node_state}'
# Entry point: the node id to drive is taken from the first CLI argument.
if __name__ == '__main__':
    timerThread = TimerThread(int(sys.argv[1]))
    timerThread.start()
| import sys
import threading
from random import randrange
import logging
from monitor import send_state_update
logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s', datefmt='%H:%M:%S', level=logging.INFO)
from Candidate import Candidate, VoteRequest
from Follower import Follower
from Leader import Leader
from cluster import Cluster, ELECTION_TIMEOUT_MAX
cluster = Cluster()
class TimerThread(threading.Thread):
def __init__(self, node_id):
threading.Thread.__init__(self)
self.node = cluster[node_id]
self.node_state = Follower(self.node)
self.election_timeout = float(randrange(ELECTION_TIMEOUT_MAX / 2, ELECTION_TIMEOUT_MAX))
self.election_timer = threading.Timer(self.election_timeout, self.become_candidate)
def become_leader(self):
logging.info(f'{self} become leader and start to send heartbeat ... ')
send_state_update(self.node_state, self.election_timeout)
self.node_state = Leader(self.node_state)
self.node_state.heartbeat()
def become_candidate(self):
logging.warning(f'heartbeat is timeout: {int(self.election_timeout)} s')
logging.info(f'{self} become candidate and start to request vote ... ')
send_state_update(self.node_state, self.election_timeout)
self.node_state = Candidate(self.node_state)
self.node_state.elect()
if self.node_state.win():
self.become_leader()
else:
self.become_follower()
# input: candidate (id, term, lastLogIndex, lastLogTerm)
# output: term, vote_granted
# rule:
# 1. return false if candidate.term < current_term
# 2. return true if (voteFor is None or voteFor==candidate.id) and candidate's log is newer than receiver's
def vote(self, vote_request: VoteRequest):
logging.info(f'{self} got vote request: {vote_request} ')
vote_result = self.node_state.vote(vote_request)
if vote_result[0]:
self.become_follower()
logging.info(f'{self} return vote result: {vote_result} ')
return vote_result
def become_follower(self):
timeout = float(randrange(ELECTION_TIMEOUT_MAX / 2, ELECTION_TIMEOUT_MAX))
if type(self.node_state) != Follower:
logging.info(f'{self} become follower ... ')
self.node_state = Follower(self.node)
logging.info(f'{self} reset election timer {timeout} s ... ')
send_state_update(self.node_state, timeout)
self.election_timer.cancel()
self.election_timer = threading.Timer(timeout, self.become_candidate)
self.election_timer.start()
def run(self):
self.become_follower()
def __repr__(self):
return f'{type(self).__name__, self.node_state}'
if __name__ == '__main__':
timerThread = TimerThread(int(sys.argv[1]))
timerThread.start()
| en | 0.640115 | # input: candidate (id, term, lastLogIndex, lastLogTerm) # output: term, vote_granted # rule: # 1. return false if candidate.term < current_term # 2. return true if (voteFor is None or voteFor==candidate.id) and candidate's log is newer than receiver's | 2.61062 | 3 |
cap_4/listagem/digits.py | rsmonteiro2021/execicios_python | 1 | 6622230 | #Estatíticas simples com uma lista de números.
# Simple statistics over a list of digits.
digits = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
print(min(digits))
print(max(digits))
print(sum(digits))

# Squares of 1..10, built with a list comprehension.
squares = [value * value for value in range(1, 11)]
print(squares)
| #Estatíticas simples com uma lista de números.
digits = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
print(min(digits))
print(max(digits))
print(sum(digits))
#List Comprehensions
squares = [value**2 for value in range(1,11)]
print(squares)
| pt | 0.981839 | #Estatíticas simples com uma lista de números. #List Comprehensions | 4.023802 | 4 |
capital_spans.py | patmanteau/panflutist | 0 | 6622231 | #!/usr/bin/env python
"""
Panflute filter for setting C A P I T A L text, with tracking. Use if []{.smallcaps}
is too weak.
Usage:
- In Pandoc markdown, use a bracketed Span of class allcaps: [TRAJAN]{.allcaps}
- The produced LaTeX output has several requirements:
- The Microtype package for tracking (i.e., its \\textls and \\microtypecontext
commands). In the preamble, load Microtype like so:
\\usepackage{microtype}
- A Microtype "allcaps" tracking context for tracking parameters. E.g.:
\SetTracking[
context = allcaps,
spacing = {300*,,},
outer spacing = {300*,,}
]{encoding={*}, shape=*}{70}
- A \\textuppercase macro to do the grunt work of calling the proper commands:
\\newcommand{\\textuppercase}[1]{%
\\addfontfeatures{Numbers={Proportional,Lining}}%
\\microtypecontext{tracking=allcaps}%
\\textls{\\MakeUppercase{#1}}}%
- The fontspec package to select proportional lining figures:
\\usepackage{fontspec}
- For further information on Microtype's and fontspec's configuration,
see their respective CTAN entries.
"""
from jinja2tex import latex_env
import panflute as pf
UPPERCASE = latex_env.from_string(r'\textuppercase{<< text >>}')
def action(e, doc):
    """Replace an ``allcaps`` Span with a raw LaTeX ``\\textuppercase`` inline.

    Returns None (leaving the element unchanged) for anything else, or when
    the output format is not LaTeX.
    """
    if not isinstance(e, pf.Span) or 'allcaps' not in e.classes:
        return None
    if doc.format != 'latex':
        return None
    rendered = UPPERCASE.render(text=pf.stringify(e))
    return pf.RawInline(rendered, format='latex')
def main(doc=None):
    """Run the allcaps filter over *doc* (or stdin/stdout when doc is None)."""
    return pf.run_filter(action, doc=doc)
# Allow running as a standalone pandoc filter executable.
if __name__ == '__main__':
    main()
| #!/usr/bin/env python
"""
Panflute filter for setting C A P I T A L text, with tracking. Use if []{.smallcaps}
is too weak.
Usage:
- In Pandoc markdown, use a bracketed Span of class allcaps: [TRAJAN]{.allcaps}
- The produced LaTeX output has several requirements:
- The Microtype package for tracking (i.e., its \\textls and \\microtypecontext
commands). In the preamble, load Microtype like so:
\\usepackage{microtype}
- A Microtype "allcaps" tracking context for tracking parameters. E.g.:
\SetTracking[
context = allcaps,
spacing = {300*,,},
outer spacing = {300*,,}
]{encoding={*}, shape=*}{70}
- A \\textuppercase macro to do the grunt work of calling the proper commands:
\\newcommand{\\textuppercase}[1]{%
\\addfontfeatures{Numbers={Proportional,Lining}}%
\\microtypecontext{tracking=allcaps}%
\\textls{\\MakeUppercase{#1}}}%
- The fontspec package to select proportional lining figures:
\\usepackage{fontspec}
- For further information on Microtype's and fontspec's configuration,
see their respective CTAN entries.
"""
from jinja2tex import latex_env
import panflute as pf
UPPERCASE = latex_env.from_string(r'\textuppercase{<< text >>}')
def action(e, doc):
if isinstance(e, pf.Span) and 'allcaps' in e.classes:
if doc.format == 'latex':
tex = UPPERCASE.render(text=pf.stringify(e))
return pf.RawInline(tex, format='latex')
def main(doc=None):
return pf.run_filter(action, doc=doc)
if __name__ == '__main__':
main()
| en | 0.59151 | #!/usr/bin/env python Panflute filter for setting C A P I T A L text, with tracking. Use if []{.smallcaps} is too weak. Usage: - In Pandoc markdown, use a bracketed Span of class allcaps: [TRAJAN]{.allcaps} - The produced LaTeX output has several requirements: - The Microtype package for tracking (i.e., its \\textls and \\microtypecontext commands). In the preamble, load Microtype like so: \\usepackage{microtype} - A Microtype "allcaps" tracking context for tracking parameters. E.g.: \SetTracking[ context = allcaps, spacing = {300*,,}, outer spacing = {300*,,} ]{encoding={*}, shape=*}{70} - A \\textuppercase macro to do the grunt work of calling the proper commands: \\newcommand{\\textuppercase}[1]{% \\addfontfeatures{Numbers={Proportional,Lining}}% \\microtypecontext{tracking=allcaps}% \\textls{\\MakeUppercase{#1}}}% - The fontspec package to select proportional lining figures: \\usepackage{fontspec} - For further information on Microtype's and fontspec's configuration, see their respective CTAN entries. | 2.54065 | 3 |
sdk/python/pulumi_aws_native/apigateway/get_gateway_response.py | pulumi/pulumi-aws-native | 29 | 6622232 | <filename>sdk/python/pulumi_aws_native/apigateway/get_gateway_response.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetGatewayResponseResult',
'AwaitableGetGatewayResponseResult',
'get_gateway_response',
'get_gateway_response_output',
]
@pulumi.output_type
class GetGatewayResponseResult:
def __init__(__self__, id=None, response_parameters=None, response_templates=None, status_code=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if response_parameters and not isinstance(response_parameters, dict):
raise TypeError("Expected argument 'response_parameters' to be a dict")
pulumi.set(__self__, "response_parameters", response_parameters)
if response_templates and not isinstance(response_templates, dict):
raise TypeError("Expected argument 'response_templates' to be a dict")
pulumi.set(__self__, "response_templates", response_templates)
if status_code and not isinstance(status_code, str):
raise TypeError("Expected argument 'status_code' to be a str")
pulumi.set(__self__, "status_code", status_code)
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
A Cloudformation auto generated ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="responseParameters")
def response_parameters(self) -> Optional[Any]:
"""
The response parameters (paths, query strings, and headers) for the response.
"""
return pulumi.get(self, "response_parameters")
@property
@pulumi.getter(name="responseTemplates")
def response_templates(self) -> Optional[Any]:
"""
The response templates for the response.
"""
return pulumi.get(self, "response_templates")
@property
@pulumi.getter(name="statusCode")
def status_code(self) -> Optional[str]:
"""
The HTTP status code for the response.
"""
return pulumi.get(self, "status_code")
class AwaitableGetGatewayResponseResult(GetGatewayResponseResult):
    """Awaitable shim: lets callers ``await`` the invoke result."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable yield makes __await__ a generator, as the await
        # protocol requires; the result is returned immediately.
        if False:
            yield self
        return GetGatewayResponseResult(
            id=self.id,
            response_parameters=self.response_parameters,
            response_templates=self.response_templates,
            status_code=self.status_code)
def get_gateway_response(id: Optional[str] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGatewayResponseResult:
    """
    Resource Type definition for AWS::ApiGateway::GatewayResponse


    :param str id: A Cloudformation auto generated ID.
    """
    __args__ = dict()
    __args__['id'] = id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Fall back to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws-native:apigateway:getGatewayResponse', __args__, opts=opts, typ=GetGatewayResponseResult).value

    return AwaitableGetGatewayResponseResult(
        id=__ret__.id,
        response_parameters=__ret__.response_parameters,
        response_templates=__ret__.response_templates,
        status_code=__ret__.status_code)
@_utilities.lift_output_func(get_gateway_response)
def get_gateway_response_output(id: Optional[pulumi.Input[str]] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGatewayResponseResult]:
    """
    Resource Type definition for AWS::ApiGateway::GatewayResponse


    :param str id: A Cloudformation auto generated ID.
    """
    # Body intentionally empty: lift_output_func wraps the plain function so
    # it also accepts pulumi Inputs/Outputs.
    ...
| <filename>sdk/python/pulumi_aws_native/apigateway/get_gateway_response.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetGatewayResponseResult',
'AwaitableGetGatewayResponseResult',
'get_gateway_response',
'get_gateway_response_output',
]
@pulumi.output_type
class GetGatewayResponseResult:
    """Typed result of the getGatewayResponse invoke (generated code)."""
    def __init__(__self__, id=None, response_parameters=None, response_templates=None, status_code=None):
        # Defensive type checks: the engine hands back loosely-typed values.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if response_parameters and not isinstance(response_parameters, dict):
            raise TypeError("Expected argument 'response_parameters' to be a dict")
        pulumi.set(__self__, "response_parameters", response_parameters)
        if response_templates and not isinstance(response_templates, dict):
            raise TypeError("Expected argument 'response_templates' to be a dict")
        pulumi.set(__self__, "response_templates", response_templates)
        if status_code and not isinstance(status_code, str):
            raise TypeError("Expected argument 'status_code' to be a str")
        pulumi.set(__self__, "status_code", status_code)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        A Cloudformation auto generated ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="responseParameters")
    def response_parameters(self) -> Optional[Any]:
        """
        The response parameters (paths, query strings, and headers) for the response.
        """
        return pulumi.get(self, "response_parameters")

    @property
    @pulumi.getter(name="responseTemplates")
    def response_templates(self) -> Optional[Any]:
        """
        The response templates for the response.
        """
        return pulumi.get(self, "response_templates")

    @property
    @pulumi.getter(name="statusCode")
    def status_code(self) -> Optional[str]:
        """
        The HTTP status code for the response.
        """
        return pulumi.get(self, "status_code")
class AwaitableGetGatewayResponseResult(GetGatewayResponseResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Awaitable wrapper: ``if False: yield`` makes this a generator
        # without ever suspending.
        if False:
            yield self
        return GetGatewayResponseResult(
            id=self.id,
            response_parameters=self.response_parameters,
            response_templates=self.response_templates,
            status_code=self.status_code)
def get_gateway_response(id: Optional[str] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGatewayResponseResult:
    """
    Resource Type definition for AWS::ApiGateway::GatewayResponse


    :param str id: A Cloudformation auto generated ID.
    """
    __args__ = dict()
    __args__['id'] = id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    # Synchronous engine invoke, typed via the @pulumi.output_type class.
    __ret__ = pulumi.runtime.invoke('aws-native:apigateway:getGatewayResponse', __args__, opts=opts, typ=GetGatewayResponseResult).value

    return AwaitableGetGatewayResponseResult(
        id=__ret__.id,
        response_parameters=__ret__.response_parameters,
        response_templates=__ret__.response_templates,
        status_code=__ret__.status_code)
@_utilities.lift_output_func(get_gateway_response)
def get_gateway_response_output(id: Optional[pulumi.Input[str]] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGatewayResponseResult]:
    """
    Resource Type definition for AWS::ApiGateway::GatewayResponse


    :param str id: A Cloudformation auto generated ID.
    """
    # Body intentionally empty: lift_output_func supplies the implementation.
    ...
| en | 0.731317 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** A Cloudformation auto generated ID. The response parameters (paths, query strings, and headers) for the response. The response templates for the response. The HTTP status code for the response. # pylint: disable=using-constant-test Resource Type definition for AWS::ApiGateway::GatewayResponse :param str id: A Cloudformation auto generated ID. Resource Type definition for AWS::ApiGateway::GatewayResponse :param str id: A Cloudformation auto generated ID. | 1.821557 | 2 |
keshe/models.py | kongqiahaha/databasekeshe | 0 | 6622233 | <filename>keshe/models.py<gh_stars>0
from django.utils import timezone
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
# Book: a catalogue entry for one title held by the library.
class Book(models.Model):
    b_id = models.AutoField(primary_key=True)   # explicit surrogate primary key
    b_name = models.CharField(max_length=30)    # title
    b_author = models.CharField(max_length=20)
    b_isbn = models.CharField(max_length=40)
    b_public = models.CharField(max_length=30)  # presumably the publisher -- confirm
    b_total = models.IntegerField(default=0)    # total number of copies
    b_lave = models.IntegerField(default=0)     # presumably copies still available -- confirm
    b_type = models.ForeignKey("BookType", on_delete=models.CASCADE)
# Teacher: a borrower whose rank ("Type") determines the borrowing limit.
class Teacher(User):
    type = models.ForeignKey("Type", on_delete=models.CASCADE)
    borrow = models.IntegerField(default=0)  # presumably the current loan count -- confirm
# Student: a borrower with a fixed maximum number of simultaneous loans.
class Student(User):
    max_borrow = models.IntegerField(default=5)
    borrow = models.IntegerField(default=0)  # presumably the current loan count -- confirm
# Type: a professional rank/title and the borrowing limit it grants.
class Type(models.Model):
    type = models.CharField(max_length=20)
    max_borrow = models.IntegerField(default=0)
# BorrowTeacher: one loan record linking a teacher to a borrowed book.
class BorrowTeacher(models.Model):
    teacher = models.ForeignKey("Teacher", on_delete=models.CASCADE)
    book = models.ForeignKey("Book", on_delete=models.CASCADE)
    borrow_date = models.DateField(default=timezone.now)  # date the loan started
# BorrowStudent: one loan record linking a student to a borrowed book.
class BorrowStudent(models.Model):
    student = models.ForeignKey("Student", on_delete=models.CASCADE)
    book = models.ForeignKey("Book", on_delete=models.CASCADE)
    borrow_date = models.DateField(default=timezone.now)  # date the loan started
# BookType: the category a book belongs to.
class BookType(models.Model):
    bookType = models.CharField(max_length=20)
| <filename>keshe/models.py<gh_stars>0
from django.utils import timezone
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
# Book: a catalogue entry for one title held by the library.
class Book(models.Model):
    b_id = models.AutoField(primary_key=True)   # explicit surrogate primary key
    b_name = models.CharField(max_length=30)    # title
    b_author = models.CharField(max_length=20)
    b_isbn = models.CharField(max_length=40)
    b_public = models.CharField(max_length=30)  # presumably the publisher -- confirm
    b_total = models.IntegerField(default=0)    # total number of copies
    b_lave = models.IntegerField(default=0)     # presumably copies still available -- confirm
    b_type = models.ForeignKey("BookType", on_delete=models.CASCADE)
# Teacher: a borrower whose rank ("Type") determines the borrowing limit.
class Teacher(User):
    type = models.ForeignKey("Type", on_delete=models.CASCADE)
    borrow = models.IntegerField(default=0)  # presumably the current loan count -- confirm
# Student: a borrower with a fixed maximum number of simultaneous loans.
class Student(User):
    max_borrow = models.IntegerField(default=5)
    borrow = models.IntegerField(default=0)  # presumably the current loan count -- confirm
# Type: a professional rank/title and the borrowing limit it grants.
class Type(models.Model):
    type = models.CharField(max_length=20)
    max_borrow = models.IntegerField(default=0)
# BorrowTeacher: one loan record linking a teacher to a borrowed book.
class BorrowTeacher(models.Model):
    teacher = models.ForeignKey("Teacher", on_delete=models.CASCADE)
    book = models.ForeignKey("Book", on_delete=models.CASCADE)
    borrow_date = models.DateField(default=timezone.now)  # date the loan started
# BorrowStudent: one loan record linking a student to a borrowed book.
class BorrowStudent(models.Model):
    student = models.ForeignKey("Student", on_delete=models.CASCADE)
    book = models.ForeignKey("Book", on_delete=models.CASCADE)
    borrow_date = models.DateField(default=timezone.now)  # date the loan started
# BookType: the category a book belongs to.
class BookType(models.Model):
    bookType = models.CharField(max_length=20)
| zh | 0.67504 | # Create your models here. # 图书 # 老师 # 学生 # 职称类型 # 老师借阅 # 学生借阅 # 图书类型 | 2.459894 | 2 |
tests/hmc_test.py | dfm/rmhmc | 4 | 6622234 | import jax.numpy as jnp
import numpy as np
from jax import random
from rmhmc.hmc import hmc
from .problems import banana
def test_divergence() -> None:
    """A deliberately huge step size on the banana target must diverge."""
    # initial_step_size=1000 makes the integrator blow up immediately.
    system = hmc(banana(False, False), initial_step_size=1000.0)
    state = system.init(jnp.array([0.3, 0.5]))
    state_ = system.step(state, random.PRNGKey(5))
    # The divergent trajectory must be flagged and the proposal rejected
    # with acceptance probability exactly zero.
    assert state_[2].diverging
    assert not state_[2].accept
    np.testing.assert_allclose(state_[2].accept_prob, 0.0)
| import jax.numpy as jnp
import numpy as np
from jax import random
from rmhmc.hmc import hmc
from .problems import banana
def test_divergence() -> None:
    """A deliberately huge step size on the banana target must diverge."""
    # initial_step_size=1000 makes the integrator blow up immediately.
    system = hmc(banana(False, False), initial_step_size=1000.0)
    state = system.init(jnp.array([0.3, 0.5]))
    state_ = system.step(state, random.PRNGKey(5))
    # The divergent trajectory must be flagged and the proposal rejected
    # with acceptance probability exactly zero.
    assert state_[2].diverging
    assert not state_[2].accept
    np.testing.assert_allclose(state_[2].accept_prob, 0.0)
| none | 1 | 2.369296 | 2 | |
control_input_value.py | EFatihAydin/random_sentence | 0 | 6622235 | from functools import reduce
import numpy as np
#clean : clean text by turkish words
def clean(text):
    """Normalize Turkish text: fold upper-case and circumflex letters that
    str.lower() does not handle correctly, then lower-case and trim."""
    table = str.maketrans({
        "Ş": "ş", "İ": "i", "Ü": "ü", "Ç": "ç", "Ö": "ö", "Ğ": "ğ",
        "I": "ı", "Î": "ı", "Û": "u", "Â": "a", "â": "a", "î": "ı", "û": "u",
    })
    return text.translate(table).lower().strip()
#trinity: parse line into three characters
def trinity(row):
    """Slide a width-3 window over *row*, yielding each 3-character chunk."""
    for a, b, c in zip(row, row[1:], row[2:]):
        yield a + b + c
#variable
totalch = 0
twchar = []
#char: columns name for matrix (Turkish alphabet plus q, w, x and space)
char = "abcçdefgğhıijklmnoöprsştuüvyzqwx "
#twchar: rows name for matrix (every ordered two-character combination)
for i in char:
    for j in char:
        twchar.append( i+j )
#print(len(char))#53
#print(len(twchar))#2809
#add number to name for create matrix: map char / char-pair to its matrix index
mrowname = dict( [ (k,v) for v,k in enumerate(char)] )
mcolname = dict( [ (k,v) for v,k in enumerate(twchar)] )
#print( mrowname )
#print( mcolname )
#read matrix in file
# NOTE(review): eval() on file contents executes arbitrary code; for a plain
# literal matrix ast.literal_eval would be safer -- confirm the file format.
with open("probobility_matrix.txt", "r") as file:
    matris = eval(file.readline())
#trial steps
while True:
    text = input("Denenecek kelimeyi giriniz : ")
    text = clean( text )
    text = text.split()
    totalch = 0
    pay = 0
    minum = 0
    ort = 0
    q75 = 0     # NOTE(review): q75 and liste are computed but never used
    liste = []
    for word in text:
        for a, b ,c in trinity(word):
            # accumulate the trigram probability P(c | a+b)
            pay += matris[mcolname[a+b]][mrowname[c]]
            liste.append(matris[mcolname[a+b]][mrowname[c]])
            totalch +=1
        # NOTE(review): original indentation was lost; minum tracking is
        # assumed to run once per word -- confirm against the source repo.
        if minum == 0:
            minum = pay
        elif minum > pay:
            minum = pay
    # NOTE(review): ZeroDivisionError when no trigram was seen
    # (every word shorter than 3 characters).
    ort = pay / totalch
    if ort >=0.00003:
        print(str(ort) + 'Anlamlı')
    else:
print(str(ort) + 'Anlamsız') | from functools import reduce
import numpy as np
#clean : clean text by turkish words
def clean(text):
    """Normalize Turkish text: fold upper-case and circumflex letters that
    str.lower() does not handle correctly, then lower-case and trim."""
    table = str.maketrans({
        "Ş": "ş", "İ": "i", "Ü": "ü", "Ç": "ç", "Ö": "ö", "Ğ": "ğ",
        "I": "ı", "Î": "ı", "Û": "u", "Â": "a", "â": "a", "î": "ı", "û": "u",
    })
    return text.translate(table).lower().strip()
#trinity: parse line into three characters
def trinity(row):
    """Slide a width-3 window over *row*, yielding each 3-character chunk."""
    for a, b, c in zip(row, row[1:], row[2:]):
        yield a + b + c
#variable
totalch = 0
twchar = []
#char: columns name for matrix (Turkish alphabet plus q, w, x and space)
char = "abcçdefgğhıijklmnoöprsştuüvyzqwx "
#twchar: rows name for matrix (every ordered two-character combination)
for i in char:
    for j in char:
        twchar.append( i+j )
#print(len(char))#53
#print(len(twchar))#2809
#add number to name for create matrix: map char / char-pair to its matrix index
mrowname = dict( [ (k,v) for v,k in enumerate(char)] )
mcolname = dict( [ (k,v) for v,k in enumerate(twchar)] )
#print( mrowname )
#print( mcolname )
#read matrix in file
# NOTE(review): eval() on file contents executes arbitrary code; for a plain
# literal matrix ast.literal_eval would be safer -- confirm the file format.
with open("probobility_matrix.txt", "r") as file:
    matris = eval(file.readline())
#trial steps
while True:
    text = input("Denenecek kelimeyi giriniz : ")
    text = clean( text )
    text = text.split()
    totalch = 0
    pay = 0
    minum = 0
    ort = 0
    q75 = 0     # NOTE(review): q75 and liste are computed but never used
    liste = []
    for word in text:
        for a, b ,c in trinity(word):
            # accumulate the trigram probability P(c | a+b)
            pay += matris[mcolname[a+b]][mrowname[c]]
            liste.append(matris[mcolname[a+b]][mrowname[c]])
            totalch +=1
        # NOTE(review): original indentation was lost; minum tracking is
        # assumed to run once per word -- confirm against the source repo.
        if minum == 0:
            minum = pay
        elif minum > pay:
            minum = pay
    # NOTE(review): ZeroDivisionError when no trigram was seen
    # (every word shorter than 3 characters).
    ort = pay / totalch
    if ort >=0.00003:
        print(str(ort) + 'Anlamlı')
    else:
print(str(ort) + 'Anlamsız') | en | 0.548031 | #clean : clean text by turkish words #trinity: parse line into three characters #variable #char: columns name for matrix #twchar: rows name for matrix #print(len(char))#53 #print(len(twchar))#2809 #add number to name for create matrix #print( mrowname ) #print( mcolname ) #read matrix in file #trial steps | 3.742527 | 4 |
mundo_02_est_controle/ex052.py | icarofilho/estudonauta_python | 0 | 6622236 | num = int(input("Digite um número: "))
qtd = 0
for n in range(1,num+1):
if num % n == 0:
print(f"\033[34m{n}",end="\033[m ")
qtd += 1
else:
print(f"\033[33m{n}",end="\033[m ")
if qtd == 2:
print(f"\nO número {num} foi divisível {qtd} vezes.\nE por isso ele é PRIMO")
else:
print(f"\nO número {num} foi divisível {qtd} vezes.\nE por isso ele NÃO é PRIMO") | num = int(input("Digite um número: "))
qtd = 0  # counts how many numbers in 1..num divide num exactly
for n in range(1,num+1):
    if num % n == 0:
        # divisors are printed in blue
        print(f"\033[34m{n}",end="\033[m ")
        qtd += 1
    else:
        # non-divisors are printed in yellow
        print(f"\033[33m{n}",end="\033[m ")
# a prime number has exactly two divisors: 1 and itself
if qtd == 2:
    print(f"\nO número {num} foi divisível {qtd} vezes.\nE por isso ele é PRIMO")
else:
print(f"\nO número {num} foi divisível {qtd} vezes.\nE por isso ele NÃO é PRIMO") | none | 1 | 4.043223 | 4 | |
utils/quantize_model.py | raja-kumar/Hybrid_and_Non-uniform_quantization | 0 | 6622237 | import torch
import torch.nn as nn
import copy
from .quantization_utils.quant_modules import *
from pytorchcv.models.common import ConvBlock
from pytorchcv.models.shufflenetv2 import ShuffleUnit, ShuffleInitBlock
def quantize_model(model):
    """
    Recursively quantize a pretrained single-precision model.

    Conv2d layers get 4-bit weights, Linear layers 8-bit weights, and every
    ReLU/ReLU6 is followed by an 8-bit activation quantizer.
    model: pretrained single-precision model
    """
    # quantize convolutional layers to 4-bit and linear layers to 8-bit weights
    if type(model) == nn.Conv2d:
        quant_mod = Quant_Conv2d(weight_bit=4)
        quant_mod.set_param(model)
        return quant_mod
    elif type(model) == nn.Linear:
        quant_mod = Quant_Linear(weight_bit=8)
        quant_mod.set_param(model)
        return quant_mod
    # quantize all the activation to 8-bit
    elif type(model) == nn.ReLU or type(model) == nn.ReLU6:
        return nn.Sequential(*[model, QuantAct(activation_bit=8)])
    # recursively use the quantized module to replace the single-precision module
    elif type(model) == nn.Sequential:
        mods = []
        for n, m in model.named_children():
            mods.append(quantize_model(m))
        return nn.Sequential(*mods)
    else:
        q_model = copy.deepcopy(model)
        for attr in dir(model):
            mod = getattr(model, attr)
            # skip attributes whose name contains 'norm' (normalization layers)
            if isinstance(mod, nn.Module) and 'norm' not in attr:
                setattr(q_model, attr, quantize_model(mod))
        return q_model
def freeze_model(model):
    """
    Freeze the activation range of every QuantAct module in *model*.

    Walks the module tree the same way quantize_model does and calls
    ``fix()`` on each QuantAct found; returns *model* for chaining.
    """
    if type(model) == QuantAct:
        model.fix()
    elif type(model) == nn.Sequential:
        for n, m in model.named_children():
            freeze_model(m)
    else:
        for attr in dir(model):
            mod = getattr(model, attr)
            if isinstance(mod, nn.Module) and 'norm' not in attr:
                freeze_model(mod)
    return model
def unfreeze_model(model):
    """
    Unfreeze the activation range of every QuantAct module in *model*.

    Mirror of freeze_model: calls ``unfix()`` on each QuantAct found and
    returns *model* for chaining.
    """
    if type(model) == QuantAct:
        model.unfix()
    elif type(model) == nn.Sequential:
        for n, m in model.named_children():
            unfreeze_model(m)
    else:
        for attr in dir(model):
            mod = getattr(model, attr)
            if isinstance(mod, nn.Module) and 'norm' not in attr:
                unfreeze_model(mod)
    return model
| import torch
import torch.nn as nn
import copy
from .quantization_utils.quant_modules import *
from pytorchcv.models.common import ConvBlock
from pytorchcv.models.shufflenetv2 import ShuffleUnit, ShuffleInitBlock
def quantize_model(model):
    """
    Recursively quantize a pretrained single-precision model.

    Conv2d layers get 4-bit weights, Linear layers 8-bit weights, and every
    ReLU/ReLU6 is followed by an 8-bit activation quantizer.
    model: pretrained single-precision model
    """
    # quantize convolutional layers to 4-bit and linear layers to 8-bit weights
    if type(model) == nn.Conv2d:
        quant_mod = Quant_Conv2d(weight_bit=4)
        quant_mod.set_param(model)
        return quant_mod
    elif type(model) == nn.Linear:
        quant_mod = Quant_Linear(weight_bit=8)
        quant_mod.set_param(model)
        return quant_mod
    # quantize all the activation to 8-bit
    elif type(model) == nn.ReLU or type(model) == nn.ReLU6:
        return nn.Sequential(*[model, QuantAct(activation_bit=8)])
    # recursively use the quantized module to replace the single-precision module
    elif type(model) == nn.Sequential:
        mods = []
        for n, m in model.named_children():
            mods.append(quantize_model(m))
        return nn.Sequential(*mods)
    else:
        q_model = copy.deepcopy(model)
        for attr in dir(model):
            mod = getattr(model, attr)
            # skip attributes whose name contains 'norm' (normalization layers)
            if isinstance(mod, nn.Module) and 'norm' not in attr:
                setattr(q_model, attr, quantize_model(mod))
        return q_model
def freeze_model(model):
    """
    Freeze the activation range of every QuantAct module in *model*.

    Walks the module tree the same way quantize_model does and calls
    ``fix()`` on each QuantAct found; returns *model* for chaining.
    """
    if type(model) == QuantAct:
        model.fix()
    elif type(model) == nn.Sequential:
        for n, m in model.named_children():
            freeze_model(m)
    else:
        for attr in dir(model):
            mod = getattr(model, attr)
            if isinstance(mod, nn.Module) and 'norm' not in attr:
                freeze_model(mod)
    return model
def unfreeze_model(model):
    """
    Unfreeze the activation range of every QuantAct module in *model*.

    Mirror of freeze_model: calls ``unfix()`` on each QuantAct found and
    returns *model* for chaining.
    """
    if type(model) == QuantAct:
        model.unfix()
    elif type(model) == nn.Sequential:
        for n, m in model.named_children():
            unfreeze_model(m)
    else:
        for attr in dir(model):
            mod = getattr(model, attr)
            if isinstance(mod, nn.Module) and 'norm' not in attr:
                unfreeze_model(mod)
    return model
| en | 0.716891 | Recursively quantize a pretrained single-precision model to int8 quantized model
model: pretrained single-precision model # quantize convolutional and linear layers to 8-bit # quantize all the activation to 8-bit # recursively use the quantized module to replace the single-precision module freeze the activation range unfreeze the activation range | 2.607368 | 3 |
nucosMQ/nucosServer.py | NuCOS/nucosMQ | 1 | 6622238 | from __future__ import print_function
from __future__ import absolute_import
from .nucos23 import ispython3
if ispython3:
import socketserver
import queue
else:
import SocketServer as socketserver
import Queue as queue
from threading import Thread
import time
import copy
import socket
from inspect import isclass, ismethod
from collections import defaultdict
from .nucosLogger import Logger
from .nucosMessage import NucosIncomingMessage, NucosOutgoingMessage, SocketArray, EOM, unicoding
logger = Logger('nucosServer', ["clientip","user"])
logger.format('[%(asctime)-15s] %(name)-8s %(levelname)-7s %(clientip)s %(user)s -- %(message)s')
logger.level("DEBUG")

# Module-wide connection registries, shared by all handler threads.
connection_sid = {}        # addr -> open socket connection
connection_auth_uid = {}   # authenticated uid -> addr
connection_auth_addr = {}  # addr -> authenticated uid
#palace = {}
on_disconnect = [] #disconnect-handler
on_connect = [] #connect-handler
on_receive = [] #receive-handler
on_shutdown = []
AUTH = None            # callable(addr, conn) running the auth protocol, or None
ON_CLIENTEVENT = None  # callable(addr, payload) invoked for each client message
SERVE_FOREVER = True   # NOTE(review): appears unused in this file
SHUTDOWN = False       # NOTE(review): only declared global, never read here
TIMEOUT = 300.0        # socket timeout in seconds
palace = defaultdict(list)  # room name -> list of member uids
# NOTE(review): this rebinds the imported ``queue`` module name to a Queue
# instance; the module itself is unreachable by that name afterwards.
queue = queue.Queue()
from .nucosQueue import NucosQueue
t_auth = None  # handle of the last auth thread started by ServerHandler
def cleanup(addr, conn, close=True):
    """
    Remove every trace of a connection from the module-level registries.

    Closes the socket as well when *close* is True; with close=False the
    socket object is left open but is still deregistered.
    """
    uid = ""
    if addr in connection_auth_addr.keys():
        uid = connection_auth_addr[addr]
    logger.log(msg= 'Cleanup', clientip=addr, user=uid)
    if close:
        conn.close()
    # pop(key, None) instead of pop(key)/bare except: cleanup() can run twice
    # for the same connection (handler loop + close()), which used to raise
    # KeyError on connection_sid, while the bare except swallowed everything.
    connection_sid.pop(addr, None)
    palace.pop(uid, None)
    connection_auth_addr.pop(addr, None)
    connection_auth_uid.pop(uid, None)
    #TODO remove singular rooms
    return
# Per-connection FIFO of raw payloads received before authentication
# finished; consumed by NucosServer.wait_for_answer().
answer_stack = defaultdict(list)
class ServerHandler(socketserver.BaseRequestHandler):
    """
    The server handler class

    One instance is created per accepted TCP connection; handle() runs the
    receive loop for that connection in its own thread.
    """
    no_auth = False  # set True per instance when no AUTH handler is configured
    def handle(self):
        global AUTH, t_auth
        conn = self.request
        conn.settimeout(TIMEOUT) #longest possible open connection without any message
        addr = self.client_address
        logger.log(msg= 'Incoming connection', clientip=addr)
        connection_sid.update({addr:conn}) #append the socket connection
        if AUTH:
            # Run the auth handshake concurrently with the receive loop.
            # NOTE(review): t_auth is a module global, so overlapping
            # connections overwrite each other's auth-thread handle.
            t_auth = Thread(target=self.authenticate, args=(addr,conn))
            t_auth.daemon = True
            t_auth.start()
        else:
            self.no_auth = True
        fullData = SocketArray()  # reassembly buffer for one EOM-terminated message
        while True:
            try:
                receivedData = SocketArray(conn.recv(1024))
            except socket.timeout:
                logger.log(lvl="WARNING", msg="server socket timeout")
                receivedData = SocketArray.empty()
            except socket.error as ex:
                logger.log(lvl="WARNING", msg="server socket error %s"%ex)
                receivedData = SocketArray.empty()
                break
            ####
            # kill server logic:
            if not queue.empty():
                msg = queue.get()
            else:
                msg = ""
            if msg=="kill-server":
                logger.log(lvl="DEBUG", msg="connection killed")
                if connection_sid: #kill all other threads in subsequence
                    queue.put("kill-server")
                break
            ####
            if receivedData:
                fullData = fullData.ext(receivedData)
                if len(receivedData) == 1024:
                    logger.log(lvl="DEBUG", msg="max length 1024 %s"%receivedData)
                # keep reading until the end-of-message marker arrives
                if not fullData.endswith(EOM):
                    logger.log(lvl="DEBUG", msg="continue listening")
                    continue
                logger.log(lvl="DEBUG", msg="received package of length %i" % len(receivedData))
                logger.log(lvl="DEBUG", msg="payload: %s"%receivedData)
                # Messages from unauthenticated peers are parked on the
                # answer_stack for the auth protocol instead of dispatching.
                if addr not in connection_auth_addr.keys() and not self.no_auth: #only for not authenticated clients put the data in the wait-stack
                    answer_stack[conn].append(fullData)
                    fullData = SocketArray()
                    continue
                if ON_CLIENTEVENT:
                    ON_CLIENTEVENT(addr, fullData)
                    fullData = SocketArray()
                    continue
            else:
                # empty read: peer closed (or timed out) -- deregister and stop
                if addr in connection_sid.keys():
                    cleanup(addr, conn, close=True) #close or not close ???? why ?
                logger.log(lvl="DEBUG", msg="stop this connection now")
                break
    def authenticate(self, addr, conn):
        # Delegate to the module-level AUTH protocol handler.
        logger.log(msg='Start auth-process')
        AUTH(addr, conn)
        return
class ThreadingTCPServer(socketserver.ThreadingTCPServer):
    # Same as the stdlib server but always binds with SO_REUSEADDR so the
    # port can be rebound immediately after a restart.
    # NOTE(review): unlike the base implementation this override does not
    # refresh self.server_address via getsockname() after binding.
    def server_bind(self):
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(self.server_address)
class SingleConnectionServer():
    """
    A single connection Server: accepts only one connection

    Mirrors the ServerHandler receive loop, but for exactly one peer
    (TCP) or for a bound UDP socket.
    """
    def __init__(self, IP_PORT, udp=False):
        if udp:
            socktype = socket.SOCK_DGRAM
        else:
            socktype = socket.SOCK_STREAM
        self.socket = socket.socket(socket.AF_INET, socktype)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.IP_PORT = IP_PORT
        self.no_auth = False
        self.udp = udp
    def serve_forever(self):
        # Blocking server loop; run it in a thread (see NucosServer.start).
        global AUTH, ON_CLIENTEVENT
        try:
            self.socket.bind(self.IP_PORT)
        except socket.error as ex:
            logger.log(lvl="DEBUG", msg="single server socket exception %s"%ex)
            self.socket.close()
            raise Exception
        if not self.udp:
            self.socket.listen(1)
            (conn, addr) = self.socket.accept()
            logger.log(msg= 'Incoming connection (single-server)', clientip=addr)
            connection_sid.update({addr:conn}) #append the socket connection
            if AUTH:
                t = Thread(target=self.authenticate, args=(addr,conn))
                t.daemon = True
                t.start()
            else:
                self.no_auth = True
        fullData = SocketArray()  # reassembly buffer for one EOM-terminated message
        logger.log(lvl="DEBUG", msg="start listening")
        while True:
            try:
                if not self.udp:
                    receivedData = SocketArray(conn.recv(1024))
                else:
                    # recvfrom returns a (payload, sender_address) tuple
                    receivedData = self.socket.recvfrom(1024)
            except socket.timeout:
                logger.log(lvl="WARNING", msg="server socket timeout")
                receivedData = SocketArray.empty()
            except socket.error as ex:
                logger.log(lvl="WARNING", msg="server socket error %s"%ex)
                receivedData = SocketArray.empty()
                raise Exception
            if not queue.empty():
                msg = queue.get()
            else:
                msg = ""
            if msg=="kill-server":
                logger.log(lvl="DEBUG", msg="single server killed")
                break
            if receivedData:
                if self.udp:
                    # NOTE(review): these two lines look swapped -- after the
                    # first assignment receivedData is already the payload, so
                    # ``addr`` is taken from the payload's second byte instead
                    # of the sender address. Likely intended:
                    #     receivedData, addr = receivedData
                    receivedData = receivedData[0]
                    addr = receivedData[1]
                logger.log(lvl="DEBUG", msg="message received %s"%receivedData)
                fullData = fullData.ext(receivedData)
                if len(receivedData) == 1024:
                    logger.log(lvl="DEBUG", msg="max length 1024 %s"%receivedData)
                if not fullData.endswith(EOM):
                    logger.log(lvl="DEBUG", msg="continue listening")
                    continue
                logger.log(lvl="DEBUG", msg="received package of length %i" % len(receivedData))
                logger.log(lvl="DEBUG", msg="payload: %s"%receivedData)
                if addr not in connection_auth_addr.keys() and not self.no_auth: #only for not authenticated clients put the data in the wait-stack
                    answer_stack[conn].append(receivedData)
                    fullData = SocketArray()
                    continue
                if ON_CLIENTEVENT:
                    ON_CLIENTEVENT(addr, fullData)
                    fullData = SocketArray()
                    continue
            else:
                cleanup(addr, conn, close=True)
                logger.log(lvl="DEBUG", msg="stop single-server now")
                break
    def authenticate(self, addr, conn):
        # Delegate to the module-level AUTH protocol handler.
        logger.log(msg='Start auth-process')
        AUTH(addr, conn)
        return
    def shutdown(self):
        # API compatibility with socketserver-based servers; nothing to do.
        pass
    def server_close(self):
        # API compatibility with socketserver-based servers; nothing to do.
        pass
class NucosServer():
    """
    base NuCOS socket class on server side

    Implements the NuCOS message protocol on top of a tcp/ip socket and
    accepts either one or many clients (depends on the single_server flag),
    serving each connection in its own thread.
    do_auth is a class whose instances provide auth_challenge(uid=...) and
    auth_final(uid=..., signature=..., challenge=...).
    """
    def __init__(self, IP, PORT, do_auth=None, single_server=False, timeout=300.0, udp=False):
        # FIX: TIMEOUT added to the global statement -- previously the
        # ``TIMEOUT = timeout`` assignment below only created a local
        # variable, so the timeout parameter was silently ignored.
        global AUTH, ON_CLIENTEVENT, TIMEOUT
        self.logger = logger
        self.auth_final = None
        self.IP = IP
        self.PORT = PORT
        self.in_auth_process = []   # connections currently in the auth handshake
        self.send_later = []        # (conn, event, content) deferred during auth
        self.queue = NucosQueue()
        self.shutdown_process = []  # uids that requested a shutdown
        self.udp = udp
        # FIX: used by ping() while a connection is authenticating; it was
        # never initialized before, making ping() raise AttributeError.
        self.ping_timeout = timeout
        if isclass(do_auth):
            AUTH = self._auth_protocoll
            self.do_auth_obj = do_auth()
            if ismethod(self.do_auth_obj.auth_final):
                self.auth_final = self.do_auth_obj.auth_final
            else:
                raise Exception("auth class has no auth_final")
            if ismethod(self.do_auth_obj.auth_challenge):
                self.auth_challenge = self.do_auth_obj.auth_challenge
            else:
                raise Exception("auth class has no auth_challenge")
        elif do_auth is None:
            self.logger.log(lvl="INFO", msg="no auth selected")
            AUTH = None
        else:
            raise Exception("only class as do_auth accepted")
        self.single_server = single_server
        if udp:
            # UDP is connectionless, so it is always served single-connection
            self.single_server = True
        if not self.single_server:
            self.srv = ThreadingTCPServer((IP, PORT), ServerHandler)
        else:
            self.srv = SingleConnectionServer((IP, PORT), udp=self.udp)
        ON_CLIENTEVENT = lambda u, x: self._on_clientEvent(u, x)
        TIMEOUT = timeout
        self.auth_status = {}
        self.event_callbacks = defaultdict(list)
    def getsockname(self):
        """Return the (host, port) the underlying socket is bound to."""
        return self.srv.socket.getsockname()
    def _reinitialize(self):
        """
        re-initialize a killed server
        """
        # drain any leftover kill messages so the new server does not die
        while not queue.empty():
            queue.get()
        self.auth_status = {}
        self.shutdown_process = []
        self.logger.log(lvl="DEBUG", msg="reinitialize the server")
        if not self.single_server:
            self.srv = ThreadingTCPServer((self.IP, self.PORT), ServerHandler)
        else:
            self.srv = SingleConnectionServer((self.IP, self.PORT))
    def start(self):
        """
        start a non-blocking server
        """
        self.logger.log(lvl="INFO", msg="... try to start server")
        t = Thread(target=self.srv.serve_forever)
        t.daemon = True
        t.start()
        time.sleep(0.2) #startup time for server
    def is_connected(self, conn):
        """True if *conn* is a currently registered connection."""
        return conn in connection_sid.values()
    def ping(self, conn):
        """
        send a ping event and wait for a pong (blocking call, since it
        expects the answer right away)
        """
        start_time = time.time()
        # wait until the connection finished its auth handshake
        while conn in self.in_auth_process:
            tau = time.time() - start_time
            time.sleep(0.1)
            if tau > self.ping_timeout:
                return False
        self.logger.log(lvl="INFO", msg="send a ping, expects a pong")
        self.send(conn, "ping", "")
        self.queue.put_topic("ping-server", "wait")
        msg = self.queue.get_topic("pong-server", timeout=5.0)
        if msg == "done":
            return True
        else:
            return False
    def send(self, conn, event, content, room=''):
        """
        the send command for a given connection conn, all other send commands
        must call send to prevent auth-protocoll confusion
        """
        logger.log(lvl="DEBUG", msg="send via conn: %s | %s | %s"%(conn, event, content))
        if conn in self.in_auth_process:
            # defer the message; it is flushed once auth succeeded
            self.send_later.append((conn, event, content))
            self.logger.log(lvl="WARNING", msg="no send during auth: %s %s %s"%(conn, event, content))
            return True
        return self._send(conn, event, content, room)
    def _send(self, conn, event, content, room=''):
        """
        finalize the send process
        """
        if self.udp:
            # no outbound channel is kept for UDP peers
            return
        self.logger.log(lvl="DEBUG", msg="try to do _send: %s %s %s"%(conn, event, content))
        if not room:
            data = { "event":event, "content":content }
        else:
            data = { "event":event, "content":content, "room":room }
        message = NucosOutgoingMessage(data)
        payload, error = message.payload()
        if error:
            logerror = "outgoing msg error e: %s pl: %s type(pl): %s"%(error, payload, type(payload))
            self.logger.log(lvl="ERROR", msg=logerror)
            return False
        try:
            conn.send(payload)
            return True
        except socket.error as ex:
            self.logger.log(lvl="ERROR", msg="socket error during send-process %s %s %s"%(ex, conn, connection_sid))
            return False
    def _flush(self):
        """
        send all pre-processed send commands deferred during the auth process
        """
        for conn, event, content in self.send_later:
            self.send(conn, event, content)
        self.send_later = []
    def send_all(self, event, content):
        """
        send a message to all connected clients
        """
        if connection_sid:
            for addr, conn in connection_sid.items():
                self.send(conn, event, content)
    def publish(self, room, event, content):
        """
        send a message to all clients in a room
        """
        self.wait_for_auth()
        logger.log(lvl="DEBUG", msg="send in room: %s | %s | %s"%(room, event, content))
        for _room, uids in palace.items():
            if _room == room:
                for uid in uids:
                    addr = connection_auth_uid[uid]
                    conn = connection_sid[addr]
                    self.send(conn, event, content, room)
    def join_room(self, room, uid):
        """
        append a user to a room, if uid is not anonymous and the desired room
        is not one of the other users (those rooms stay private)
        """
        if not uid == "anonymous" and not room in connection_auth_uid:
            logger.log(lvl="DEBUG", msg="user %s entered room %s"%(uid, room))
            palace[room].append(uid)
    def _on_clientEvent(self, addr, payload):
        """
        dispatcher invoked for every client payload

        internal events:
        ----------------
        shutdown, ping, pong, subscripe; everything else goes to the
        registered event callbacks.
        """
        if addr in connection_auth_addr.keys():
            uid = connection_auth_addr[addr]
        else:
            uid = "anonymous"
        incoming = NucosIncomingMessage(payload)
        msgs, error = incoming.msgs()
        if error:
            logger.log(lvl="WARNING", msg="error in incoming message: %s"%error)
        for msg in msgs:
            event = unicoding(msg["event"])
            content = unicoding(msg["content"])
            if 'room' in msg.keys():
                room = msg["room"]
            else:
                room = ''
            logger.log(lvl="INFO", msg="incoming clientEvent: %s | %s | %s"%(event, content, room), user=uid)
            if self.udp:
                # UDP peers only get user callbacks, no protocol events
                for _event, funcs in self.event_callbacks.items():
                    if _event == "all":
                        for f in funcs:
                            f(content)
                    if _event == event:
                        for f in funcs:
                            f(content)
                    else:
                        continue
                return
            if room:
                self.publish(room, event, content)
                return
            if event == "shutdown":
                self.send(connection_sid[addr], "shutdown", "confirmed")
                self.shutdown_process.append(uid)
            elif event == "ping":
                self.send(connection_sid[addr], "pong", "")
            elif event == "pong":
                msg = self.queue.get_topic("ping-server", timeout=10.0)
                if not msg == "wait":
                    self.logger.log(lvl="ERROR", msg="pong received no ping send %s"%msg)
                self.logger.log(lvl="INFO", msg="pong received")
                self.queue.put_topic("pong-server", "done")
            elif event == "subscripe":
                self.join_room(content, uid)
            else:
                for _event, funcs in self.event_callbacks.items():
                    if _event == "all":
                        for f in funcs:
                            f(content)
                    if _event == event:
                        for f in funcs:
                            f(content)
                    else:
                        continue
    def close(self):
        """Shut the server down gracefully and prepare it for a restart."""
        queue.put("kill-server")
        logger.log(lvl="WARNING", msg="server is forced to shut-down now")
        cosid = copy.copy(connection_sid)
        for addr, conn in cosid.items():
            #gracefully:
            self.send(conn, "shutdown", "now")
            time.sleep(0.1)
            cleanup(addr, conn)
        self.srv.shutdown()
        self.srv.server_close()
        self._reinitialize()
    def wait_for_auth(self):
        """Block (max. 5s) until at least one client finished authentication."""
        start_time = time.time()
        while True:
            if connection_auth_uid:
                return
            else:
                tau = time.time() - start_time
                if tau > 5:
                    return
                else:
                    time.sleep(0.1)
    def get_conn(self, uid):
        """Return the connection of *uid*, waiting up to 5s for it to appear."""
        start_time = time.time()
        while True:
            if uid in self.shutdown_process:
                break
            if uid in connection_auth_uid.keys():
                return connection_sid[connection_auth_uid[uid]]
            elif uid == "anonymous": #take the first which is connected
                if connection_sid:
                    return list(connection_sid.values())[0]
            else:
                tau = time.time() - start_time
                if tau > 5:
                    return None
                else:
                    time.sleep(0.1)
    def wait_for_answer(self, conn):
        """
        blocking call for waiting for a client answer, which is connected via conn
        """
        start_time = time.time()
        while True:
            tau = time.time() - start_time
            if tau > 1.0:
                logger.log(lvl="WARNING", msg="auth failed")
                return
            if answer_stack[conn]:
                payload = answer_stack[conn].pop(0)
                incoming = NucosIncomingMessage(payload)
                msgs, error = incoming.msgs()
                if error:
                    logger.log("incoming message error: %i"%error)
                    return None
                if msgs:
                    return msgs[0]
    def add_event_callback(self, event, handler):
        """
        adds an external function or method as a callback for an incoming event

        if event is "all" the callback will be called for every event;
        the argument of a callback is the content:

            def my_callback(content):
                print(content)
            Client.add_event_callback("should print content", my_callback)
        """
        delegate = lambda x: handler(x)
        self.event_callbacks[unicoding(event)].append(delegate)
    def _auth_protocoll(self, addr, conn):
        """
        definition of the authentification protocoll: start_auth,
        challenge_auth, auth_final
        """
        global SHUTDOWN
        ############################################################
        # step 1: start_auth event
        self.in_auth_process.append(conn)
        self._send(conn, "start_auth", "")
        data = self.wait_for_answer(conn)
        if data:
            uid = data["content"]
        else:
            cleanup(addr, conn)
            return
        ############################################################
        # step 2: hand out the challenge and receive signature
        challenge = self.auth_challenge(uid=uid)
        self._send(conn, "challenge_auth", challenge) #TODO introduce an AUTH object with challenge creation
        data = self.wait_for_answer(conn) #TODO define timeout!!!
        if data:
            signature = data["content"]
            event = data["event"]
        else:
            cleanup(addr, conn)
            return
        if not event == "signature":
            cleanup(addr, conn)
            return
        ############################################################
        # step 3: check the signature and send a result to the client
        if self.auth_final(uid=uid, signature=signature, challenge=challenge):
            connection_auth_uid.update({uid: addr})
            connection_auth_addr.update({addr: uid})
            palace.update({uid: [uid]}) #create a room with the uid as name
            self._send(conn, "auth_final", "success")
            self.in_auth_process.remove(conn)
            self._flush()
        else:
            self._send(conn, "auth_final", "failed")
            self.in_auth_process.remove(conn)
            cleanup(addr, conn)
| from __future__ import print_function
from __future__ import absolute_import
from .nucos23 import ispython3
if ispython3:
import socketserver
import queue
else:
import SocketServer as socketserver
import Queue as queue
from threading import Thread
import time
import copy
import socket
from inspect import isclass, ismethod
from collections import defaultdict
from .nucosLogger import Logger
from .nucosMessage import NucosIncomingMessage, NucosOutgoingMessage, SocketArray, EOM, unicoding
logger = Logger('nucosServer', ["clientip","user"])
logger.format('[%(asctime)-15s] %(name)-8s %(levelname)-7s %(clientip)s %(user)s -- %(message)s')
logger.level("DEBUG")
connection_sid = {}
connection_auth_uid = {}
connection_auth_addr = {}
#palace = {}
on_disconnect = [] #disconnect-handler
on_connect = [] #connect-handler
on_receive = [] #receive-handler
on_shutdown = []
AUTH = None
ON_CLIENTEVENT = None
SERVE_FOREVER = True
SHUTDOWN = False
TIMEOUT = 300.0
palace = defaultdict(list)
queue = queue.Queue()
from .nucosQueue import NucosQueue
t_auth = None
def cleanup(addr, conn, close=True):
"""
cleans all traces of connection data in the globals
close the socket if close-flag is True, otherwise not
"""
uid = ""
if addr in connection_auth_addr.keys():
uid = connection_auth_addr[addr]
logger.log(msg= 'Cleanup', clientip=addr, user=uid)
if close:
conn.close()
connection_sid.pop(addr)
#except:
# pass
try:
palace.pop(uid)
except:
pass
try:
connection_auth_addr.pop(addr)
connection_auth_uid.pop(uid)
except:
pass
#TODO remove singular rooms
#print(connection_sid, connection_auth, palace)
return
answer_stack = defaultdict(list)
class ServerHandler(socketserver.BaseRequestHandler):
"""
The server handler class
"""
no_auth = False
def handle(self):
global AUTH, t_auth
conn = self.request
conn.settimeout(TIMEOUT) #longest possible open connection without any message
addr = self.client_address
logger.log(msg= 'Incoming connection', clientip=addr)
connection_sid.update({addr:conn}) #append the socket connection
if AUTH:
t_auth = Thread(target=self.authenticate, args=(addr,conn))
t_auth.daemon = True
t_auth.start()
else:
self.no_auth = True
fullData = SocketArray()
while True:
try:
receivedData = SocketArray(conn.recv(1024))
except socket.timeout:
logger.log(lvl="WARNING", msg="server socket timeout")
receivedData = SocketArray.empty()
except socket.error as ex:
logger.log(lvl="WARNING", msg="server socket error %s"%ex)
receivedData = SocketArray.empty()
break
####
# kill server logic:
if not queue.empty():
msg = queue.get()
else:
msg = ""
if msg=="kill-server":
logger.log(lvl="DEBUG", msg="connection killed")
if connection_sid: #kill all other threads in subsequence
queue.put("kill-server")
break
####
if receivedData:
fullData = fullData.ext(receivedData)
if len(receivedData) == 1024:
logger.log(lvl="DEBUG", msg="max length 1024 %s"%receivedData)
if not fullData.endswith(EOM):
logger.log(lvl="DEBUG", msg="continue listening")
continue
logger.log(lvl="DEBUG", msg="received package of length %i" % len(receivedData))
logger.log(lvl="DEBUG", msg="payload: %s"%receivedData)
if addr not in connection_auth_addr.keys() and not self.no_auth: #only for not authenticated clients put the data in the wait-stack
answer_stack[conn].append(fullData)
fullData = SocketArray()
continue
if ON_CLIENTEVENT:
ON_CLIENTEVENT(addr, fullData)
fullData = SocketArray()
continue
else:
if addr in connection_sid.keys():
cleanup(addr, conn, close=True) #close or not close ???? why ?
logger.log(lvl="DEBUG", msg="stop this connection now")
break
def authenticate(self, addr, conn):
logger.log(msg='Start auth-process')
AUTH(addr, conn)
return
class ThreadingTCPServer(socketserver.ThreadingTCPServer):
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
class SingleConnectionServer():
"""
A single connection Server: accepts only one connection
"""
def __init__(self, IP_PORT, udp=False):
if udp:
socktype = socket.SOCK_DGRAM
else:
socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_INET, socktype)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.IP_PORT = IP_PORT
self.no_auth = False
self.udp = udp
def serve_forever(self):
global AUTH, ON_CLIENTEVENT
try:
self.socket.bind(self.IP_PORT)
except socket.error as ex:
logger.log(lvl="DEBUG", msg="single server socket exception %s"%ex)
self.socket.close()
raise Exception
if not self.udp:
self.socket.listen(1)
(conn, addr) = self.socket.accept()
logger.log(msg= 'Incoming connection (single-server)', clientip=addr)
connection_sid.update({addr:conn}) #append the socket connection
if AUTH:
t = Thread(target=self.authenticate, args=(addr,conn))
t.daemon = True
t.start()
else:
self.no_auth = True
fullData = SocketArray()
logger.log(lvl="DEBUG", msg="start listening")
while True:
try:
if not self.udp:
receivedData = SocketArray(conn.recv(1024))
else:
receivedData = self.socket.recvfrom(1024)
except socket.timeout:
logger.log(lvl="WARNING", msg="server socket timeout")
receivedData = SocketArray.empty()
except socket.error as ex:
logger.log(lvl="WARNING", msg="server socket error %s"%ex)
receivedData = SocketArray.empty()
raise Exception
if not queue.empty():
msg = queue.get()
else:
msg = ""
if msg=="kill-server":
logger.log(lvl="DEBUG", msg="single server killed")
break
if receivedData:
if self.udp:
receivedData = receivedData[0]
addr = receivedData[1]
logger.log(lvl="DEBUG", msg="message received %s"%receivedData)
fullData = fullData.ext(receivedData)
if len(receivedData) == 1024:
logger.log(lvl="DEBUG", msg="max length 1024 %s"%receivedData)
if not fullData.endswith(EOM):
logger.log(lvl="DEBUG", msg="continue listening")
continue
logger.log(lvl="DEBUG", msg="received package of length %i" % len(receivedData))
logger.log(lvl="DEBUG", msg="payload: %s"%receivedData)
if addr not in connection_auth_addr.keys() and not self.no_auth: #only for not authenticated clients put the data in the wait-stack
answer_stack[conn].append(receivedData)
fullData = SocketArray()
continue
if ON_CLIENTEVENT:
ON_CLIENTEVENT(addr, fullData)
fullData = SocketArray()
continue
else:
cleanup(addr, conn, close=True)
logger.log(lvl="DEBUG", msg="stop single-server now")
break
def authenticate(self, addr, conn):
logger.log(msg='Start auth-process')
AUTH(addr, conn)
return
def shutdown(self):
pass
def server_close(self):
pass
class NucosServer():
"""
base NuCOS socket class on server side
implements protocol on top of tcp/ip socket
accepts either one or many clients (depends on single_server flag) and starts them in individual threads.
do_auth is a function handler which accepts 3 arguments: uid, signature, challenge
"""
def __init__(self,IP,PORT, do_auth=None, single_server=False, timeout=300.0, udp=False):
global AUTH, ON_CLIENTEVENT
self.logger = logger
self.auth_final = None
self.IP = IP
self.PORT = PORT
self.in_auth_process = []
self.send_later = []
self.queue = NucosQueue()
self.shutdown_process = []
self.udp = udp
if isclass(do_auth):
AUTH = self._auth_protocoll
self.do_auth_obj = do_auth()
if ismethod(self.do_auth_obj.auth_final):
self.auth_final = self.do_auth_obj.auth_final
else:
raise Exception("auth class has no auth_final")
if ismethod(self.do_auth_obj.auth_challenge):
self.auth_challenge = self.do_auth_obj.auth_challenge
else:
raise Exception("auth class has no auth_challenge")
elif do_auth is None:
self.logger.log(lvl="INFO", msg="no auth selected")
AUTH = None
else:
raise Exception("only class as do_auth accepted")
self.single_server = single_server
if udp:
self.single_server = True
if not self.single_server:
self.srv = ThreadingTCPServer((IP, PORT), ServerHandler)
else:
self.srv = SingleConnectionServer((IP,PORT), udp=self.udp)
ON_CLIENTEVENT = lambda u,x: self._on_clientEvent(u,x)
TIMEOUT = timeout
self.auth_status = {}
self.event_callbacks = defaultdict(list)
def getsockname(self):
return self.srv.socket.getsockname()
def _reinitialize(self):
"""
re-initialize a killed server
"""
while not queue.empty():
queue.get()
self.auth_status = {}
self.shutdown_process = []
self.logger.log(lvl="DEBUG", msg="reinitialize the server")
if not self.single_server:
self.srv = ThreadingTCPServer((self.IP, self.PORT), ServerHandler)
else:
self.srv = SingleConnectionServer((self.IP,self.PORT))
def start(self):
"""
start a non-blocking server
"""
self.logger.log(lvl="INFO", msg="... try to start server")
t = Thread(target=self.srv.serve_forever)
t.daemon = True
t.start()
time.sleep(0.2) #startup time for server
def is_connected(self, conn):
return conn in connection_sid.values()
def ping(self, conn):
"""
send a ping event and wait for a pong (blocking call, since it expects the answer right away)
"""
start_time = time.time()
while conn in self.in_auth_process:
tau = time.time()-start_time
time.sleep(0.1)
if tau > self.ping_timeout:
return False
self.logger.log(lvl="INFO", msg="send a ping, expects a pong")
self.send(conn, "ping", "")
self.queue.put_topic("ping-server","wait")
msg = self.queue.get_topic("pong-server", timeout=5.0)
if msg == "done":
return True
else:
return False
def send(self, conn, event, content, room=''):
"""
the send command for a given connection conn, all other send commands must call send to prevent auth-protocoll confusion
"""
logger.log(lvl="DEBUG", msg="send via conn: %s | %s | %s"%(conn, event, content))
if conn in self.in_auth_process:
self.send_later.append((conn, event,content))
self.logger.log(lvl="WARNING", msg="no send during auth: %s %s %s"%(conn, event,content))
return True
return self._send(conn, event, content, room)
def _send(self, conn, event, content, room=''):
"""
finalize the send process
"""
if self.udp:
return
self.logger.log(lvl="DEBUG", msg="try to do _send: %s %s %s"%(conn, event,content))
if not room:
data = { "event":event, "content":content }
else:
data = { "event":event, "content":content, "room":room }
message = NucosOutgoingMessage(data)
payload,error = message.payload()
if error:
logerror = "outgoing msg error e: %s pl: %s type(pl): %s"%(error,payload,type(payload))
self.logger.log(lvl="ERROR",msg=logerror)
#raise Exception(logerror)
return False
try:
conn.send(payload)
return True
except socket.error as ex:
self.logger.log(lvl="ERROR",msg="socket error during send-process %s %s %s"%(ex, conn, connection_sid))
return False
def _flush(self):
"""
send all pre-processed send commands during auth process
"""
for conn,event,content in self.send_later:
self.send(conn,event,content)
self.send_later = []
def send_all(self, event, content):
"""
send a message to all connected clients
"""
if connection_sid:
for addr, conn in connection_sid.items():
self.send(conn, event, content)
def publish(self, room, event, content):
"""
send a message to all clients in a room
"""
#conn = self.get_conn(room)
self.wait_for_auth()
logger.log(lvl="DEBUG", msg="send in room: %s | %s | %s"%(room,event,content))
for _room, uids in palace.items():
if _room == room:
for uid in uids:
addr = connection_auth_uid[uid]
conn = connection_sid[addr]
self.send(conn, event, content, room)
def join_room(self, room, uid):
"""
append a user to a room, if uid is not anonymous and the desired room is not one of the other users (they should stay private)
"""
if not uid=="anonymous" and not room in connection_auth_uid:
logger.log(lvl="DEBUG", msg="user %s entered room %s"%(uid,room))
palace[room].append(uid)
def _on_clientEvent(self, addr, payload):
"""
for every client event this function is called
internal events:
----------------
shutdown
ping
pong
"""
if addr in connection_auth_addr.keys():
uid = connection_auth_addr[addr]
else:
uid = "anonymous"
incoming = NucosIncomingMessage(payload)
msgs, error = incoming.msgs()
if error:
logger.log(lvl="WARNING", msg="error in incoming message: %s"%error)
for msg in msgs:
event = unicoding(msg["event"])
content = unicoding(msg["content"])
if 'room' in msg.keys():
room = msg["room"]
else:
room = ''
logger.log(lvl="INFO", msg="incoming clientEvent: %s | %s | %s"%(event,content,room), user=uid)
if self.udp:
for _event, funcs in self.event_callbacks.items():
if _event == "all":
for f in funcs:
f(content)
if _event == event:
for f in funcs:
f(content)
else:
continue
return
if room:
self.publish(room,event,content)
return
if event == "shutdown":
self.send(connection_sid[addr], "shutdown", "confirmed")
self.shutdown_process.append(uid)
elif event == "ping":
self.send(connection_sid[addr], "pong", "")
elif event == "pong":
msg = self.queue.get_topic("ping-server", timeout=10.0)
if not msg == "wait":
self.logger.log(lvl="ERROR", msg="pong received no ping send %s"%msg)
self.logger.log(lvl="INFO", msg="pong received")
self.queue.put_topic("pong-server", "done")
elif event == "subscripe":
self.join_room(content, uid)
else:
for _event, funcs in self.event_callbacks.items():
if _event == "all":
for f in funcs:
f(content)
if _event == event:
for f in funcs:
f(content)
else:
continue
def close(self):
queue.put("kill-server")
logger.log(lvl="WARNING", msg="server is forced to shut-down now")
cosid = copy.copy(connection_sid)
for addr,conn in cosid.items():
#gracefully:
self.send(conn, "shutdown", "now")
time.sleep(0.1)
cleanup(addr, conn)
self.srv.shutdown()
self.srv.server_close()
self._reinitialize()
def wait_for_auth(self):
start_time = time.time()
while True:
if connection_auth_uid:
return
else:
tau = time.time() - start_time
if tau > 5:
return
else:
time.sleep(0.1)
def get_conn(self, uid):
#uid = unicoding(uid)
start_time = time.time()
while True:
if uid in self.shutdown_process:
break
#print(connection_sid, connection_auth_uid,uid)
if uid in connection_auth_uid.keys():
return connection_sid[connection_auth_uid[uid]]
elif uid == "anonymous": #take the first which is connected
if connection_sid:
#print (connection_sid)
return list(connection_sid.values())[0]
else:
tau = time.time() - start_time
if tau > 5:
return None
else:
time.sleep(0.1)
def wait_for_answer(self, conn):
"""
blocking call for waiting for a client answer, which is connected via conn
"""
start_time = time.time()
while True:
tau = time.time()-start_time
if tau > 1.0:
logger.log(lvl="WARNING", msg="auth failed")
return
if answer_stack[conn]:
payload = answer_stack[conn].pop(0)
incoming = NucosIncomingMessage(payload)
msgs, error = incoming.msgs()
if error:
logger.log("incoming message error: %i"%error)
return None
#logger.log("from wait-loop: %s"%(msgs,))
if msgs:
return msgs[0]
def add_event_callback(self, event, handler):
"""
adds an external function or method as a callback for an incoming event
if event is "all" the callback will be called for every event
the argument of an callback is the content
def my_callback(content):
print(content)
Client.add_event_callback("should print content",my_callback)
"""
delegate = lambda x: handler(x)
self.event_callbacks[unicoding(event)].append(delegate)
def _auth_protocoll(self, addr, conn):
"""
definition of the authentification protocoll: start_auth, challenge_auth, auth_final
"""
global SHUTDOWN
############################################################
# step 1: start_auth event
self.in_auth_process.append(conn)
self._send(conn, "start_auth", "")
data = self.wait_for_answer(conn)
if data:
uid = data["content"]
else:
cleanup(addr,conn)
return
############################################################
# step 2: hand out the challenge and receive signature
challenge = self.auth_challenge(uid=uid)
self._send(conn, "challenge_auth", challenge) #TODO introduce an AUTH object with challenge creation
data = self.wait_for_answer(conn) #TODO define timeout!!!
if data:
signature = data["content"]
event = data["event"]
else:
cleanup(addr,conn)
return
if not event == "signature":
cleanup(addr,conn)
return
#if queue.get() == "kill-auth":
# #print("kill-auth")
# cleanup(addr,conn)
# return
############################################################
# step 3: check the signature and send a result to the client
if self.auth_final(uid=uid, signature=signature, challenge=challenge):
connection_auth_uid.update({uid:addr})
connection_auth_addr.update({addr:uid})
palace.update({uid:[uid]}) #create a room with the uid as name
self._send(conn, "auth_final", "success")
self.in_auth_process.remove(conn)
self._flush()
else:
self._send(conn, "auth_final", "failed")
self.in_auth_process.remove(conn)
cleanup(addr,conn)
#self.srv.server_close()
#self.srv.shutdown()
| en | 0.7116 | #palace = {} #disconnect-handler #connect-handler #receive-handler cleans all traces of connection data in the globals close the socket if close-flag is True, otherwise not #except: # pass #TODO remove singular rooms #print(connection_sid, connection_auth, palace) The server handler class #longest possible open connection without any message #append the socket connection #### # kill server logic: #kill all other threads in subsequence #### #only for not authenticated clients put the data in the wait-stack #close or not close ???? why ? A single connection Server: accepts only one connection #append the socket connection #only for not authenticated clients put the data in the wait-stack base NuCOS socket class on server side implements protocol on top of tcp/ip socket accepts either one or many clients (depends on single_server flag) and starts them in individual threads. do_auth is a function handler which accepts 3 arguments: uid, signature, challenge re-initialize a killed server start a non-blocking server #startup time for server send a ping event and wait for a pong (blocking call, since it expects the answer right away) the send command for a given connection conn, all other send commands must call send to prevent auth-protocoll confusion finalize the send process #raise Exception(logerror) send all pre-processed send commands during auth process send a message to all connected clients send a message to all clients in a room #conn = self.get_conn(room) append a user to a room, if uid is not anonymous and the desired room is not one of the other users (they should stay private) for every client event this function is called internal events: ---------------- shutdown ping pong #gracefully: #uid = unicoding(uid) #print(connection_sid, connection_auth_uid,uid) #take the first which is connected #print (connection_sid) blocking call for waiting for a client answer, which is connected via conn #logger.log("from wait-loop: %s"%(msgs,)) adds an 
external function or method as a callback for an incoming event if event is "all" the callback will be called for every event the argument of an callback is the content def my_callback(content): print(content) Client.add_event_callback("should print content",my_callback) definition of the authentification protocoll: start_auth, challenge_auth, auth_final ############################################################ # step 1: start_auth event ############################################################ # step 2: hand out the challenge and receive signature #TODO introduce an AUTH object with challenge creation #TODO define timeout!!! #if queue.get() == "kill-auth": # #print("kill-auth") # cleanup(addr,conn) # return ############################################################ # step 3: check the signature and send a result to the client #create a room with the uid as name #self.srv.server_close() #self.srv.shutdown() | 1.957857 | 2 |
tdc3/models/py/data/Util.py | TDC3Tool/TDC3 | 0 | 6622239 | from os import listdir
from os.path import isfile, join
from torch.utils.data import random_split
def json_files_in_dir(dir):
    """Return the paths of all regular ``*.json`` files directly inside *dir*.

    Non-recursive; results keep ``listdir`` order and are joined with *dir*.
    """
    json_paths = []
    for entry in listdir(dir):
        full_path = join(dir, entry)
        if entry.endswith(".json") and isfile(full_path):
            json_paths.append(full_path)
    return json_paths
def split_dataset(dataset, train_perc):
    """Randomly split *dataset* into training and validation subsets.

    train_perc: fraction (0..1) of samples assigned to training; the
    remainder goes to validation.  Returns the pair
    (train_dataset, validate_dataset) as ``torch.utils.data.Subset``s.
    """
    print("Splitting data into training and validation sets")
    n_total = len(dataset)
    n_train = int(train_perc * n_total)
    lengths = [n_train, n_total - n_train]
    train_dataset, validate_dataset = random_split(dataset, lengths=lengths)
    print(f"{len(train_dataset)} training samples, {len(validate_dataset)} validation samples")
    return train_dataset, validate_dataset
| from os import listdir
from os.path import isfile, join
from torch.utils.data import random_split
def json_files_in_dir(dir):
    """Return paths of all regular ``*.json`` files directly in *dir*.

    Non-recursive; order follows ``listdir``.
    NOTE(review): the parameter name shadows the ``dir`` builtin.
    """
    return [join(dir, f) for f in listdir(
        dir) if isfile(join(dir, f)) and f.endswith(".json")]
def split_dataset(dataset, train_perc):
    """Randomly split *dataset* into train/validation subsets.

    train_perc: fraction (0..1) of samples used for training; the rest
    form the validation set.  Returns (train_dataset, validate_dataset)
    as ``torch.utils.data.Subset`` objects.
    NOTE(review): no generator/seed is passed to ``random_split``, so
    the split is not reproducible across runs — confirm that is intended.
    """
    print("Splitting data into training and validation sets")
    train_size = int(train_perc*len(dataset))
    validate_size = len(dataset) - train_size
    train_dataset, validate_dataset = random_split(
        dataset, lengths=[train_size, validate_size])
    print(f"{len(train_dataset)} training samples, {len(validate_dataset)} validation samples")
    return train_dataset, validate_dataset
| none | 1 | 2.747796 | 3 | |
src/Crawler.py | dubzzz/py-linkedin-crawler | 0 | 6622240 | <reponame>dubzzz/py-linkedin-crawler<filename>src/Crawler.py
import sys
import json
import re
import requests
from collections import deque
class Crawler:
    """Breadth-first LinkedIn connection-graph crawler.

    Signs in with a real account, then repeatedly visits queued profile
    ids, scrapes the JSON objects embedded in each profile page, and
    pulls the profile's connection list to enqueue new profiles.
    Profiles are matched against user-supplied "target" and "condition"
    objects (see the add_* methods).

    NOTE(review): this is Python 2 code (print statements,
    ``except X, e`` syntax, ``unicode``) and it scrapes LinkedIn
    internal endpoints that may no longer exist — confirm before reuse.
    """
    # Static attributes
    # URL template of a profile page, and of the paginated JSON endpoint
    # listing a profile's connections (CONTACTS_PER_PROFILE per page).
    PROFILE_URL = "https://www.linkedin.com/profile/view?id={id}"
    CONTACTS_PER_PROFILE = 10
    PROFILE_CONTACTS = "https://www.linkedin.com/profile/profile-v2-connections?id={id}&offset={offset}&count={per_profile}&distance=0&type=INITIAL"
    def __init__(self, login, password):
        """Sign in to LinkedIn and initialise the crawl state.

        login/password: account credentials; they are POSTed together
        with the hidden form fields scraped from the sign-in page.
        Raises Exception when the sign-in form cannot be located.
        """
        # Open login page in order to avoid CSRF problems
        print("Opening sign in page...")
        login_page_info = requests.get("https://www.linkedin.com/uas/login?goback=&trk=hb_signin")
        login_page = login_page_info.text.replace("\n", " ")
        # Find the form
        m = re.search(r"<form action=\"https://www.linkedin.com/uas/login-submit\" method=\"POST\" name=\"login\" novalidate=\"novalidate\" id=\"login\" class=\"ajax-form\" data-jsenabled=\"check\">(?P<content>.*)</form>", login_page)
        if not m:
            raise Exception("Missing login form")
        inputs = re.findall(r"<input [^>]*name=\"(?P<name>[^\"]*)\" [^>]*value=\"(?P<value>[^\"]*)\"[^>]*>", m.group(1))
        # Find relevant fields details
        # (carry over every hidden input so CSRF tokens are preserved)
        values = dict()
        for input_field in inputs:
            name = input_field[0]
            value = input_field[1]
            values[name] = value
        # Add login/password in the fields
        values["session_key"] = login
        values["session_password"] = password
        # Log in
        print "\nSigning in..."
        login_info = requests.post("https://www.linkedin.com/uas/login-submit", params=values, cookies=login_page_info.cookies)
        # Save cookies for next calls
        self.cookies = login_info.cookies
        # Crawl state: ids ever queued, ids already visited, and the
        # FIFO queue of profile dicts still to be visited.
        self.already_asked = set()
        self.already_tested = set()
        self.to_be_tested = deque()
        # User-supplied filter/target objects (see the add_* methods).
        self.crawl_from_connections_conditions = list()
        self.targets_full_profile = list()
        self.targets_short_profile = list()
    def add_to_be_tested(self, profile_details):
        """
        Add a profile in self.to_be_tested
        Perform checks before adding anything
        Returns True when the profile was queued, False when it was
        already seen or rejected by a crawl-from-connections condition.
        """
        if profile_details["id"] not in self.already_asked:
            # Check Crawl from connections conditions
            for condition in self.crawl_from_connections_conditions:
                if not condition.is_crawlable(profile_details):
                    return False
            # Update targets
            for target in self.targets_short_profile:
                target.check_if_targeted(profile_details)
            # This profile is correct, and added to 'to_be_tested' queue
            print "\t\t>", profile_details["details"]
            self.already_asked.add(profile_details["id"])
            self.to_be_tested.append(profile_details)
            return True
        else:
            return False
    def add(self, profile_id):
        """
        Add a profile in self.to_be_tested
        Perform checks before adding anything
        Convenience wrapper around add_to_be_tested for a bare id
        (details unknown, crawl depth 0).
        """
        return self.add_to_be_tested({"id": int(profile_id), "details": "N.A.", "depth": 0})
    def add_crawl_from_connections(self, condition):
        """
        Add a condition to verify when adding new profile to be crawled for data
        eg.: you only want to deal with profiles from company X
        eg.: you only want to deal with profiles of people with an A in their full name
        /!\ does not apply to profiles already in self.to_be_tested
        """
        self.crawl_from_connections_conditions.append(condition)
    def add_target_full_profile(self, target):
        """
        You are looking for someone you met on a fair. You know the company, the first name.
        Try to find its LinkedIn profile with this feature
        full profile requires to go on the person's profile
        """
        self.targets_full_profile.append(target)
    def add_target_short_profile(self, target):
        """
        Same as add_target_full_profile
        Does not need full profile but just some details: headline, fullname..
        """
        self.targets_short_profile.append(target)
    def has_next(self):
        """ Return True if it has at least one remaining profile id in self.to_be_tested """
        return True if self.to_be_tested else False
    def has_found_targets_full_profile(self):
        # True only when every registered full-profile target reports found.
        for target in self.targets_full_profile:
            if not target.has_found_target():
                return False
        return True
    def has_found_targets_short_profile(self):
        # True only when every registered short-profile target reports found.
        for target in self.targets_short_profile:
            if not target.has_found_target():
                return False
        return True
    def get_targets_full_profile(self):
        return self.targets_full_profile
    def get_targets_short_profile(self):
        return self.targets_short_profile
    def visit_next(self):
        """ Crawl the webpages corresponding to the next profile """
        # NOTE(review): prints the new-contact count but returns None.
        new_contacts = 0
        # Visit profile webpage
        # Visited profile should receive a notification
        # Get id
        current = self.to_be_tested.popleft() # Remove in chronological order
        print "\n[%d/%d] Scanning %s - %s..." % (len(self.already_tested)+1, len(self.already_asked), current["id"], current["details"])
        self.already_tested.add(current["id"])
        # HTTP request and update cookies for next calls
        print "\tOpening profile: %s" % Crawler.PROFILE_URL.format(id=current["id"])
        contact_profile_info = requests.get(Crawler.PROFILE_URL.format(id=current["id"]), cookies=self.cookies)
        self.cookies = contact_profile_info.cookies
        # Retrieve profile details
        current = self.get_profile_details(current, contact_profile_info)
        # Update targets
        for target in self.targets_full_profile:
            target.check_if_targeted(current)
        # Retrive its contacts from JSON files
        new_contacts += self.get_next_contacts(current)
        print "\t%d new contacts" % new_contacts
    def get_profile_details(self, current, profile_webpage):
        """Enrich *current* with details scraped from its profile page.

        Scans every brace-delimited JSON fragment embedded in the HTML;
        the fragment whose "memberID" equals current["id"] contributes
        its fields (without overwriting existing keys).  Work/education
        entries are only printed, not stored.  Returns the mutated dict.
        """
        # Find and analyse every json data included into the profile webpage
        # it contains data concerning current user details, endorsers..
        #with open("profile.html", "w+") as f:
        #    f.write(profile_webpage.text.encode("utf-8"))
        # NOTE(review): the char class [^}^{] also excludes '^'; likely
        # [^{}] was intended — confirm before changing.
        jsons_current_info = re.findall(r"(?P<json>\{[^}^{]*\})", profile_webpage.text.encode("utf-8"))
        json_objects = list()
        for js_current in jsons_current_info:
            try:
                json_objects.append(json.loads(js_current))
            except ValueError, e: # Invalid syntax
                #print "\tERROR > JSON from profile: Invalid syntax"
                continue
        del jsons_current_info
        # More user details
        for js_tmp in json_objects:
            # Check if the current JSON contains an user id
            try:
                memberID = int(js_tmp["memberID"])
            except KeyError:
                continue
            except ValueError: # for int(.)
                continue
            except TypeError: # for int(.)
                continue
            # Check if this user id is the one in current
            if memberID != current["id"]:
                continue
            # Add details to current user
            for key, value in js_tmp.items():
                if key not in current:
                    current[key] = value
                #print "\t- %s: %s" % (unicode(key), unicode(value))
        # Companies and Schools
        for js_tmp in json_objects:
            if "title_highlight" in js_tmp and "companyName" in js_tmp:
                if "startdate_my" in js_tmp:
                    if "enddate_my" in js_tmp:
                        print "\t> Worked as '%s' for '%s', from %s until %s" % (js_tmp["title_highlight"], js_tmp["companyName"], js_tmp["startdate_my"], js_tmp["enddate_my"])
                    else:
                        print "\t> Worked as '%s' for '%s', from %s until <undefined>" % (js_tmp["title_highlight"], js_tmp["companyName"], js_tmp["startdate_my"])
                else:
                    print "\t> Worked as '%s' for '%s'" % (js_tmp["title_highlight"], js_tmp["companyName"])
            elif "educationId" in js_tmp and "schoolName" in js_tmp:
                print "\t> Studied at %s" % js_tmp["schoolName"]
        try:
            print "\tScanning <%s> profile" % current["fullname"]
        except KeyError:
            pass
        return current
    def get_next_contacts(self, current):
        """
        Retrieve contacts for current from LinkedIn JSON files
        Called by visit_next()
        Pages through PROFILE_CONTACTS until a short page is returned;
        returns the number of contacts newly queued.
        """
        offset = 0
        new_contacts = 0
        # NOTE(review): current_contacts is never used below.
        current_contacts = []
        num_contacts_in_last_query = Crawler.CONTACTS_PER_PROFILE
        # A page shorter than CONTACTS_PER_PROFILE means the last page.
        while num_contacts_in_last_query == Crawler.CONTACTS_PER_PROFILE:
            # HTTP request and update cookies for next calls
            print "\tGetting contacts list: %s" % Crawler.PROFILE_CONTACTS.format(id=current["id"], per_profile=Crawler.CONTACTS_PER_PROFILE, offset=offset)
            contact_contacts_info = requests.get(Crawler.PROFILE_CONTACTS.format(id=current["id"], per_profile=Crawler.CONTACTS_PER_PROFILE, offset=offset), cookies=self.cookies)
            self.cookies = contact_contacts_info.cookies
            # Update offset
            offset += Crawler.CONTACTS_PER_PROFILE
            print "\tParsing data"
            json_content = json.loads(contact_contacts_info.text.replace("\\\"", "")) # Quick trick to avoid problems with "
            try:
                possible_new_contacts = json_content["content"]["connections"]["connections"]
            except KeyError, e:
                print "\tERROR > JSON file: no such content.connections.connections"
                #print "\tERROR > %s" % contact_contacts_info.text.encode('utf-8')
                break
            except ValueError, e:
                print "\tERROR > JSON file: no such content.connections.connections"
                #print "\tERROR > %s" % contact_contacts_info.text.encode('utf-8')
                break
            num_contacts_in_last_query = len(possible_new_contacts)
            for sub_contact in possible_new_contacts:
                # Get data from relevant fields
                # On failure: continue to next contact
                try:
                    headline = unicode(sub_contact["headline"]) # JSON can output: integers, None, strings, doubles..
                    memberID = int(sub_contact["memberID"])
                    distance = int(sub_contact["distance"])
                    full_name = unicode(sub_contact["fmt__full_name"])
                except KeyError, e:
                    print "\tERROR > JSON file: contact details - %s" % e
                    #print "\tERROR > %s" % sub_contact.encode('utf-8')
                    continue
                except ValueError, e:
                    print "\tERROR > JSON file: contact details - %s" % e
                    #print "\tERROR > %s" % sub_contact.encode('utf-8')
                    continue
                except TypeError, e:
                    print "\tERROR > JSON file: contact details - %s" % e
                    #print "\tERROR > %s" % sub_contact.encode('utf-8')
                    continue
                # Try to add the contact to the list to be tested
                if self.add_to_be_tested({"id": memberID, "details": "%s [%s][distance=%d]" % (full_name, headline.lower(), distance), "fullname": full_name, "headline": headline, "depth": current["depth"] +1}):
                    new_contacts += 1
        return new_contacts
| import sys
import json
import re
import requests
from collections import deque
class Crawler:
    """LinkedIn connection-graph crawler (legacy Python 2 code).

    Signs in with the given credentials, then walks the connection graph
    breadth-first: profiles queued in ``to_be_tested`` are visited one by
    one, their details scraped from JSON embedded in the profile page, and
    their connections enqueued (subject to the registered crawl conditions
    and matched against the registered target filters).

    NOTE(review): uses Python 2-only syntax (``print`` statements,
    ``except E, e``, ``unicode``) -- must run under Python 2.
    """

    # Static attributes
    PROFILE_URL = "https://www.linkedin.com/profile/view?id={id}"
    CONTACTS_PER_PROFILE = 10  # page size used when paging through a profile's connections
    PROFILE_CONTACTS = "https://www.linkedin.com/profile/profile-v2-connections?id={id}&offset={offset}&count={per_profile}&distance=0&type=INITIAL"

    def __init__(self, login, password):
        """Sign in to LinkedIn and initialise crawler state.

        :param login: LinkedIn account login (session_key form field)
        :param password: LinkedIn account password (session_password form field)
        """
        # Open login page in order to avoid CSRF problems
        print("Opening sign in page...")
        login_page_info = requests.get("https://www.linkedin.com/uas/login?goback=&trk=hb_signin")
        login_page = login_page_info.text.replace("\n", " ")
        # Find the form
        m = re.search(r"<form action=\"https://www.linkedin.com/uas/login-submit\" method=\"POST\" name=\"login\" novalidate=\"novalidate\" id=\"login\" class=\"ajax-form\" data-jsenabled=\"check\">(?P<content>.*)</form>", login_page)
        if not m:
            raise Exception("Missing login form")
        inputs = re.findall(r"<input [^>]*name=\"(?P<name>[^\"]*)\" [^>]*value=\"(?P<value>[^\"]*)\"[^>]*>", m.group(1))
        # Find relevant fields details
        values = dict()
        for input_field in inputs:
            name = input_field[0]
            value = input_field[1]
            values[name] = value
        # Add login/password in the fields
        values["session_key"] = login
        values["session_password"] = password
        # Log in
        print "\nSigning in..."
        login_info = requests.post("https://www.linkedin.com/uas/login-submit", params=values, cookies=login_page_info.cookies)
        # Save cookies for next calls
        self.cookies = login_info.cookies
        self.already_asked = set()    # ids that were ever enqueued (prevents re-queueing)
        self.already_tested = set()   # ids whose profile page has been visited
        self.to_be_tested = deque()   # FIFO queue of profile dicts still to visit
        self.crawl_from_connections_conditions = list()
        self.targets_full_profile = list()
        self.targets_short_profile = list()

    def add_to_be_tested(self, profile_details):
        """
        Add a profile in self.to_be_tested
        Perform checks before adding anything

        :param profile_details: dict with at least "id" and "details" keys
        :return: True if the profile was enqueued, False otherwise
        """
        if profile_details["id"] not in self.already_asked:
            # Check Crawl from connections conditions
            for condition in self.crawl_from_connections_conditions:
                if not condition.is_crawlable(profile_details):
                    return False
            # Update targets
            for target in self.targets_short_profile:
                target.check_if_targeted(profile_details)
            # This profile is correct, and added to 'to_be_tested' queue
            print "\t\t>", profile_details["details"]
            self.already_asked.add(profile_details["id"])
            self.to_be_tested.append(profile_details)
            return True
        else:
            return False

    def add(self, profile_id):
        """
        Add a profile in self.to_be_tested
        Perform checks before adding anything

        Used to seed the crawl with a starting profile id (depth 0).
        """
        return self.add_to_be_tested({"id": int(profile_id), "details": "N.A.", "depth": 0})

    def add_crawl_from_connections(self, condition):
        """
        Add a condition to verify when adding new profile to be crawled for data
        eg.: you only want to deal with profiles from company X
        eg.: you only want to deal with profiles of people with an A in their full name
        /!\ does not apply to profiles already in self.to_be_tested
        """
        self.crawl_from_connections_conditions.append(condition)

    def add_target_full_profile(self, target):
        """
        You are looking for someone you met on a fair. You know the company, the first name.
        Try to find its LinkedIn profile with this feature
        full profile requires to go on the person's profile
        """
        self.targets_full_profile.append(target)

    def add_target_short_profile(self, target):
        """
        Same as add_target_full_profile
        Does not need full profile but just some details: headline, fullname..
        """
        self.targets_short_profile.append(target)

    def has_next(self):
        """ Return True if it has at least one remaining profile id in self.to_be_tested """
        return True if self.to_be_tested else False

    def has_found_targets_full_profile(self):
        # True only when every registered full-profile target has been found
        for target in self.targets_full_profile:
            if not target.has_found_target():
                return False
        return True

    def has_found_targets_short_profile(self):
        # True only when every registered short-profile target has been found
        for target in self.targets_short_profile:
            if not target.has_found_target():
                return False
        return True

    def get_targets_full_profile(self):
        return self.targets_full_profile

    def get_targets_short_profile(self):
        return self.targets_short_profile

    def visit_next(self):
        """ Crawl the webpages corresponding to the next profile """
        new_contacts = 0
        # Visit profile webpage
        # Visited profile should receive a notification
        # Get id
        current = self.to_be_tested.popleft() # Remove in chronological order
        print "\n[%d/%d] Scanning %s - %s..." % (len(self.already_tested)+1, len(self.already_asked), current["id"], current["details"])
        self.already_tested.add(current["id"])
        # HTTP request and update cookies for next calls
        print "\tOpening profile: %s" % Crawler.PROFILE_URL.format(id=current["id"])
        contact_profile_info = requests.get(Crawler.PROFILE_URL.format(id=current["id"]), cookies=self.cookies)
        self.cookies = contact_profile_info.cookies
        # Retrieve profile details
        current = self.get_profile_details(current, contact_profile_info)
        # Update targets
        for target in self.targets_full_profile:
            target.check_if_targeted(current)
        # Retrive its contacts from JSON files
        new_contacts += self.get_next_contacts(current)
        print "\t%d new contacts" % new_contacts

    def get_profile_details(self, current, profile_webpage):
        """Enrich ``current`` with details scraped from the profile page.

        Extracts every brace-delimited JSON fragment embedded in the HTML,
        merges fields of the fragment whose memberID matches ``current``,
        and prints work/education history found in other fragments.
        """
        # Find and analyse every json data included into the profile webpage
        # it contains data concerning current user details, endorsers..
        #with open("profile.html", "w+") as f:
        #    f.write(profile_webpage.text.encode("utf-8"))
        jsons_current_info = re.findall(r"(?P<json>\{[^}^{]*\})", profile_webpage.text.encode("utf-8"))
        json_objects = list()
        for js_current in jsons_current_info:
            try:
                json_objects.append(json.loads(js_current))
            except ValueError, e: # Invalid syntax
                #print "\tERROR > JSON from profile: Invalid syntax"
                continue
        del jsons_current_info
        # More user details
        for js_tmp in json_objects:
            # Check if the current JSON contains an user id
            try:
                memberID = int(js_tmp["memberID"])
            except KeyError:
                continue
            except ValueError: # for int(.)
                continue
            except TypeError: # for int(.)
                continue
            # Check if this user id is the one in current
            if memberID != current["id"]:
                continue
            # Add details to current user
            for key, value in js_tmp.items():
                if key not in current:
                    current[key] = value
                #print "\t- %s: %s" % (unicode(key), unicode(value))
        # Companies and Schools
        for js_tmp in json_objects:
            if "title_highlight" in js_tmp and "companyName" in js_tmp:
                if "startdate_my" in js_tmp:
                    if "enddate_my" in js_tmp:
                        print "\t> Worked as '%s' for '%s', from %s until %s" % (js_tmp["title_highlight"], js_tmp["companyName"], js_tmp["startdate_my"], js_tmp["enddate_my"])
                    else:
                        print "\t> Worked as '%s' for '%s', from %s until <undefined>" % (js_tmp["title_highlight"], js_tmp["companyName"], js_tmp["startdate_my"])
                else:
                    print "\t> Worked as '%s' for '%s'" % (js_tmp["title_highlight"], js_tmp["companyName"])
            elif "educationId" in js_tmp and "schoolName" in js_tmp:
                print "\t> Studied at %s" % js_tmp["schoolName"]
        try:
            print "\tScanning <%s> profile" % current["fullname"]
        except KeyError:
            pass
        return current

    def get_next_contacts(self, current):
        """
        Retrieve contacts for current from LinkedIn JSON files
        Called by visit_next()

        Pages through the connections endpoint CONTACTS_PER_PROFILE at a
        time until a short (final) page is returned.

        :return: number of newly enqueued contacts
        """
        offset = 0
        new_contacts = 0
        current_contacts = []
        num_contacts_in_last_query = Crawler.CONTACTS_PER_PROFILE
        while num_contacts_in_last_query == Crawler.CONTACTS_PER_PROFILE:
            # HTTP request and update cookies for next calls
            print "\tGetting contacts list: %s" % Crawler.PROFILE_CONTACTS.format(id=current["id"], per_profile=Crawler.CONTACTS_PER_PROFILE, offset=offset)
            contact_contacts_info = requests.get(Crawler.PROFILE_CONTACTS.format(id=current["id"], per_profile=Crawler.CONTACTS_PER_PROFILE, offset=offset), cookies=self.cookies)
            self.cookies = contact_contacts_info.cookies
            # Update offset
            offset += Crawler.CONTACTS_PER_PROFILE
            print "\tParsing data"
            json_content = json.loads(contact_contacts_info.text.replace("\\\"", "")) # Quick trick to avoid problems with "
            try:
                possible_new_contacts = json_content["content"]["connections"]["connections"]
            except KeyError, e:
                print "\tERROR > JSON file: no such content.connections.connections"
                #print "\tERROR > %s" % contact_contacts_info.text.encode('utf-8')
                break
            except ValueError, e:
                print "\tERROR > JSON file: no such content.connections.connections"
                #print "\tERROR > %s" % contact_contacts_info.text.encode('utf-8')
                break
            num_contacts_in_last_query = len(possible_new_contacts)
            for sub_contact in possible_new_contacts:
                # Get data from relevant fields
                # On failure: continue to next contact
                try:
                    headline = unicode(sub_contact["headline"]) # JSON can output: integers, None, strings, doubles..
                    memberID = int(sub_contact["memberID"])
                    distance = int(sub_contact["distance"])
                    full_name = unicode(sub_contact["fmt__full_name"])
                except KeyError, e:
                    print "\tERROR > JSON file: contact details - %s" % e
                    #print "\tERROR > %s" % sub_contact.encode('utf-8')
                    continue
                except ValueError, e:
                    print "\tERROR > JSON file: contact details - %s" % e
                    #print "\tERROR > %s" % sub_contact.encode('utf-8')
                    continue
                except TypeError, e:
                    print "\tERROR > JSON file: contact details - %s" % e
                    #print "\tERROR > %s" % sub_contact.encode('utf-8')
                    continue
                # Try to add the contact to the list to be tested
                if self.add_to_be_tested({"id": memberID, "details": "%s [%s][distance=%d]" % (full_name, headline.lower(), distance), "fullname": full_name, "headline": headline, "depth": current["depth"] +1}):
                    new_contacts += 1
        return new_contacts
# Check if this user id is the one in current # Add details to current user #print "\t- %s: %s" % (unicode(key), unicode(value)) # Companies and Schools Retrieve contacts for current from LinkedIn JSON files Called by visit_next() # HTTP request and update cookies for next calls # Update offset # Quick trick to avoid problems with " #print "\tERROR > %s" % contact_contacts_info.text.encode('utf-8') #print "\tERROR > %s" % contact_contacts_info.text.encode('utf-8') # Get data from relevant fields # On failure: continue to next contact # JSON can output: integers, None, strings, doubles.. #print "\tERROR > %s" % sub_contact.encode('utf-8') #print "\tERROR > %s" % sub_contact.encode('utf-8') #print "\tERROR > %s" % sub_contact.encode('utf-8') # Try to add the contact to the list to be tested | 2.791581 | 3 |
ophelia/reactrole/dm_lock.py | zanzivyr/Ophelia | 2 | 6622241 | """
DM Queue Module.
Pylint's too few public methods is disabled here since we're not really
using DMLock for a purpose that something else might satisfy better,
such as dataclasses.
The implementation here can be hard to follow, and while there is a
variable inside asyncio.Lock that would help with this (_waiters), that
is a private attribute, and hence we have to make do with an additional
waiting room.
Also this file has more comments than actual code so that's fun.
"""
from asyncio import Lock
from typing import Any, Callable, Dict, Set
# Raised from inside a queued call to signal that every remaining queued
# call for the same key should be skipped (handled in DMLock.queue_call).
class AbortQueue(Exception):
    """When all queued calls are to be aborted."""
# pylint: disable=too-few-public-methods
class DMLock:
    """
    DM Lock Manager.

    To prevent users from receiving multiple DMs at the same time, this
    module queues DM role management tasks per user: calls queued under
    the same key run strictly one at a time, in FIFO order, with at most
    one extra caller parked in a per-key "waiting room" lock.
    """

    __slots__ = [
        "waiting",
        "executing",
        "aborted",
        "waiting_lock",
        "executing_lock"
    ]

    def __init__(self) -> None:
        """Initializer for the DMLock class."""
        self.waiting: Dict[int, Lock] = {}    # per-key waiting room locks
        self.executing: Dict[int, Lock] = {}  # per-key execution locks
        self.aborted: Set[int] = set()        # keys whose queued calls are cancelled
        self.waiting_lock = Lock()            # guards self.waiting
        self.executing_lock = Lock()          # guards self.executing

    async def queue_call(
            self,
            call: Callable,
            key: int,
            *args,
            **kwargs
    ) -> Any:
        """
        Enqueue an async call for a member.

        :param call: Callable to call once the key lock is free
        :param key: Key of FIFO queue
        :return: Whatever ``call`` returned, or None if the queue for
            this key was aborted before the call could run
        """
        returner = None

        # Check if the queue is in abort mode.
        if key in self.aborted:
            return returner

        # Obtain (or lazily create) this key's waiting room lock.
        async with self.waiting_lock:
            key_wait_lock = self.waiting.setdefault(key, Lock())

        # Enter the waiting room. If someone else is already waiting,
        # block here until they move on to execution (preserves order).
        await key_wait_lock.acquire()
        self_waiting = True

        try:
            # Obtain (or lazily create) this key's execution lock.
            async with self.executing_lock:
                key_execute_lock = self.executing.setdefault(key, Lock())

            # Start execution once the previous call for this key is done.
            async with key_execute_lock:
                # First thing during execution: release the waiting room
                # for the next caller in line.
                key_wait_lock.release()
                self_waiting = False

                # Re-check abort mode: it may have been set while waiting.
                if key not in self.aborted:
                    try:
                        returner = await call(*args, **kwargs)
                    except AbortQueue:
                        # The call asked us to drop everything still queued.
                        self.aborted.add(key)
        finally:
            # Exit the waiting room ourselves if we are still in it (e.g.
            # an error occurred before execution started). Never release
            # the lock on behalf of someone else.
            if key_wait_lock.locked() and self_waiting:
                key_wait_lock.release()

            # If the waiting room is empty, no one is queued behind us and
            # the per-key waiting lock can be removed.
            removed_waiting = False
            async with self.waiting_lock:
                if not key_wait_lock.locked():
                    # BUG FIX: dict.pop() is synchronous -- the original
                    # code awaited its result (an asyncio.Lock or None),
                    # which either re-acquired the just-released lock or
                    # raised TypeError on Python 3.9+.
                    self.waiting.pop(key, None)
                    removed_waiting = True

            # With the waiting room gone and execution finished, the
            # execution lock and the abort flag can be cleaned up as well.
            if removed_waiting:
                async with self.executing_lock:
                    if not key_execute_lock.locked():
                        self.executing.pop(key, None)
                        self.aborted.discard(key)

        return returner
| """
DM Queue Module.
Pylint's too few public methods is disabled here since we're not really
using DMLock for a purpose that something else might satisfy better,
such as dataclasses.
The implementation here can be hard to follow, and while there is a
variable inside asyncio.Lock that would help with this (_waiters), that
is a private attribute, and hence we have to make do with an additional
waiting room.
Also this file has more comments than actual code so that's fun.
"""
from asyncio import Lock
from typing import Any, Callable, Dict, Set
# Raised from inside a queued call to signal that every remaining queued
# call for the same key should be skipped (handled in DMLock.queue_call).
class AbortQueue(Exception):
    """When all queued calls are to be aborted."""
# pylint: disable=too-few-public-methods
class DMLock:
    """
    DM Lock Manager.

    To prevent users from receiving multiple DMs at the same time, this
    module queues DM role management tasks per user: calls queued under
    the same key run strictly one at a time, in FIFO order, with at most
    one extra caller parked in a per-key "waiting room" lock.
    """

    __slots__ = [
        "waiting",
        "executing",
        "aborted",
        "waiting_lock",
        "executing_lock"
    ]

    def __init__(self) -> None:
        """Initializer for the DMLock class."""
        self.waiting: Dict[int, Lock] = {}    # per-key waiting room locks
        self.executing: Dict[int, Lock] = {}  # per-key execution locks
        self.aborted: Set[int] = set()        # keys whose queued calls are cancelled
        self.waiting_lock = Lock()            # guards self.waiting
        self.executing_lock = Lock()          # guards self.executing

    async def queue_call(
            self,
            call: Callable,
            key: int,
            *args,
            **kwargs
    ) -> Any:
        """
        Enqueue an async call for a member.

        :param call: Callable to call once the key lock is free
        :param key: Key of FIFO queue
        :return: Whatever ``call`` returned, or None if the queue for
            this key was aborted before the call could run
        """
        returner = None

        # Check if the queue is in abort mode.
        if key in self.aborted:
            return returner

        # Obtain (or lazily create) this key's waiting room lock.
        async with self.waiting_lock:
            key_wait_lock = self.waiting.setdefault(key, Lock())

        # Enter the waiting room. If someone else is already waiting,
        # block here until they move on to execution (preserves order).
        await key_wait_lock.acquire()
        self_waiting = True

        try:
            # Obtain (or lazily create) this key's execution lock.
            async with self.executing_lock:
                key_execute_lock = self.executing.setdefault(key, Lock())

            # Start execution once the previous call for this key is done.
            async with key_execute_lock:
                # First thing during execution: release the waiting room
                # for the next caller in line.
                key_wait_lock.release()
                self_waiting = False

                # Re-check abort mode: it may have been set while waiting.
                if key not in self.aborted:
                    try:
                        returner = await call(*args, **kwargs)
                    except AbortQueue:
                        # The call asked us to drop everything still queued.
                        self.aborted.add(key)
        finally:
            # Exit the waiting room ourselves if we are still in it (e.g.
            # an error occurred before execution started). Never release
            # the lock on behalf of someone else.
            if key_wait_lock.locked() and self_waiting:
                key_wait_lock.release()

            # If the waiting room is empty, no one is queued behind us and
            # the per-key waiting lock can be removed.
            removed_waiting = False
            async with self.waiting_lock:
                if not key_wait_lock.locked():
                    # BUG FIX: dict.pop() is synchronous -- the original
                    # code awaited its result (an asyncio.Lock or None),
                    # which either re-acquired the just-released lock or
                    # raised TypeError on Python 3.9+.
                    self.waiting.pop(key, None)
                    removed_waiting = True

            # With the waiting room gone and execution finished, the
            # execution lock and the abort flag can be cleaned up as well.
            if removed_waiting:
                async with self.executing_lock:
                    if not key_execute_lock.locked():
                        self.executing.pop(key, None)
                        self.aborted.discard(key)

        return returner
| en | 0.958774 | DM Queue Module. Pylint's too few public methods is disabled here since we're not really using DMLock for a purpose that something else might satisfy better, such as dataclasses. The implementation here can be hard to follow, and while there is a variable inside asyncio.Lock that would help with this (_waiters), that is a private attribute, and hence we have to make do with an additional waiting room. Also this file has more comments than actual code so that's fun. When all queued calls are to be aborted. # pylint: disable=too-few-public-methods DM Lock Manager. To prevent users from receiving multiple DMs at the same time, this module queues DM role management tasks per user. Initializer for the DMLock class. Enqueue an async call for a member. :param call: Callable to call once the key lock is free :param key: Key of FIFO queue # Check if the queue is in abort mode # Obtain the waiting room lock # Enter the waiting room. # If someone else is in the waiting room, that means there's 1 # or 2 tasks in front of us, and we need to wait for them all # to at least start executing so that we can enter the waiting # room. # Obtain the execution lock # Start execution, after previous call is done. # At this point, the waiting room lock is held by us and is # locked, but the execusion lock might still be held by # someone else. # First thing to do during execusion is to release the # waiting room for the next person in line so that they # can wait for us to be done. # We check again if the queue is in abort mode. # No matter what happens, we want to exit the waiting room # ourselves. We check if the waiting room is locked, and if # we ourselves are waiting (which we keep track of using # the self_waiting bool). If we are not the ones waiting # and someone else is in the waiting room, we shouldn't # disturb them. # Now we check if the waiting room is empty or full, # regardless of who it is inside. 
We know that we aren't # the ones inside, so it has to be someone else if it is # locked. # If it is empty, that means we can safely remove # the waiting room, because no one is using it. # If we removed the waiting room, there would only be the # execution lock left now. We were just in it but we've # since left - there can only be a maximum # If the execution lock is empty, then we are # pretty sure that we can remove it. We've # already removed the waiting room, and since # we can't be stuck between the waiting room # and the execution (without being in either), # that edge case is impossible. | 2.873784 | 3 |
setup.py | janelia-pypi/lickport_array_python | 0 | 6622242 | import pathlib
import codecs
import setuptools

# Directory containing this setup.py; used to locate the description file.
here = pathlib.Path(__file__).resolve().parent

# Use the bundled reStructuredText file as the PyPI long description.
with codecs.open(here.joinpath('DESCRIPTION.rst'), encoding='utf-8') as f:
    long_description = f.read()

setuptools.setup(
    name='lickport_array_interface',
    # Derive the package version from SCM (git tags) via setuptools_scm.
    use_scm_version = True,
    setup_requires=['setuptools_scm'],
    description='Lickport array interface.',
    long_description=long_description,
    url='https://github.com/janelia-pypi/lickport_array_interface_python',
    author='<NAME>',
    author_email='<EMAIL>',
    license='BSD',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3',
    ],
    keywords='',
    packages=setuptools.find_packages(exclude=['contrib', 'docs', 'tests*']),
    install_requires=[
        'modular_client',
    ],
    # Console entry point: the `lai` command runs the interface's main().
    entry_points={
        'console_scripts': [
            'lai = lickport_array_interface.lickport_array_interface:main',
        ],
    },
)
| import pathlib
import codecs
import setuptools

# Directory containing this setup.py; used to locate the description file.
here = pathlib.Path(__file__).resolve().parent

# Use the bundled reStructuredText file as the PyPI long description.
with codecs.open(here.joinpath('DESCRIPTION.rst'), encoding='utf-8') as f:
    long_description = f.read()

setuptools.setup(
    name='lickport_array_interface',
    # Derive the package version from SCM (git tags) via setuptools_scm.
    use_scm_version = True,
    setup_requires=['setuptools_scm'],
    description='Lickport array interface.',
    long_description=long_description,
    url='https://github.com/janelia-pypi/lickport_array_interface_python',
    author='<NAME>',
    author_email='<EMAIL>',
    license='BSD',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3',
    ],
    keywords='',
    packages=setuptools.find_packages(exclude=['contrib', 'docs', 'tests*']),
    install_requires=[
        'modular_client',
    ],
    # Console entry point: the `lai` command runs the interface's main().
    entry_points={
        'console_scripts': [
            'lai = lickport_array_interface.lickport_array_interface:main',
        ],
    },
)
| none | 1 | 1.297128 | 1 | |
example.py | dogeplusplus/sandbox | 0 | 6622243 | import torch
import torch.nn as nn
import torch.nn.functional as F
# Toy demonstration of (unparameterized) self-attention:
# a batch of b sequences, each with t vectors of dimension k.
b = 20
t = 30
k = 10
x = torch.ones((b,t,k))
# Raw attention scores: dot product of every pair of positions -> (b, t, t).
raw_weights = torch.bmm(x, x.transpose(1,2))
# Normalize each row into a probability distribution over positions.
weights = F.softmax(raw_weights, dim=2)
# Each output vector is the attention-weighted average of the inputs.
y = torch.bmm(weights, x)
class SelfAttention(nn.Module):
    """Multi-head self-attention over sequences of k-dimensional vectors.

    Each of the ``heads`` attention heads projects the input into its own
    k-dimensional query/key/value space; the concatenated head outputs
    (h*k features) are mapped back to k dimensions by ``unify_heads``.
    """

    def __init__(self, k, heads=8):
        super().__init__()
        self.k = k
        self.heads = heads
        # One wide linear per projection produces all heads at once (k -> k*heads).
        self.to_keys = nn.Linear(k, k*heads, bias=False)
        self.to_queries = nn.Linear(k, k*heads, bias=False)
        self.to_values = nn.Linear(k, k*heads, bias=False)
        # Projects the concatenated head outputs (heads*k) back down to k.
        self.unify_heads = nn.Linear(heads * k, k)

    def forward(self, x):
        # x: (batch b, sequence length t, embedding dim k)
        b, t, k = x.size()
        h = self.heads
        # Split the wide projections into separate heads: (b, t, h, k).
        queries = self.to_queries(x).view(b, t, h, k)
        keys = self.to_keys(x).view(b, t, h, k)
        values = self.to_values(x).view(b, t, h, k)
        # Fold heads into the batch dim so bmm runs all heads at once: (b*h, t, k).
        keys = keys.transpose(1, 2).contiguous().view(b * h, t, k)
        queries = queries.transpose(1, 2).contiguous().view(b * h, t, k)
        values = values.transpose(1, 2).contiguous().view(b * h, t, k)
        # Scale queries and keys by k**(-1/4) each, so their dot product is
        # effectively scaled by 1/sqrt(k) without a large intermediate.
        queries = queries / (k ** (1/4))
        keys = keys / (k ** (1/4))
        # Attention weights: (b*h, t, t), each row softmax-normalized.
        dot = torch.bmm(queries, keys.transpose(1, 2))
        dot = F.softmax(dot, dim=2)
        # Weighted sum of values, then unfold the heads back out: (b, h, t, k).
        out = torch.bmm(dot, values).view(b, h, t, k)
        # Re-concatenate heads along the feature axis: (b, t, h*k).
        out = out.transpose(1, 2).contiguous().view(b, t, h*k)
        return self.unify_heads(out)
# Smoke test: run the toy input through one self-attention layer.
layer = SelfAttention(k)
out = layer(x)
class TransformerBlock(nn.Module):
    """Standard transformer block: self-attention then a position-wise
    feed-forward network, each wrapped in a residual connection followed
    by layer normalization (post-norm arrangement)."""

    def __init__(self, k, heads):
        super().__init__()
        self.attention = SelfAttention(k, heads=heads)
        self.norm1 = nn.LayerNorm(k)
        self.norm2 = nn.LayerNorm(k)
        # Feed-forward with the conventional 4x hidden expansion.
        self.ff = nn.Sequential(
            nn.Linear(k, 4*k),
            nn.ReLU(),
            nn.Linear(4*k, k)
        )

    def forward(self, x):
        # Attention sub-layer: residual connection, then layer norm.
        attended = self.attention(x)
        x = self.norm1(attended + x)
        # Feed-forward sub-layer: residual connection, then layer norm.
        fedforward = self.ff(x)
        return self.norm2(fedforward + x)
class Transformer(nn.Module):
    """Sequence classifier: token + position embeddings, a stack of
    transformer blocks, then mean-pooling over time and a linear head
    producing log class probabilities."""

    def __init__(self, k, heads, depth, seq_length, num_tokens, num_classes):
        super().__init__()
        self.num_tokens = num_tokens
        # Learned embeddings for tokens and for absolute positions.
        self.token_emb = nn.Embedding(num_tokens, k)
        self.pos_emb = nn.Embedding(seq_length, k)
        # `depth` transformer blocks applied sequentially.
        tblocks = []
        for i in range(depth):
            tblocks.append(TransformerBlock(k, heads))
        self.tblocks = nn.Sequential(*tblocks)
        # Maps the pooled k-dim representation to class logits.
        self.to_probs = nn.Linear(k, num_classes)

    def forward(self, x):
        # x: (batch, time) integer token indices.
        tokens = self.token_emb(x)
        b, t, k = tokens.size()
        # Position embeddings for positions 0..t-1, broadcast over batch.
        positions = torch.arange(t)
        positions = self.pos_emb(positions)[None, :, :].expand(b, t, k)
        x = tokens + positions
        x = self.tblocks(x)
        # Mean-pool over time, project to classes, return log-probabilities.
        x = self.to_probs(x.mean(dim=1))
        return F.log_softmax(x, dim=1)
| import torch
import torch.nn as nn
import torch.nn.functional as F
# Toy demonstration of (unparameterized) self-attention:
# a batch of b sequences, each with t vectors of dimension k.
b = 20
t = 30
k = 10
x = torch.ones((b,t,k))
# Raw attention scores: dot product of every pair of positions -> (b, t, t).
raw_weights = torch.bmm(x, x.transpose(1,2))
# Normalize each row into a probability distribution over positions.
weights = F.softmax(raw_weights, dim=2)
# Each output vector is the attention-weighted average of the inputs.
y = torch.bmm(weights, x)
class SelfAttention(nn.Module):
    """Multi-head self-attention over sequences of k-dimensional vectors.

    Each of the ``heads`` attention heads projects the input into its own
    k-dimensional query/key/value space; the concatenated head outputs
    (h*k features) are mapped back to k dimensions by ``unify_heads``.
    """

    def __init__(self, k, heads=8):
        super().__init__()
        self.k = k
        self.heads = heads
        # One wide linear per projection produces all heads at once (k -> k*heads).
        self.to_keys = nn.Linear(k, k*heads, bias=False)
        self.to_queries = nn.Linear(k, k*heads, bias=False)
        self.to_values = nn.Linear(k, k*heads, bias=False)
        # Projects the concatenated head outputs (heads*k) back down to k.
        self.unify_heads = nn.Linear(heads * k, k)

    def forward(self, x):
        # x: (batch b, sequence length t, embedding dim k)
        b, t, k = x.size()
        h = self.heads
        # Split the wide projections into separate heads: (b, t, h, k).
        queries = self.to_queries(x).view(b, t, h, k)
        keys = self.to_keys(x).view(b, t, h, k)
        values = self.to_values(x).view(b, t, h, k)
        # Fold heads into the batch dim so bmm runs all heads at once: (b*h, t, k).
        keys = keys.transpose(1, 2).contiguous().view(b * h, t, k)
        queries = queries.transpose(1, 2).contiguous().view(b * h, t, k)
        values = values.transpose(1, 2).contiguous().view(b * h, t, k)
        # Scale queries and keys by k**(-1/4) each, so their dot product is
        # effectively scaled by 1/sqrt(k) without a large intermediate.
        queries = queries / (k ** (1/4))
        keys = keys / (k ** (1/4))
        # Attention weights: (b*h, t, t), each row softmax-normalized.
        dot = torch.bmm(queries, keys.transpose(1, 2))
        dot = F.softmax(dot, dim=2)
        # Weighted sum of values, then unfold the heads back out: (b, h, t, k).
        out = torch.bmm(dot, values).view(b, h, t, k)
        # Re-concatenate heads along the feature axis: (b, t, h*k).
        out = out.transpose(1, 2).contiguous().view(b, t, h*k)
        return self.unify_heads(out)
# Smoke test: run the toy input through one self-attention layer.
layer = SelfAttention(k)
out = layer(x)
class TransformerBlock(nn.Module):
    """Standard transformer block: self-attention then a position-wise
    feed-forward network, each wrapped in a residual connection followed
    by layer normalization (post-norm arrangement)."""

    def __init__(self, k, heads):
        super().__init__()
        self.attention = SelfAttention(k, heads=heads)
        self.norm1 = nn.LayerNorm(k)
        self.norm2 = nn.LayerNorm(k)
        # Feed-forward with the conventional 4x hidden expansion.
        self.ff = nn.Sequential(
            nn.Linear(k, 4*k),
            nn.ReLU(),
            nn.Linear(4*k, k)
        )

    def forward(self, x):
        # Attention sub-layer: residual connection, then layer norm.
        attended = self.attention(x)
        x = self.norm1(attended + x)
        # Feed-forward sub-layer: residual connection, then layer norm.
        fedforward = self.ff(x)
        return self.norm2(fedforward + x)
class Transformer(nn.Module):
    """Sequence classifier: token + position embeddings, a stack of
    transformer blocks, then mean-pooling over time and a linear head
    producing log class probabilities."""

    def __init__(self, k, heads, depth, seq_length, num_tokens, num_classes):
        super().__init__()
        self.num_tokens = num_tokens
        # Learned embeddings for tokens and for absolute positions.
        self.token_emb = nn.Embedding(num_tokens, k)
        self.pos_emb = nn.Embedding(seq_length, k)
        # `depth` transformer blocks applied sequentially.
        tblocks = []
        for i in range(depth):
            tblocks.append(TransformerBlock(k, heads))
        self.tblocks = nn.Sequential(*tblocks)
        # Maps the pooled k-dim representation to class logits.
        self.to_probs = nn.Linear(k, num_classes)

    def forward(self, x):
        # x: (batch, time) integer token indices.
        tokens = self.token_emb(x)
        b, t, k = tokens.size()
        # Position embeddings for positions 0..t-1, broadcast over batch.
        positions = torch.arange(t)
        positions = self.pos_emb(positions)[None, :, :].expand(b, t, k)
        x = tokens + positions
        x = self.tblocks(x)
        # Mean-pool over time, project to classes, return log-probabilities.
        x = self.to_probs(x.mean(dim=1))
        return F.log_softmax(x, dim=1)
| none | 1 | 2.633807 | 3 | |
wifid.py | mertkoc/wifidirect_linux | 1 | 6622244 | <gh_stars>1-10
import os
import time
def _copy_file_no_overwriting(src, dst):
import shutil
if not os.path.isfile(dst):
print('copying... ', dst)
shutil.copyfile(src, dst)
def setup_conf_files():
    """Setup configuration files that are needed to run start_as_go~().

    Installs the bundled templates from ./conf/ into their system
    locations, never overwriting a file that already exists.

    :return: None
    """
    conf_dir = os.path.dirname(__file__) + '/conf/'  # directory holding the bundled .conf templates
    targets = (
        ('dhcpd.conf', '/etc/dhcp/dhcpd.conf'),
        ('udhcpd.conf', '/etc/udhcpd.conf'),
        ('wpa_supplicant.conf', '/etc/wpa_supplicant.conf'),
    )
    for template, destination in targets:
        _copy_file_no_overwriting(os.path.abspath(conf_dir + template),
                                  os.path.abspath(destination))
def _system_critical(command):
if os.system(command) is not 0:
raise ConnectionError('Failed to configure the WiFi Direct')
def start_as_go_fedora(str_interface='wls35u1', str_static_ip_addr_for_p2p='192.168.1.2'):
    """Starts a Wifi direct interface as GO (Tested on Fedora 26)

    1. Destroy dhcpd and a wifi connection. (It usually takes some little time to kill off a wifi thing so wait for couple seconds...)
    2. Bring up a wifi p2p(direct) interface.
    3. Wait for incoming p2p connection of clients, starting dhcpd (dhcpd is a DHCP server).

    :param str_interface: A name of your wifi interface.
    :param str_static_ip_addr_for_p2p: A static ip address to be given to your p2p interface. (This is only for the server(GO). The client should use a DHCP IP.)
    :raises ConnectionError: If any critical configuration step fails (via _system_critical).
    :return: None
    """
    os.system('sudo killall dhcpd') # kill current dhcpd running if there is any
    os.system('sudo wpa_cli -i ' + str_interface + ' terminate -B') # this will down your interface
    os.system('sudo wpa_cli -i p2p-' + str_interface + '-0 terminate -B') # kill p2p interface as well
    time.sleep(2) # wait for the interface to go down
    os.system('echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward') # enabling kernel ip forwarding (routing) in Linux
    # os.system('echo "ctrl_interface=/var/run/wpa_supplicant\nupdate_config=1" | sudo tee /etc/wpa_supplicant.conf')
    _system_critical('sudo wpa_supplicant -d -Dnl80211 -c /etc/wpa_supplicant.conf -i' + str_interface + ' -B') # this brings up an interface
    _system_critical('sudo wpa_cli -i' + str_interface + ' p2p_group_add')
    # p2p_group_add: Become an autonomous GO (this creates a p2p interface)
    _system_critical('sudo ifconfig p2p-' + str_interface + '-0 ' + str_static_ip_addr_for_p2p) # assign a static ip to your p2p interface
    _system_critical('sudo wpa_cli -i p2p-' + str_interface + '-0 p2p_find') # p2p_find: Enables discovery
    os.system('sudo wpa_cli -ip2p-' + str_interface + '-0 p2p_peers')
    # p2p_peers: Shows list of discovered peers (not necessary)
    _system_critical('sudo wpa_cli -ip2p-' + str_interface + '-0 wps_pbc')
    # wps_pbc: pushbutton for GO WPS authorization to accept incoming connections (When devices try to connect to GO)
    _system_critical('sudo dhcpd') # start dhcpd
def start_as_go_ubuntu(str_interface='wlan0', str_static_ip_addr_for_p2p='192.168.1.2'):
    """Bring this host up as a Wi-Fi Direct Group Owner (tested on Ubuntu 16.04).

    Same flow as the Fedora variant, except Ubuntu uses udhcpd (a BusyBox
    utility) instead of dhcpd.

    :param str_interface: A name of your wifi interface.
    :param str_static_ip_addr_for_p2p: A static ip address to be given to your p2p interface. (This is only for the server(GO). The client should use a DHCP IP.)
    :return: None
    """
    p2p_iface = 'p2p-' + str_interface + '-0'
    # Best-effort teardown of any previous session.
    os.system('sudo killall udhcpd')
    os.system('sudo wpa_cli -i ' + str_interface + ' terminate -B')
    os.system('sudo wpa_cli -i ' + p2p_iface + ' terminate -B')
    time.sleep(1)
    # Enable kernel IPv4 forwarding so the GO can route for its clients.
    os.system('echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward')
    _system_critical('sudo wpa_supplicant -d -Dnl80211 -c /etc/wpa_supplicant.conf -i' + str_interface + ' -B')
    _system_critical('sudo wpa_cli -i' + str_interface + ' p2p_group_add')
    _system_critical('sudo ifconfig ' + p2p_iface + ' ' + str_static_ip_addr_for_p2p)
    _system_critical('sudo wpa_cli -i ' + p2p_iface + ' p2p_find')
    os.system('sudo wpa_cli -i' + p2p_iface + ' p2p_peers')
    _system_critical('sudo wpa_cli -i' + p2p_iface + ' wps_pbc')
    _system_critical('sudo udhcpd /etc/udhcpd.conf &')
if __name__ == "__main__":
    # example
    # Install the packaged .conf files if they are missing, then start this
    # host as a Wi-Fi Direct Group Owner on Ubuntu.  Any setup command that
    # fails surfaces as a ConnectionError.
    import wifid
    wifid.setup_conf_files()
    try:
        wifid.start_as_go_ubuntu()
    except ConnectionError:
        print('ConnectionError from wifid')
| import os
import time
def _copy_file_no_overwriting(src, dst):
import shutil
if not os.path.isfile(dst):
print('copying... ', dst)
shutil.copyfile(src, dst)
def setup_conf_files():
    """Setup configuration files that are needed to run start_as_go~().

    Copies the packaged .conf templates into their system locations,
    skipping any destination file that already exists.

    :return: None
    """
    conf_dir = os.path.dirname(__file__) + '/conf/'  # directory holding the .conf templates
    for src_name, dst_path in (('dhcpd.conf', '/etc/dhcp/dhcpd.conf'),
                               ('udhcpd.conf', '/etc/udhcpd.conf'),
                               ('wpa_supplicant.conf', '/etc/wpa_supplicant.conf')):
        _copy_file_no_overwriting(os.path.abspath(conf_dir + src_name),
                                  os.path.abspath(dst_path))
def _system_critical(command):
if os.system(command) is not 0:
raise ConnectionError('Failed to configure the WiFi Direct')
def start_as_go_fedora(str_interface='wls35u1', str_static_ip_addr_for_p2p='192.168.1.2'):
    """Starts a Wifi direct interface as GO (Tested on Fedora 26)
    1. Destroy dhcpd and a wifi connection. (It usually takes some little time to kill off a wifi thing so wait for couple seconds...)
    2. Bring up a wifi p2p(direct) interface.
    3. Wait for incoming p2p connection of clients, starting dhcpd (dhcpd is a DHCP server).
    :param str_interface: A name of your wifi interface.
    :param str_static_ip_addr_for_p2p: A static ip address to be given to your p2p interface. (This is only for the server(GO). The client should use a DHCP IP.)
    :return: None
    """
    # NOTE(review): the interface name is interpolated into shell commands
    # unquoted -- callers must pass trusted values only.
    os.system('sudo killall dhcpd') # kill current dhcpd running if there is any
    os.system('sudo wpa_cli -i ' + str_interface + ' terminate -B') # this will down your interface
    os.system('sudo wpa_cli -i p2p-' + str_interface + '-0 terminate -B') # kill p2p interface as well
    time.sleep(2) # wait for the interface to go down
    os.system('echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward') # enabling kernel ip forwarding (routing) in Linux
    # os.system('echo "ctrl_interface=/var/run/wpa_supplicant\nupdate_config=1" | sudo tee /etc/wpa_supplicant.conf')
    _system_critical('sudo wpa_supplicant -d -Dnl80211 -c /etc/wpa_supplicant.conf -i' + str_interface + ' -B') # this brings up an interface
    _system_critical('sudo wpa_cli -i' + str_interface + ' p2p_group_add')
    # p2p_group_add: Become an autonomous GO (this creates a p2p interface)
    _system_critical('sudo ifconfig p2p-' + str_interface + '-0 ' + str_static_ip_addr_for_p2p) # assign a static ip to your p2p interface
    _system_critical('sudo wpa_cli -i p2p-' + str_interface + '-0 p2p_find') # p2p_find: Enables discovery
    os.system('sudo wpa_cli -ip2p-' + str_interface + '-0 p2p_peers')
    # p2p_peers: Shows list of discovered peers (not necessary)
    _system_critical('sudo wpa_cli -ip2p-' + str_interface + '-0 wps_pbc')
    # wps_pbc: pushbutton for GO WPS authorization to accept incoming connections (When devices try to connect to GO)
    _system_critical('sudo dhcpd') # start dhcpd
def start_as_go_ubuntu(str_interface='wlan0', str_static_ip_addr_for_p2p='192.168.1.2'):
    """Starts a Wifi direct interface as GO (Tested on Ubuntu 16.04)
    Mostly same as the one in Fedora, except that Ubuntu uses udhcpd (which is a BusyBox utility) instead of dhcpd.
    :param str_interface: A name of your wifi interface.
    :param str_static_ip_addr_for_p2p: A static ip address to be given to your p2p interface. (This is only for the server(GO). The client should use a DHCP IP.)
    :return: None
    """
    os.system('sudo killall udhcpd')  # kill any running udhcpd (best effort)
    os.system('sudo wpa_cli -i ' + str_interface + ' terminate -B')  # down the wifi interface
    os.system('sudo wpa_cli -i p2p-' + str_interface + '-0 terminate -B')  # kill the p2p interface too
    time.sleep(1)  # wait for the interfaces to go down
    os.system('echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward')  # enable kernel ip forwarding
    # os.system('echo "ctrl_interface=/var/run/wpa_supplicant\nupdate_config=1" | sudo tee /etc/wpa_supplicant.conf')
    _system_critical('sudo wpa_supplicant -d -Dnl80211 -c /etc/wpa_supplicant.conf -i' + str_interface + ' -B')  # bring the interface up
    _system_critical('sudo wpa_cli -i' + str_interface + ' p2p_group_add')  # become an autonomous GO
    _system_critical('sudo ifconfig p2p-' + str_interface + '-0 ' + str_static_ip_addr_for_p2p)  # static ip for the p2p interface
    _system_critical('sudo wpa_cli -i p2p-' + str_interface + '-0 p2p_find')  # enable discovery
    os.system('sudo wpa_cli -ip2p-' + str_interface + '-0 p2p_peers')  # show discovered peers (informational)
    _system_critical('sudo wpa_cli -ip2p-' + str_interface + '-0 wps_pbc')  # WPS pushbutton to accept clients
    _system_critical('sudo udhcpd /etc/udhcpd.conf &')  # start the DHCP server
if __name__ == "__main__":
    # example
    # Copy the packaged .conf files into place (non-destructively), then
    # start this host as a Wi-Fi Direct Group Owner on Ubuntu.
    import wifid
    wifid.setup_conf_files()
    try:
        wifid.start_as_go_ubuntu()
    except ConnectionError:
        print('ConnectionError from wifid')
Kai/python/modules/simpletrigger.py | NJManganelli/FourTopNAOD | 1 | 6622245 | import ROOT
import collections, math
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection, Object
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module
class Trigger(Module):
    """NanoAOD postprocessing module that keeps only events firing at least
    one of the configured trigger paths.

    :param Trigger: list of trigger branch names (e.g. HLT paths) tested on
        each event.
    """
    def __init__(self, Trigger):
        self.counting = 0             # events seen so far by analyze()
        self.maxEventsToProcess = -1  # -1 means "no limit"
        self.Trigger = Trigger
    def beginJob(self, histFile=None, histDirName=None):
        Module.beginJob(self, histFile, histDirName)
    def analyze(self, event):
        """process event, return True (go to next module) or False (fail, go to next event)"""
        # Optional cap on the number of processed events.
        self.counting += 1
        if -1 < self.maxEventsToProcess < self.counting:
            return False
        # Keep the event as soon as any requested trigger branch exists and fired.
        return any(hasattr(event, trig) and getattr(event, trig)
                   for trig in self.Trigger)
| import ROOT
import collections, math
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection, Object
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module
class Trigger(Module):
    """NanoAOD postprocessing module that keeps only events firing at least
    one of the configured trigger branches."""
    def __init__(self, Trigger):
        # Running count of events seen by analyze().
        self.counting = 0
        # Stop processing after this many events; -1 disables the limit.
        self.maxEventsToProcess = -1
        # List of trigger branch names (e.g. HLT paths) to test per event.
        self.Trigger = Trigger
    def beginJob(self, histFile=None,histDirName=None):
        Module.beginJob(self,histFile,histDirName)
    def analyze(self, event):
        """process event, return True (go to next module) or False (fail, go to next event)"""
        #First N events
        self.counting += 1
        if -1 < self.maxEventsToProcess < self.counting:
            return False
        #run = getattr(event, "run")
        #evt = getattr(event, "event")
        #lumi = getattr(event, "luminosityBlock")
        # Accept the event as soon as any requested trigger branch exists and is set.
        for trig in self.Trigger:
            if hasattr(event, trig) and getattr(event, trig):
                #print(getattr(event, trig))
                return True
        #else:
        #print("No trigger fired")
        return False
Display/Pages/RoomPage.py | nataliap13/PT-Warcaby | 0 | 6622246 | from selenium.webdriver.common.by import By
from Locators import Locator
class Room(object):
    """Page object for the room page, exposing the room-kind selector."""
    def __init__(self, driver):
        self.driver = driver
        # Resolve the dropdown element eagerly so later clicks are cheap.
        xpath = Locator.KindOfRoom
        self.KindOfRoom = driver.find_element(By.XPATH, xpath)
    def click_OpenList(self):
        """Click the room-kind selector to open its dropdown list."""
        self.KindOfRoom.click()
| from selenium.webdriver.common.by import By
from Locators import Locator
class Room(object):
def __init__(self, driver):
self.driver = driver
self.KindOfRoom = driver.find_element(By.XPATH, Locator.KindOfRoom)
def click_OpenList(self):
self.KindOfRoom.click()
| none | 1 | 2.68371 | 3 | |
TEST3D/GUI/0010200_page_pixsel/log.py | usnistgov/OOF3D | 31 | 6622247 | <gh_stars>10-100
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# <EMAIL>.
import tests
# Recorded OOF3D GUI session (replayed by the GUI test harness; the bare
# "checkpoint ..." lines are harness synchronization markers, not Python).
# Section: visit the Voxel Selection page, then create microstructure "1.0"
# from the TEST_DATA/5color image directory via the Microstructure page.
findWidget('OOF3D').resize(550, 350)
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Voxel Selection')
checkpoint page installed Voxel Selection
findWidget('OOF3D:Voxel Selection Page:Pane').set_position(336)
assert tests.sensitization0()
assert tests.voxelSelectionPageNoMSCheck()
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Microstructure')
checkpoint page installed Microstructure
findWidget('OOF3D:Microstructure Page:Pane').set_position(225)
findWidget('OOF3D:Microstructure Page:NewFromFile').clicked()
checkpoint toplevel widget mapped Dialog-Load Image and create Microstructure
findWidget('Dialog-Load Image and create Microstructure').resize(401, 215)
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('TEST_DATA/5color')
findWidget('Dialog-Load Image and create Microstructure:microstructure_name:Auto').clicked()
# Keystroke-by-keystroke entry of the microstructure name "1.0".
findWidget('Dialog-Load Image and create Microstructure:microstructure_name:Text').set_text('1')
findWidget('Dialog-Load Image and create Microstructure:microstructure_name:Text').set_text('1.')
findWidget('Dialog-Load Image and create Microstructure:microstructure_name:Text').set_text('1.0')
findWidget('Dialog-Load Image and create Microstructure:gtk-ok').clicked()
findWidget('OOF3D Messages 1').resize(603, 200)
findWidget('OOF3D:Microstructure Page:Pane').set_position(159)
# Page-update checkpoints emitted while the microstructure is created.
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint active area status updated
checkpoint microstructure page sensitized
checkpoint Field page sensitized
checkpoint meshable button set
checkpoint Materials page updated
checkpoint mesh page subproblems sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint boundary page updated
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint microstructure page sensitized
checkpoint OOF.Microstructure.Create_From_ImageFile
# Section: open a graphics window, switch its toolbox to Voxel Selection in
# "Burn" mode, and burn-select a region with a recorded canvas click.  The
# asserts pin the expected selection size (2313 of 8000 voxels).
findMenu(findWidget('OOF3D:MenuBar'), 'Windows:Graphics:New').activate()
checkpoint Move Node toolbox info updated
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame').size_allocate(gtk.gdk.Rectangle(0, 29, 380, 430))
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame').size_allocate(gtk.gdk.Rectangle(0, 29, 380, 430))
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint toplevel widget mapped OOF3D Graphics 1
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame').size_allocate(gtk.gdk.Rectangle(0, 29, 380, 430))
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
findWidget('OOF3D Graphics 1').resize(1000, 800)
checkpoint OOF.Windows.Graphics.New
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
findWidget('OOF3D Graphics 1').resize(1000, 800)
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Voxel Selection')
assert tests.voxelSelectionPageStatusCheck(0, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 0)
assert tests.sensitization1()
checkpoint page installed Voxel Selection
findWidget('OOF3D:Voxel Selection Page:Pane').set_position(336)
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame').size_allocate(gtk.gdk.Rectangle(0, 29, 380, 430))
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
setComboBox(findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBChooser'), 'Voxel Selection')
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame').size_allocate(gtk.gdk.Rectangle(0, 29, 380, 430))
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
setComboBox(findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Voxel Selection:Method:Chooser'), 'Burn')
# Recorded press/release pair on the canvas; the canvas is resized to the
# size used during recording so the click maps to the same voxel.
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.3400000000000e+02,y= 2.7600000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.3400000000000e+02,y= 2.7600000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Burn
assert tests.voxelSelectionPageStatusCheck(2313, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 2313)
assert tests.sensitization2()
findWidget('OOF3D Graphics 1').resize(1000, 800)
# Section: save the burn selection as voxel group "lowerleft", then exercise
# Invert (5687 selected), Undo (back to 2313) and Clear (0), and finally
# re-select the group via the "Group" selection-modification method.
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Microstructure')
checkpoint page installed Microstructure
findWidget('OOF3D:Microstructure Page:Pane:VoxelGroups:New').clicked()
checkpoint toplevel widget mapped Dialog-Create new voxel group
findWidget('Dialog-Create new voxel group').resize(246, 67)
findWidget('Dialog-Create new voxel group:name:Auto').clicked()
# Keystroke-by-keystroke entry of the group name "lowerleft".
findWidget('Dialog-Create new voxel group:name:Text').set_text('l')
findWidget('Dialog-Create new voxel group:name:Text').set_text('lo')
findWidget('Dialog-Create new voxel group:name:Text').set_text('low')
findWidget('Dialog-Create new voxel group:name:Text').set_text('lowe')
findWidget('Dialog-Create new voxel group:name:Text').set_text('lower')
findWidget('Dialog-Create new voxel group:name:Text').set_text('lowerl')
findWidget('Dialog-Create new voxel group:name:Text').set_text('lowerle')
findWidget('Dialog-Create new voxel group:name:Text').set_text('lowerlef')
findWidget('Dialog-Create new voxel group:name:Text').set_text('lowerleft')
findWidget('Dialog-Create new voxel group:gtk-ok').clicked()
findWidget('OOF3D:Microstructure Page:Pane').set_position(225)
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint skeleton selection page groups sensitized
checkpoint OOF.PixelGroup.New
checkpoint microstructure page sensitized
checkpoint meshable button set
findWidget('OOF3D:Microstructure Page:Pane:VoxelGroups:Add').clicked()
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint OOF.PixelGroup.AddSelection
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Voxel Selection')
checkpoint page installed Voxel Selection
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Invert
assert tests.voxelSelectionPageStatusCheck(5687, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 5687)
assert tests.sensitization2()
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Undo').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Undo
assert tests.voxelSelectionPageStatusCheck(2313, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 2313)
assert tests.sensitization3()
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Clear').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Clear
assert tests.voxelSelectionPageStatusCheck(0, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 0)
assert tests.sensitization4()
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Chooser'), 'Group')
findWidget('OOF3D:Voxel Selection Page:Pane').set_position(328)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Group
# Section: burn-select a second region (click near the top of the canvas),
# store it as voxel group "upperleft", then select the group (3518 voxels)
# and Unselect it from the current selection, leaving 1205 voxels.
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
assert tests.voxelSelectionPageStatusCheck(2313, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 2313)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.7800000000000e+02,y= 9.4000000000000e+01,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.7800000000000e+02,y= 9.4000000000000e+01,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Burn
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Microstructure')
checkpoint page installed Microstructure
findWidget('OOF3D:Microstructure Page:Pane:VoxelGroups:New').clicked()
checkpoint toplevel widget mapped Dialog-Create new voxel group
findWidget('Dialog-Create new voxel group').resize(246, 67)
# Keystroke-by-keystroke entry of the group name "upperleft".
findWidget('Dialog-Create new voxel group:name:Text').set_text('')
findWidget('Dialog-Create new voxel group:name:Text').set_text('u')
findWidget('Dialog-Create new voxel group:name:Text').set_text('up')
findWidget('Dialog-Create new voxel group:name:Text').set_text('upp')
findWidget('Dialog-Create new voxel group:name:Text').set_text('uppe')
findWidget('Dialog-Create new voxel group:name:Text').set_text('upper')
findWidget('Dialog-Create new voxel group:name:Text').set_text('upperl')
findWidget('Dialog-Create new voxel group:name:Text').set_text('upperle')
findWidget('Dialog-Create new voxel group:name:Text').set_text('upperlef')
findWidget('Dialog-Create new voxel group:name:Text').set_text('upperleft')
findWidget('Dialog-Create new voxel group:gtk-ok').clicked()
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint skeleton selection page groups sensitized
checkpoint OOF.PixelGroup.New
findWidget('OOF3D:Microstructure Page:Pane:VoxelGroups:Add').clicked()
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint OOF.PixelGroup.AddSelection
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Voxel Selection')
checkpoint page installed Voxel Selection
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Group
assert tests.voxelSelectionPageStatusCheck(3518, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 3518)
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Group:operator:Chooser'), 'Unselect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Group
assert tests.voxelSelectionPageStatusCheck(1205, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 1205)
findWidget('OOF3D Graphics 1').resize(1000, 800)
# Section: switch the toolbox to single-voxel "Point" selection and replay a
# series of ctrl-clicks (press state=17, release state=273) at recorded
# canvas coordinates.  Each press/release pair below resizes the canvas to
# the size used during recording so the coordinates map to the same voxels.
setComboBox(findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Voxel Selection:Method:Chooser'), 'Point')
findWidget('OOF3D').resize(550, 350)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.8900000000000e+02,y= 1.6800000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.8900000000000e+02,y= 1.6800000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.0700000000000e+02,y= 1.7600000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.0700000000000e+02,y= 1.7600000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.1600000000000e+02,y= 1.5500000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.1600000000000e+02,y= 1.5500000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.3200000000000e+02,y= 1.5800000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.3200000000000e+02,y= 1.5800000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.4300000000000e+02,y= 1.6000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.4300000000000e+02,y= 1.6000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.6500000000000e+02,y= 1.5700000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.6500000000000e+02,y= 1.5700000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8000000000000e+02,y= 1.6100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.8000000000000e+02,y= 1.6100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.9800000000000e+02,y= 1.5800000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.9800000000000e+02,y= 1.5800000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.1700000000000e+02,y= 1.6100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.1700000000000e+02,y= 1.6100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.3100000000000e+02,y= 1.5900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.3100000000000e+02,y= 1.5900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.4200000000000e+02,y= 1.6100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.4200000000000e+02,y= 1.6100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.5200000000000e+02,y= 1.6100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.5200000000000e+02,y= 1.6100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.2900000000000e+02,y= 1.8000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.2900000000000e+02,y= 1.8000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.2300000000000e+02,y= 1.9100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.2300000000000e+02,y= 1.9100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.2200000000000e+02,y= 2.0500000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.2200000000000e+02,y= 2.0500000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.2100000000000e+02,y= 2.2200000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.2100000000000e+02,y= 2.2200000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.3600000000000e+02,y= 2.2100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.3600000000000e+02,y= 2.2100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.5200000000000e+02,y= 2.1900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.5200000000000e+02,y= 2.1900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.5500000000000e+02,y= 2.0700000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.5500000000000e+02,y= 2.0700000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.6400000000000e+02,y= 2.0200000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.6400000000000e+02,y= 2.0200000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8500000000000e+02,y= 2.0500000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.8500000000000e+02,y= 2.0500000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8700000000000e+02,y= 2.1700000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.8700000000000e+02,y= 2.1700000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8200000000000e+02,y= 2.3100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.8200000000000e+02,y= 2.3100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8000000000000e+02,y= 2.4900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.8000000000000e+02,y= 2.4900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.7000000000000e+02,y= 2.6200000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.7000000000000e+02,y= 2.6200000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.5100000000000e+02,y= 2.7400000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.5100000000000e+02,y= 2.7400000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.1500000000000e+02,y= 2.0500000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.1500000000000e+02,y= 2.0500000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.1400000000000e+02,y= 2.1800000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.1400000000000e+02,y= 2.1800000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.1300000000000e+02,y= 2.3400000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.1300000000000e+02,y= 2.3400000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.1000000000000e+02,y= 2.5000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.1000000000000e+02,y= 2.5000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.2700000000000e+02,y= 2.6400000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.2700000000000e+02,y= 2.6400000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.3500000000000e+02,y= 2.8000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.3500000000000e+02,y= 2.8000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Voxel Selection:Undo').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Undo
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.4300000000000e+02,y= 2.8000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.4300000000000e+02,y= 2.8000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.2600000000000e+02,y= 2.0800000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.2600000000000e+02,y= 2.0800000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.4400000000000e+02,y= 2.0600000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.4400000000000e+02,y= 2.0600000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.5400000000000e+02,y= 2.0700000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.5400000000000e+02,y= 2.0700000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.5700000000000e+02,y= 2.1600000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.5700000000000e+02,y= 2.1600000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Voxel Selection:Undo').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Undo
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Voxel Selection:Undo').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Undo
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.4100000000000e+02,y= 2.2000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.4100000000000e+02,y= 2.2000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.5200000000000e+02,y= 2.2000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.5200000000000e+02,y= 2.2000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.5900000000000e+02,y= 2.1800000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.5900000000000e+02,y= 2.1800000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.7400000000000e+02,y= 2.2100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.7400000000000e+02,y= 2.2100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.7400000000000e+02,y= 2.0900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.7400000000000e+02,y= 2.0900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.7600000000000e+02,y= 1.9000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.7600000000000e+02,y= 1.9000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.7500000000000e+02,y= 1.7500000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.7500000000000e+02,y= 1.7500000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.8600000000000e+02,y= 1.7000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.8600000000000e+02,y= 1.7000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.9600000000000e+02,y= 1.7200000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.9600000000000e+02,y= 1.7200000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.9900000000000e+02,y= 1.7200000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.9900000000000e+02,y= 1.7200000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
assert tests.pixelSelectionSizeCheck('1.0', 1235)
assert tests.voxelSelectionPageStatusCheck(1235, 8000)
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Group:operator:Chooser'), 'Intersect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Group
assert tests.voxelSelectionPageStatusCheck(0, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 0)
assert tests.sensitization5()
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Chooser'), 'Region')
findWidget('OOF3D').resize(550, 411)
findWidget('OOF3D:Voxel Selection Page:Pane').set_position(264)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:xComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:xComponenent').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:yComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:yComponenent').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:zComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:zComponenent').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(64, 8000)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:zComponenent').set_text('42.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(320, 8000)
findWidget('OOF3D').resize(550, 411)
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
findWidget('OOF3D Graphics 1:Pane0:Pane2:tumble').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.7700000000000e+02,y= 9.4000000000000e+01,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 1.8200000000000e+02,y= 8.9000000000000e+01,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.8200000000000e+02,y= 8.9000000000000e+01,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
findWidget('OOF3D Graphics 1:Pane0:Pane2:select').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:xComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:xComponenent').set_text('5.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:yComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:yComponenent').set_text('5.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:zComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:zComponenent').set_text('0.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:xComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:xComponenent').set_text('1.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:yComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:yComponenent').set_text('1.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:zComponenent').set_text('10.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(390, 8000)
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Intersect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(15, 8000)
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Select Only')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:zComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:zComponenent').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:zComponenent').set_text('42.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:zComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:zComponenent').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:zComponenent').set_text('42.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0, 8000)
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
findWidget('OOF3D Graphics 1:Pane0:Pane2:tumble').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.5400000000000e+02,y= 1.9500000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 7.0200000000000e+02,y= 2.1800000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 7.0200000000000e+02,y= 2.1800000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Unselect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0, 8000)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:yComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:yComponenent').set_text('1.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:yComponenent').set_text('10.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:xComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:xComponenent').set_text('1.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:xComponenent').set_text('10.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 4.0600000000000e+02,y= 2.1600000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.0800000000000e+02,y= 2.1500000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.0800000000000e+02,y= 2.1500000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:xComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:xComponenent').set_text('0.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:yComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:yComponenent').set_text('0.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:zComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:zComponenent').set_text('0.0')
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Select Only')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8600000000000e+02,y= 2.0200000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.5700000000000e+02,y= 1.9100000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 8.4000000000000e+01,y= 1.8500000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.0100000000000e+02,y= 1.9200000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.1400000000000e+02,y= 1.9400000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.5400000000000e+02,y= 2.0300000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.5400000000000e+02,y= 2.0300000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Unselect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0, 8000)
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Chooser'), 'Circle')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:center:zComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:center:zComponenent').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:center:zComponenent').set_text('42.0')
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Select')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0, 8000)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.7200000000000e+02,y= 1.5100000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 5.2100000000000e+02,y= 1.5500000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 5.2100000000000e+02,y= 1.5500000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('1.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('10.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('0.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('20.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('0.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('30.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.3200000000000e+02,y= 1.9100000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.2400000000000e+02,y= 1.7800000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.4400000000000e+02,y= 2.3500000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 1.5400000000000e+02,y= 2.4400000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.5400000000000e+02,y= 2.4400000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.0000000000000e+02,y= 2.4800000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.7200000000000e+02,y= 2.7600000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.7200000000000e+02,y= 2.7600000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.0400000000000e+02,y= 3.1400000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.6200000000000e+02,y= 2.6900000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.6200000000000e+02,y= 2.6900000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.7500000000000e+02,y= 2.5600000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.8100000000000e+02,y= 2.7900000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.8100000000000e+02,y= 2.7900000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.8100000000000e+02,y= 2.7900000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.8100000000000e+02,y= 2.6700000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.8100000000000e+02,y= 2.6700000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('3.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('32.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
# --- Region-shape voxel selection: exercise the selection operators. ---
# Recorded replay: each press/drag/release triple on the graphics canvas is a
# mouse gesture that ends in a Camera.View checkpoint (presumably a view
# tumble/rotate — confirm against the recorded toolbox mode).
checkpoint OOF.PixelSelection.Region
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.6400000000000e+02,y= 2.5900000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.5500000000000e+02,y= 2.8700000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.5500000000000e+02,y= 2.8700000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# 2193 of 8000 voxels are selected after the previous Region operation.
assert tests.voxelSelectionPageStatusCheck(2193,8000)
# Intersect the current selection with the region: count drops to 24.
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Intersect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(24,8000)
# 'Select Only' replaces the selection with the region (back to 2193).
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Select Only')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(2193,8000)
# 'Unselect' removes the region voxels, emptying the selection.
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Unselect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0,8000)
# --- Switch the region shape to Ellipse; every operator here yields an empty
# selection (0/8000), presumably because the chosen ellipse misses the data.
# NOTE: 'zComponenent'/'yComponenent' are the widget paths as recorded
# (typo included) — do not "fix" them or the replay will not find the widget.
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Chooser'), 'Ellipse')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Ellipse:point1:zComponenent').set_text('42.0')
assert tests.voxelSelectionPageStatusCheck(0,8000)
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Select')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0,8000)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Ellipse:point1:yComponenent').set_text('10.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0,8000)
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Intersect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0,8000)
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Select Only')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0,8000)
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Unselect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0,8000)
# --- Despeckle selection modifier: set the 'neighbors' parameter both via
# the slider and via the text entry (each entry edit is followed by a
# FOCUS_CHANGE event so the widget commits its value), then apply with OK.
# The selection is empty going in, so every count check stays 0/8000.
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Chooser'), 'Despeckle')
findWidget('OOF3D:Voxel Selection Page:Pane').set_position(287)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:slider').get_adjustment().set_value( 1.4676923076923e+01)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:slider').get_adjustment().set_value( 1.4676923076923e+01)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Despeckle
assert tests.voxelSelectionPageStatusCheck(0,8000)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:slider').get_adjustment().set_value( 1.5415384615385e+01)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:slider').get_adjustment().set_value( 2.6000000000000e+01)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Despeckle
assert tests.voxelSelectionPageStatusCheck(0,8000)
# Same parameter set through the text entry instead of the slider.
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:entry').set_text('2.2061538461538e+01')
widget_0=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:entry')
widget_0.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_0.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Despeckle
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:entry').set_text('2.6000000000000e+01')
widget_1=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:entry')
widget_1.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_1.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Despeckle
# Recorded canvas press/drag/release gesture; ends in a Camera.View checkpoint.
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.9400000000000e+02,y= 1.7300000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 4.0100000000000e+02,y= 1.6000000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 4.0100000000000e+02,y= 1.6000000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:entry').set_text('1.0000000000000e+01')
widget_2=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:entry')
widget_2.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_2.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Despeckle
assert tests.voxelSelectionPageStatusCheck(0,8000)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:slider').get_adjustment().set_value( 1.0246153846154e+01)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:slider').get_adjustment().set_value( 1.8861538461538e+01)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Despeckle
assert tests.voxelSelectionPageStatusCheck(0,8000)
# --- 'Elkcepsed' ("Despeckle" reversed) selection modifier: same pattern as
# Despeckle above — vary the 'neighbors' slider, apply with OK, and confirm
# the selection stays empty (0/8000).  Camera drag gestures are interleaved
# exactly as recorded.
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Chooser'), 'Elkcepsed')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Elkcepsed:neighbors:slider').get_adjustment().set_value( 8.9384615384615e+00)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Elkcepsed:neighbors:slider').get_adjustment().set_value( 1.3000000000000e+01)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Elkcepsed
assert tests.voxelSelectionPageStatusCheck(0,8000)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8800000000000e+02,y= 1.5100000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 6.2600000000000e+02,y= 8.2000000000000e+01,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 6.2600000000000e+02,y= 8.2000000000000e+01,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Elkcepsed:neighbors:slider').get_adjustment().set_value( 1.2261538461538e+01)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Elkcepsed:neighbors:slider').get_adjustment().set_value( 1.0000000000000e+00)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Elkcepsed
assert tests.voxelSelectionPageStatusCheck(0,8000)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.9400000000000e+02,y= 1.3200000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 5.4400000000000e+02,y= 1.2600000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 5.4400000000000e+02,y= 1.2600000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Elkcepsed:neighbors:slider').get_adjustment().set_value( 1.1846153846154e+00)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Elkcepsed:neighbors:slider').get_adjustment().set_value( 6.3538461538462e+00)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Elkcepsed
assert tests.voxelSelectionPageStatusCheck(0,8000)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.5400000000000e+02,y= 3.0600000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 5.9400000000000e+02,y= 2.6500000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 5.9400000000000e+02,y= 2.6500000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# --- Expand and Shrink selection modifiers. ---
# Expand is applied with its default parameters; Shrink is applied repeatedly
# with different radius values typed into the entry (the intermediate
# set_text calls are keystroke-by-keystroke as recorded, e.g. '.0' -> '4.0').
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Chooser'), 'Expand')
findWidget('OOF3D:Voxel Selection Page:Pane').set_position(336)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Expand
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8200000000000e+02,y= 2.9900000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 6.3900000000000e+02,y= 3.2300000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 6.3900000000000e+02,y= 3.2300000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# Shrink with radius 4.0.
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Chooser'), 'Shrink')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Shrink
assert tests.voxelSelectionPageStatusCheck(0,8000)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.4800000000000e+02,y= 2.0200000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 6.4200000000000e+02,y= 2.6300000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 6.4200000000000e+02,y= 2.6300000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# Shrink with radius 1.0, then 20.0.
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('1.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Shrink
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('2.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('20.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Shrink
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.3600000000000e+02,y= 1.4900000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 5.9200000000000e+02,y= 1.5700000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 5.9200000000000e+02,y= 1.5700000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# Shrink with radius 30.0.
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('0.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('30.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Shrink
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.7100000000000e+02,y= 2.0600000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 6.1800000000000e+02,y= 2.0400000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 6.1700000000000e+02,y= 2.0400000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# Shrink with radius 42.0; the selection was already empty, so it stays 0/8000.
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('3.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('42.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Shrink
assert tests.voxelSelectionPageStatusCheck(0,8000)
# --- Color Range selection modifier. ---
# Sets the reference RGB color via entries and sliders (each entry edit is
# committed by a FOCUS_CHANGE event, as recorded), then the DeltaRGB range,
# and applies with OK.
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.1000000000000e+02,y= 1.5400000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 6.1400000000000e+02,y= 1.3500000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 6.1400000000000e+02,y= 1.3500000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Chooser'), 'Color Range')
findWidget('OOF3D').resize(550, 505)
findWidget('OOF3D:Voxel Selection Page:Pane').set_position(175)
# Reference color: Red/Blue typed as 1.0, then Red/Green re-typed as 0.01,
# and finally Red/Green sliders dragged to 0.
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Red:entry').set_text('1.0000000000000e+00')
widget_3=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Red:entry')
widget_3.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_3.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Blue:entry').set_text('1.0000000000000e+00')
widget_4=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Blue:entry')
widget_4.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_4.window))
widget_5=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Blue:entry')
widget_5.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_5.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Red:entry').set_text('1.0000000000000e-02')
widget_6=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Red:entry')
widget_6.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_6.window))
widget_7=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Red:entry')
widget_7.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_7.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Green:entry').set_text('1.0000000000000e-02')
widget_8=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Green:entry')
widget_8.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_8.window))
widget_9=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Green:entry')
widget_9.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_9.window))
widget_10=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Red:entry')
widget_10.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_10.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Green:slider').get_adjustment().set_value( 0.0000000000000e+00)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Red:slider').get_adjustment().set_value( 0.0000000000000e+00)
# Tolerance band: DeltaRGB of 0.01 per channel.
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:range:DeltaRGB:delta_red:entry').set_text('1.0000000000000e-02')
widget_11=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:range:DeltaRGB:delta_red:entry')
widget_11.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_11.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:range:DeltaRGB:delta_green:entry').set_text('1.0000000000000e-02')
widget_12=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:range:DeltaRGB:delta_green:entry')
widget_12.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_12.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:range:DeltaRGB:delta_blue:entry').set_text('1.0000000000000e-02')
widget_13=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:range:DeltaRGB:delta_blue:entry')
widget_13.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_13.window))
widget_14=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:range:DeltaRGB:delta_blue:entry')
widget_14.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_14.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Color_Range
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.4200000000000e+02,y= 1.2500000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 5.8000000000000e+02,y= 2.1100000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 5.8000000000000e+02,y= 2.1100000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# --- Copy the microstructure via the Microstructure page dialog. ---
# The long run of checkpoints that follows mirrors, in order, the GUI-update
# events the copy operation emits; the replay must consume each one.
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Microstructure')
checkpoint page installed Microstructure
findWidget('OOF3D:Microstructure Page:Copy').clicked()
checkpoint toplevel widget mapped Dialog-Copy microstructure
findWidget('Dialog-Copy microstructure').resize(246, 67)
findWidget('Dialog-Copy microstructure:gtk-ok').clicked()
findWidget('OOF3D:Microstructure Page:Pane').set_position(159)
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint microstructure page sensitized
checkpoint meshable button set
checkpoint Field page sensitized
checkpoint Materials page updated
checkpoint mesh page subproblems sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint boundary page updated
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint microstructure page sensitized
findWidget('OOF3D:Microstructure Page:Pane').set_position(225)
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint skeleton selection page groups sensitized
checkpoint microstructure page sensitized
checkpoint meshable button set
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint skeleton selection page groups sensitized
checkpoint OOF.Microstructure.Copy
# --- Create a new skeleton (Skeleton page -> New dialog, accept defaults). ---
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Voxel Selection')
checkpoint page installed Voxel Selection
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton')
checkpoint page installed Skeleton
findWidget('OOF3D:Skeleton Page:Pane').set_position(199)
findWidget('OOF3D').resize(601, 505)
findWidget('OOF3D:Skeleton Page:Pane').set_position(250)
checkpoint skeleton page sensitized
findWidget('OOF3D:Skeleton Page:New').clicked()
checkpoint toplevel widget mapped Dialog-New skeleton
findWidget('Dialog-New skeleton').resize(380, 191)
findWidget('Dialog-New skeleton:gtk-ok').clicked()
# GUI-update checkpoints emitted by skeleton creation, in recorded order.
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint Graphics_1 Pin Nodes updated
checkpoint skeleton page sensitized
checkpoint Move Node toolbox writable changed
checkpoint Move Node toolbox info updated
checkpoint Graphics_1 Move Nodes sensitized
checkpoint Move Node toolbox writable changed
checkpoint Move Node toolbox info updated
checkpoint Graphics_1 Move Nodes sensitized
checkpoint Graphics_1 Voxel Info updated
checkpoint Graphics_1 Pin Nodes updated
checkpoint Field page sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint boundary page updated
checkpoint skeleton page sensitized
checkpoint skeleton page sensitized
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint OOF.Skeleton.New
checkpoint skeleton selection page updated
# --- Delete the microstructure; confirm the Questioner dialog with 'yes'. ---
# Deletion tears down the dependent skeleton/mesh objects, producing the long
# recorded sequence of page-update checkpoints below.
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Microstructure')
checkpoint page installed Microstructure
findWidget('OOF3D:Microstructure Page:Pane').set_position(246)
findWidget('OOF3D:Microstructure Page:Delete').clicked()
checkpoint toplevel widget mapped Questioner
findWidget('Questioner').resize(225, 89)
findWidget('Questioner:gtk-yes').clicked()
checkpoint Graphics_1 Voxel Info updated
checkpoint Graphics_1 Pin Nodes updated
checkpoint microstructure page sensitized
checkpoint Graphics_1 Voxel Info updated
checkpoint Graphics_1 Pin Nodes updated
checkpoint Graphics_1 Voxel Info updated
checkpoint Graphics_1 Pin Nodes updated
checkpoint skeleton page sensitized
checkpoint Graphics_1 Move Nodes sensitized
checkpoint Move Node toolbox writable changed
checkpoint Move Node toolbox writable changed
checkpoint Move Node toolbox info updated
checkpoint Graphics_1 Move Nodes sensitized
checkpoint Graphics_1 Voxel Info updated
checkpoint Graphics_1 Pin Nodes updated
checkpoint Field page sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint mesh page subproblems sensitized
checkpoint boundary page updated
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint Graphics_1 Voxel Info updated
checkpoint Graphics_1 Pin Nodes updated
checkpoint Field page sensitized
checkpoint Materials page updated
checkpoint mesh page subproblems sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint mesh page sensitized
checkpoint boundary page updated
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint Field page sensitized
##checkpoint skeleton page sensitized
checkpoint OOF.Microstructure.Delete
# --- End of session: save this recording as 'pixsel.log', verify it matches
# the reference via tests.filediff, then close the main window (gdk DELETE).
findMenu(findWidget('OOF3D:MenuBar'), 'File:Save:Python_Log').activate()
checkpoint toplevel widget mapped Dialog-Python_Log
findWidget('Dialog-Python_Log').resize(190, 95)
findWidget('Dialog-Python_Log:filename').set_text('pixsel.log')
findWidget('Dialog-Python_Log').resize(198, 95)
findWidget('Dialog-Python_Log:gtk-ok').clicked()
checkpoint OOF.File.Save.Python_Log
assert tests.filediff('pixsel.log')
widget_15=findWidget('OOF3D')
handled_0=widget_15.event(event(gtk.gdk.DELETE,window=widget_15.window))
| # -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# <EMAIL>.
import tests
findWidget('OOF3D').resize(550, 350)
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Voxel Selection')
checkpoint page installed Voxel Selection
findWidget('OOF3D:Voxel Selection Page:Pane').set_position(336)
assert tests.sensitization0()
assert tests.voxelSelectionPageNoMSCheck()
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Microstructure')
checkpoint page installed Microstructure
findWidget('OOF3D:Microstructure Page:Pane').set_position(225)
findWidget('OOF3D:Microstructure Page:NewFromFile').clicked()
checkpoint toplevel widget mapped Dialog-Load Image and create Microstructure
findWidget('Dialog-Load Image and create Microstructure').resize(401, 215)
findWidget('Dialog-Load Image and create Microstructure:filenames:Entire Directory:directory').set_text('TEST_DATA/5color')
findWidget('Dialog-Load Image and create Microstructure:microstructure_name:Auto').clicked()
findWidget('Dialog-Load Image and create Microstructure:microstructure_name:Text').set_text('1')
findWidget('Dialog-Load Image and create Microstructure:microstructure_name:Text').set_text('1.')
findWidget('Dialog-Load Image and create Microstructure:microstructure_name:Text').set_text('1.0')
findWidget('Dialog-Load Image and create Microstructure:gtk-ok').clicked()
findWidget('OOF3D Messages 1').resize(603, 200)
findWidget('OOF3D:Microstructure Page:Pane').set_position(159)
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint active area status updated
checkpoint microstructure page sensitized
checkpoint Field page sensitized
checkpoint meshable button set
checkpoint Materials page updated
checkpoint mesh page subproblems sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint boundary page updated
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint microstructure page sensitized
checkpoint OOF.Microstructure.Create_From_ImageFile
findMenu(findWidget('OOF3D:MenuBar'), 'Windows:Graphics:New').activate()
checkpoint Move Node toolbox info updated
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame').size_allocate(gtk.gdk.Rectangle(0, 29, 380, 430))
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame').size_allocate(gtk.gdk.Rectangle(0, 29, 380, 430))
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint toplevel widget mapped OOF3D Graphics 1
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame').size_allocate(gtk.gdk.Rectangle(0, 29, 380, 430))
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
findWidget('OOF3D Graphics 1').resize(1000, 800)
checkpoint OOF.Windows.Graphics.New
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
findWidget('OOF3D Graphics 1').resize(1000, 800)
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Voxel Selection')
assert tests.voxelSelectionPageStatusCheck(0, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 0)
assert tests.sensitization1()
checkpoint page installed Voxel Selection
findWidget('OOF3D:Voxel Selection Page:Pane').set_position(336)
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame').size_allocate(gtk.gdk.Rectangle(0, 29, 380, 430))
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
setComboBox(findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBChooser'), 'Voxel Selection')
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame').size_allocate(gtk.gdk.Rectangle(0, 29, 380, 430))
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
setComboBox(findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Voxel Selection:Method:Chooser'), 'Burn')
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.3400000000000e+02,y= 2.7600000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.3400000000000e+02,y= 2.7600000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Burn
assert tests.voxelSelectionPageStatusCheck(2313, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 2313)
assert tests.sensitization2()
findWidget('OOF3D Graphics 1').resize(1000, 800)
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Microstructure')
checkpoint page installed Microstructure
findWidget('OOF3D:Microstructure Page:Pane:VoxelGroups:New').clicked()
checkpoint toplevel widget mapped Dialog-Create new voxel group
findWidget('Dialog-Create new voxel group').resize(246, 67)
findWidget('Dialog-Create new voxel group:name:Auto').clicked()
findWidget('Dialog-Create new voxel group:name:Text').set_text('l')
findWidget('Dialog-Create new voxel group:name:Text').set_text('lo')
findWidget('Dialog-Create new voxel group:name:Text').set_text('low')
findWidget('Dialog-Create new voxel group:name:Text').set_text('lowe')
findWidget('Dialog-Create new voxel group:name:Text').set_text('lower')
findWidget('Dialog-Create new voxel group:name:Text').set_text('lowerl')
findWidget('Dialog-Create new voxel group:name:Text').set_text('lowerle')
findWidget('Dialog-Create new voxel group:name:Text').set_text('lowerlef')
findWidget('Dialog-Create new voxel group:name:Text').set_text('lowerleft')
findWidget('Dialog-Create new voxel group:gtk-ok').clicked()
findWidget('OOF3D:Microstructure Page:Pane').set_position(225)
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint skeleton selection page groups sensitized
checkpoint OOF.PixelGroup.New
checkpoint microstructure page sensitized
checkpoint meshable button set
findWidget('OOF3D:Microstructure Page:Pane:VoxelGroups:Add').clicked()
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint OOF.PixelGroup.AddSelection
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Voxel Selection')
checkpoint page installed Voxel Selection
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Invert
assert tests.voxelSelectionPageStatusCheck(5687, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 5687)
assert tests.sensitization2()
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Undo').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Undo
assert tests.voxelSelectionPageStatusCheck(2313, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 2313)
assert tests.sensitization3()
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Clear').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Clear
assert tests.voxelSelectionPageStatusCheck(0, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 0)
assert tests.sensitization4()
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Chooser'), 'Group')
findWidget('OOF3D:Voxel Selection Page:Pane').set_position(328)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Group
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
assert tests.voxelSelectionPageStatusCheck(2313, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 2313)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.7800000000000e+02,y= 9.4000000000000e+01,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.7800000000000e+02,y= 9.4000000000000e+01,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Burn
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Microstructure')
checkpoint page installed Microstructure
findWidget('OOF3D:Microstructure Page:Pane:VoxelGroups:New').clicked()
checkpoint toplevel widget mapped Dialog-Create new voxel group
findWidget('Dialog-Create new voxel group').resize(246, 67)
findWidget('Dialog-Create new voxel group:name:Text').set_text('')
findWidget('Dialog-Create new voxel group:name:Text').set_text('u')
findWidget('Dialog-Create new voxel group:name:Text').set_text('up')
findWidget('Dialog-Create new voxel group:name:Text').set_text('upp')
findWidget('Dialog-Create new voxel group:name:Text').set_text('uppe')
findWidget('Dialog-Create new voxel group:name:Text').set_text('upper')
findWidget('Dialog-Create new voxel group:name:Text').set_text('upperl')
findWidget('Dialog-Create new voxel group:name:Text').set_text('upperle')
findWidget('Dialog-Create new voxel group:name:Text').set_text('upperlef')
findWidget('Dialog-Create new voxel group:name:Text').set_text('upperleft')
findWidget('Dialog-Create new voxel group:gtk-ok').clicked()
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint skeleton selection page groups sensitized
checkpoint OOF.PixelGroup.New
findWidget('OOF3D:Microstructure Page:Pane:VoxelGroups:Add').clicked()
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint OOF.PixelGroup.AddSelection
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Voxel Selection')
checkpoint page installed Voxel Selection
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Group
assert tests.voxelSelectionPageStatusCheck(3518, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 3518)
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Group:operator:Chooser'), 'Unselect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Group
assert tests.voxelSelectionPageStatusCheck(1205, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 1205)
findWidget('OOF3D Graphics 1').resize(1000, 800)
setComboBox(findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Voxel Selection:Method:Chooser'), 'Point')
findWidget('OOF3D').resize(550, 350)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.8900000000000e+02,y= 1.6800000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.8900000000000e+02,y= 1.6800000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.0700000000000e+02,y= 1.7600000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.0700000000000e+02,y= 1.7600000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.1600000000000e+02,y= 1.5500000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.1600000000000e+02,y= 1.5500000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.3200000000000e+02,y= 1.5800000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.3200000000000e+02,y= 1.5800000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.4300000000000e+02,y= 1.6000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.4300000000000e+02,y= 1.6000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.6500000000000e+02,y= 1.5700000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.6500000000000e+02,y= 1.5700000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8000000000000e+02,y= 1.6100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.8000000000000e+02,y= 1.6100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.9800000000000e+02,y= 1.5800000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.9800000000000e+02,y= 1.5800000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.1700000000000e+02,y= 1.6100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.1700000000000e+02,y= 1.6100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.3100000000000e+02,y= 1.5900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.3100000000000e+02,y= 1.5900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.4200000000000e+02,y= 1.6100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.4200000000000e+02,y= 1.6100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.5200000000000e+02,y= 1.6100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.5200000000000e+02,y= 1.6100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.2900000000000e+02,y= 1.8000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.2900000000000e+02,y= 1.8000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.2300000000000e+02,y= 1.9100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.2300000000000e+02,y= 1.9100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.2200000000000e+02,y= 2.0500000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.2200000000000e+02,y= 2.0500000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.2100000000000e+02,y= 2.2200000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.2100000000000e+02,y= 2.2200000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.3600000000000e+02,y= 2.2100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.3600000000000e+02,y= 2.2100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.5200000000000e+02,y= 2.1900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.5200000000000e+02,y= 2.1900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.5500000000000e+02,y= 2.0700000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.5500000000000e+02,y= 2.0700000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.6400000000000e+02,y= 2.0200000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.6400000000000e+02,y= 2.0200000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# --- Recorded replay: Voxel Selection toolbox, "Point" method ---
# Each click is a BUTTON_PRESS/BUTTON_RELEASE pair at a recorded canvas
# coordinate.  The canvas is forced to the recording-time size (614x396)
# around every synthesized event so the coordinates map to the same voxels,
# then restored.  The three trailing `checkpoint` lines are log-runner
# directives (not Python) that wait for the GUI thread to catch up.
# NOTE(review): state=17/273 are the recorded GdkModifierType masks
# (shift held during selection) — do not alter.
# click voxel at canvas (285, 205)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8500000000000e+02,y= 2.0500000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.8500000000000e+02,y= 2.0500000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (287, 217)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8700000000000e+02,y= 2.1700000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.8700000000000e+02,y= 2.1700000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (282, 231)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8200000000000e+02,y= 2.3100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.8200000000000e+02,y= 2.3100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (280, 249)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8000000000000e+02,y= 2.4900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.8000000000000e+02,y= 2.4900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (270, 262)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.7000000000000e+02,y= 2.6200000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.7000000000000e+02,y= 2.6200000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (251, 274)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.5100000000000e+02,y= 2.7400000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.5100000000000e+02,y= 2.7400000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# --- Recorded replay: second column of Point selections, plus one Undo ---
# Same press/release pattern as above; after the sixth click the toolbox
# Undo button is pressed once, then one more click is recorded.
# click voxel at canvas (315, 205)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.1500000000000e+02,y= 2.0500000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.1500000000000e+02,y= 2.0500000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (314, 218)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.1400000000000e+02,y= 2.1800000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.1400000000000e+02,y= 2.1800000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (313, 234)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.1300000000000e+02,y= 2.3400000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.1300000000000e+02,y= 2.3400000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (310, 250)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.1000000000000e+02,y= 2.5000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.1000000000000e+02,y= 2.5000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (327, 264)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.2700000000000e+02,y= 2.6400000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.2700000000000e+02,y= 2.6400000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (335, 280)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.3500000000000e+02,y= 2.8000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.3500000000000e+02,y= 2.8000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# undo the last point selection via the toolbox Undo button
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Voxel Selection:Undo').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Undo
# click voxel at canvas (343, 280)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.4300000000000e+02,y= 2.8000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.4300000000000e+02,y= 2.8000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# --- Recorded replay: four more Point selections followed by two Undos ---
# click voxel at canvas (326, 208)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.2600000000000e+02,y= 2.0800000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.2600000000000e+02,y= 2.0800000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (344, 206)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.4400000000000e+02,y= 2.0600000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.4400000000000e+02,y= 2.0600000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (354, 207)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.5400000000000e+02,y= 2.0700000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.5400000000000e+02,y= 2.0700000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (357, 216)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.5700000000000e+02,y= 2.1600000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.5700000000000e+02,y= 2.1600000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# undo twice, removing the last two point selections
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Voxel Selection:Undo').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Undo
findWidget('OOF3D Graphics 1:Pane0:Pane2:ToolboxFrame:TBScroll:Voxel Selection:Undo').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Undo
# --- Recorded replay: seven further Point selections ---
# click voxel at canvas (341, 220)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.4100000000000e+02,y= 2.2000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.4100000000000e+02,y= 2.2000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (352, 220)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.5200000000000e+02,y= 2.2000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.5200000000000e+02,y= 2.2000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (359, 218)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.5900000000000e+02,y= 2.1800000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.5900000000000e+02,y= 2.1800000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (374, 221)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.7400000000000e+02,y= 2.2100000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.7400000000000e+02,y= 2.2100000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (374, 209)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.7400000000000e+02,y= 2.0900000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.7400000000000e+02,y= 2.0900000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (376, 190)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.7600000000000e+02,y= 1.9000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.7600000000000e+02,y= 1.9000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (375, 175)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.7500000000000e+02,y= 1.7500000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.7500000000000e+02,y= 1.7500000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# --- Recorded replay: last three Point selections, then verify the
# selection size and intersect it with a pixel group ---
# click voxel at canvas (386, 170)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.8600000000000e+02,y= 1.7000000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.8600000000000e+02,y= 1.7000000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (396, 172)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.9600000000000e+02,y= 1.7200000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.9600000000000e+02,y= 1.7200000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# click voxel at canvas (399, 172)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.9900000000000e+02,y= 1.7200000000000e+02,button=1,state=17,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.9900000000000e+02,y= 1.7200000000000e+02,button=1,state=273,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.Graphics_1.Toolbox.Pixel_Select.Point
# accumulated selection should now contain 1235 of the 8000 voxels
assert tests.pixelSelectionSizeCheck('1.0', 1235)
assert tests.voxelSelectionPageStatusCheck(1235, 8000)
# Intersect the point selection with a pixel group; intersection is empty
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Group:operator:Chooser'), 'Intersect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Group
assert tests.voxelSelectionPageStatusCheck(0, 8000)
assert tests.pixelSelectionSizeCheck('1.0', 0)
assert tests.sensitization5()
# --- Region (Box) selection on the Voxel Selection page ---
# Switch the modification method to "Region" and set box corner point0 to
# (4, 4, 4).  Each coordinate entry is typed in two steps ('.0' then the
# final value), exactly as recorded.  NOTE(review): the widget path segment
# 'xComponenent' is the recorded (misspelled) widget name — must match the GUI.
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Chooser'), 'Region')
findWidget('OOF3D').resize(550, 411)
findWidget('OOF3D:Voxel Selection Page:Pane').set_position(264)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:xComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:xComponenent').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:yComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:yComponenent').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:zComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:zComponenent').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
# box (4,4,4)-(point1) selects 64 voxels
assert tests.voxelSelectionPageStatusCheck(64, 8000)
# point0.z beyond the microstructure (42) clamps; selection grows to 320
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:zComponenent').set_text('42.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(320, 8000)
# --- Rotate the view (tumble), then exercise the Region operators ---
# Switch the canvas to tumble mode and drag from (177,94) to (182,89)
# (press / motion / release at the recorded coordinates, state=16/272
# = no modifier held).
findWidget('OOF3D').resize(550, 411)
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
findWidget('OOF3D Graphics 1:Pane0:Pane2:tumble').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.7700000000000e+02,y= 9.4000000000000e+01,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 1.8200000000000e+02,y= 8.9000000000000e+01,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.8200000000000e+02,y= 8.9000000000000e+01,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
# return the canvas to selection mode
findWidget('OOF3D Graphics 1:Pane0:Pane2:select').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
# redefine the box: point0=(5,5,0), point1=(1,1,10) (corners may be given
# in any order); selecting it yields 390 voxels
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:xComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:xComponenent').set_text('5.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:yComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:yComponenent').set_text('5.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:zComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:zComponenent').set_text('0.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:xComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:xComponenent').set_text('1.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:yComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:yComponenent').set_text('1.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:zComponenent').set_text('10.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(390, 8000)
# intersecting the same box with the current selection leaves 15 voxels
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Intersect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(15, 8000)
# Select Only with both z coordinates at 42 (outside): empty selection
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Select Only')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:zComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:zComponenent').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:zComponenent').set_text('42.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:zComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:zComponenent').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:zComponenent').set_text('42.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0, 8000)
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
findWidget('OOF3D Graphics 1:Pane0:Pane2:tumble').clicked()
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.5400000000000e+02,y= 1.9500000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 7.0200000000000e+02,y= 2.1800000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 7.0200000000000e+02,y= 2.1800000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Unselect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0, 8000)
# --- Box region: edit point0 to (10,10,?) keystroke by keystroke, apply, ---
# --- drag the camera, then zero point1 and apply with "Select Only".     ---
# The repeated set_text calls ('.0' -> '1.0' -> '10.0') are the recorded
# per-keystroke states of the entry widget, not redundant lines.
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:yComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:yComponenent').set_text('1.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:yComponenent').set_text('10.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:xComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:xComponenent').set_text('1.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point0:xComponenent').set_text('10.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
# Recorded camera drag (press -> motion -> release) at fixed canvas size.
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 4.0600000000000e+02,y= 2.1600000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.0800000000000e+02,y= 2.1500000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.0800000000000e+02,y= 2.1500000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# Reset point1 to (0,0,0) and apply with the "Select Only" operator.
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:xComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:xComponenent').set_text('0.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:yComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:yComponenent').set_text('0.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:zComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Box:point1:zComponenent').set_text('0.0')
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Select Only')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
# --- Three recorded camera drags, then Unselect; selection ends empty. ---
# Drag 1: press at (286,202), release at (157,191) (no intermediate motion).
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8600000000000e+02,y= 2.0200000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.5700000000000e+02,y= 1.9100000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# Drag 2: press (84,185) -> release (101,192).
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 8.4000000000000e+01,y= 1.8500000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.0100000000000e+02,y= 1.9200000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# Drag 3: press (114,194) -> motion -> release (254,203).
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.1400000000000e+02,y= 1.9400000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.5400000000000e+02,y= 2.0300000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.5400000000000e+02,y= 2.0300000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# Unselect with the degenerate box; 0 of 8000 voxels remain selected.
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Unselect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0, 8000)
# --- Switch region shape to Circle; set radius/center per keystroke,    ---
# --- apply with Select, then sweep the radius through 10, 20, 30.       ---
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Chooser'), 'Circle')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:center:zComponenent').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:center:zComponenent').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:center:zComponenent').set_text('42.0')
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Select')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
# Circle at z=42 with radius 4 selects nothing (0 of 8000).
assert tests.voxelSelectionPageStatusCheck(0, 8000)
# Recorded camera drag: press (172,151) -> motion -> release (521,155).
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.7200000000000e+02,y= 1.5100000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 5.2100000000000e+02,y= 1.5500000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 5.2100000000000e+02,y= 1.5500000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# Radius -> 10, apply.
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('1.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('10.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
# Radius -> 20, apply.
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('0.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('20.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
# Radius -> 30, apply.
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('0.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('30.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
# --- Six recorded camera tumble drags, each followed by a Camera.View ---
# --- checkpoint.  Pure viewpoint manipulation; no selection changes.   ---
# Drag 1: press (232,191) -> release (124,178).
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.3200000000000e+02,y= 1.9100000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.2400000000000e+02,y= 1.7800000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# Drag 2: press (144,235) -> motion -> release (154,244).
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.4400000000000e+02,y= 2.3500000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 1.5400000000000e+02,y= 2.4400000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 1.5400000000000e+02,y= 2.4400000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# Drag 3: press (300,248) -> motion -> release (272,276).
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.0000000000000e+02,y= 2.4800000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 2.7200000000000e+02,y= 2.7600000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 2.7200000000000e+02,y= 2.7600000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# Drag 4: press (304,314) -> motion -> release (362,269).
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.0400000000000e+02,y= 3.1400000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.6200000000000e+02,y= 2.6900000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.6200000000000e+02,y= 2.6900000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# Drag 5: press (375,256) -> motion -> release (381,279).
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.7500000000000e+02,y= 2.5600000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.8100000000000e+02,y= 2.7900000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.8100000000000e+02,y= 2.7900000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# Drag 6: press (381,279) -> motion -> release (381,267).
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.8100000000000e+02,y= 2.7900000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.8100000000000e+02,y= 2.6700000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.8100000000000e+02,y= 2.6700000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# --- Circle radius 32: exercise all four operators and check counts.    ---
# Expected counts (recorded): Select -> 2193, Intersect -> 24,
# Select Only -> 2193, Unselect -> 0, out of 8000 total voxels.
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('3.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Circle:radius').set_text('32.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
# Recorded camera drag: press (364,259) -> motion -> release (355,287).
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 3.6400000000000e+02,y= 2.5900000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 3.5500000000000e+02,y= 2.8700000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 3.5500000000000e+02,y= 2.8700000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
assert tests.voxelSelectionPageStatusCheck(2193,8000)
# Intersect shrinks the selection to the overlap with the previous one.
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Intersect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(24,8000)
# Select Only replaces the selection entirely with the circle's voxels.
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Select Only')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(2193,8000)
# Unselect removes the circle's voxels, emptying the selection.
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Unselect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0,8000)
# --- Ellipse shape: run all four operators; every apply leaves the ---
# --- selection empty (0 of 8000) with this recorded geometry.      ---
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Chooser'), 'Ellipse')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Ellipse:point1:zComponenent').set_text('42.0')
assert tests.voxelSelectionPageStatusCheck(0,8000)
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Select')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0,8000)
# Widen point1.y and re-apply; still selects nothing.
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:shape:Ellipse:point1:yComponenent').set_text('10.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0,8000)
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Intersect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0,8000)
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Select Only')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0,8000)
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Region:operator:Chooser'), 'Unselect')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Region
assert tests.voxelSelectionPageStatusCheck(0,8000)
# --- Despeckle method: drive the "neighbors" parameter via both the    ---
# --- slider and the text entry (with focus-out to commit), apply each. ---
# Selection is empty throughout, so every apply leaves 0 of 8000 selected.
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Chooser'), 'Despeckle')
findWidget('OOF3D:Voxel Selection Page:Pane').set_position(287)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:slider').get_adjustment().set_value( 1.4676923076923e+01)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:slider').get_adjustment().set_value( 1.4676923076923e+01)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Despeckle
assert tests.voxelSelectionPageStatusCheck(0,8000)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:slider').get_adjustment().set_value( 1.5415384615385e+01)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:slider').get_adjustment().set_value( 2.6000000000000e+01)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Despeckle
assert tests.voxelSelectionPageStatusCheck(0,8000)
# Set the value through the text entry; the FOCUS_CHANGE (focus-out) event
# is what commits the typed value to the parameter widget.
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:entry').set_text('2.2061538461538e+01')
widget_0=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:entry')
widget_0.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_0.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Despeckle
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:entry').set_text('2.6000000000000e+01')
widget_1=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:entry')
widget_1.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_1.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Despeckle
# Recorded camera drag: press (294,173) -> motion -> release (401,160).
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.9400000000000e+02,y= 1.7300000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 4.0100000000000e+02,y= 1.6000000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 4.0100000000000e+02,y= 1.6000000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:entry').set_text('1.0000000000000e+01')
widget_2=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:entry')
widget_2.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_2.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Despeckle
assert tests.voxelSelectionPageStatusCheck(0,8000)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:slider').get_adjustment().set_value( 1.0246153846154e+01)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Despeckle:neighbors:slider').get_adjustment().set_value( 1.8861538461538e+01)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Despeckle
assert tests.voxelSelectionPageStatusCheck(0,8000)
# --- Elkcepsed ("Despeckle" reversed) method: sweep the "neighbors"   ---
# --- slider, interleaved with recorded camera drags.  Selection is    ---
# --- empty throughout, so every apply asserts 0 of 8000.              ---
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Chooser'), 'Elkcepsed')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Elkcepsed:neighbors:slider').get_adjustment().set_value( 8.9384615384615e+00)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Elkcepsed:neighbors:slider').get_adjustment().set_value( 1.3000000000000e+01)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Elkcepsed
assert tests.voxelSelectionPageStatusCheck(0,8000)
# Recorded camera drag: press (288,151) -> motion -> release (626,82).
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8800000000000e+02,y= 1.5100000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 6.2600000000000e+02,y= 8.2000000000000e+01,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 6.2600000000000e+02,y= 8.2000000000000e+01,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# neighbors -> 1, apply.
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Elkcepsed:neighbors:slider').get_adjustment().set_value( 1.2261538461538e+01)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Elkcepsed:neighbors:slider').get_adjustment().set_value( 1.0000000000000e+00)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Elkcepsed
assert tests.voxelSelectionPageStatusCheck(0,8000)
# Recorded camera drag: press (194,132) -> motion -> release (544,126).
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 1.9400000000000e+02,y= 1.3200000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 5.4400000000000e+02,y= 1.2600000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 5.4400000000000e+02,y= 1.2600000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
# neighbors -> ~6.35, apply.
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Elkcepsed:neighbors:slider').get_adjustment().set_value( 1.1846153846154e+00)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Elkcepsed:neighbors:slider').get_adjustment().set_value( 6.3538461538462e+00)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Elkcepsed
assert tests.voxelSelectionPageStatusCheck(0,8000)
# Recorded camera drag: press (254,306) -> motion -> release (594,265).
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.5400000000000e+02,y= 3.0600000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 5.9400000000000e+02,y= 2.6500000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 5.9400000000000e+02,y= 2.6500000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Chooser'), 'Expand')
findWidget('OOF3D:Voxel Selection Page:Pane').set_position(336)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Expand
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.8200000000000e+02,y= 2.9900000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 6.3900000000000e+02,y= 3.2300000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 6.3900000000000e+02,y= 3.2300000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Chooser'), 'Shrink')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Shrink
assert tests.voxelSelectionPageStatusCheck(0,8000)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.4800000000000e+02,y= 2.0200000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 6.4200000000000e+02,y= 2.6300000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 6.4200000000000e+02,y= 2.6300000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('1.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Shrink
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('2.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('20.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Shrink
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.3600000000000e+02,y= 1.4900000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 5.9200000000000e+02,y= 1.5700000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 5.9200000000000e+02,y= 1.5700000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('0.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('30.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Shrink
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.7100000000000e+02,y= 2.0600000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 6.1800000000000e+02,y= 2.0400000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 6.1700000000000e+02,y= 2.0400000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('3.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('4.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Shrink:radius').set_text('42.0')
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Shrink
assert tests.voxelSelectionPageStatusCheck(0,8000)
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.1000000000000e+02,y= 1.5400000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 6.1400000000000e+02,y= 1.3500000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 6.1400000000000e+02,y= 1.3500000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
setComboBox(findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Chooser'), 'Color Range')
findWidget('OOF3D').resize(550, 505)
findWidget('OOF3D:Voxel Selection Page:Pane').set_position(175)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Red:entry').set_text('1.0000000000000e+00')
widget_3=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Red:entry')
widget_3.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_3.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Blue:entry').set_text('1.0000000000000e+00')
widget_4=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Blue:entry')
widget_4.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_4.window))
widget_5=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Blue:entry')
widget_5.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_5.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Red:entry').set_text('1.0000000000000e-02')
widget_6=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Red:entry')
widget_6.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_6.window))
widget_7=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Red:entry')
widget_7.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_7.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Green:entry').set_text('1.0000000000000e-02')
widget_8=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Green:entry')
widget_8.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_8.window))
widget_9=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Green:entry')
widget_9.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_9.window))
widget_10=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Red:entry')
widget_10.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_10.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Green:slider').get_adjustment().set_value( 0.0000000000000e+00)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:reference:RGBColor:Red:slider').get_adjustment().set_value( 0.0000000000000e+00)
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:range:DeltaRGB:delta_red:entry').set_text('1.0000000000000e-02')
widget_11=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:range:DeltaRGB:delta_red:entry')
widget_11.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_11.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:range:DeltaRGB:delta_green:entry').set_text('1.0000000000000e-02')
widget_12=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:range:DeltaRGB:delta_green:entry')
widget_12.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_12.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:range:DeltaRGB:delta_blue:entry').set_text('1.0000000000000e-02')
widget_13=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:range:DeltaRGB:delta_blue:entry')
widget_13.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_13.window))
widget_14=findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:Method:Color Range:range:DeltaRGB:delta_blue:entry')
widget_14.event(event(gtk.gdk.FOCUS_CHANGE, in_=0, window=widget_14.window))
findWidget('OOF3D:Voxel Selection Page:Pane:SelectionModification:OK').clicked()
checkpoint microstructure page sensitized
checkpoint pixel page updated
checkpoint OOF.PixelSelection.Color_Range
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_PRESS,x= 2.4200000000000e+02,y= 1.2500000000000e+02,button=1,state=16,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.MOTION_NOTIFY,x= 5.8000000000000e+02,y= 2.1100000000000e+02,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
window = findOOFWindow('Graphics_1')
oldsize = window.setCanvasSize(614, 396)
canvasobj = findCanvasDrawingArea(findWidget('OOF3D Graphics 1:Pane0:Pane2:Canvas'), windowname='Graphics_1')
canvasobj.emit('event', event(gtk.gdk.BUTTON_RELEASE,x= 5.8000000000000e+02,y= 2.1100000000000e+02,button=1,state=272,window=findCanvasGdkWindow('Graphics_1')))
window.setCanvasSize(oldsize[0], oldsize[1])
findWidget('OOF3D Graphics 1:Pane0:Pane2').size_allocate(gtk.gdk.Rectangle(0, 29, 1000, 430))
checkpoint OOF.Graphics_1.Settings.Camera.View
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Microstructure')
checkpoint page installed Microstructure
findWidget('OOF3D:Microstructure Page:Copy').clicked()
checkpoint toplevel widget mapped Dialog-Copy microstructure
findWidget('Dialog-Copy microstructure').resize(246, 67)
findWidget('Dialog-Copy microstructure:gtk-ok').clicked()
findWidget('OOF3D:Microstructure Page:Pane').set_position(159)
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint microstructure page sensitized
checkpoint meshable button set
checkpoint Field page sensitized
checkpoint Materials page updated
checkpoint mesh page subproblems sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint boundary page updated
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint microstructure page sensitized
findWidget('OOF3D:Microstructure Page:Pane').set_position(225)
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint skeleton selection page groups sensitized
checkpoint microstructure page sensitized
checkpoint meshable button set
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint skeleton selection page groups sensitized
checkpoint OOF.Microstructure.Copy
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Voxel Selection')
checkpoint page installed Voxel Selection
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Skeleton')
checkpoint page installed Skeleton
findWidget('OOF3D:Skeleton Page:Pane').set_position(199)
findWidget('OOF3D').resize(601, 505)
findWidget('OOF3D:Skeleton Page:Pane').set_position(250)
checkpoint skeleton page sensitized
findWidget('OOF3D:Skeleton Page:New').clicked()
checkpoint toplevel widget mapped Dialog-New skeleton
findWidget('Dialog-New skeleton').resize(380, 191)
findWidget('Dialog-New skeleton:gtk-ok').clicked()
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint Graphics_1 Pin Nodes updated
checkpoint skeleton page sensitized
checkpoint Move Node toolbox writable changed
checkpoint Move Node toolbox info updated
checkpoint Graphics_1 Move Nodes sensitized
checkpoint Move Node toolbox writable changed
checkpoint Move Node toolbox info updated
checkpoint Graphics_1 Move Nodes sensitized
checkpoint Graphics_1 Voxel Info updated
checkpoint Graphics_1 Pin Nodes updated
checkpoint Field page sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint boundary page updated
checkpoint skeleton page sensitized
checkpoint skeleton page sensitized
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint OOF.Skeleton.New
checkpoint skeleton selection page updated
setComboBox(findWidget('OOF3D:Navigation:PageMenu'), 'Microstructure')
checkpoint page installed Microstructure
findWidget('OOF3D:Microstructure Page:Pane').set_position(246)
findWidget('OOF3D:Microstructure Page:Delete').clicked()
checkpoint toplevel widget mapped Questioner
findWidget('Questioner').resize(225, 89)
findWidget('Questioner:gtk-yes').clicked()
checkpoint Graphics_1 Voxel Info updated
checkpoint Graphics_1 Pin Nodes updated
checkpoint microstructure page sensitized
checkpoint Graphics_1 Voxel Info updated
checkpoint Graphics_1 Pin Nodes updated
checkpoint Graphics_1 Voxel Info updated
checkpoint Graphics_1 Pin Nodes updated
checkpoint skeleton page sensitized
checkpoint Graphics_1 Move Nodes sensitized
checkpoint Move Node toolbox writable changed
checkpoint Move Node toolbox writable changed
checkpoint Move Node toolbox info updated
checkpoint Graphics_1 Move Nodes sensitized
checkpoint Graphics_1 Voxel Info updated
checkpoint Graphics_1 Pin Nodes updated
checkpoint Field page sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint mesh page subproblems sensitized
checkpoint boundary page updated
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint Graphics_1 Voxel Info updated
checkpoint Graphics_1 Pin Nodes updated
checkpoint Field page sensitized
checkpoint Materials page updated
checkpoint mesh page subproblems sensitized
checkpoint mesh page subproblems sensitized
checkpoint mesh page sensitized
checkpoint mesh page sensitized
checkpoint boundary page updated
checkpoint skeleton selection page selection sensitized
checkpoint skeleton selection page updated
checkpoint skeleton selection page groups sensitized
checkpoint skeleton selection page groups sensitized
checkpoint meshable button set
checkpoint meshable button set
checkpoint microstructure page sensitized
checkpoint Field page sensitized
##checkpoint skeleton page sensitized
checkpoint OOF.Microstructure.Delete
findMenu(findWidget('OOF3D:MenuBar'), 'File:Save:Python_Log').activate()
checkpoint toplevel widget mapped Dialog-Python_Log
findWidget('Dialog-Python_Log').resize(190, 95)
findWidget('Dialog-Python_Log:filename').set_text('pixsel.log')
findWidget('Dialog-Python_Log').resize(198, 95)
findWidget('Dialog-Python_Log:gtk-ok').clicked()
checkpoint OOF.File.Save.Python_Log
assert tests.filediff('pixsel.log')
widget_15=findWidget('OOF3D')
handled_0=widget_15.event(event(gtk.gdk.DELETE,window=widget_15.window)) | en | 0.949105 | # -*- python -*- # This software was produced by NIST, an agency of the U.S. government, # and by statute is not subject to copyright in the United States. # Recipients of this software assume all responsibilities associated # with its operation, modification and maintenance. However, to # facilitate maintenance we ask that before distributing modified # versions of this software, you first contact the authors at # <EMAIL>. ##checkpoint skeleton page sensitized | 1.76617 | 2 |
RT_Task.py | SamuelBishop/AVSS | 0 | 6622248 | #!/usr/bin/python
import queue
import threading
import time
exitFlag = 0  # shared shutdown flag: main thread sets it to 1 to stop the workers
class myThread(threading.Thread):
    """Worker thread bound to a shared task queue.

    ``run`` hands control to :func:`process_data`, which drains task
    names from ``q`` until the module-level ``exitFlag`` is raised.
    """

    def __init__(self, threadID, name, q):
        # Initialise the Thread machinery first; assigning ``self.name``
        # afterwards overrides the auto-generated thread name.
        super().__init__()
        self.threadID = threadID
        self.name = name
        self.q = q

    def run(self):
        print(f"Starting {self.name}")
        process_data(self.name, self.q)
        print(f"Exiting {self.name}")
def sound_alarm():
    """Trigger the audible alarm actuator.

    Returns:
        bool: ``True`` if the alarm was triggered, ``False`` if the
        attempt raised an exception.
    """
    try:
        # Placeholder for the real alarm-driver call.
        print("Sound Alarm!")
    except Exception as e:
        # Report and swallow: a failing actuator must not kill the worker
        # thread.  (Removed the unused ``time_now`` assignment that fed a
        # commented-out logging call — it was dead code.)
        print(e)
        return False
    return True
def turn_on_led():
    """Switch on the indicator LED.

    Returns:
        bool: ``True`` if the LED was switched on, ``False`` if the
        attempt raised an exception.
    """
    try:
        # Placeholder for the real GPIO/LED-driver call.
        print("LED Turned on")
    except Exception as e:
        # Report and swallow: a failing actuator must not kill the worker
        # thread.  (Removed the unused ``time_now`` assignment that fed a
        # commented-out logging call — it was dead code.)
        print(e)
        return False
    return True
def activate_disenfectant():
    """Activate the disinfectant dispenser.

    Returns:
        bool: ``True`` if the dispenser was activated, ``False`` if the
        attempt raised an exception.
    """
    try:
        # Placeholder for the real dispenser-driver call.
        print("Activating Disenfectant")
    except Exception as e:
        # Report and swallow: a failing actuator must not kill the worker
        # thread.  (Removed the unused ``timeNow`` assignment — dead code
        # that also broke the file's snake_case naming convention.)
        print(e)
        return False
    return True
def process_data(threadName, q):
    """Worker loop: consume task names from *q* until ``exitFlag`` is set.

    Known task names ("Sanitation", "Led", "Alarm") are dispatched to the
    matching actuator helper.  Every dequeued item is acknowledged with
    ``q.task_done()`` so a future ``q.join()`` cannot hang on an unknown
    task name (the original only acknowledged the three known names).

    Parameters:
        threadName (str): label used in progress messages.
        q (queue.Queue): shared work queue, guarded by the module-level
            ``queueLock``.
    """
    # Dispatch table keeps the task-name -> actuator mapping in one place.
    handlers = {
        "Sanitation": activate_disenfectant,
        "Led": turn_on_led,
        "Alarm": sound_alarm,
    }
    while not exitFlag:
        queueLock.acquire()
        # Check the *parameter* q, not the global workQueue as the
        # original did (same object in practice, but mixing them was a
        # latent bug waiting for a second queue).
        if not q.empty():
            data = q.get()
            queueLock.release()
            print("%s processing %s" % (threadName, data))
            handler = handlers.get(data)
            if handler is not None:
                handler()
            q.task_done()  # acknowledge every item, known or not
        else:
            queueLock.release()
            time.sleep(1)
object_detected = True  # stand-in for the real object-detection trigger

if object_detected:
    threadList = ["Thread-1", "Thread-2", "Thread-3"]
    taskList = ["Sanitation", "Led", "Alarm"]
    queueLock = threading.Lock()
    workQueue = queue.Queue(10)
    threads = []
    threadID = 1

    # Create one worker thread per name in threadList.
    for tName in threadList:
        thread = myThread(threadID, tName, workQueue)
        thread.start()
        threads.append(thread)
        threadID += 1

    # Fill the queue while holding the lock so workers see a consistent view.
    queueLock.acquire()
    for word in taskList:
        workQueue.put(word)
    queueLock.release()

    # Wait for the queue to drain.  Sleep between polls instead of the
    # original `while ...: pass` busy-loop, which pegged a CPU core.
    while not workQueue.empty():
        time.sleep(0.05)

    # Notify threads it's time to exit.
    exitFlag = 1

    # Wait for all threads to complete.
    for t in threads:
        t.join()
print("Exiting Main Thread") | #!/usr/bin/python
import queue
import threading
import time
exitFlag = 0  # shared shutdown flag: main thread sets it to 1 to stop the workers
class myThread(threading.Thread):
    """Queue-draining worker thread.

    Delegates its whole lifetime to :func:`process_data`, which keeps
    pulling task names off ``q`` until the module-level ``exitFlag``
    flips to a truthy value.
    """

    def __init__(self, threadID, name, q):
        # Base-class init first, so the ``self.name`` assignment below
        # replaces the default "Thread-N" name.
        threading.Thread.__init__(self)
        self.q = q
        self.threadID = threadID
        self.name = name

    def run(self):
        print("Starting " + self.name)
        process_data(self.name, self.q)
        print("Exiting " + self.name)
def sound_alarm():
    """Trigger the audible alarm actuator.

    Returns:
        bool: ``True`` on success, ``False`` if the actuator raised.
    """
    try:
        # Placeholder for the real alarm-driver call.
        print("Sound Alarm!")
    except Exception as e:
        # Best-effort report; the worker thread must survive a failing
        # actuator.  The unused timestamp for the commented-out logging
        # call was dead code and has been dropped.
        print(e)
        return False
    return True
def turn_on_led():
    """Switch on the indicator LED.

    Returns:
        bool: ``True`` on success, ``False`` if the actuator raised.
    """
    try:
        # Placeholder for the real GPIO/LED-driver call.
        print("LED Turned on")
    except Exception as e:
        # Best-effort report; the worker thread must survive a failing
        # actuator.  The unused timestamp for the commented-out logging
        # call was dead code and has been dropped.
        print(e)
        return False
    return True
def activate_disenfectant():
    """Activate the disinfectant dispenser.

    Returns:
        bool: ``True`` on success, ``False`` if the actuator raised.
    """
    try:
        # Placeholder for the real dispenser-driver call.
        print("Activating Disenfectant")
    except Exception as e:
        # Best-effort report; the worker thread must survive a failing
        # actuator.  The unused camelCase ``timeNow`` local (dead code,
        # and off-convention) has been dropped.
        print(e)
        return False
    return True
def process_data(threadName, q):
    """Worker loop: consume task names from *q* until ``exitFlag`` is set.

    Dispatches the known task names ("Sanitation", "Led", "Alarm") to
    their actuator helpers and acknowledges *every* dequeued item with
    ``q.task_done()`` — the original skipped the acknowledgement for
    unknown names, which would deadlock a future ``q.join()``.

    Parameters:
        threadName (str): label used in progress messages.
        q (queue.Queue): shared work queue, guarded by the module-level
            ``queueLock``.
    """
    handlers = {
        "Sanitation": activate_disenfectant,
        "Led": turn_on_led,
        "Alarm": sound_alarm,
    }
    while not exitFlag:
        queueLock.acquire()
        # Consistently use the parameter q (the original tested the
        # global workQueue here — same object today, but fragile).
        if not q.empty():
            data = q.get()
            queueLock.release()
            print("%s processing %s" % (threadName, data))
            handler = handlers.get(data)
            if handler is not None:
                handler()
            q.task_done()  # acknowledge every item, known or not
        else:
            queueLock.release()
            time.sleep(1)
object_detected = True  # stand-in for the real object-detection trigger

if object_detected:
    threadList = ["Thread-1", "Thread-2", "Thread-3"]
    taskList = ["Sanitation", "Led", "Alarm"]
    queueLock = threading.Lock()
    workQueue = queue.Queue(10)
    threads = []
    threadID = 1

    # Create one worker thread per name in threadList.
    for tName in threadList:
        thread = myThread(threadID, tName, workQueue)
        thread.start()
        threads.append(thread)
        threadID += 1

    # Fill the queue while holding the lock so workers see a consistent view.
    queueLock.acquire()
    for word in taskList:
        workQueue.put(word)
    queueLock.release()

    # Wait for the queue to drain.  Sleep between polls instead of the
    # original `while ...: pass` busy-loop, which pegged a CPU core.
    while not workQueue.empty():
        time.sleep(0.05)

    # Notify threads it's time to exit.
    exitFlag = 1

    # Wait for all threads to complete.
    for t in threads:
        t.join()
print("Exiting Main Thread") | en | 0.633932 | #!/usr/bin/python # logging.error(time_now, e) # logging.error(time_now, e) # logging.error(timeNow, e) # Create new threads # Fill the queue # Wait for queue to empty # Notify threads it's time to exit # Wait for all threads to complete | 3.201757 | 3 |
tti/utils/plot.py | Bill-Software-Engineer/trading-technical-indicators | 68 | 6622249 | """
Trading-Technical-Indicators (tti) python library
File name: plot.py
Plotting methods defined under the tti.utils package.
"""
import pandas as pd
import matplotlib.pyplot as plt
def linesGraph(data, y_label, title, lines_color, alpha_values, areas,
               x_label='Date'):
    """
    Returns a lines graph of type matplotlib.pyplot. The graph can be either
    a figure with a single plot, or a figure containing two vertical subplots.

    Parameters:
        data (pandas.DataFrame or a list of pandas.DataFrame objects): The data
            to include in the graph. If data is a single pandas.DataFrame then
            a single plot is prepared. If data is a list of pandas.DataFrame,
            then a plot with subplots vertically stacked is prepared. Each
            pandas.DataFrame in the list is used for a separate subplot. The
            index of the dataframe represents the data on the x-axis and it
            should be of type pandas.DatetimeIndex. Each column of the
            dataframe represents a line in the graph.

        y_label (str): The label of the y-axis of the graph.

        title (str): The title on the top of the graph.

        lines_color ([str,]): The colors (matplotlib.colors) to be used for
            each line of the graph, in the defined order. In case where the
            lines are more than the colors, then the list is scanned again from
            the zero index.

        alpha_values ([float,]): Alpha value of each line, to be used in
            the call of the matplotlib.pyplot.plot method. In case where the
            lines are more than the members of the list, then the list is
            scanned again from the zero index.

        areas ([dict,] or None): Includes the areas to be plotted by using the
            fill_between matplotlib method. Each member of the list should be a
            dictionary with the below keys: ``x``, ``y1``, ``y2``, ``color``,
            see ``fill_between`` matplotlib method for more details.

        x_label (str, default='Date'): The label of the x-axis of the graph.

    Returns:
        matplotlib.pyplot: The prepared graph object.

    Raises:
        TypeError: Type error occurred when validating the ``data``.
    """
    # Normalize to a list so a single DataFrame takes the same code path.
    if type(data) != list:
        data = [data]

    for df in data:
        # Every entry must be a pandas.DataFrame; its columns become lines.
        if not isinstance(df, pd.DataFrame):
            raise TypeError('Invalid input_data type. It was expected ' +
                            '`pd.DataFrame` but `' +
                            str(type(df).__name__) + '` was found.')
        # The index must hold datetimes: it becomes the shared x-axis.
        if not isinstance(df.index, pd.DatetimeIndex):
            raise TypeError('Invalid input_data index type. It was expected ' +
                            '`pd.DatetimeIndex` but `' +
                            str(type(df.index).__name__) + '` was found.')

    plt.figure(figsize=(7, 5))

    # Add the subplots.
    j = 0  # running line counter; rotates through lines_color / alpha_values

    # One vertically-stacked subplot per DataFrame in `data`.
    # (NOTE(review): an earlier comment claimed a maximum of two DataFrames,
    # but the loop below handles any number.)
    for i in range(len(data)):
        plt.subplot(len(data), 1, i + 1)
        for line_name in data[i].columns.values:
            plt.plot(data[i].index, data[i][line_name], label=line_name,
                     color=lines_color[j % len(lines_color)],
                     alpha=alpha_values[j % len(alpha_values)])
            j += 1
        plt.legend(loc=0)
        plt.grid(which='major', axis='y', alpha=0.5)
        # Set attributes for each subplot depending on its position: title
        # goes on the first subplot only, and its x-axis ticks are hidden
        # when another subplot will be drawn beneath it.
        if i == 0:
            plt.title(title, fontsize=11, fontweight='bold')
            if len(data) > 1:
                plt.gca().axes.get_xaxis().set_visible(False)

    # X-axis label applies to the last (bottom) subplot.
    plt.xlabel(x_label, fontsize=11, fontweight='bold')
    plt.gcf().autofmt_xdate()

    # Single y-axis label shared by all subplots, placed on the figure.
    plt.gcf().text(0.04, 0.5, y_label, fontsize=11, fontweight='bold',
                   va='center', rotation='vertical')

    # Plot the shaded areas, if any were requested.
    if areas is not None:
        # Translate the areas specification into concrete python objects:
        # list values and the 'ti_index' sentinel are resolved against `data`.
        areas_objects = []
        for a in areas:
            areas_objects.append({})
            for area_key, area_value in a.items():
                if type(area_value) == list:
                    # If list it contains the data list index, the constant
                    # 'ti_data', and the ti_data column name
                    areas_objects[-1][area_key] = \
                        data[area_value[0]][area_value[2]].to_list()
                elif area_value == 'ti_index':
                    areas_objects[-1][area_key] = data[0].index
                else:
                    areas_objects[-1][area_key] = a[area_key]

        for a in areas_objects:
            plt.gca().fill_between(x=a['x'], y1=a['y1'], y2=a['y2'],
                                   color=a['color'])

    return plt
| """
Trading-Technical-Indicators (tti) python library
File name: plot.py
Plotting methods defined under the tti.utils package.
"""
import pandas as pd
import matplotlib.pyplot as plt
def linesGraph(data, y_label, title, lines_color, alpha_values, areas,
               x_label='Date'):
    """
    Returns a lines graph of type matplotlib.pyplot. The graph can be either
    a figure with a single plot, or a figure containing two vertical subplots.

    Parameters:
        data (pandas.DataFrame or a list of pandas.DataFrame objects): The data
            to include in the graph. If data is a single pandas.DataFrame then
            a single plot is prepared. If data is a list of pandas.DataFrame,
            then a plot with subplots vertically stacked is prepared. Each
            pandas.DataFrame in the list is used for a separate subplot. The
            index of the dataframe represents the data on the x-axis and it
            should be of type pandas.DatetimeIndex. Each column of the
            dataframe represents a line in the graph.

        y_label (str): The label of the y-axis of the graph.

        title (str): The title on the top of the graph.

        lines_color ([str,]): The colors (matplotlib.colors) to be used for
            each line of the graph, in the defined order. In case where the
            lines are more than the colors, then the list is scanned again from
            the zero index.

        alpha_values ([float,]): Alpha value of each line, to be used in
            the call of the matplotlib.pyplot.plot method. In case where the
            lines are more than the members of the list, then the list is
            scanned again from the zero index.

        areas ([dict,] or None): Includes the areas to be plotted by using the
            fill_between matplotlib method. Each member of the list should be a
            dictionary with the below keys: ``x``, ``y1``, ``y2``, ``color``,
            see ``fill_between`` matplotlib method for more details.

        x_label (str, default='Date'): The label of the x-axis of the graph.

    Returns:
        matplotlib.pyplot: The prepared graph object.

    Raises:
        TypeError: Type error occurred when validating the ``data``.
    """

    # For handling a list as input always. isinstance is used instead of a
    # strict type() comparison so that list subclasses are also accepted.
    if not isinstance(data, list):
        data = [data]

    for df in data:

        # Validate that the input_data parameter is a pandas.DataFrame object
        if not isinstance(df, pd.DataFrame):
            raise TypeError('Invalid input_data type. It was expected ' +
                            '`pd.DataFrame` but `' +
                            str(type(df).__name__) + '` was found.')

        # Validate that the index of the pandas.DataFrame is of type date
        if not isinstance(df.index, pd.DatetimeIndex):
            raise TypeError('Invalid input_data index type. It was expected ' +
                            '`pd.DatetimeIndex` but `' +
                            str(type(df.index).__name__) + '` was found.')

    plt.figure(figsize=(7, 5))

    # Add the subplots
    j = 0  # Used for plot attributes use in rotation

    # Each member of data gets its own vertically stacked subplot. The
    # axis-visibility styling below assumes at most two subplots.
    for i in range(len(data)):
        plt.subplot(len(data), 1, i + 1)

        for line_name in data[i].columns.values:
            plt.plot(data[i].index, data[i][line_name], label=line_name,
                     color=lines_color[j % len(lines_color)],
                     alpha=alpha_values[j % len(alpha_values)])
            j += 1

        plt.legend(loc=0)
        plt.grid(which='major', axis='y', alpha=0.5)

        # Set attributes for each subplot depending its position
        if i == 0:
            plt.title(title, fontsize=11, fontweight='bold')

            # Hide the top subplot's x-axis; the bottom subplot supplies
            # the shared date axis.
            if len(data) > 1:
                plt.gca().axes.get_xaxis().set_visible(False)

    # Last subplot x-axis
    plt.xlabel(x_label, fontsize=11, fontweight='bold')
    plt.gcf().autofmt_xdate()

    # Common y-axis label
    plt.gcf().text(0.04, 0.5, y_label, fontsize=11, fontweight='bold',
                   va='center', rotation='vertical')

    # Plot areas
    if areas is not None:

        # Translate the areas to python objects
        areas_objects = []
        for a in areas:
            areas_objects.append({})
            for area_key, area_value in a.items():
                if isinstance(area_value, list):
                    # If list it contains the data list index, the constant
                    # 'ti_data', and the ti_data column name
                    areas_objects[-1][area_key] = \
                        data[area_value[0]][area_value[2]].to_list()
                elif area_value == 'ti_index':
                    areas_objects[-1][area_key] = data[0].index
                else:
                    areas_objects[-1][area_key] = a[area_key]

        for a in areas_objects:
            plt.gca().fill_between(x=a['x'], y1=a['y1'], y2=a['y2'],
                                   color=a['color'])

    return plt
| en | 0.734471 | Trading-Technical-Indicators (tti) python library File name: plot.py Plotting methods defined under the tti.utils package. Returns a lines graph of type matplotlib.pyplot. The graph can be either a figure with a single plot, or a figure containing two vertical subplots. Parameters: data (pandas.DataFrame or a list of pandas.DataFrame objects): The data to include in the graph. If data is a single pandas.DataFrame then a single plot is prepared. If data is a list of pandas.DataFrame, then a plot with subplots vertically stacked is prepared. Each pandas.DataFrame in the list is used for a separate subplot. The index of the dataframe represents the data on the x-axis and it should be of type pandas.DatetimeIndex. Each column of the dataframe represents a line in the graph. y_label (str): The label of the y-axis of the graph. title (str): The title on the top of the graph. lines_color ([str,]): The colors (matplotlib.colors) to be used for each line of the graph, in the defined order. In case where the lines are more than the colors, then the list is scanned again from the zero index. alpha_values ([float,]): Alpha value of each line, to be used in the call of the matplotlib.pyplot.plot method. In case where the lines are more than the members of the list, then the list is scanned again from the zero index. areas ([dict,] or None): Includes the areas to be plotted by using the fill_between matplotlib method. Each member of the list should be a dictionary with the below keys: ``x``, ``y1``, ``y2``, ``color``, see ``fill_between`` matplotlib method for more details. x_label (str, default='Date'): The label of the x-axis of the graph. Returns: matplotlib.pyplot: The prepared graph object. Raises: TypeError: Type error occurred when validating the ``data``. 
# For handling a list as input always # Validate that the input_data parameter is a pandas.DataFrame object # Validate that the index of the pandas.DataFrame is of type date # Add the subplots # Used for plot attributes use in rotation # Maximum of two DataFrames are considered from the data parameter # Set attributes for each subplot depending its position # Last subplot x-axis # Common y-axis label # Plot areas # Translate the areas to python objects # If list it contains the data list index, the constant # 'ti_data', and the ti_data column name | 3.60033 | 4 |
Python-For-Everyone-Horstmann/Chapter10-Inheritance/test_10_8.py | islayy/Books-solutions | 0 | 6622250 | <reponame>islayy/Books-solutions<gh_stars>0
# Unit tests for P10_8.py
# IMPORTS
from P10_8 import Person
from P10_8 import Student
import unittest
class PersonTests(unittest.TestCase):
    """Unit tests for the Person class defined in P10_8."""

    # Fixture values shared by every test method.
    NAME = "John"
    BIRTH_YEAR = 1986

    def setUp(self):
        # A fresh Person is constructed before each test runs.
        self.person = Person(self.NAME, self.BIRTH_YEAR)

    def test_get_name(self):
        """get_name() returns the name given to the constructor."""
        self.assertEqual(self.NAME, self.person.get_name())

    def test_get_birth_year(self):
        """get_year() returns the birth year given to the constructor."""
        self.assertEqual(self.BIRTH_YEAR, self.person.get_year())

    def test_get_age(self):
        """get_age() equals 2014 minus the birth year.

        NOTE(review): assumes Person computes age against the fixed
        year 2014 -- confirm against the P10_8 implementation.
        """
        self.assertEqual(2014 - self.BIRTH_YEAR, self.person.get_age())

    def test_object_repr(self):
        """Smoke test: printing the object exercises its string form."""
        print(self.person)
class StudentTests(unittest.TestCase):
    """Unit tests for the Student class defined in P10_8."""

    def setUp(self):
        # A fresh Student is constructed before each test runs.
        self.student = Student("Mike", 1989, "Law")

    def test_get_major(self):
        """get_major() returns the major given to the constructor."""
        self.assertEqual("Law", self.student.get_major())

    def test_object_repr(self):
        """Smoke test: printing the object exercises its string form."""
        print(self.student)
# PROGRAM RUN
# Discover and run every TestCase in this module when the file is executed
# directly (e.g. `python test_10_8.py`); importing the module runs nothing.
if __name__ == '__main__':
    unittest.main()
| # Unit tests for P10_8.py
# IMPORTS
from P10_8 import Person
from P10_8 import Student
import unittest
class PersonTests(unittest.TestCase):
    """Unit tests for the Person class defined in P10_8."""

    # Fixture values shared by every test method.
    NAME = "John"
    BIRTH_YEAR = 1986

    def setUp(self):
        # A fresh Person is constructed before each test runs.
        self.person = Person(self.NAME, self.BIRTH_YEAR)

    def test_get_name(self):
        """get_name() returns the name given to the constructor."""
        self.assertEqual(self.NAME, self.person.get_name())

    def test_get_birth_year(self):
        """get_year() returns the birth year given to the constructor."""
        self.assertEqual(self.BIRTH_YEAR, self.person.get_year())

    def test_get_age(self):
        """get_age() equals 2014 minus the birth year.

        NOTE(review): assumes Person computes age against the fixed
        year 2014 -- confirm against the P10_8 implementation.
        """
        self.assertEqual(2014 - self.BIRTH_YEAR, self.person.get_age())

    def test_object_repr(self):
        """Smoke test: printing the object exercises its string form."""
        print(self.person)
class StudentTests(unittest.TestCase):
    """Unit tests for the Student class defined in P10_8."""

    def setUp(self):
        # A fresh Student is constructed before each test runs.
        self.student = Student("Mike", 1989, "Law")

    def test_get_major(self):
        """get_major() returns the major given to the constructor."""
        self.assertEqual("Law", self.student.get_major())

    def test_object_repr(self):
        """Smoke test: printing the object exercises its string form."""
        print(self.student)
# PROGRAM RUN
if __name__ == '__main__':
unittest.main() | en | 0.633987 | # Unit tests for P10_8.py # IMPORTS # PROGRAM RUN | 3.454525 | 3 |