input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import gridspec
import parmap
import numpy as np
import pandas as pd
import os
import shutil
import cv2
import scipy.io as sio
import scipy.signal
from Specgram.Specgram import Specgram
import glob2
from numba import jit
from sklearn.svm import SVC # "Support vector classifier"
# functions
def plot_median_sem_over_single_trials(area_ids, trial_courses):
    """Plot, per brain area, the median DF/F time course +/- SEM across trials.

    Parameters
    ----------
    area_ids : np.ndarray
        1D array of area identifiers; one subplot in a 6x6 grid per area.
    trial_courses : np.ndarray
        Shape (n_trials, n_areas, n_times): DF/F trace per trial and area.

    Displays the figure with plt.show(); returns nothing.
    """
    ymin, ymax = -10, 10
    n_trials = trial_courses.shape[0]
    n_times = trial_courses.shape[2]
    for k in range(area_ids.shape[0]):
        ax = plt.subplot(6, 6, k+1)
        temp = trial_courses[:, k]
        median = np.nanmedian(temp, axis=0)
        print ("Area: ", area_ids[k])
        # compute STD and SEM
        std = np.nanstd(temp, axis=0)
        # BUG FIX: SEM = std / sqrt(n); the original divided by n, which
        # underestimates the standard error by a factor of sqrt(n).
        sem = std / np.sqrt(n_trials)
        plt.plot(median, c='blue')
        # zero baseline plus a vertical marker at the trial midpoint
        plt.plot([0, n_times], [0, 0], 'r--', c='black')
        plt.plot([n_times//2, n_times//2],
                 [ymin, ymax], 'r--', c='black')
        plt.ylim(-10, 10)
        plt.plot(median+sem, color='red')
        plt.plot(median-sem, color='red')
        plt.title("Area: "+ str(area_ids[k]),
                  fontsize=12, pad=.9)
        # only the last subplot keeps its x tick labels
        if k < (area_ids.shape[0]-1):
            plt.xticks([])
        if k == 0:
            plt.ylabel("DF/F %", fontsize=15)
    plt.suptitle("All significant areas (10pixels+): medians over # trials: "+
                 str(trial_courses.shape[0]), fontsize=16)
    plt.show()
def sum_pixels_in_registered_mask(data, maskwarp):
    """Average pixel values within each registered mask area, per trial.

    Parameters
    ----------
    data : np.ndarray
        Trials stack; each trial is reshaped to (181, n_pixels).
        NOTE(review): assumes exactly 181 frames per trial -- confirm.
    maskwarp : np.ndarray
        2D label image; each distinct value is an area id.

    Returns
    -------
    (area_ids, trial_courses) where trial_courses has shape
    (n_trials, n_kept_areas, 181); only areas with more than 10 pixels
    are kept.
    """
    print (" # of trials: ", data.shape[0])
    areas = np.unique(maskwarp)
    print (" # of areas: ", areas.shape)
    # flatten the mask once: 1D index lookups are cheaper than 2D masking
    flat_mask = maskwarp.reshape(-1)
    trial_courses = []
    area_ids = []
    for trial in range(data.shape[0]):
        if trial % 10 == 0:
            print ("computing trial: ", trial)
        # flatten this trial to (frames, pixels) for fast column selection
        frames = np.float32(data[trial].reshape(181, -1))
        per_area = []
        for id_ in areas:
            pixel_idx = np.where(flat_mask == id_)[0]
            # keep only areas covering more than 10 pixels
            if pixel_idx.shape[0] > 10:
                area_ids.append(id_)
                per_area.append(np.nanmean(frames[:, pixel_idx], axis=1))
        trial_courses.append(per_area)
    # area ids were appended once per trial; deduplicate
    area_ids = np.int32(np.unique(area_ids))
    trial_courses = np.float32(trial_courses)
    print ("# trials, # areas, # times: ", trial_courses.shape)
    print ("area ids: ", area_ids.shape)
    return area_ids, trial_courses
def make_movie(data):
    """Render a stack of 2D frames to an mp4 via matplotlib's ffmpeg writer.

    NOTE(review): the output path '/home/cat/video.mp4' is hardcoded,
    and the color scale is fixed to [-0.1, 0.1].
    """
    from matplotlib import animation
    # NOTE(review): this fps=5 writer is never used -- it is overwritten
    # by the fps=30 writer created just before saving below.
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=5, metadata=dict(artist='Me'), bitrate=1800)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # hide axes so only the image is visible
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    data = np.float32(data)
    print (data.shape)
    trial = 0  # NOTE(review): unused leftover from an earlier version
    print (np.nanmin(data), np.nanmax(data))
    n_frames = data.shape[0]
    print ("n_frames: ", n_frames)
    # seed the image artist with the middle frame
    im = ax.imshow(data[data.shape[0]//2], vmin=-0.1, vmax=0.1, cmap='viridis')
    ax.set_title("")
    fig.set_size_inches([5,5])
    def update_img(n):
        # per-frame callback: swap in frame n and show its index as title
        print (n)
        ax.set_title(str(n))
        im.set_data(data[n])
        return im
    ani = animation.FuncAnimation(fig, update_img,n_frames,interval=30)
    writer = animation.writers['ffmpeg'](fps=30)
    ani.save('/home/cat/video.mp4',writer=writer)
    plt.close()
def make_training_sets_multiple_tests_window(time,
                                             trial_courses_fixed, trial_courses_fixed_ids,
                                             trial_courses_random_fixed, trial_courses_random_ids):
    """Build SVM train/test matrices from a 30-frame window starting at `time`.

    Parameters
    ----------
    time : int
        Start frame of the [time, time+30) window.
    trial_courses_fixed, trial_courses_random_fixed : np.ndarray
        Shape (n_trials, n_areas, n_times) for real and random trials.
    trial_courses_fixed_ids, trial_courses_random_ids : np.ndarray
        Indices of trials used for training; the rest become test trials.

    Returns
    -------
    (X, y, test_trials, test_trials_random): X stacks real-over-random
    training rows flattened over (areas x 30 frames); y is 1 for real,
    0 for random.
    """
    # training examples: selected trials, flattened over (areas x window)
    good_trials = trial_courses_fixed[trial_courses_fixed_ids, :, time:time+30].reshape(
        trial_courses_fixed_ids.shape[0], -1)
    # held-out real trials = everything not selected for training
    temp = np.arange(trial_courses_fixed.shape[0])
    idx = np.delete(temp, trial_courses_fixed_ids)
    test_trials = trial_courses_fixed[idx, :, time:time+30].reshape(idx.shape[0], -1)
    random_trials = trial_courses_random_fixed[trial_courses_random_ids, :, time:time+30].reshape(
        trial_courses_random_ids.shape[0], -1)
    # BUG FIX: the original referenced the undefined name
    # `trial_courses_random` here, raising NameError at runtime.
    temp = np.arange(trial_courses_random_fixed.shape[0])
    idx = np.delete(temp, trial_courses_random_ids)
    test_trials_random = trial_courses_random_fixed[idx, :, time:time+30].reshape(idx.shape[0], -1)
    # labels: 1 = real trial, 0 = random trial
    y = np.zeros(good_trials.shape[0]+random_trials.shape[0], 'int32')
    y[:good_trials.shape[0]] = 1
    X = np.vstack((good_trials, random_trials))
    print ("done time: ", time)
    return X, y, test_trials, test_trials_random
def make_training_sets_multiple_tests(time,
                                      trial_courses_fixed, trial_courses_fixed_ids,
                                      trial_courses_random_fixed, trial_courses_random_ids):
    """Build SVM train/test matrices from the single frame at index `time`.

    Same contract as make_training_sets_multiple_tests_window but using one
    time point instead of a 30-frame window. Returns
    (X, y, test_trials, test_trials_random) with y = 1 for real trials,
    0 for random trials.
    """
    # training examples: selected trials, flattened over areas
    good_trials = trial_courses_fixed[trial_courses_fixed_ids, :, time].reshape(
        trial_courses_fixed_ids.shape[0], -1)
    # held-out real trials = everything not selected for training
    temp = np.arange(trial_courses_fixed.shape[0])
    idx = np.delete(temp, trial_courses_fixed_ids)
    test_trials = trial_courses_fixed[idx, :, time].reshape(idx.shape[0], -1)
    random_trials = trial_courses_random_fixed[trial_courses_random_ids, :, time].reshape(
        trial_courses_random_ids.shape[0], -1)
    # BUG FIX: the original referenced the undefined name
    # `trial_courses_random` here, raising NameError at runtime.
    temp = np.arange(trial_courses_random_fixed.shape[0])
    idx = np.delete(temp, trial_courses_random_ids)
    test_trials_random = trial_courses_random_fixed[idx, :, time].reshape(idx.shape[0], -1)
    # labels: 1 = real trial, 0 = random trial
    y = np.zeros(good_trials.shape[0]+random_trials.shape[0], 'int32')
    y[:good_trials.shape[0]] = 1
    X = np.vstack((good_trials, random_trials))
    print ("done time: ", time)
    return X, y, test_trials, test_trials_random
def make_training_sets(time, trial_courses_fixed, trial_courses_random_fixed):
    """Build SVM train/test matrices at frame `time` with a fixed 50-trial split.

    The first 50 trials of each condition train the classifier; all
    remaining trials are held out for testing. Returns
    (X, y, test_trials, test_trials_random) with y = 1 for real trials,
    0 for random trials.
    """
    n_train = 50  # first 50 trials of each condition are used for training
    good_trials = trial_courses_fixed[:n_train, :, time].reshape(n_train, -1)
    # GENERALIZED: the original hardcoded reshape(13, -1) / reshape(50, -1),
    # which only worked for exactly 63 real / 100 random trials.
    test_trials = trial_courses_fixed[n_train:, :, time].reshape(
        trial_courses_fixed.shape[0] - n_train, -1)
    random_trials = trial_courses_random_fixed[:n_train, :, time].reshape(n_train, -1)
    test_trials_random = trial_courses_random_fixed[n_train:, :, time].reshape(
        trial_courses_random_fixed.shape[0] - n_train, -1)
    # labels: 1 = real trial, 0 = random trial
    y = np.zeros(good_trials.shape[0] + random_trials.shape[0], 'int32')
    y[:good_trials.shape[0]] = 1
    X = np.vstack((good_trials, random_trials))
    print ("done time: ", time)
    return X, y, test_trials, test_trials_random
def make_training_sets_multi_times(times, trial_courses_fixed, trial_courses_random_fixed):
    """Build SVM train/test matrices over the window times[0]:times[1].

    The first 50 trials of each condition train the classifier; all
    remaining trials are held out for testing. Rows are flattened over
    (areas x window length). Returns (X, y, test_trials,
    test_trials_random) with y = 1 for real trials, 0 for random trials.
    """
    n_train = 50  # first 50 trials of each condition are used for training
    good_trials = trial_courses_fixed[:n_train, :, times[0]:times[1]].reshape(n_train, -1)
    # GENERALIZED: the original hardcoded reshape(13, -1) / reshape(50, -1),
    # which only worked for exactly 63 real / 100 random trials.
    test_trials = trial_courses_fixed[n_train:, :, times[0]:times[1]].reshape(
        trial_courses_fixed.shape[0] - n_train, -1)
    random_trials = trial_courses_random_fixed[:n_train, :, times[0]:times[1]].reshape(n_train, -1)
    test_trials_random = trial_courses_random_fixed[n_train:, :, times[0]:times[1]].reshape(
        trial_courses_random_fixed.shape[0] - n_train, -1)
    # labels: 1 = real trial, 0 = random trial
    y = np.zeros(good_trials.shape[0] + random_trials.shape[0], 'int32')
    y[:good_trials.shape[0]] = 1
    X = np.vstack((good_trials, random_trials))
    # BUG FIX: the original printed the undefined name `time` (NameError)
    print ("done time: ", times)
    return X, y, test_trials, test_trials_random
def make_training_sets_multi_times_multi_areas(times,
area_id,
trial_courses_fixed,
trial_courses_random_fixed):
# combine good trials with random trials for training sets:
#time = 0
good_trials = trial_courses_fixed[:50, area_id,times[0]:times[1]].reshape(50, -1)
#print ("good trials: ", good_trials.shape)
test_trials = trial_courses_fixed[50:, area_id,times[0]:times[1]].reshape(13, -1)
random_trials = trial_courses_random_fixed[:50, area_id,times[0]:times[1]].reshape(50, -1)
#print ("random_trials: ", random_trials.shape)
test_trials_random = trial_courses_random_fixed[50:, area_id,times[0]:times[1]].reshape(50, -1)
# make labels
y = np.zeros(100,'int32')
y[:50]=1
# concatenate
X = np.vstack((good_trials,random_trials))
#print ("done time: ", time)
return X, y, test_trials, test_trials_random
def parallel_svm_multiple_tests(time,
                                trial_courses_fixed, trial_courses_fixed_ids,
                                trial_courses_random_fixed, trial_courses_random_ids):
    """Train one linear SVM per train/test split and score both test sets.

    For each split k, trains on the window starting at `time` and records
    the fraction of held-out real trials (res1) and held-out random trials
    (res2) classified as real. Returns (res1, res2) as lists.
    """
    res1 = []
    res2 = []
    n_splits = len(trial_courses_fixed_ids)
    for split in range(n_splits):
        X, y, held_out, held_out_random = make_training_sets_multiple_tests_window(
            time,
            trial_courses_fixed, trial_courses_fixed_ids[split],
            trial_courses_random_fixed, trial_courses_random_ids[split])
        print (" y: ", y.shape, y)
        # linear SVM, fixed regularization
        classifier = SVC(kernel='linear', C=1)
        classifier.fit(X, y)
        predictions = classifier.predict(held_out)
        print ("predict test trial: ", predictions)
        # fraction of held-out trials labelled "real"
        res1.append(np.sum(predictions) / float(predictions.shape[0]))
        predictions = classifier.predict(held_out_random)
        res2.append(np.sum(predictions) / float(predictions.shape[0]))
    return (res1, res2)
def parallel_svm_multi_time(times, trial_courses_fixed, trial_courses_random_fixed):
    """Train a linear SVM on the window `times` and score both held-out sets.

    Returns (res1, res2): the fraction of held-out real trials classified
    as real, and the fraction of held-out random trials classified as real.
    """
    print ("times ", times)
    X, y, test_trials, test_trials_random = make_training_sets_multi_times(times,
                                                                           trial_courses_fixed,
                                                                           trial_courses_random_fixed)
    model = SVC(kernel='linear', C=2)
    model.fit(X, y)
    yfit = model.predict(test_trials)
    res1 = np.sum(yfit)/float(yfit.shape[0])
    # BUG FIX: the original appended to `real_data`/`random_data`, module
    # globals that are never defined anywhere -> NameError at runtime.
    yfit = model.predict(test_trials_random)
    res2 = np.sum(yfit)/float(yfit.shape[0])
    print (res1, res2)
    return (res1, res2)
def parallel_svm_multi_time_multi_area(times,
                                       areas,
                                       trial_courses_fixed,
                                       trial_courses_random_fixed):
    """Train one linear SVM per area over the window `times`.

    Returns (res1_array, res2_array): per area, the fraction of held-out
    real trials and held-out random trials classified as real.
    """
    print ("times ", times)
    res1_array = []
    res2_array = []
    for current_area in areas:
        X, y, held_out, held_out_random = make_training_sets_multi_times_multi_areas(
            times,
            current_area,
            trial_courses_fixed,
            trial_courses_random_fixed)
        # linear SVM, fixed regularization
        classifier = SVC(kernel='linear', C=2)
        classifier.fit(X, y)
        predictions = classifier.predict(held_out)
        res1_array.append(np.sum(predictions)/float(predictions.shape[0]))
        predictions = classifier.predict(held_out_random)
        res2_array.append(np.sum(predictions)/float(predictions.shape[0]))
    print ("done times: ", times)
    return (res1_array, res2_array)
# find sudden movements in time series
def find_starts(feature):
    """Detect movement-initiation times in a 1D feature trace.

    Computes a spectrogram of `feature` and flags every bin where the
    median-subtracted power rises away from exactly zero (burst onset).
    Returns onset times in seconds.

    NOTE(review): assumes a 15 Hz sampling rate and that spectrogram bins
    fall at half that rate (the `/15.*2` conversions) -- confirm against
    Specgram's tres/width settings.
    """
    plotting = False
    # time axis in seconds (15 Hz acquisition assumed)
    times = np.arange(feature.shape[0])/15. #- 0.5
    fs=15
    P, extent = Specgram(data = feature, sampfreq = fs,
                         p0=-60, f0=0.5, f1=fs,
                         width=0.25, tres = 0.125)
    # median-subtracted power vs. rectified feature, used below to estimate
    # the temporal shift between the two signals
    data1 = P[0]-np.median(P[0])
    data2 = np.abs(feature)
    # only need to find this shift once!
    rolling_data = []
    for k in range(-20,20,1):
        rolling_data.append((data1*np.roll(data2[::2][:-1],k)).sum())
    # NOTE(review): the computed argmax shift is ignored; a fixed value is
    # used instead (see the print below).
    shift = -1
    print ("HARDCODED feature shift: ", shift)
    # plot maximum for the roll shift
    if plotting:
        fig = plt.figure()
        ax = plt.subplot(2,1,1)
        plt.plot(rolling_data)
    P_med = P[0]-np.median(P[0])
    starts = []
    # onset = a bin whose power grows while the previous bin was exactly zero
    for k in range(1,P_med.shape[0],1):
        if (P_med[k]>P_med[k-1]) and (P_med[k-1]==0):
            starts.append(k)
    # bin index -> seconds (presumably spectrogram bins at half the 15 Hz
    # rate; verify)
    starts = np.array(starts)/15.*2
    # plot feature traces, power-spectrogram peaks, argrelmax peaks and initiation peaks
    if plotting:
        locs = np.array(scipy.signal.argrelmax(P_med)).squeeze()/15.*2
        ax=plt.subplot(2,1,2)
        plt.plot(times+shift/15., np.abs(feature))
        plt.plot(times[::2][:-1],P_med, c='red')
        plt.scatter(locs, P_med[np.int32(locs*15/2.)], c='green')
        plt.scatter(starts, P_med[np.int32(starts*15/2)]*1.1, c='orange')
        plt.show()
    return starts
def visualize_lever_vs_DLClever(starts_arrays, abscodes,abstimes):
labels = ['left_paw','right_paw','nose','lever','right_ear','jaw','tongue']
# convert abspositiosn to integers
vals = []
for k in range(abscodes.shape[0]):
vals.append(np.int(abscodes[k].decode()))
vals=np.array(vals)
idx04 = np.where(vals==4)[0]
idx02 = np.where(vals==2)[0]
fig=plt.figure()
ax=plt.subplot(111)
# plot lever starts
for k in range(len(starts_arrays)):
if k==3:
ax.scatter(starts_arrays[k], starts_arrays[k]*0+k, label=labels[k])
# plot lever codes
ax.scatter(abstimes[idx04], idx04*0+8, c='darkblue', label='04 codes')
ax.scatter(abstimes[idx02], idx02*0+9, c='darkgreen', label='02 codes')
# metadata
ax.legend(title="Behaviours initiated by", fontsize=15)
plt.xlabel("Time (sec)",fontsize=20)
plt.yticks([])
ax.tick_params(axis='both', which='major', labelsize=20)
plt.title("DLC traced behaviour initiations by body part ", fontsize=20)
# compute crosscorrelogram between lever starts and
shifts = np.arange(-5.0, 5.0, 0.03)
arraysum = np.zeros(1400*1000,'int16')
# select only the first entry in a series
starts_04 = []
starts_04.append(abstimes[idx04][0])
for k in | |
'1615446':{'en': 'Dickson, TN'},
'1615449':{'en': 'Lebanon, TN'},
'1615451':{'en': 'Gallatin, TN'},
'1615452':{'en': 'Gallatin, TN'},
'1615453':{'en': 'Lebanon, TN'},
'1615457':{'en': 'Nashville, TN'},
'1615459':{'en': 'Smyrna, TN'},
'1615460':{'en': 'Nashville, TN'},
'1615463':{'en': 'Nashville, TN'},
'1615472':{'en': 'Franklin, TN'},
'1615494':{'en': 'Murfreesboro, TN'},
'1615499':{'en': 'Nashville, TN'},
'1615514':{'en': 'Nashville, TN'},
'1615515':{'en': 'Nashville, TN'},
'1615538':{'en': 'Franklin, TN'},
'1615547':{'en': 'Lebanon, TN'},
'1615563':{'en': 'Woodbury, TN'},
'1615567':{'en': 'Franklin, TN'},
'1615591':{'en': 'Franklin, TN'},
'1615595':{'en': 'Franklin, TN'},
'1615597':{'en': 'Smithville, TN'},
'1615599':{'en': 'Franklin, TN'},
'1615612':{'en': 'Madison, TN'},
'1615620':{'en': 'Nashville, TN'},
'1615624':{'en': 'Murfreesboro, TN'},
'1615641':{'en': 'Antioch, TN'},
'1615643':{'en': 'Greenbrier, TN'},
'1615644':{'en': 'Westmoreland, TN'},
'1615646':{'en': 'Nashville, TN'},
'1615650':{'en': 'Nashville, TN'},
'1615653':{'en': 'Murfreesboro, TN'},
'1615654':{'en': 'Cross Plains, TN'},
'1615661':{'en': 'Brentwood, TN'},
'1615662':{'en': 'Nashville, TN'},
'1615665':{'en': 'Nashville, TN'},
'1615666':{'en': 'Lafayette, TN'},
'1615672':{'en': 'White House, TN'},
'1615673':{'en': 'Nashville, TN'},
'1615678':{'en': 'Nashville, TN'},
'1615683':{'en': 'Gordonsville, TN'},
'1615688':{'en': 'Lafayette, TN'},
'1615699':{'en': 'Red Boiling Spgs, TN'},
'1615712':{'en': 'Nashville, TN'},
'1615726':{'en': 'Nashville, TN'},
'1615730':{'en': 'Nashville, TN'},
'1615731':{'en': 'Antioch, TN'},
'1615732':{'en': 'Nashville, TN'},
'1615735':{'en': 'Carthage, TN'},
'1615736':{'en': 'Nashville, TN'},
'1615740':{'en': 'Dickson, TN'},
'1615741':{'en': 'Nashville, TN'},
'1615742':{'en': 'Nashville, TN'},
'1615746':{'en': 'Pleasant View, TN'},
'1615750':{'en': 'Nashville, TN'},
'1615754':{'en': 'Mount Juliet, TN'},
'1615758':{'en': 'Mount Juliet, TN'},
'1615771':{'en': 'Franklin, TN'},
'1615773':{'en': 'Mount Juliet, TN'},
'1615776':{'en': 'Nolensville, TN'},
'1615777':{'en': 'Nashville, TN'},
'1615781':{'en': 'Nashville, TN'},
'1615783':{'en': 'Nashville, TN'},
'1615789':{'en': 'Charlotte, TN'},
'1615790':{'en': 'Franklin, TN'},
'1615791':{'en': 'Franklin, TN'},
'1615792':{'en': 'Ashland City, TN'},
'1615793':{'en': 'La Vergne, TN'},
'1615794':{'en': 'Franklin, TN'},
'1615797':{'en': 'White Bluff, TN'},
'1615799':{'en': 'Fairview, TN'},
'1615807':{'en': 'Franklin, TN'},
'1615822':{'en': 'Hendersonville, TN'},
'1615823':{'en': 'Nashville, TN'},
'1615824':{'en': 'Hendersonville, TN'},
'1615826':{'en': 'Hendersonville, TN'},
'1615831':{'en': 'Nashville, TN'},
'1615832':{'en': 'Nashville, TN'},
'1615833':{'en': 'Nashville, TN'},
'1615834':{'en': 'Nashville, TN'},
'1615837':{'en': 'Nashville, TN'},
'1615847':{'en': 'Old Hickory, TN'},
'1615848':{'en': 'Murfreesboro, TN'},
'1615849':{'en': 'Murfreesboro, TN'},
'1615851':{'en': 'Goodlettsville, TN'},
'1615855':{'en': 'Goodlettsville, TN'},
'1615859':{'en': 'Goodlettsville, TN'},
'1615860':{'en': 'Madison, TN'},
'1615862':{'en': 'Nashville, TN'},
'1615865':{'en': 'Madison, TN'},
'1615867':{'en': 'Murfreesboro, TN'},
'1615868':{'en': 'Madison, TN'},
'1615871':{'en': 'Nashville, TN'},
'1615872':{'en': 'Nashville, TN'},
'1615873':{'en': 'Nashville, TN'},
'1615874':{'en': 'Nashville, TN'},
'1615883':{'en': 'Nashville, TN'},
'1615884':{'en': 'Nashville, TN'},
'1615889':{'en': 'Nashville, TN'},
'1615890':{'en': 'Murfreesboro, TN'},
'1615891':{'en': 'Nashville, TN'},
'1615893':{'en': 'Murfreesboro, TN'},
'1615895':{'en': 'Murfreesboro, TN'},
'1615896':{'en': 'Murfreesboro, TN'},
'1615898':{'en': 'Murfreesboro, TN'},
'1615904':{'en': 'Murfreesboro, TN'},
'1615907':{'en': 'Murfreesboro, TN'},
'1615915':{'en': 'Nashville, TN'},
'1615936':{'en': 'Nashville, TN'},
'1615942':{'en': 'Nashville, TN'},
'1615952':{'en': 'Kingston Springs, TN'},
'1615953':{'en': 'Nashville, TN'},
'1615962':{'en': 'Murfreesboro, TN'},
'1616':{'en': 'Michigan'},
'1616222':{'en': 'Grand Rapids, MI'},
'1616225':{'en': 'Greenville, MI'},
'1616233':{'en': 'Grand Rapids, MI'},
'1616235':{'en': 'Grand Rapids, MI'},
'161624':{'en': 'Grand Rapids, MI'},
'1616281':{'en': 'Grand Rapids, MI'},
'1616285':{'en': 'Grand Rapids, MI'},
'1616301':{'en': 'Grand Rapids, MI'},
'1616335':{'en': 'Holland, MI'},
'1616336':{'en': 'Grand Rapids, MI'},
'1616355':{'en': 'Holland, MI'},
'1616356':{'en': 'Grand Rapids, MI'},
'1616361':{'en': 'Grand Rapids, MI'},
'1616363':{'en': 'Grand Rapids, MI'},
'1616364':{'en': 'Grand Rapids, MI'},
'1616365':{'en': 'Grand Rapids, MI'},
'1616374':{'en': 'Lake Odessa, MI'},
'161639':{'en': 'Holland, MI'},
'1616391':{'en': 'Grand Rapids, MI'},
'1616447':{'en': 'Grand Rapids, MI'},
'161645':{'en': 'Grand Rapids, MI'},
'1616457':{'en': 'Jenison, MI'},
'1616464':{'en': 'Grand Rapids, MI'},
'1616475':{'en': 'Grand Rapids, MI'},
'1616494':{'en': 'Holland, MI'},
'1616522':{'en': 'Ionia, MI'},
'1616527':{'en': 'Ionia, MI'},
'1616546':{'en': 'Holland, MI'},
'1616551':{'en': 'Grand Rapids, MI'},
'1616575':{'en': 'Grand Rapids, MI'},
'1616583':{'en': 'Byron Center, MI'},
'1616608':{'en': 'Grand Rapids, MI'},
'1616632':{'en': 'Grand Rapids, MI'},
'1616636':{'en': 'Sand Lake, MI'},
'1616642':{'en': 'Saranac, MI'},
'1616662':{'en': 'Hudsonville, MI'},
'1616667':{'en': 'Jenison, MI'},
'1616669':{'en': 'Hudsonville, MI'},
'1616676':{'en': 'Ada, MI'},
'1616677':{'en': 'Marne, MI'},
'1616681':{'en': 'Dorr, MI'},
'1616682':{'en': 'Ada, MI'},
'1616685':{'en': 'Grand Rapids, MI'},
'1616696':{'en': 'Cedar Springs, MI'},
'1616719':{'en': 'Grand Rapids, MI'},
'1616726':{'en': 'Grand Rapids, MI'},
'1616732':{'en': 'Grand Rapids, MI'},
'1616735':{'en': 'Grand Rapids, MI'},
'1616738':{'en': 'Holland, MI'},
'1616742':{'en': 'Grand Rapids, MI'},
'1616748':{'en': 'Zeeland, MI'},
'1616752':{'en': 'Grand Rapids, MI'},
'1616754':{'en': 'Greenville, MI'},
'1616772':{'en': 'Zeeland, MI'},
'1616774':{'en': 'Grand Rapids, MI'},
'1616776':{'en': 'Grand Rapids, MI'},
'1616786':{'en': 'Holland, MI'},
'1616791':{'en': 'Grand Rapids, MI'},
'1616794':{'en': 'Belding, MI'},
'1616796':{'en': 'Holland, MI'},
'1616805':{'en': 'Grand Rapids, MI'},
'1616819':{'en': 'Grand Rapids, MI'},
'1616827':{'en': 'Grand Rapids, MI'},
'1616828':{'en': 'Grand Rapids, MI'},
'1616831':{'en': 'Grand Rapids, MI'},
'1616836':{'en': 'Holland, MI'},
'1616837':{'en': 'Coopersville, MI'},
'1616842':{'en': 'Grand Haven, MI'},
'1616844':{'en': 'Grand Haven, MI'},
'1616846':{'en': 'Grand Haven, MI'},
'1616847':{'en': 'Grand Haven, MI'},
'1616850':{'en': 'Grand Haven, MI'},
'1616863':{'en': 'Rockford, MI'},
'1616866':{'en': 'Rockford, MI'},
'1616868':{'en': 'Alto, MI'},
'1616874':{'en': 'Rockford, MI'},
'1616877':{'en': 'Wayland, MI'},
'1616878':{'en': 'Byron Center, MI'},
'1616885':{'en': 'Grand Rapids, MI'},
'1616887':{'en': 'Sparta, MI'},
'1616891':{'en': 'Caledonia, MI'},
'1616892':{'en': 'Allendale Charter Township, MI'},
'1616895':{'en': 'Allendale Charter Township, MI'},
'1616896':{'en': 'Hudsonville, MI'},
'1616897':{'en': 'Lowell, MI'},
'1616935':{'en': 'Grand Haven, MI'},
'1616940':{'en': 'Grand Rapids, MI'},
'1616942':{'en': 'Grand Rapids, MI'},
'1616949':{'en': 'Grand Rapids, MI'},
'1616954':{'en': 'Grand Rapids, MI'},
'1616956':{'en': 'Grand Rapids, MI'},
'1616957':{'en': 'Grand Rapids, MI'},
'1616974':{'en': 'Grand Rapids, MI'},
'1616975':{'en': 'Grand Rapids, MI'},
'1616977':{'en': 'Grand Rapids, MI'},
'1616988':{'en': 'Grand Rapids, MI'},
'1616994':{'en': 'Holland, MI'},
'1616997':{'en': 'Coopersville, MI'},
'1617':{'en': 'Massachusetts'},
'1617225':{'en': 'Cambridge, MA'},
'1617227':{'en': 'Boston, MA'},
'1617236':{'en': 'Boston, MA'},
'1617241':{'en': 'Charlestown, MA'},
'1617242':{'en': 'Charlestown, MA'},
'1617243':{'en': 'Newton, MA'},
'1617247':{'en': 'Boston, MA'},
'1617248':{'en': 'Boston, MA'},
'1617253':{'en': 'Cambridge, MA'},
'1617261':{'en': 'Boston, MA'},
'1617262':{'en': 'Boston, MA'},
'1617265':{'en': 'Dorchester, MA'},
'1617266':{'en': 'Boston, MA'},
'1617267':{'en': 'Boston, MA'},
'1617277':{'en': 'Brookline, MA'},
'1617282':{'en': 'Dorchester, MA'},
'1617284':{'en': 'Somerville, MA'},
'1617288':{'en': 'Dorchester, MA'},
'1617292':{'en': 'Boston, MA'},
'1617294':{'en': 'Everett, MA'},
'1617298':{'en': 'Mattapan, MA'},
'1617328':{'en': 'Quincy, MA'},
'1617330':{'en': 'Boston, MA'},
'1617332':{'en': 'Newton, MA'},
'1617338':{'en': 'Boston, MA'},
'1617342':{'en': 'Boston, MA'},
'1617345':{'en': 'Boston, MA'},
'1617348':{'en': 'Boston, MA'},
'1617349':{'en': 'Cambridge, MA'},
'1617350':{'en': 'Boston, MA'},
'1617353':{'en': 'Boston, MA'},
'1617354':{'en': 'Cambridge, MA'},
'1617355':{'en': 'Boston, MA'},
'1617357':{'en': 'Boston, MA'},
'1617361':{'en': 'Hyde Park, MA'},
'1617364':{'en': 'Hyde Park, MA'},
'1617367':{'en': 'Boston, MA'},
'1617371':{'en': 'Boston, MA'},
'1617375':{'en': 'Boston, MA'},
'1617376':{'en': 'Quincy, MA'},
'1617381':{'en': 'Everett, MA'},
'1617387':{'en': 'Everett, MA'},
'1617389':{'en': 'Everett, MA'},
'1617391':{'en': 'Boston, MA'},
'1617394':{'en': 'Everett, MA'},
'1617414':{'en': 'Boston, MA'},
'161742':{'en': 'Boston, MA'},
'1617432':{'en': 'Boston, MA'},
'1617436':{'en': 'Dorchester, MA'},
'1617437':{'en': 'Boston, MA'},
'1617439':{'en': 'Boston, MA'},
'1617441':{'en': 'Cambridge, MA'},
'1617451':{'en': 'Boston, MA'},
'1617466':{'en': 'Chelsea, MA'},
'1617471':{'en': 'Quincy, MA'},
'1617472':{'en': 'Quincy, MA'},
'1617479':{'en': 'Quincy, MA'},
'1617482':{'en': 'Boston, MA'},
'1617484':{'en': 'Belmont, MA'},
'1617489':{'en': 'Belmont, MA'},
'161749':{'en': 'Cambridge, MA'},
'1617522':{'en': 'Jamaica Plain, MA'},
'1617523':{'en': 'Boston, MA'},
'1617524':{'en': 'Jamaica Plain, MA'},
'1617525':{'en': 'Boston, MA'},
'1617526':{'en': 'Boston, MA'},
'1617527':{'en': 'Newton, MA'},
'1617536':{'en': 'Boston, MA'},
'1617542':{'en': 'Boston, MA'},
'1617547':{'en': 'Cambridge, MA'},
'1617557':{'en': 'Boston, MA'},
'1617558':{'en': 'Newton, MA'},
'1617562':{'en': 'Brighton, MA'},
'1617566':{'en': 'Brookline, MA'},
'1617567':{'en': 'East Boston, MA'},
'1617568':{'en': 'East Boston, MA'},
'1617569':{'en': 'East Boston, MA'},
'1617570':{'en': 'Boston, MA'},
'1617573':{'en': 'Boston, MA'},
'1617574':{'en': 'Boston, MA'},
'1617575':{'en': 'Cambridge, MA'},
'1617576':{'en': 'Cambridge, MA'},
'1617577':{'en': 'Cambridge, MA'},
'1617591':{'en': 'Somerville, MA'},
'1617621':{'en': 'Cambridge, MA'},
'1617623':{'en': 'Somerville, MA'},
'1617625':{'en': 'Somerville, MA'},
'1617626':{'en': 'Boston, MA'},
'1617628':{'en': 'Somerville, MA'},
'1617629':{'en': 'Somerville, MA'},
'1617630':{'en': 'Newton, MA'},
'1617632':{'en': 'Boston, MA'},
'1617636':{'en': 'Boston, MA'},
'1617638':{'en': 'Boston, MA'},
'1617643':{'en': 'Boston, MA'},
'1617661':{'en': 'Cambridge, MA'},
'1617665':{'en': 'Cambridge, MA'},
'1617666':{'en': 'Somerville, MA'},
'1617667':{'en': 'Boston, MA'},
'1617695':{'en': 'Boston, MA'},
'1617696':{'en': 'Milton, MA'},
'1617698':{'en': 'Milton, MA'},
'1617714':{'en': 'Cambridge, MA'},
'1617718':{'en': 'Somerville, MA'},
'161772':{'en': 'Boston, MA'},
'1617731':{'en': 'Brookline, MA'},
'1617732':{'en': 'Boston, MA'},
'1617734':{'en': 'Brookline, MA'},
'1617737':{'en': 'Boston, MA'},
'1617738':{'en': 'Brookline, MA'},
'1617739':{'en': 'Brookline, MA'},
'1617742':{'en': 'Boston, MA'},
'1617764':{'en': 'Somerville, MA'},
'1617770':{'en': 'Quincy, MA'},
'1617773':{'en': 'Quincy, MA'},
'1617774':{'en': 'Quincy, MA'},
'1617776':{'en': 'Somerville, MA'},
'1617778':{'en': 'Boston, MA'},
'1617779':{'en': 'Brighton, MA'},
'1617786':{'en': 'Quincy, MA'},
'1617789':{'en': 'Brighton, MA'},
'1617796':{'en': 'Newton, MA'},
'1617832':{'en': 'Boston, MA'},
'1617846':{'en': 'Winthrop, MA'},
'1617847':{'en': 'Quincy, MA'},
'1617854':{'en': 'Boston, MA'},
'1617855':{'en': 'Belmont, MA'},
'1617859':{'en': 'Boston, MA'},
'1617864':{'en': 'Cambridge, MA'},
'1617868':{'en': 'Cambridge, MA'},
'1617876':{'en': 'Cambridge, MA'},
'1617879':{'en': 'Brookline, MA'},
'1617884':{'en': 'Chelsea, MA'},
'1617887':{'en': 'Chelsea, MA'},
'1617889':{'en': 'Chelsea, MA'},
'1617923':{'en': 'Watertown, MA'},
'1617924':{'en': 'Watertown, MA'},
'1617926':{'en': 'Watertown, MA'},
'1617928':{'en': 'Newton, MA'},
'1617934':{'en': 'Quincy, MA'},
'1617945':{'en': 'Cambridge, MA'},
'1617951':{'en': 'Boston, MA'},
'1617964':{'en': 'Newton, MA'},
'1617965':{'en': 'Newton, MA'},
'1617969':{'en': 'Newton, MA'},
'1617971':{'en': 'Jamaica Plain, MA'},
'1617972':{'en': 'Watertown, MA'},
'1617973':{'en': 'Boston, MA'},
'1617983':{'en': 'Jamaica Plain, MA'},
'1618':{'en': 'Illinois'},
'1618222':{'en': 'Belleville, IL'},
'1618224':{'en': 'Trenton, IL'},
'1618233':{'en': 'Belleville, IL'},
'1618234':{'en': 'Belleville, IL'},
'1618235':{'en': 'Belleville, IL'},
'1618236':{'en': 'Belleville, IL'},
'1618239':{'en': 'Belleville, IL'},
'1618241':{'en': 'Mount Vernon, IL'},
'1618242':{'en': 'Mount Vernon, IL'},
'1618244':{'en': 'Mount Vernon, IL'},
'1618251':{'en': 'Wood River, IL'},
'1618252':{'en': 'Harrisburg, IL'},
'1618253':{'en': 'Harrisburg, IL'},
'1618254':{'en': 'Wood River, IL'},
'1618256':{'en': 'Scott AFB, IL'},
'1618257':{'en': 'Belleville, IL'},
'1618262':{'en': 'Mount Carmel, IL'},
'1618263':{'en': 'Mount Carmel, IL'},
'1618271':{'en': 'East St. Louis, IL'},
'1618273':{'en': 'Eldorado, IL'},
'1618274':{'en': 'East St. Louis, IL'},
'1618277':{'en': 'Belleville, IL'},
'1618281':{'en': 'Columbia, IL'},
'1618282':{'en': 'Red Bud, IL'},
'1618283':{'en': 'Vandalia, IL'},
'1618286':{'en': 'Dupo, IL'},
'1618288':{'en': 'Maryville, IL'},
'1618295':{'en': 'Marissa, IL'},
'1618307':{'en': 'Edwardsville, IL'},
'1618327':{'en': 'Nashville, IL'},
'1618343':{'en': 'Collinsville, IL'},
'1618344':{'en': 'Collinsville, IL'},
'1618345':{'en': 'Collinsville, IL'},
| |
<filename>backend-server/sensors/views.py
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User, Group
from sensors.models import Room, Device, DeviceData, Person, CameraRecord, SensorData, LocationData
import face_recognition
import datetime
from django.db.models.functions import Now
from rest_framework import viewsets
from rest_framework import permissions
from sensors.serializers import UserSerializer, GroupSerializer, RoomSerializer, CameraRecordSerializer
from sensors.serializers import DeviceSerializer, DeviceDataSerializer, PersonSeiralizer, SensorDataSerializer
from pprint import pprint
import json
import numpy as np
import ast
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
@api_view(['GET', 'POST'])
def utilization(request):
    """Return a room's utilization for the day starting at `date`.

    Expects `room` and `date` ('%Y-%m-%d %H:%M:%S') in the request body.
    Responds 200 with a JSON string {"utilization": fraction} or 400 on
    malformed input.
    """
    def utilization_response(array):
        """
        array: numpy object array with rows (location, name, time),
        all times within the requested day.
        Returns the fraction of the 15:00-23:00 window (8 hours) that has
        at least one observation, assuming at most one observation per
        second -- NOTE(review): confirm the sampling assumption.
        """
        if array is None:
            return json.dumps({"utilization": 0})
        # clamp to the 15:00-23:00 observation window of that day
        day_time = array[0, 2]
        start_time = day_time.replace(hour=15, minute=0, second=0)
        end_time = day_time.replace(hour=23, minute=0, second=0)
        valid = array[array[:, 2] >= start_time]
        if valid.shape[0] == 0:
            return json.dumps({"utilization": 0})
        valid = valid[valid[:, 2] < end_time]
        # unique timestamps observed, as a fraction of the 8h window in seconds
        count = len(np.unique(valid[:, 2]))
        return json.dumps({"utilization": count / (8 * 60 * 60)})
    req_data = request.data
    try:
        room = req_data.get('room')
        start = req_data.get('date', None)
        start_date = datetime.datetime.strptime(start, '%Y-%m-%d %H:%M:%S')
        end_date = start_date + datetime.timedelta(days=1)
        data = LocationData.objects.filter(location=room).filter(
            created_by__range=(start_date, end_date)).values()
        result = []
        for item in data:
            result.append((item['location'], item['name'], item['created_by']))
        if len(result) == 0:
            result = utilization_response(None)
        else:
            result = utilization_response(np.array(result))
        return Response(data=result, status=status.HTTP_200_OK)
    except Exception:
        # FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; narrow to Exception.
        return Response(status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'POST'])
def person_room(request):
    """List the distinct rooms a person was seen in between `start` and `end`.

    Expects `name`, `start`, `end` ('%Y-%m-%d %H:%M:%S') in the request
    body. Responds 200 with {'room': [...]} or 400 on malformed input.
    """
    req_data = request.data
    start = req_data.get('start', None)
    end = req_data.get('end', None)
    try:
        name = req_data.get('name')
        start_date = datetime.datetime.strptime(start, '%Y-%m-%d %H:%M:%S')
        end_date = datetime.datetime.strptime(end, '%Y-%m-%d %H:%M:%S')
        data = LocationData.objects.filter(created_by__range=(start_date, end_date)).filter(name=name).values()
        # collect each room at most once
        seen = set()
        for item in data:
            if item['location'] not in seen:
                seen.add(item['location'])
        print(seen)
        return Response(data={'room': list(seen)}, status=status.HTTP_200_OK)
    except Exception:
        # FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; narrow to Exception.
        return Response(status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'POST'])
def people_room(request):
    """List the distinct people seen in `room` between `start` and `end`.

    Expects `room`, `start`, `end` ('%Y-%m-%d %H:%M:%S') in the request
    body. Responds 200 with {'count': n, 'occupancy_info': [...]} or 400
    on malformed input.
    """
    req_data = request.data
    start = req_data.get('start', None)
    end = req_data.get('end', None)
    room = req_data.get('room')
    try:
        start_date = datetime.datetime.strptime(start, '%Y-%m-%d %H:%M:%S')
        end_date = datetime.datetime.strptime(end, '%Y-%m-%d %H:%M:%S')
        data = LocationData.objects.filter(created_by__range=(start_date, end_date)).filter(location=room).values()
        # collect each person at most once
        seen = set()
        for item in data:
            if item['name'] not in seen:
                seen.add(item['name'])
        print(seen)
        return Response(data={'count': len(seen), 'occupancy_info': list(seen)}, status=status.HTTP_200_OK)
    except Exception:
        # FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; narrow to Exception.
        return Response(status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'POST'])
def people_building(request):
    """Count distinct people seen anywhere between `start` and `end`.

    Expects `start`, `end` ('%Y-%m-%d %H:%M:%S') in the request body.
    Responds 200 with {'count': n} or 400 on malformed input.
    """
    req_data = request.data
    start = req_data.get('start', None)
    end = req_data.get('end', None)
    try:
        start_date = datetime.datetime.strptime(start, '%Y-%m-%d %H:%M:%S')
        end_date = datetime.datetime.strptime(end, '%Y-%m-%d %H:%M:%S')
        data = LocationData.objects.filter(created_by__range=(start_date, end_date)).values()
        # collect each person at most once
        seen = set()
        for item in data:
            if item['name'] not in seen:
                seen.add(item['name'])
        print(seen)
        return Response(data={'count': len(seen)}, status=status.HTTP_200_OK)
    except Exception:
        # FIX: the sibling endpoints return 400 on malformed input; this one
        # previously let strptime raise an unhandled 500. Made consistent.
        return Response(status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'POST'])
def room_info(request):
    """Report who has been observed in `room` within the last 5 seconds.

    Responds 200 with {'room_name': room, 'occupancy_info': [(location,
    name, time), ...]}, one entry per distinct person, most recent first.
    """
    req_data = request.data
    room = req_data.get('room')
    now = datetime.datetime.now()
    # newest-first records for this room from the last 5 seconds
    window = (now - datetime.timedelta(seconds=5), now)
    records = LocationData.objects.filter(created_by__range=window).order_by(
        '-created_by').filter(location=room).values()
    seen = set()
    check = []
    for rec in records:
        person = rec['name']
        if person in seen:
            continue
        # first hit per person is their most recent observation
        seen.add(person)
        check.append((rec['location'], person, rec['created_by']))
    print(check)
    return Response(data={'room_name': room, 'occupancy_info': list(check)}, status=status.HTTP_200_OK)
@api_view(['GET', 'POST'])
def room(request):
    """Summarise current per-room occupancy over the last 10 seconds.

    Each person is counted once at their most recent location (rows are
    ordered newest first). Responds 200 with a JSON *string* of
    {"room_info": [[room, count], ...]} — kept as a string for backward
    compatibility with existing clients — or 400 on error.
    """

    def room_response(array):
        """Serialise per-room occupant counts to a JSON string.

        array: numpy array with columns (location, name, time), or None
        when nobody was seen in the window.
        """
        response = {"room_info": list()}
        if array is None:
            return json.dumps(response)
        room_name, count = np.unique(array[:, 0], return_counts=True)
        for i, name in enumerate(room_name):
            response["room_info"].append([name, int(count[i])])
        return json.dumps(response)

    try:
        now = datetime.datetime.now()
        data = LocationData.objects.filter(
            created_by__range=(now - datetime.timedelta(seconds=10), now)
        ).order_by('-created_by').values()
        # Keep only the newest sighting per person.
        result = []
        seen = set()
        for item in data:
            if item['name'] not in seen:
                result.append((item['location'], item['name'], item['created_by']))
                seen.add(item['name'])
        result = room_response(np.array(result) if seen else None)
        return Response(data=result, status=status.HTTP_200_OK)
    except Exception:
        # Narrowed from a bare `except:`; debug prints dropped.
        return Response(status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
def sec_sensor_data(request):
    """Persist a raw sensor reading and derive per-person location rows.

    'location' is the str() of a Python dict mapping person name -> room;
    entries keyed 'time' or located 'home' are skipped. Responds 201 on
    success, 400 if the payload is missing fields or cannot be parsed.
    """
    req_data = request.data
    try:
        # Store the raw payload first.
        SensorData(location=req_data['location'],
                   sensor_data=req_data['sensor_data'],
                   created_by=Now()).save()
        # literal_eval only accepts Python literals, so untrusted input
        # cannot execute code here (unlike eval).
        locations = ast.literal_eval(req_data['location'])
        for name in locations:
            if name == 'time' or locations[name] == 'home':
                continue
            LocationData(location=locations[name], name=name, created_by=Now()).save()
        return Response(status=status.HTTP_201_CREATED)
    except Exception:
        # Restores the error handling the original left commented out:
        # malformed payloads now answer 400 instead of raising a 500.
        return Response(status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
def get_sensor_data(request):
    """Return the two most recent sensor readings from the last ~10 minutes.

    Responds 202 with the newest and second-newest readings plus the newest
    reading's location, or 400 when fewer than two readings exist.
    """
    now = datetime.datetime.now()
    # Renamed from the misleading `now_plus_10` — the forward skew is one
    # minute, tolerating clock drift between writer and reader.
    window_end = now + datetime.timedelta(minutes=1)
    data = SensorData.objects.filter(
        created_by__range=(now - datetime.timedelta(minutes=10), window_end)
    ).order_by('-created_by').values()
    try:
        return Response(data={'current_sensor_data': data[0]['sensor_data'],
                              'prev_sensor_data': data[1]['sensor_data'],
                              'location': data[0]['location']},
                        status=status.HTTP_202_ACCEPTED)
    except Exception:
        # Narrowed from a bare `except:`; typically IndexError when the
        # window holds fewer than two readings. Debug print dropped.
        return Response(data={}, status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
def fetch_location_data_by(request):
    """Return raw LocationData rows whose created_by falls in [start, end].

    Body params: 'start', 'end' (dates as '%Y-%m-%d %H:%M:%S').
    Responds {'data': rows} with 200, or 400 on missing/malformed dates.
    """
    req_data = request.data
    try:
        start = req_data.get('start', None)
        end = req_data.get('end', None)
        start_date = datetime.datetime.strptime(start, '%Y-%m-%d %H:%M:%S')
        end_date = datetime.datetime.strptime(end, '%Y-%m-%d %H:%M:%S')
        d_data = LocationData.objects.filter(created_by__range=(start_date, end_date)).values()
        return Response({'data': d_data}, status=status.HTTP_200_OK)
    except Exception:
        # Narrowed from a bare `except:`.
        return Response(status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
def fetch_sensor_data_by(request):
    """Fetch DeviceData rows filtered by optional time range and device id.

    Body params (all optional): 'start'/'end' dates, 'device_id'.
    'device_type' is accepted but currently unused. With no filters, all
    rows are returned.
    """
    req_data = request.data
    start = req_data.get('start', None)
    end = req_data.get('end', None)
    device_type = req_data.get('device_type', None)  # accepted but unused
    device_id = req_data.get('device_id', None)
    d_data = None
    if start and end:
        start_date = datetime.datetime.strptime(start, '%Y-%m-%d %H:%M:%S')
        end_date = datetime.datetime.strptime(end, '%Y-%m-%d %H:%M:%S')
        d_data = DeviceData.objects.filter(create_by__range=(start_date, end_date))
    if device_id:
        # Bug fix: the old `if d_data:` evaluated queryset truthiness, so an
        # empty date-filtered queryset silently dropped the date filter (and
        # forced an extra DB query). Test for None instead.
        if d_data is not None:
            d_data = d_data.filter(device=device_id)
        else:
            d_data = DeviceData.objects.filter(device=device_id)
    if d_data is None:
        # PEP 8: compare to None with `is`, not `==`.
        d_data = DeviceData.objects.all()
    d_data = d_data.values()
    return Response({'data': d_data}, status=status.HTTP_200_OK)
@api_view(['POST'])
def register_user(request, format=None):
    """Create or update a Person with the submitted face encodings.

    Body params: 'face_encodings' (list), 'name', 'email', 'identity'.
    Existing person (matched by email): encodings are appended -> 201.
    New person: created with the encodings -> 202. (The 201/202 pairing
    looks swapped but is kept for backward compatibility.) 400 on error.
    """
    req_data = request.data
    face_encodings = req_data['face_encodings']
    name = req_data['name']
    email = req_data['email']
    identity = req_data['identity']
    try:
        try:
            person = Person.objects.get(email=email)
            person.name = name
            person.identity = identity
            # Embeddings are persisted as the str() of a Python list; parse,
            # extend, and store back.
            face_embedding = ast.literal_eval(person.face_embedding)
            face_embedding.extend(face_encodings)
            person.face_embedding = face_embedding
            person.save()
            return Response(status=status.HTTP_201_CREATED)
        except ObjectDoesNotExist:
            person = Person(name=name, email=email, identity=identity,
                            face_embedding=str(face_encodings))
            person.save()
            return Response(status=status.HTTP_202_ACCEPTED)
    except Exception:
        # Narrowed from a bare `except:`; malformed payloads or storage
        # failures map to 400.
        return Response(status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
def face_record(request, format=None):
    """Match submitted face encodings against registered people and log sightings.

    Body params: 'face_encodings' (list of encodings), 'camera_id'.
    For each encoding that matches a registered person, a CameraRecord row
    is created. Always responds 201 with an empty body, regardless of how
    many matches were found.
    """
    req_data = request.data
    face_encodings = req_data['face_encodings']
    camera_id = req_data['camera_id']
    database_face_encodings = []
    database_face_names = []
    database_face_ids = []
    # Load every registered person's id, stored embedding, and name into
    # parallel lists (same index across the three lists).
    people = Person.objects.all().values()
    for person in people:
        database_face_ids.append(person['person_id'])
        # NOTE(review): register_user stores face_embedding as a *string*
        # (str of a list); passing strings to face_recognition.compare_faces
        # looks wrong — confirm whether embeddings are decoded elsewhere.
        database_face_encodings.append(person['face_embedding'])
        database_face_names.append(person['name'])
    # NOTE(review): detected_people is never populated or returned.
    detected_people = []
    # start to compare with database
    for encoding in face_encodings:
        matches = face_recognition.compare_faces(database_face_encodings,
                                                 encoding)
        print(matches)
        if True in matches:
            # find the indexes of all matched faces then initialize a
            # dictionary to count the total number of times each face
            # was matched
            matchedIdxs = [i for (i, b) in enumerate(matches) if b]
            counts = {}
            # loop over the matched indexes and maintain a count for
            # each recognized face face
            for i in matchedIdxs:
                name = database_face_names[i]
                counts[name] = counts.get(name, 0) + 1
            # determine the recognized face with the largest number of
            # votes (note: in the event of an unlikely tie Python will
            # select first entry in the dictionary)
            name = max(counts, key=counts.get)
            p_idx = database_face_names.index(name)
            cam_record = CameraRecord(person_id=database_face_ids[p_idx], person_name=name,
                                      camera_id=camera_id)
            # NOTE(review): is_valid() is a DRF *serializer* method; Django
            # model instances do not define it — confirm CameraRecord
            # implements it, otherwise every match raises AttributeError.
            if cam_record.is_valid():
                cam_record.save()
    return Response({}, status=status.HTTP_201_CREATED)
@api_view(['POST'])
def device_scan(request, device_id):
    """Register a scanned SmartThings device (hub or child device).

    The payload shape differs: hubs ('type' == 'HUB') carry top-level
    fields only, while regular devices nest details under 'dth'.
    Responds 201 when stored, 400 when a required field is missing.
    """
    item = request.data
    print(device_id)
    try:
        if item['type'] == 'HUB':
            # Hubs have no 'dth' block; fill device-only fields with
            # empty placeholders.
            device = Device(device_id=device_id, device_name=item['name'],
                            device_label=item['label'], location_id='',
                            device_type='SmartThings v3 Hub', room='', complete_setup=True,
                            hub_id=item['deviceId'], network_type='', network_sec='',
                            device_description='')
        else:
            device = Device(device_id=device_id, device_name=item['name'],
                            device_label=item['label'], location_id=item['locationId'],
                            device_type=item['dth']['deviceTypeName'], room=item['roomId'],
                            complete_setup=item['dth']['completedSetup'],
                            hub_id=item['dth']['hubId'],
                            network_type=item['dth']['deviceNetworkType'],
                            network_sec=item['dth']['networkSecurityLevel'],
                            device_description=item['dth']['deviceTypeName'])
    except Exception as e:
        print('Error', e)
        return Response(status=status.HTTP_400_BAD_REQUEST)
    else:
        device.save()
        return Response(status=status.HTTP_201_CREATED)
    # The trailing `return Response(status=HTTP_400_BAD_REQUEST)` of the
    # original was unreachable (both branches above return) and was removed.
@api_view(['POST'])
def sensor_data_stream(request, device_id=None):
    """Flatten a SmartThings-style capability payload into one DeviceData row.

    Each optional capability block ('face', 'outlet', 'powerMeter',
    'motionSensor', 'lock', 'battery', 'button') is read when present;
    absent blocks are stored as neutral sentinels ('' / 0. / -999. / -1.).
    Responds 201 on success, 400 when the payload is malformed or the row
    cannot be saved.
    """
    data = request.data
    try:
        # common properties
        actuator = str(data.get('actuator', {}))
        configuration = str(data.get('configuration', {}))
        # health_check = str(data.get('healthCheck', {}))
        health_check = ''
        refresh = str(data.get('refresh', {}))
        sensor = str(data.get('sensor', {}))
        face_name = ''
        face_email = ''
        if 'face' in data:
            face_name = data['face']['face']['name']
            face_email = data['face']['face']['email']
        # outlet only
        outlet_switch_value = ''
        if 'outlet' in data:
            outlet_switch_value = data['outlet']['switch']['value']
        power_unit = ''
        power_value = 0.
        if 'powerMeter' in data:
            power_unit = data['powerMeter']['power']['unit']
            power_value = float(data['powerMeter']['power']['value'])
        # motion sensor
        motion_sensor_value = ''
        temperature_unit = ''
        temperature_value = -999.  # sentinel: no temperature reported
        if 'motionSensor' in data:
            motion_sensor_value = data['motionSensor']['motion']['value']
            # NOTE(review): assumes 'temperatureMeasurement' always
            # accompanies 'motionSensor' — confirm against the sender.
            temperature_unit = data['temperatureMeasurement']['temperature']['unit']
            temperature_value = float(data['temperatureMeasurement']['temperature']['value'])
        lock_data = ''
        lock_value = ''
        if 'lock' in data:
            lock_data = data['lock']['lock']['data']
            lock_value = data['lock']['lock']['value']
        battery_value = -1.  # sentinel: no battery level reported
        if 'battery' in data:
            battery_value = float(data['battery']['battery']['value'])
        holdable_button = ''
        if 'button' in data:
            holdable_button = data['button']['button']['value']
        device_data = DeviceData(
            device=device_id,
            actuator=actuator,
            configuration=configuration,
            health_check=health_check,
            refresh=refresh,
            sensor=sensor,
            battery_value=battery_value,
            lock_data=lock_data, lock_value=lock_value,
            motion_sensor_value=motion_sensor_value,
            temperature_unit=temperature_unit,
            temperature_value=temperature_value,
            power_unit=power_unit, power_value=power_value,
            holdable_button=holdable_button,
            outlet_switch_value=outlet_switch_value,
            face_name=face_name,
            face_email=face_email,
            create_by=Now()
        )
        device_data.save()
        return Response(status=status.HTTP_201_CREATED)
    except Exception:
        # Narrowed from a bare `except:`. The try now also covers field
        # extraction, so a malformed capability block (KeyError/ValueError)
        # answers 400 instead of surfacing as a 500.
        return Response(status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'POST'])
def room_list(request, format=None):
    """List all rooms (GET) or create a new room (POST)."""
    if request.method == 'GET':
        rooms = Room.objects.all()
        serializer = RoomSerializer(rooms, many=True)
        return Response(serializer.data)
    elif request.method == 'POST':
        # Bug fix: the original instantiated the *model* —
        # `Room(data=request.data)` — which has no is_valid()/errors and
        # treats `data` as a field kwarg; the serializer was intended.
        serializer = RoomSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'POST', 'DELETE'])
def room_detail(request, pk, format=None):
    """Retrieve (GET), update (PUT/POST) or delete (DELETE) a single Room."""
    try:
        room = Room.objects.get(pk=pk)
    except Room.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)

    method = request.method
    if method == 'GET':
        return Response(RoomSerializer(room).data)
    if method in ('PUT', 'POST'):
        serializer = RoomSerializer(room, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)
    if method == 'DELETE':
        room.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class RoomViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows rooms to be viewed or edited.

    ModelViewSet provides the full CRUD surface (list/retrieve/create/
    update/destroy) over Room via RoomSerializer; access requires an
    authenticated user.
    """
    queryset = Room.objects.all()
    serializer_class = RoomSerializer
    permission_classes = [permissions.IsAuthenticated]
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.

    Users are listed newest-first (ordered by -date_joined) and exposed
    through UserSerializer; access requires an authenticated user.
    """
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
    permission_classes = [permissions.IsAuthenticated]
class | |
<gh_stars>1-10
import pyecharts.options as opts
from pyecharts.charts import Line
"""
Gallery 使用 pyecharts 1.1.0
参考地址: https://echarts.baidu.com/examples/editor.html?c=line-aqi
目前无法实现的功能:
1、dataZoom 放大的时候无法固定 Y 轴的上下限
"""
all_data = [
["2000-06-05", 116],
["2000-06-06", 129],
["2000-06-07", 135],
["2000-06-08", 86],
["2000-06-09", 73],
["2000-06-10", 85],
["2000-06-11", 73],
["2000-06-12", 68],
["2000-06-13", 92],
["2000-06-14", 130],
["2000-06-15", 245],
["2000-06-16", 139],
["2000-06-17", 115],
["2000-06-18", 111],
["2000-06-19", 309],
["2000-06-20", 206],
["2000-06-21", 137],
["2000-06-22", 128],
["2000-06-23", 85],
["2000-06-24", 94],
["2000-06-25", 71],
["2000-06-26", 106],
["2000-06-27", 84],
["2000-06-28", 93],
["2000-06-29", 85],
["2000-06-30", 73],
["2000-07-01", 83],
["2000-07-02", 125],
["2000-07-03", 107],
["2000-07-04", 82],
["2000-07-05", 44],
["2000-07-06", 72],
["2000-07-07", 106],
["2000-07-08", 107],
["2000-07-09", 66],
["2000-07-10", 91],
["2000-07-11", 92],
["2000-07-12", 113],
["2000-07-13", 107],
["2000-07-14", 131],
["2000-07-15", 111],
["2000-07-16", 64],
["2000-07-17", 69],
["2000-07-18", 88],
["2000-07-19", 77],
["2000-07-20", 83],
["2000-07-21", 111],
["2000-07-22", 57],
["2000-07-23", 55],
["2000-07-24", 60],
["2000-07-25", 44],
["2000-07-26", 127],
["2000-07-27", 114],
["2000-07-28", 86],
["2000-07-29", 73],
["2000-07-30", 52],
["2000-07-31", 69],
["2000-08-01", 86],
["2000-08-02", 118],
["2000-08-03", 56],
["2000-08-04", 91],
["2000-08-05", 121],
["2000-08-06", 127],
["2000-08-07", 78],
["2000-08-08", 79],
["2000-08-09", 46],
["2000-08-10", 108],
["2000-08-11", 80],
["2000-08-12", 79],
["2000-08-13", 69],
["2000-08-14", 80],
["2000-08-15", 105],
["2000-08-16", 119],
["2000-08-17", 105],
["2000-08-18", 55],
["2000-08-19", 74],
["2000-08-20", 41],
["2000-08-21", 62],
["2000-08-22", 104],
["2000-08-23", 118],
["2000-08-24", 121],
["2000-08-25", 126],
["2000-08-26", 99],
["2000-08-27", 92],
["2000-08-28", 75],
["2000-08-29", 91],
["2000-08-30", 94],
["2000-08-31", 69],
["2000-09-01", 93],
["2000-09-02", 124],
["2000-09-03", 120],
["2000-09-04", 93],
["2000-09-05", 26],
["2000-09-06", 32],
["2000-09-07", 70],
["2000-09-08", 89],
["2000-09-10", 117],
["2000-09-11", 144],
["2000-09-12", 111],
["2000-09-13", 120],
["2000-09-14", 97],
["2000-09-15", 108],
["2000-09-17", 74],
["2000-09-18", 105],
["2000-09-19", 127],
["2000-09-20", 143],
["2000-09-21", 62],
["2000-09-22", 80],
["2000-09-23", 136],
["2000-09-24", 29],
["2000-09-25", 91],
["2000-09-26", 93],
["2000-09-27", 114],
["2000-09-28", 45],
["2000-09-29", 102],
["2000-09-30", 111],
["2000-10-01", 93],
["2000-10-02", 117],
["2000-10-03", 78],
["2000-10-04", 76],
["2000-10-05", 100],
["2000-10-06", 75],
["2000-10-07", 169],
["2000-10-08", 59],
["2000-10-09", 89],
["2000-10-10", 91],
["2000-10-11", 75],
["2000-10-12", 28],
["2000-10-13", 47],
["2000-10-14", 92],
["2000-10-16", 72],
["2000-10-17", 149],
["2000-10-18", 86],
["2000-10-19", 88],
["2000-10-20", 104],
["2000-10-21", 91],
["2000-10-22", 88],
["2000-10-23", 55],
["2000-10-24", 63],
["2000-10-25", 41],
["2000-10-26", 85],
["2000-10-27", 99],
["2000-10-28", 121],
["2000-10-29", 96],
["2000-10-30", 90],
["2000-11-01", 80],
["2000-11-02", 116],
["2000-11-03", 207],
["2000-11-04", 306],
["2000-11-05", 283],
["2000-11-06", 200],
["2000-11-07", 93],
["2000-11-08", 49],
["2000-11-09", 78],
["2000-11-10", 40],
["2000-11-11", 74],
["2000-11-12", 67],
["2000-11-13", 118],
["2000-11-14", 196],
["2000-11-15", 101],
["2000-11-16", 59],
["2000-11-17", 83],
["2000-11-18", 83],
["2000-11-19", 124],
["2000-11-20", 57],
["2000-11-21", 78],
["2000-11-22", 113],
["2000-11-23", 172],
["2000-11-24", 129],
["2000-11-25", 103],
["2000-11-26", 75],
["2000-11-27", 125],
["2000-11-28", 121],
["2000-11-29", 204],
["2000-11-30", 141],
["2000-12-01", 106],
["2000-12-02", 146],
["2000-12-03", 95],
["2000-12-04", 149],
["2000-12-05", 71],
["2000-12-07", 157],
["2000-12-08", 141],
["2000-12-09", 197],
["2000-12-10", 43],
["2000-12-11", 81],
["2000-12-12", 109],
["2000-12-13", 118],
["2000-12-15", 115],
["2000-12-16", 92],
["2000-12-17", 123],
["2000-12-18", 147],
["2000-12-19", 59],
["2000-12-20", 103],
["2000-12-21", 146],
["2000-12-22", 137],
["2000-12-23", 74],
["2000-12-24", 64],
["2000-12-25", 67],
["2000-12-26", 107],
["2000-12-27", 101],
["2000-12-28", 79],
["2000-12-29", 137],
["2000-12-30", 165],
["2000-12-31", 81],
["2001-01-01", 100],
["2001-01-02", 126],
["2001-01-03", 56],
["2001-01-05", 108],
["2001-01-06", 88],
["2001-01-07", 78],
["2001-01-08", 105],
["2001-01-09", 77],
["2001-01-10", 105],
["2001-01-11", 93],
["2001-01-12", 107],
["2001-01-13", 128],
["2001-01-14", 53],
["2001-01-15", 81],
["2001-01-16", 128],
["2001-01-17", 179],
["2001-01-18", 225],
["2001-01-19", 116],
["2001-01-20", 153],
["2001-01-21", 161],
["2001-01-22", 149],
["2001-01-23", 115],
["2001-01-24", 136],
["2001-01-25", 101],
["2001-01-26", 109],
["2001-01-27", 108],
["2001-01-28", 86],
["2001-01-29", 101],
["2001-01-30", 109],
["2001-01-31", 139],
["2001-02-01", 110],
["2001-02-02", 113],
["2001-02-03", 130],
["2001-02-04", 62],
["2001-02-05", 88],
["2001-02-06", 105],
["2001-02-07", 87],
["2001-02-08", 140],
["2001-02-09", 116],
["2001-02-10", 100],
["2001-02-11", 83],
["2001-02-12", 102],
["2001-02-13", 106],
["2001-02-14", 157],
["2001-02-15", 131],
["2001-02-16", 77],
["2001-02-17", 101],
["2001-02-18", 148],
["2001-02-19", 227],
["2001-02-20", 105],
["2001-02-21", 155],
["2001-02-22", 293],
["2001-02-23", 99],
["2001-02-24", 57],
["2001-02-25", 97],
["2001-02-26", 104],
["2001-02-27", 117],
["2001-02-28", 125],
["2001-03-01", 216],
["2001-03-02", 149],
["2001-03-03", 256],
["2001-03-04", 172],
["2001-03-05", 113],
["2001-03-06", 338],
["2001-03-07", 57],
["2001-03-08", 48],
["2001-03-10", 111],
["2001-03-11", 87],
["2001-03-12", 175],
["2001-03-13", 186],
["2001-03-14", 201],
["2001-03-15", 76],
["2001-03-16", 131],
["2001-03-17", 127],
["2001-03-18", 128],
["2001-03-19", 152],
["2001-03-20", 144],
["2001-03-21", 162],
["2001-03-22", 500],
["2001-03-24", 358],
["2001-03-25", 128],
["2001-03-26", 54],
["2001-03-27", 57],
["2001-03-28", 54],
["2001-03-29", 80],
["2001-03-30", 71],
["2001-03-31", 73],
["2001-04-01", 139],
["2001-04-02", 224],
["2001-04-03", 107],
["2001-04-04", 150],
["2001-04-05", 180],
["2001-04-06", 77],
["2001-04-07", 95],
["2001-04-08", 194],
["2001-04-09", 143],
["2001-04-10", 205],
["2001-04-11", 129],
["2001-04-12", 64],
["2001-04-13", 61],
["2001-04-14", 79],
["2001-04-15", 121],
["2001-04-16", 130],
["2001-04-17", 150],
["2001-04-18", 205],
["2001-04-19", 154],
["2001-04-20", 81],
["2001-04-21", 140],
["2001-04-22", 119],
["2001-04-23", 156],
["2001-04-24", 72],
["2001-04-25", 108],
["2001-04-26", 124],
["2001-04-27", 94],
["2001-04-28", 157],
["2001-04-29", 100],
["2001-04-30", 158],
["2001-05-01", 277],
["2001-05-02", 332],
["2001-05-03", 303],
["2001-05-04", 238],
["2001-05-05", 500],
["2001-05-06", 99],
["2001-05-07", 93],
["2001-05-08", 104],
["2001-05-09", 74],
["2001-05-10", 68],
["2001-05-11", 90],
["2001-05-12", 114],
["2001-05-13", 142],
["2001-05-14", 126],
["2001-05-15", 185],
["2001-05-16", 402],
["2001-05-17", 189],
["2001-05-17", 189],
["2001-05-17", 189],
["2001-05-18", 112],
["2001-05-19", 137],
["2001-05-20", 158],
["2001-05-21", 158],
["2001-05-22", 116],
["2001-05-23", 132],
["2001-05-24", 110],
["2001-05-25", 82],
["2001-05-26", 56],
["2001-05-27", 54],
["2001-05-28", 71],
["2001-05-29", 101],
["2001-05-30", 57],
["2001-05-31", 88],
["2001-06-01", 99],
["2001-06-02", 84],
["2001-06-03", 139],
["2001-06-04", 132],
["2001-06-05", 141],
["2001-06-07", 159],
["2001-06-08", 131],
["2001-06-09", 180],
["2001-06-10", 164],
["2001-06-11", 134],
["2001-06-12", 163],
["2001-06-13", 105],
["2001-06-14", 74],
["2001-06-15", 50],
["2001-06-16", 60],
["2001-06-17", 82],
["2001-06-18", 111],
["2001-06-19", 89],
["2001-06-20", 81],
["2001-06-21", 76],
["2001-06-22", 70],
["2001-06-23", 74],
["2001-06-24", 99],
["2001-06-25", 91],
["2001-06-26", 113],
["2001-06-27", 93],
["2001-06-28", 69],
["2001-06-29", 74],
["2001-06-30", 75],
["2001-07-01", 108],
["2001-07-02", 115],
["2001-07-03", 86],
["2001-07-04", 67],
["2001-07-05", 68],
["2001-07-06", 74],
["2001-07-07", 69],
["2001-07-08", 95],
["2001-07-09", 99],
["2001-07-10", 92],
["2001-07-11", 84],
["2001-07-12", 77],
["2001-07-13", 69],
["2001-07-14", 62],
["2001-07-15", 83],
["2001-07-16", 101],
["2001-07-17", 98],
["2001-07-18", 89],
["2001-07-19", 82],
["2001-07-20", 105],
["2001-07-21", 79],
["2001-07-22", 48],
["2001-07-23", 119],
["2001-07-24", 126],
["2001-07-25", 44],
["2001-07-26", 42],
["2001-07-27", 86],
["2001-07-28", 68],
["2001-07-29", 93],
["2001-07-30", 89],
["2001-07-31", 76],
["2001-08-01", 54],
["2001-08-02", 53],
["2001-08-03", 35],
["2001-08-04", 65],
["2001-08-05", 108],
["2001-08-06", 114],
["2001-08-07", 90],
["2001-08-08", 63],
["2001-08-09", 79],
["2001-08-10", 102],
["2001-08-11", 100],
["2001-08-12", 107],
["2001-08-13", 81],
["2001-08-14", 79],
["2001-08-15", 116],
["2001-08-16", 98],
["2001-08-17", 96],
["2001-08-18", 94],
["2001-08-19", 63],
["2001-08-20", 39],
["2001-08-21", 81],
["2001-08-22", 73],
["2001-08-23", 66],
["2001-08-24", 52],
["2001-08-25", 64],
["2001-08-26", 61],
["2001-08-27", 83],
["2001-08-28", 85],
["2001-08-29", 99],
["2001-08-30", 97],
["2001-08-31", 93],
["2001-09-01", 86],
["2001-09-02", 105],
["2001-09-03", 98],
["2001-09-04", 109],
["2001-09-05", 92],
["2001-09-06", 68],
["2001-09-07", 92],
["2001-09-08", 72],
["2001-09-09", 64],
["2001-09-10", 88],
["2001-09-11", 97],
["2001-09-12", 102],
["2001-09-13", 103],
["2001-09-14", 120],
["2001-09-15", 94],
["2001-09-16", 95],
["2001-09-17", 93],
["2001-09-18", 56],
["2001-09-19", 98],
["2001-09-20", 81],
["2001-09-21", 100],
["2001-09-22", 75],
["2001-09-23", 84],
["2001-09-24", 91],
["2001-09-25", 70],
["2001-09-26", 96],
["2001-09-27", 128],
["2001-09-28", 92],
["2001-09-29", 107],
["2001-09-30", 95],
["2001-10-01", 63],
["2001-10-02", 115],
["2001-10-03", 69],
["2001-10-04", 47],
["2001-10-05", 86],
["2001-10-06", 122],
["2001-10-07", 104],
["2001-10-08", 122],
["2001-10-09", 49],
["2001-10-10", 36],
["2001-10-11", 83],
["2001-10-12", 107],
["2001-10-13", 126],
["2001-10-14", 126],
["2001-10-15", 78],
["2001-10-16", 72],
["2001-10-17", 76],
["2001-10-18", 87],
["2001-10-19", 143],
["2001-10-20", 259],
["2001-10-21", 183],
["2001-10-22", 276],
["2001-10-23", 232],
["2001-10-24", 167],
["2001-10-25", 105],
["2001-10-26", 129],
["2001-10-27", 140],
["2001-10-28", 61],
["2001-10-29", 85],
["2001-10-30", 155],
["2001-11-01", 38],
["2001-11-02", 106],
["2001-11-03", 134],
["2001-11-04", 57],
["2001-11-05", 51],
["2001-11-06", 68],
["2001-11-07", 129],
["2001-11-08", 158],
["2001-11-09", 85],
["2001-11-10", 121],
["2001-11-11", 161],
["2001-11-12", 94],
["2001-11-13", 58],
["2001-11-14", 57],
["2001-11-15", 71],
["2001-11-16", 105],
["2001-11-17", 66],
["2001-11-18", 117],
["2001-11-19", 87],
["2001-11-20", 88],
["2001-11-21", 131],
["2001-11-22", 151],
["2001-11-23", 310],
["2001-11-24", 161],
["2001-11-25", 23],
["2001-11-26", 52],
["2001-11-27", 82],
["2001-11-28", 128],
["2001-11-29", 115],
["2001-11-30", 63],
["2001-12-02", 102],
["2001-12-03", 96],
["2001-12-04", 107],
["2001-12-05", 89],
["2001-12-06", 59],
["2001-12-07", 100],
["2001-12-08", 136],
["2001-12-09", 137],
["2001-12-10", 119],
["2001-12-11", 112],
["2001-12-12", 186],
["2001-12-13", 192],
["2001-12-14", 83],
["2001-12-15", 97],
["2001-12-16", 113],
["2001-12-18", 89],
["2001-12-19", 106],
["2001-12-20", 119],
["2001-12-21", 62],
["2001-12-22", 79],
["2001-12-23", 58],
["2001-12-24", 61],
["2001-12-25", 64],
["2001-12-26", 108],
["2001-12-27", 101],
["2001-12-28", 82],
["2001-12-29", 85],
["2001-12-30", 98],
["2001-12-31", 132],
["2002-01-01", 88],
["2002-01-02", 97],
["2002-01-03", 116],
["2002-01-04", 111],
["2002-01-05", 81],
["2002-01-06", 78],
["2002-01-07", 138],
["2002-01-08", 100],
["2002-01-09", 157],
["2002-01-10", 349],
["2002-01-11", 196],
["2002-01-12", 190],
["2002-01-13", 100],
["2002-01-14", 103],
["2002-01-15", 160],
["2002-01-16", 97],
["2002-01-17", 103],
["2002-01-18", 123],
["2002-01-19", 137],
["2002-01-20", 268],
["2002-01-21", 52],
["2002-01-22", 44],
["2002-01-23", 66],
["2002-01-24", 106],
["2002-01-25", 94],
["2002-01-26", 96],
["2002-01-27", 58],
["2002-01-28", 62],
["2002-01-29", 56],
["2002-01-30", 62],
["2002-01-31", 109],
["2002-02-01", 96],
["2002-02-02", 95],
["2002-02-03", 126],
["2002-02-04", 161],
["2002-02-05", 138],
["2002-02-06", 106],
["2002-02-07", 99],
["2002-02-08", 113],
["2002-02-09", 80],
["2002-02-10", 90],
["2002-02-11", 86],
["2002-02-12", 142],
["2002-02-13", 93],
["2002-02-14", 125],
["2002-02-15", 135],
["2002-02-16", 138],
["2002-02-17", 111],
["2002-02-18", 70],
["2002-02-19", 101],
["2002-02-20", 153],
["2002-02-21", 146],
["2002-02-22", 97],
["2002-02-23", 82],
["2002-02-24", 99],
["2002-02-25", 131],
["2002-02-26", 88],
["2002-02-27", 74],
["2002-02-28", 96],
["2002-03-01", 133],
["2002-03-02", 105],
["2002-03-03", 86],
["2002-03-04", 105],
["2002-03-05", 89],
["2002-03-06", 70],
["2002-03-07", 87],
["2002-03-08", 109],
["2002-03-09", 161],
| |
from cassandra.cluster import Cluster
from cassandra.protocol import NumpyProtocolHandler
from cassandra.auth import PlainTextAuthProvider
from dask.distributed import Client, LocalCluster
import copy
import dask
import dask.dataframe as dd
import logging
import pandas as pd
from sqlalchemy import sql
from sqlalchemy.sql import text
from threading import Event
class PagedResultHandler(object):
    """An handler for paged loading of a Cassandra's query result.

    Registers itself as the page/error callback on the given response
    future and accumulates every received page into ``self.df``.
    ``finished_event`` is set once the last page (or an error) arrives.
    """

    def __init__(self, future):
        """
        Initialization of PagedResultHandler
        > handler = PagedResultHandler(future)
        :param future: Future from Cassandra session asynchronous execution.
        """
        self.error = None
        self.finished_event = Event()
        self.future = future
        self.future.add_callbacks(callback=self.handle_page,
                                  errback=self.handle_error)
        self.df = None

    def handle_page(self, rows):
        """
        It pages the result of a Cassandra query.
        > handle_page(rows)
        :param rows: One page of the query result (a pandas DataFrame,
            per the session's row factory).
        :return:
        """
        if self.df is None:
            self.df = rows
        else:
            # Bug fix: DataFrame.append was deprecated in pandas 1.4 and
            # removed in 2.0; pd.concat is the supported equivalent.
            self.df = pd.concat([self.df, rows], ignore_index=True)
        if self.future.has_more_pages:
            self.future.start_fetching_next_page()
        else:
            self.finished_event.set()

    def handle_error(self, exc):
        """
        It handles an exception: records it and unblocks any waiter.
        > handle_error(exc)
        :param exc: It is a Python Exception.
        :return:
        """
        self.error = exc
        self.finished_event.set()
class Connector(object):
    """ It sets and manages a connection to a Cassandra Cluster. """

    def __init__(self, cassandra_clusters, cassandra_keyspace, username,
                 password):
        """
        Initialization of CassandraConnector. Connects to the Cassandra
        cluster given by a list of IPs (with optional plain-text auth) and
        opens a session on the given keyspace, configured to return query
        results as pandas DataFrames.
        > CassandraConnector(['10.0.1.1', '10.0.1.2'], 'test')
        :param cassandra_clusters: List of cluster IPs, each a string.
        :param cassandra_keyspace: Existing Cassandra keyspace name.
        :param username: Username string, or None for no authentication.
        :param password: Password string.
        """
        self.logger = logging.getLogger(__name__)
        self.error = None
        self.clusters = cassandra_clusters
        self.keyspace = cassandra_keyspace
        self.auth = None

        def to_dataframe(colnames, rows):
            # Row factory: turn a raw result page into a DataFrame.
            return pd.DataFrame(rows, columns=colnames)

        self.logger.info("connecting to:" + str(self.clusters) + ".\n")
        if username is not None:
            self.auth = PlainTextAuthProvider(username=username,
                                              password=password)
            self.cluster = Cluster(self.clusters,
                                   auth_provider=self.auth)
        else:
            self.cluster = Cluster(self.clusters)
        self.session = self.cluster.connect(self.keyspace)
        # Hand results back as pandas DataFrames.
        self.session.client_protocol_handler = NumpyProtocolHandler
        self.session.row_factory = to_dataframe
        # Cache of table metadata, filled lazily elsewhere.
        self.tables = dict()

    def shutdown(self):
        """
        Shutdowns the existing connection with a Cassandra cluster.
        > shutdown()
        """
        self.session.shutdown()
        self.cluster.shutdown()
class Operators(object):
    """ Operators for a valid SQL select statement over a Cassandra Table. """

    def __init__(self):
        """
        Initialization of CassandraOperators.
        > CassandraOperators()
        """
        self.logger = logging.getLogger(__name__)
        self.error = None
        self.warning = None
        # All operator names, then the same names grouped by arity:
        # single-value, binary, and list-valued operators.
        self.operators = [
            "less_than_equal", "less_than", "greater_than_equal",
            "greater_than", "equal", "between", "like", "in_", "notin_"
        ]
        self.si_operators = [
            "less_than_equal", "less_than", "greater_than_equal",
            "greater_than", "equal", "like"
        ]
        self.bi_operators = ["between"]
        self.li_operators = ["in_", "notin_"]

    @staticmethod
    def create_predicate(table, col_name, op_name, values):
        """
        It creates a single predicate over a table's column using an operator.
        Call print_operators() to list all available operators.
        > create_predicate(table, 'month', 'less_than', [1])
        :param table: Instance of CassandraTable.
        :param col_name: Table's column name as string.
        :param op_name: Operator name as string (docstring example fixed
            from the original's misspelled 'les_than' / bare int value).
        :param values: List of values. The number of values depends on the operator.
        :raises Exception: If op_name is not a recognised operator.
        """
        column = table.predicate_cols[col_name]
        if op_name == "less_than_equal":
            return column <= values[0]
        elif op_name == "less_than":
            return column < values[0]
        elif op_name == "greater_than_equal":
            return column >= values[0]
        elif op_name == "greater_than":
            return column > values[0]
        elif op_name == "equal":
            return column == values[0]
        elif op_name == "between":
            return column.between(values[0], values[1])
        elif op_name == "like":
            return column.like(values[0])
        elif op_name == "in_":
            return column.in_(values)
        elif op_name == "notin_":
            return column.notin_(values)
        # Unreachable `return` that followed the raise has been removed.
        raise Exception("Invalid operator!!!")

    def print_operators(self):
        """
        Print all the operators that can be used in a SQL select statement over a Cassandra's table.
        > print_operators()
        """
        print("The single value operators - op(x) - are: " +
              str(self.si_operators) + ".")
        print("The binary operators - op(x,y) - are: " +
              str(self.bi_operators) + ".")
        print("The list of values operators - op([x,y,...,z]) - are: " +
              str(self.li_operators) + ".")
class LoadingQuery(object):
""" Class to define a SQL select statement over a Cassandra table. """
def __init__(self):
"""
Initialization of CassandraLoadingQuery
> CassandraLoadingQuery()
"""
self.logger = logging.getLogger(__name__)
self.error = None
self.warning = None
self.projections = None
self.and_predicates = None
self.sql_query = None
return
def set_projections(self, table, projections):
"""
It set the list of columns to be projected, i.e., selected.
> set_projections(table, ['id', 'year', 'month', 'day'])
:param table: Instance of class CassandraTable
:param projections: A list of columns names. Each column name is a String.
"""
if projections is None or len(projections) == 0:
self.logger.info("All columns will be projected!!!")
self.projections = projections
else:
for col in projections:
if col not in table.cols:
raise Exception(
"Invalid column, please use one of the following columns: "
+ str(table.cols) + "!!!")
self.projections = list(dict.fromkeys(projections))
return
def drop_projections(self):
"""
It drops the list of columns to be projected, i.e., selected.
> drop_projections()
"""
self.projections = None
return
def set_and_predicates(self, table, predicates):
"""
It sets a list of predicates with 'and' clause over the non partition columns of a Cassandra's table.
> set_and_predicates(table, [('month', 'less_than', 1), ('day', 'in\_', [1,2,3,8,12,30])])
:param table: Instance of class CassandraTable.
:param predicates: List of triples. Each triple contains column name as String,
operator name as String, and a list of values depending on the operator. CassandraOperators.print_operators()
prints all available operators. It should only contain columns which are not partition columns.
"""
if predicates is None or len(predicates) == 0:
self.logger.info(
"No predicates over the non primary key columns were defined!!!"
)
else:
operators = Operators()
for predicate in predicates:
(col, op, values) = predicate
if col not in table.predicate_cols:
raise Exception(
"Predicate: " + str(predicate) +
" has an primary key column. Pick a non-primary key column "
+ str(table.predicate_cols.keys() + "!!!\n"))
else:
if self.and_predicates is None:
self.and_predicates = [
operators.create_predicate(table, col, op, values)
]
else:
self.and_predicates.append(
operators.create_predicate(table, col, op, values))
return
def remove_and_predicates(self):
"""
It drops the list of predicates with 'and' clause over the non partition columns of a Cassandra's table.
> remove_and_predicates()
"""
self.and_predicates = None
return
@staticmethod
def partition_elimination(table, partitions_to_load, force):
"""
It does partition elimination when by selecting only a range of partition key values.
> partition_elimination( table, [(id, [1, 2, 3, 4, 5, 6]), ('year',[2019])] )
:param table: Instance of a CassandraTable
:param partitions_to_eliminate: List of tuples. Each tuple as a column name as String
and a list of keys which should be selected. It should only contain columns which are partition columns.
:param force: It is a boolean. In case all the partitions need to be loaded, which is not recommended,
it should be set to 'True'.
"""
part_cols_prun = dict.fromkeys(table.partition_cols)
if partitions_to_load is None or len(partitions_to_load) == 0:
if force is True:
return
else:
raise Exception(
"ATTENTION: All partitions will be loaded, query might be aborted!!!"
+ "To proceed re-call the function with force = True.")
else:
for partition in partitions_to_load:
(col, part_keys) = partition
if col not in table.partition_cols:
raise Exception(
"Column " + str(col) +
" is not a partition column. It should be one of " +
str(table.partition_cols) + ".")
else:
try:
part_cols_prun[col] = list(map(float, part_keys))
except Exception as e:
raise ("Invalid value in the partition keys list: " +
str(e) + " !!!")
for col in list(part_cols_prun.keys()):
if col in list(table.partition_cols):
if part_cols_prun[col] is not None:
table.partition_keys = table.partition_keys[
table.partition_keys[col].isin(part_cols_prun[col])]
return
def build_query(self, table):
"""
It builds and compiles the query which will be used to load data from a Cassandra table into a Dask Dataframe.
> build_query(table)
:param table: Instance of CassandraTable.
"""
if self.projections is None:
self.sql_query = sql.select([text('*')
]).select_from(text(table.name))
else:
self.sql_query = sql.select([text(f) for f in self.projections
]).select_from(text(table.name))
if self.and_predicates is not None:
self.sql_query = self.sql_query.where(
sql.expression.and_(*self.and_predicates))
return
def print_query(self):
"""
It prints the query which will be used to load data from a Cassandra table into a Dask Dataframe.
> print_query()
"""
if self.sql_query is None:
self.error = "The query needs first to be defined!!! "
self.finished_event.set()
else:
self.logger.info(
self.sql_query.compile(compile_kwargs={"literal_binds": True}))
return
class Table():
"""It stores and manages metadata and data from a Cassandra table loaded into a Dask DataFrame."""
def __init__(self, keyspace, name):
"""
Initialization of a CassandraTable.
> table = CassandraTable('test', 'tab1')
:param keyspace: It is a string which contains an existent Cassandra keyspace.
:param name: It | |
from collections import deque
import numpy as np
from typing import Tuple, Dict, Optional, Any, List
import logging
logger = logging.getLogger('sarbor')
Bounds = Tuple[np.ndarray, np.ndarray]
class Node:
"""
Basic Node datastructure, has basic getter and setter methods
"""
def __init__(self, **kwargs):
"""
node has only key, value, parent
"""
self._key = kwargs.get("key", None)
self._parent = kwargs.get("parent", None)
self._children = kwargs.get("children", None)
self._strahler = kwargs.get("strahler", None)
self._value = NodeData(
center=kwargs.get("center", None), mask=kwargs.get("mask", None)
)
def __str__(self):
return "nid: {}, {}".format(self.key, self.value)
@property
def key(self):
"""
A unique identifier for this Node
"""
if self._key is None:
raise ValueError("This node does not yet have a key")
else:
return self._key
@key.setter
def key(self, key):
if self._key is None:
self._key = key
else:
raise ValueError("Overwriting node keys is not supported")
@property
def value(self):
"""
A unique identifier for this Node
"""
if self._value is None:
self._value = NodeData()
return self._value
@value.setter
def value(self, value):
if self._value is None:
self._value = value
else:
raise ValueError("Overwriting node keys is not supported")
@property
def parent(self):
"""
This Nodes parent. (None if this node is the root of a tree)
"""
return self._parent
@property
def parent_key(self):
"""
Returns the parents key or None if no parent
"""
if self.parent is None:
return None
else:
return self.parent.key
@parent.setter
def parent(self, parent):
if self._parent is None:
self._parent = parent
else:
raise ValueError("Overwriting node parents is not supported")
@property
def children(self):
if self._children is None:
self._children = []
return self._children
def add_child(self, child):
self.children.append(child)
child.parent = self
def get_neighbors(self):
if self.parent is not None:
return [self.parent] + self.children
else:
return self.children
@property
def strahler(self) -> int:
if self._strahler is not None:
return self._strahler
else:
self._strahler = self._calculate_strahler()
return self._strahler
@strahler.setter
def strahler(self, strahler):
self._strahler = strahler
if self.value is not None:
self.value.strahler = strahler
def is_root(self) -> bool:
return self.parent is None
def is_branch(self) -> bool:
return len(self.get_neighbors()) > 2
def is_leaf(self) -> bool:
return len(self.children) == 0
def is_regular(self) -> bool:
normal = len(self.get_neighbors()) == 2
if normal:
assert not (self.is_branch() or self.is_leaf())
return normal
def _calculate_strahler(self) -> int:
if self.is_leaf():
return 1
child_strahlers = [child._strahler for child in self.children]
if any([strahler is None for strahler in child_strahlers]):
raise ValueError("A child did not have a strahler index")
max_child_strahler, count = 1, 0
for child_strahler in child_strahlers:
if child_strahler > max_child_strahler:
max_child_strahler, count = child_strahler, 1
elif child_strahler == max_child_strahler:
count += 1
if count > 1:
return max_child_strahler + 1
else:
return max_child_strahler
@property
def data(self):
"""
Return all the information necessary to build a clone of this node
(key, parent_key, strahler).
"""
data = {"nid": self.key, "pid": self.parent_key, "strahler": self.strahler}
if self.value is not None:
data.update(self.value.data)
return data
def get_following(self, previous):
"""
get the next node from the perspective of the previous.
i.e. given nodes:
a--b--c
b.get_following(a) = c
b.get_vollowing(c) = a
"""
neighbors = self.get_neighbors()
if len(neighbors) > 0 and previous in neighbors:
neighbors.remove(previous)
return neighbors
elif previous not in neighbors:
return neighbors
else:
raise Exception("This node has {} neighbors".format(len(neighbors)))
def traverse(self, ignore: List[int] = None):
if ignore is None:
ignore = []
queue = deque([self])
while len(queue) > 0:
current = queue.pop()
yield current
for child in sorted(current.children, key=lambda x: x.key, reverse=True):
if child.key not in ignore:
queue.append(child)
class Arbor:
    """
    A basic arbor structure. Only has access to nodes and edges
    and provides as much functionality it can with just that.
    Any other data can be stored in the value parameter of
    individual nodes.
    """
    def __init__(self, root: Node = None):
        """
        Initialize an empty tree (optionally seeded with a root node).
        """
        self.root = root
        self._nodes = {}  # Dict[int, Node]
        if root is not None:
            self._nodes[root.key] = root
    def search(self, key: int) -> Node:
        """
        Linear search for the node with the given key; raises if absent.
        """
        for node in self.traverse():
            # BUG FIX: Node exposes `key` as a property; the original called
            # a non-existent `get_key()` method and always raised
            # AttributeError.
            if key == node.key:
                return node
        raise Exception("node {0} does not exist".format(key))
    @property
    def nodes(self) -> Dict[int, Node]:
        """Mapping key -> node (lazily created)."""
        if self._nodes is None:
            self._nodes = {}
        return self._nodes
    def build_from_root(self, root: Node):
        """Set ``root`` and index every reachable node by key."""
        self.root = root
        for node in self.traverse():
            self.nodes[node.key] = node
    def get_key_map(self):
        """Return a fresh key -> node map built by traversal."""
        key_map = {}
        for node in self.traverse():
            key_map[node.key] = node
        return key_map
    def traverse(self, fifo=True):
        """
        Iterate over the elements of the tree
        traversal options:
        - first in first out (depth first)
        - first in last out (bredth first)
        """
        if self.root is None:
            raise Exception("this arbor has no root")
        if fifo:
            return self.depth_first_traversal()
        return self.breadth_first_traversal()
    def traverse_segments(self):
        """
        Traverses segments (root/branch-to-branch/leaf paths) in a breadth
        first style to avoid recursion.
        """
        queue = deque([self.root])
        while len(queue) > 0:
            root = queue.popleft()
            for child in sorted(root.children, key=lambda x: x.key):
                segment = [root]
                current = child
                # Follow the chain of pass-through nodes.
                while len(current.children) == 1:
                    segment.append(current)
                    current = current.children[0]
                segment.append(current)
                if len(current.children) > 1:
                    queue.append(current)
                yield segment
    def get_minimal_subtree(self, ids):
        """
        get the smallest possible subtree containing all given ids
        """
        # BUG FIX: copy `ids` into a set instead of aliasing it — the
        # original emptied the caller's list via `uncovered.remove`.
        uncovered = set(ids)
        all_nodes = []
        potential_queue = []
        last = None
        for node in self.traverse(True):
            # Pop the partial path back to this node's parent.
            while node.parent != last and len(potential_queue) > 0:
                del potential_queue[0]
                if len(potential_queue) > 0:
                    last = potential_queue[0]
            potential_queue.insert(0, node)
            if node.key in uncovered:
                uncovered.remove(node.key)
                # Everything on the current path is part of the subtree.
                all_nodes = all_nodes + potential_queue
                potential_queue = []
                last = node
            if last is not None:
                last = node
        assert len(uncovered) == 0, "Not all nodes were found. missing: {}".format(
            uncovered
        )
        return all_nodes
    def get_root_leaf_paths(self):
        """
        Yield the current root-to-leaf path each time a leaf is reached.
        NOTE: the same deque object is yielded (and later mutated) each time;
        callers must copy it if they need to keep it.
        """
        potential_queue = deque([])
        last = None
        for node in self.traverse(True):
            while node.parent != last and len(potential_queue) > 0:
                potential_queue.pop()
                if len(potential_queue) > 0:
                    last = potential_queue[-1]
            potential_queue.append(node)
            last = node
            if len(node.children) == 0:
                yield potential_queue
            if last is not None:
                last = node
    def breadth_first_traversal(self, ignore: Optional[List[int]] = None):
        queue = deque([self.root])
        while len(queue) > 0:
            current = queue.pop()
            yield current
            for child in sorted(current.children, key=lambda x: x.key, reverse=True):
                if ignore is None or child.key not in ignore:
                    queue.appendleft(child)
    def depth_first_traversal(self, ignore: Optional[List[int]] = None):
        queue = deque([self.root])
        while len(queue) > 0:
            current = queue.pop()
            yield current
            for child in sorted(current.children, key=lambda x: x.key, reverse=True):
                if ignore is None or child.key not in ignore:
                    queue.append(child)
    def get_interesting_nodes(self, root=False, leaves=False, branches=False):
        """Yield the root and/or every leaf and/or every branch node."""
        if root or leaves or branches:
            for node in self.traverse():
                if root:
                    root = False  # only the first traversed node is the root
                    yield node
                elif branches and node.is_branch():
                    yield node
                elif leaves and node.is_leaf():
                    yield node
class NodeData:
    """
    Contains the data for a node (center coordinates, segmentation mask,
    and any extra keyword attributes).
    """
    def __init__(self, **kwargs):
        self._data = kwargs
    def __str__(self):
        return "center: {}".format(self.center)
    @property
    def data(self) -> Dict[str, Any]:
        """Raw attribute dictionary."""
        return self._data
    @property
    def center(self) -> np.ndarray:
        """
        Get the center of a region (None when unset).
        """
        return self.data.get("center", None)
    @center.setter
    def center(self, center: np.ndarray):
        if self.center is None:
            self.data["center"] = center
        else:
            # NOTE(review): relies on the module-level `logger`; the old
            # center is NOT replaced here, only logged — confirm intended.
            logger.debug(
                "Overriding the center {} with {}".format(self.center, center)
            )
    @property
    def mask(self) -> Optional[np.ndarray]:
        # mask can legitimately be None
        return self.data.get("mask", None)
    @mask.setter
    def mask(self, mask: np.ndarray):
        if self.data.get("mask", None) is None:
            self.data["mask"] = mask
        else:
            raise Exception("Overriding the mask is not supported")
    def get_bounds(self, fov_shape: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """
        Return (lower, upper) corners of a fov_shape-sized box around center.
        Note fov_shape should be in nm cubed.
        """
        return (self.center - fov_shape // 2, self.center + fov_shape // 2 + 1)
    def clone_center(self):
        """Independent copy of the center array, or None."""
        if self.center is not None:
            # BUG FIX: numpy arrays are copied with .copy(); .clone() is a
            # torch method and raised AttributeError here.
            return self.center.copy()
        return None
    def clone_mask(self):
        """Independent copy of the mask array, or None."""
        if self.mask is not None:
            # BUG FIX: .copy(), not torch's .clone() (see clone_center).
            return self.mask.copy()
        return None
    def clone(self):
        """Deep-ish copy of the payload as a plain dict."""
        return {"mask": self.clone_mask(), "center": self.clone_center()}
class SpatialArbor(Arbor):
"""
Class for storing and accessing local segmentations around each node
"""
    def __init__(self):
        # Delegate node/edge bookkeeping to the Arbor base class; the
        # bounding box is computed lazily by node_bounds.
        Arbor.__init__(self)
        self._bounds = None
@property
def node_bounds(self) -> Bounds:
"""
Bounds containing all node centers
"""
if self._bounds is None:
self._bounds = self.calculate_tree_bounds()
return self._bounds
def calculate_tree_bounds(self) -> Bounds:
"""
Find the minimum and maximum node center
"""
lower = np.array([float("inf"), float("inf"), float("inf")])
upper = -lower.copy()
for nid, node in self.nodes.items():
if node.value is not None:
upper = np.maximum(node.value.center, upper)
lower = np.minimum(node.value.center, lower)
return (lower.astype(int), upper.astype(int))
def get_radius(self, node, radius):
"""
get all nodes within a specific radius (physical distance) of a given node
radius can either be a | |
),
'plt_towlower':( 'int', None, 'cdecl', '*.towlower', (('int', None),) ),
'plt_towupper':( 'int', None, 'cdecl', '*.towupper', (('int', None),) ),
'plt_vdbgprintex':( 'int', None, 'stdcall', '*.vDbgPrintEx', (('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt_vdbgprintexwithprefix':( 'int', None, 'stdcall', '*.vDbgPrintExWithPrefix', (('int', None), ('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt_vsprintf':( 'int', None, 'cdecl', '*.vsprintf', (('int', None), ('int', None), ('int', None)) ),
'plt_wcscat':( 'int', None, 'cdecl', '*.wcscat', (('int', None), ('void *', 'ptr')) ),
'plt_wcschr':( 'int', None, 'cdecl', '*.wcschr', (('void *', 'ptr'), ('int', None)) ),
'plt_wcscmp':( 'int', None, 'cdecl', '*.wcscmp', (('void *', 'ptr'), ('int', None)) ),
'plt_wcscpy':( 'int', None, 'cdecl', '*.wcscpy', (('void *', 'dst'), ('void *', 'src')) ),
'plt_wcscspn':( 'int', None, 'cdecl', '*.wcscspn', (('void *', 'ptr'), ('int', None)) ),
'plt_wcslen':( 'int', None, 'cdecl', '*.wcslen', (('void *', 'ptr'),) ),
'plt_wcsncat':( 'int', None, 'cdecl', '*.wcsncat', (('void *', 'ptr'), ('int', None), ('int', None)) ),
'plt_wcsncmp':( 'int', None, 'cdecl', '*.wcsncmp', (('int', None), ('int', None), ('int', None)) ),
'plt_wcsncpy':( 'int', None, 'cdecl', '*.wcsncpy', (('void *', 'ptr'), ('int', None), ('int', None)) ),
'plt_wcspbrk':( 'int', None, 'cdecl', '*.wcspbrk', (('int', None), ('int', None)) ),
'plt_wcsrchr':( 'int', None, 'cdecl', '*.wcsrchr', (('int', None), ('int', None)) ),
'plt_wcsspn':( 'int', None, 'cdecl', '*.wcsspn', (('int', None), ('int', None)) ),
'plt_wcsstr':( 'int', None, 'cdecl', '*.wcsstr', (('void *', 'ptr'), ('int', None)) ),
'plt_wcstol':( 'int', None, 'cdecl', '*.wcstol', (('int', None), ('int', None), ('int', None)) ),
'plt_wcstombs':( 'int', None, 'cdecl', '*.wcstombs', (('int', None), ('void *', 'ptr'), ('int', None)) ),
'plt_wcstoul':( 'int', None, 'cdecl', '*.wcstoul', (('int', None), ('void *', 'ptr'), ('int', None)) ),
'plt__hread':( 'int', None, 'stdcall', '*._hread', (('int', None), ('void *', 'ptr'), ('int', None)) ),
'plt__hwrite':( 'int', None, 'stdcall', '*._hwrite', (('int', None), ('void *', 'ptr'), ('int', None)) ),
'plt__lclose':( 'int', None, 'stdcall', '*._lclose', (('int', None),) ),
'plt__lcreat':( 'int', None, 'stdcall', '*._lcreat', (('int', None), ('int', None)) ),
'plt__llseek':( 'int', None, 'stdcall', '*._llseek', (('int', None), ('int', None), ('int', None)) ),
'plt__lopen':( 'int', None, 'stdcall', '*._lopen', (('int', None), ('int', None)) ),
'plt__lread':( 'int', None, 'stdcall', '*._lread', (('int', None), ('void *', 'ptr'), ('int', None)) ),
'plt__lwrite':( 'int', None, 'stdcall', '*._lwrite', (('int', None), ('void *', 'ptr'), ('int', None)) ),
'plt_lstrcat':( 'int', None, 'stdcall', '*.lstrcat', (('int', None), ('int', None)) ),
'plt_lstrcata':( 'int', None, 'stdcall', '*.lstrcatA', (('int', None), ('int', None)) ),
'plt_lstrcatw':( 'int', None, 'stdcall', '*.lstrcatW', (('int', None), ('void *', 'ptr')) ),
'plt_lstrcmp':( 'int', None, 'stdcall', '*.lstrcmp', (('void *', 'str1'), ('void *', 'str2')) ),
'plt_lstrcmpa':( 'int', None, 'stdcall', '*.lstrcmpA', (('void *', 'str1'), ('void *', 'str2')) ),
'plt_lstrcmpw':( 'int', None, 'stdcall', '*.lstrcmpW', (('void *', 'str1'), ('void *', 'str2')) ),
'plt_lstrcmpi':( 'int', None, 'stdcall', '*.lstrcmpi', (('void *', 'str1'), ('void *', 'str2')) ),
'plt_lstrcmpia':( 'int', None, 'stdcall', '*.lstrcmpiA', (('void *', 'str1'), ('void *', 'str2')) ),
'plt_lstrcmpiw':( 'int', None, 'stdcall', '*.lstrcmpiW', (('void *', 'str1'), ('void *', 'str2')) ),
'plt_lstrcpy':( 'int', None, 'stdcall', '*.lstrcpy', (('int', None), ('int', None)) ),
'plt_lstrcpya':( 'int', None, 'stdcall', '*.lstrcpyA', (('int', None), ('int', None)) ),
'plt_lstrcpyw':( 'int', None, 'stdcall', '*.lstrcpyW', (('int', None), ('int', None)) ),
'plt_lstrcpyn':( 'int', None, 'stdcall', '*.lstrcpyn', (('void *', 'ptr'), ('int', None), ('int', None)) ),
'plt_lstrcpyna':( 'int', None, 'stdcall', '*.lstrcpynA', (('void *', 'ptr'), ('int', None), ('int', None)) ),
'plt_lstrcpynw':( 'int', None, 'stdcall', '*.lstrcpynW', (('int', None), ('int', None), ('int', None)) ),
'plt_lstrlen':( 'int', None, 'stdcall', '*.lstrlen', (('int', None),) ),
'plt_lstrlena':( 'int', None, 'stdcall', '*.lstrlenA', (('int', None),) ),
'plt_lstrlenw':( 'int', None, 'stdcall', '*.lstrlenW', (('int', None),) ),
'plt__getdays':( 'int', None, 'cdecl', '*._Getdays', () ),
'plt__getmonths':( 'int', None, 'cdecl', '*._Getmonths', () ),
'plt__gettnames':( 'int', None, 'cdecl', '*._Gettnames', () ),
'plt__huge':( 'int', None, 'cdecl', '*._HUGE', () ),
'plt__strftime':( 'int', None, 'cdecl', '*._Strftime', (('void *', 's'), ('int', 'max'), ('void *', 'fmt'), ('int', 'tm'), ('int', None)) ),
'plt__xcptfilter':( 'int', None, 'cdecl', '*._XcptFilter', (('int', None), ('int', None)) ),
'plt___cppxcptfilter':( 'int', None, 'cdecl', '*.__CppXcptFilter', (('int', None), ('int', None)) ),
'plt___cxxcallunwinddtor':( 'int', None, 'cdecl', '*.__CxxCallUnwindDtor', (('void *', 'funcptr'), ('int', None)) ),
'plt___cxxcallunwindvecdtor':( 'int', None, 'cdecl', '*.__CxxCallUnwindVecDtor', (('void *', 'funcptr'), ('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt___cxxdetectrethrow':( 'int', None, 'cdecl', '*.__CxxDetectRethrow', (('int', None),) ),
'plt___cxxexceptionfilter':( 'int', None, 'cdecl', '*.__CxxExceptionFilter', (('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt___cxxframehandler':( 'int', None, 'bfastcall', '*.__CxxFrameHandler', (('int', None), ('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt___cxxlongjmpunwind':( 'int', None, 'stdcall', '*.__CxxLongjmpUnwind', (('int', None),) ),
'plt___cxxqueryexceptionsize':( 'int', None, 'cdecl', '*.__CxxQueryExceptionSize', () ),
'plt___cxxregisterexceptionobject':( 'int', None, 'cdecl', '*.__CxxRegisterExceptionObject', (('int', None), ('void *', 'ptr')) ),
'plt___cxxunregisterexceptionobject':( 'int', None, 'cdecl', '*.__CxxUnregisterExceptionObject', (('int', None), ('int', None), ('int', None)) ),
'plt___destructexceptionobject':( 'int', None, 'cdecl', '*.__DestructExceptionObject', (('int', None),) ),
'plt___rtcasttovoid':( 'int', None, 'cdecl', '*.__RTCastToVoid', () ),
'plt___rtdynamiccast':( 'int', None, 'cdecl', '*.__RTDynamicCast', ( ('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt___rttypeid':( 'int', None, 'cdecl', '*.__RTtypeid', (('int', None),) ),
'plt___stringtold':( 'int', None, 'cdecl', '*.__STRINGTOLD', (('int', None), ('void *', 'ptr'), ('int', None), ('int', None)) ),
'plt____lc_codepage_func':( 'int', None, 'cdecl', '*.___lc_codepage_func', () ),
'plt____lc_collate_cp_func':( 'int', None, 'cdecl', '*.___lc_collate_cp_func', () ),
'plt____lc_handle_func':( 'int', None, 'cdecl', '*.___lc_handle_func', () ),
'plt____mb_cur_max_func':( 'int', None, 'cdecl', '*.___mb_cur_max_func', () ),
'plt____setlc_active_func':( 'int', None, 'cdecl', '*.___setlc_active_func', () ),
'plt____unguarded_readlc_active_add_func':( 'int', None, 'cdecl', '*.___unguarded_readlc_active_add_func', () ),
'plt___argc':( 'int', None, 'cdecl', '*.__argc', () ),
'plt___argv':( 'int', None, 'cdecl', '*.__argv', () ),
'plt___badioinfo':( 'int', None, 'cdecl', '*.__badioinfo', () ),
'plt___buffer_overrun':( 'int', None, 'cdecl', '*.__buffer_overrun', () ),
'plt___crtcomparestringa':( 'int', None, 'cdecl', '*.__crtCompareStringA', (('int', 'Locale'), ('int', 'dwCmpFlags'), ('void *', 'lpString1'), ('int', 'cchCount1'), ('void *', 'lpString2'), ('int', 'cchCount2'), ('int', 'code_page')) ),
'plt___crtcomparestringw':( 'int', None, 'cdecl', '*.__crtCompareStringW', (('int', 'Locale'), ('int', 'dwCmpFlags'), ('void *', 'lpString1'), ('int', 'cchCount1'), ('void *', 'lpString2'), ('int', 'cchCount2'), ('int', 'code_page')) ),
'plt___crtgetlocaleinfow':( 'int', None, 'cdecl', '*.__crtGetLocaleInfoW', (('int', None), ('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt___crtgetstringtypew':( 'int', None, 'cdecl', '*.__crtGetStringTypeW', (('int', None), ('int', None), ('int', None), ('void *', 'ptr'), ('int', None), ('int', None)) ),
'plt___crtlcmapstringa':( 'int', None, 'cdecl', '*.__crtLCMapStringA', (('int', 'LocalName'), ('int', 'dwMapFlags'), ('void *', 'lpSrcStr'), ('int', 'cchSrc'), ('void *', 'lpDstStr'), ('int', 'cchDst'), ('int', 'code_page'), ('int', 'bError')) ),
'plt___crtlcmapstringw':( 'int', None, 'cdecl', '*.__crtLCMapStringW', (('int', None), ('int', None), ('int', None), ('int', None), ('void *', 'ptr'), ('int', None), ('int', None)) ),
'plt___dllonexit':( 'int', None, 'cdecl', '*.__dllonexit', (('int', None), ('int', None), ('int', None)) ),
'plt___doserrno':( 'int', None, 'cdecl', '*.__doserrno', () ),
'plt___fpecode':( 'int', None, 'cdecl', '*.__fpecode', () ),
'plt___getmainargs':( 'int', None, 'cdecl', '*.__getmainargs', (('int', None), ('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt___initenv':( 'int', None, 'cdecl', '*.__initenv', () ),
'plt___iob_func':( 'int', None, 'cdecl', '*.__iob_func', () ),
'plt___isascii':( 'int', None, 'cdecl', '*.__isascii', (('int', None),) ),
'plt___iscsym':( 'int', None, 'cdecl', '*.__iscsym', (('int', None),) ),
'plt___iscsymf':( 'int', None, 'cdecl', '*.__iscsymf', (('int', None),) ),
'plt___lc_clike':( 'int', None, 'bfastcall', '*.__lc_clike', (('int', None),) ),
'plt___lc_codepage':( 'int', None, 'cdecl', '*.__lc_codepage', () ),
'plt___lc_collate_cp':( 'int', None, 'cdecl', '*.__lc_collate_cp', () ),
'plt___lc_handle':( 'int', None, 'cdecl', '*.__lc_handle', () ),
'plt___lconv_init':( 'int', None, 'cdecl', '*.__lconv_init', () ),
'plt___mb_cur_max':( 'int', None, 'bfastcall', '*.__mb_cur_max', (('int', None),) ),
'plt___p___argc':( 'int', None, 'cdecl', '*.__p___argc', () ),
'plt___p___argv':( 'int', None, 'cdecl', '*.__p___argv', () ),
'plt___p___initenv':( 'int', None, 'cdecl', '*.__p___initenv', () ),
'plt___p___mb_cur_max':( 'int', None, 'cdecl', '*.__p___mb_cur_max', () ),
'plt___p___wargv':( 'int', None, 'cdecl', '*.__p___wargv', () ),
'plt___p___winitenv':( 'int', None, 'cdecl', '*.__p___winitenv', () ),
'plt___p__acmdln':( 'int', None, 'cdecl', '*.__p__acmdln', () ),
'plt___p__amblksiz':( 'int', None, 'cdecl', '*.__p__amblksiz', () ),
'plt___p__commode':( 'int', None, 'cdecl', '*.__p__commode', () ),
'plt___p__daylight':( 'int', None, 'cdecl', '*.__p__daylight', () ),
'plt___p__dstbias':( 'int', None, 'cdecl', '*.__p__dstbias', () ),
'plt___p__environ':( 'int', None, 'cdecl', '*.__p__environ', () ),
'plt___p__fileinfo':( 'int', None, 'cdecl', '*.__p__fileinfo', () ),
'plt___p__fmode':( 'int', None, 'cdecl', '*.__p__fmode', () ),
'plt___p__iob':( 'int', None, 'cdecl', '*.__p__iob', () ),
'plt___p__mbcasemap':( 'int', None, 'cdecl', '*.__p__mbcasemap', () ),
'plt___p__mbctype':( 'int', None, 'cdecl', '*.__p__mbctype', () ),
'plt___p__osver':( 'int', None, 'cdecl', '*.__p__osver', () ),
'plt___p__pctype':( 'int', None, 'cdecl', '*.__p__pctype', () ),
'plt___p__pgmptr':( 'int', None, 'cdecl', '*.__p__pgmptr', () ),
'plt___p__pwctype':( 'int', None, 'cdecl', '*.__p__pwctype', () ),
'plt___p__timezone':( 'int', None, 'cdecl', '*.__p__timezone', () ),
'plt___p__tzname':( 'int', None, 'cdecl', '*.__p__tzname', () ),
'plt___p__wcmdln':( 'int', None, 'cdecl', '*.__p__wcmdln', () ),
'plt___p__wenviron':( 'int', None, 'cdecl', '*.__p__wenviron', () ),
'plt___p__winmajor':( 'int', None, 'cdecl', '*.__p__winmajor', () ),
'plt___p__winminor':( 'int', None, 'cdecl', '*.__p__winminor', () ),
'plt___p__winver':( 'int', None, 'cdecl', '*.__p__winver', () ),
'plt___p__wpgmptr':( 'int', None, 'cdecl', '*.__p__wpgmptr', () ),
'plt___pctype_func':( 'int', None, 'cdecl', '*.__pctype_func', () ),
'plt___pioinfo':( | |
# repo: vvlink/TinyWebIO
__version__ = "TinyWebIO v0.0.8"
__author__ = "<EMAIL>"
__license__ = "http://unlicense.org"
import mpython, music, socket, network, json, gc
from ubinascii import hexlify
from time import time
from urequests import request
from machine import Timer, unique_id
class Request:
    """
    Minimal HTTP request parser for the TinyWebIO socket server.

    Fills ``method``, ``path`` and the ``form`` dict (tag/value/fmt) from a
    raw request read off ``client_socket``.
    """
    def __init__(self, socket=None):
        self.client_socket = socket
        self.method = None
        self.path = None
        self.form = {"tag": None, "value": None, "fmt": None}
    def _unquote(self, str):
        """
        Decode %XX percent-escapes in-place; invalid escapes (e.g. '%zz')
        are kept verbatim. (Parameter name shadows the builtin — kept for
        interface compatibility.)
        """
        res = []
        r = str.split('%')
        res = res + [ord(c) for c in r[0]]
        for i in range(1, len(r)):
            s = r[i]
            try:
                r_first = int(s[:2], 16)
                res.append(r_first)
                r_last = s[2:]
            except Exception:
                # Not a valid two-digit hex escape: keep the '%' literally.
                r_last = '%' + s
            if len(r_last) > 0:
                res = res + [ord(c) for c in r_last]
        return bytes(res).decode()
    def parse(self):
        """
        Read up to 4096 bytes from the client socket and extract method,
        path, and urlencoded form data ('+' is decoded as space). A body
        advertised via Content-Length but not yet received is fetched with
        a second recv().
        """
        gc.collect()
        try:
            req_data = self.client_socket.recv(4096).decode().replace('+', ' ')
            if not req_data:
                raise Exception('no data')
            req_datas = req_data.split('\r\n')
            firstline = self._unquote(req_datas[0])
            lastline = self._unquote(req_datas[-1])
            if not lastline:
                # Body missing from the first read: honour Content-Length.
                for item in req_datas[1:]:
                    if item.lower().strip().startswith('content-length'):
                        size = int(item.split(':')[-1])
                        if size > 0:
                            lastline = self.client_socket.recv(size).decode().strip()
                        break
            cmd = firstline.split()
            self.method = cmd[0]
            _path = cmd[1].split('?', 1)
            if len(_path) > 1:
                # Query-string parameters are treated like a POSTed form.
                self.method = 'POST'
                self._set_form(_path[-1])
            self.path = _path[0]
            if len(lastline) > 0:
                self._set_form(lastline)
        except Exception as e:
            print("request data parsed failure!:%s" % e)
    def _set_form(self, data):
        """Parse an urlencoded 'k=v&k=v' payload into self.form."""
        params = data.split('&')
        for p in params:
            # BUG FIX: split only on the first '=' so values that themselves
            # contain '=' (e.g. base64 data) no longer raise ValueError.
            k, v = p.split('=', 1)
            self.form[k] = v.strip('"')
class Response:
    """
    Minimal HTTP response builder/sender for the TinyWebIO server.
    make()/make_page() fill self.response_data; send() writes it out.
    """
    def __init__(self, socket=None):
        self.client_socket = socket
        self.response_data = None
        # Fixed status line prepended to every response.
        self.response_state = b'HTTP/1.0 200 OK\r\n'
        # 'html' selects a text/html content type; anything else gets JSON.
        self.data_type = None
    def make(self, data):
        """
        Build a response whose body is ``data`` serialized as JSON.
        The JSON branch adds Access-Control-Allow-Origin: * so browser
        clients on other origins can read it.
        """
        gc.collect()
        firstline = self.response_state
        if self.data_type == 'html':
            header = b'Content-Type: text/html; charset=utf-8\r\n'
        else:
            header = b'Content-Type: application/jsonrequest\r\nAccess-Control-Allow-Origin: *\r\n'
        # NOTE(review): bytes %-formatting with a str argument — presumably
        # valid on the target MicroPython firmware; raises TypeError on
        # CPython. Confirm on-device.
        body = b'\r\n%s' % json.dumps(data)
        self.response_data = firstline + header + body
    def make_page(self, title=None, content=None):
        """
        Build a small self-contained HTML page (inline CSS, home link)
        wrapping ``content`` under ``title``.
        """
        gc.collect()
        firstline = self.response_state
        header = b'Content-Type: text/html; charset=utf-8\r\n'
        template = b'''<html><head><title>{title}</title><meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0"><meta charset='utf-8'><link rel="icon" href="data:;base64,=">
<style>{style}</style></head><body class='c'><p><a href='/'>{home}</a></p>{content}</body></html>
'''
        style = "*+*{box-sizing:border-box;margin:.5em 0}@media(min-width:35em){.col{display:table-cell}.row{display:table;border-spacing:1em 0}}.row,.w-100{width:100%}.card:focus,hr{outline:0;border:solid #fa0}.card,pre{padding:1em;border:solid #eee}.btn:hover,a:hover{opacity:.6}.c{max-width:60em;padding:1em;margin:auto;font:1em/1.6 nunito}a{color:#fa0;text-decoration:none}.btn.primary{color:#fff;background:#fa0;border:solid #fa0}pre{overflow:auto}td,th{padding:1em;text-align:left;border-bottom:solid #eee}.btn{cursor:pointer;padding:1em;letter-spacing:.1em;text-transform:uppercase;background:#fff;border:solid;font:.7em nunito}"
        # NOTE(review): .format() is called on a *bytes* template — bytes has
        # no .format() on CPython (and stock MicroPython); this looks like a
        # latent bug. Verify make_page actually works on the target firmware.
        data = template.format(title=title, style=style, home=__version__, content=content)
        body = b'\r\n%s' % data
        self.response_data = firstline + header + body
    def send(self):
        """Write the prepared response to the client socket (best-effort)."""
        if self.response_data:
            try:
                self.client_socket.write(self.response_data)
            except Exception as e:
                print("response data sent failure!:%s" % e)
class Board:
    """
    Facade over the mPython board hardware: translates generic read/write
    "tags" (e.g. 'buttona', 'pind5', 'rgb1', 'music') into calls against
    the mpython / music APIs.
    """
    def __init__(self):
        self.scope = {}
        self.error = None
        # Unique board id derived from the MCU hardware id (hex string).
        self.id = hexlify(unique_id()).decode()
    def _get_real_tag(self, raw_tag):
        """
        Map a case/underscore-insensitive client tag onto the real mpython
        attribute name (e.g. 'buttona' -> 'button_a', 'pind5' ->
        'MPythonPin_digital_5'). Unknown tags are returned unchanged.
        """
        real_tag = raw_tag
        raw_tag_lower = raw_tag.lower().strip()
        for tag_name in ['button_a',
                         'button_b',
                         'touchPad_P',
                         'touchPad_Y',
                         'touchPad_T',
                         'touchPad_H',
                         'touchPad_O',
                         'touchPad_N']:
            real_tag_lower = tag_name.lower().replace('_', '')
            if real_tag_lower == raw_tag_lower:
                real_tag = tag_name
                return real_tag
        if raw_tag_lower.startswith('pin'):
            # 'pinDn' / 'pinAn': 4th char selects digital vs analog mode.
            pin_name = 'MPythonPin'
            pin_mode = 'digital' if raw_tag_lower[3] == 'd' else 'analog'
            pin_num = raw_tag_lower[4:]
            real_tag = '%s_%s_%s' % (pin_name, pin_mode, pin_num)
        return real_tag
    def read(self, raw_tag):
        """
        Read a sensor/pin value addressed by ``raw_tag``.
        Returns a memcached-style triple ["VALUE", raw_tag, value]; value is
        '' when the read fails. NOTE(review): most branches return strings
        but the 'time' branch returns a number — confirm clients cope.
        """
        gc.collect()
        tag = self._get_real_tag(raw_tag)
        value = ''
        try:
            if tag in ['button_a',
                       'button_b',
                       'touchPad_P',
                       'touchPad_Y',
                       'touchPad_T',
                       'touchPad_H',
                       'touchPad_O',
                       'touchPad_N',
                       'light',
                       'sound']:
                # Buttons expose .value(), analog sensors expose .read().
                sensor = getattr(mpython, tag)
                method = 'read' if hasattr(sensor, 'read') else 'value'
                value = '%d' % getattr(sensor, method)()
            elif tag == 'accelerometer':
                accelerometer = getattr(mpython, 'accelerometer')
                value = '%f,%f,%f' % (accelerometer.get_x(),
                                      accelerometer.get_y(),
                                      accelerometer.get_z())
            elif tag.startswith('MPythonPin'):
                pname, pmode, pnum = tag.split('_')
                pfun = getattr(mpython, pname)
                n = int(pnum)
                # Pin mode constants: 1 = digital in, 4 = analog in.
                if pmode == 'digital':
                    pin = pfun(n, 1)
                    value = '%d' % pin.read_digital()
                elif pmode == 'analog':
                    pin = pfun(n, 4)
                    value = '%d' % pin.read_analog()
            elif tag.startswith('id'):
                value = self.id
            elif tag.startswith('time'):
                value = time()
        except Exception as e:
            print("error for board data reading!:%s" % e)
        return ["VALUE", raw_tag, value]
    def write(self, raw_tag, raw_value):
        """
        Drive an actuator addressed by ``raw_tag`` with ``raw_value``
        (RGB LEDs, OLED display, buzzer, music, servos, output pins).
        Returns a memcached-style triple ["STORED", raw_tag, value].
        """
        gc.collect()
        tag = self._get_real_tag(raw_tag)
        value = raw_value.strip()
        try:
            if tag.startswith('rgb'):
                # 'rgbN' selects LED N (default 0, clamped to 2); value is "r,g,b".
                led = getattr(mpython, 'rgb')
                num = 0 if tag[3:] == '' else int(tag[3:])
                n = num if num < 3 and num >= 0 else 2
                r, g, b = value.split(',')
                led[n] = tuple([int(r), int(g), int(b)])
                led.write()
            elif tag.startswith('display') or tag.startswith('oled'):
                # value forms: "show:text,x,y", "fill:0|1", or bare text.
                oled = getattr(mpython, tag.strip())
                values = value.split(':', 1)
                method = values[0].strip()
                vdata = values[-1].strip()
                if method == 'show':
                    content, x, y = vdata.split(',')
                    oled.DispChar(content, int(x.strip()), int(y.strip()))
                elif method == 'fill':
                    oled.fill(int(vdata))
                else:
                    oled.DispChar(values[0].strip(), 0, 0)
                oled.show()
            elif tag.startswith('buzz'):
                # value forms: "on[:freq]", "off", or "freq[:...]".
                bz = getattr(mpython, 'buzz')
                method = value.strip()
                if method.startswith('on'):
                    param = method.split(':', 1)[-1].strip()
                    freq = 500 if param == 'on' else int(param)
                    bz.on(freq)
                elif method.startswith('off'):
                    bz.off()
                else:
                    freq = method.split(':', 1)[0].strip()
                    bz.on(int(freq))
            elif tag.startswith('music'):
                # value forms: "pitch:freq,duration" or a tune — either a
                # comma-separated note list or the name of a music builtin.
                method = value.strip()
                if method.startswith('pitch'):
                    param = method.split(':', 1)[-1].strip()
                    freq, duration = param.split(',')
                    music.pitch(int(freq), int(duration))
                else:
                    tune = value.upper().split(',')
                    if len(tune) == 1:
                        tune_builtin = tune[0].strip()
                        if hasattr(music, tune_builtin):
                            tune = getattr(music, tune_builtin)
                    music.play(tune)
            elif tag.startswith('servo'):
                # 'servoN' drives the servo on pin N to the given angle.
                servo = getattr(mpython, 'Servo')
                pin = int(tag[5:])
                param = value.strip()
                servo(pin).write_angle(int(param))
            elif tag.startswith('MPythonPin'):
                pname, pmode, pnum = tag.split('_')
                pfun = getattr(mpython, pname)
                n = int(pnum)
                val = int(value)
                # Pin mode constants: 2 = digital out, 3 = analog (PWM) out.
                if pmode == 'digital':
                    pin = pfun(n, 2)
                    pin.write_digital(val)
                elif pmode == 'analog':
                    pin = pfun(n, 3)
                    pin.write_analog(val)
            elif tag.startswith('client'):
                # NOTE(review): `appclient` is not defined in this chunk —
                # presumably a module-level client object defined elsewhere
                # in the file; confirm before relying on this branch.
                cmd = getattr(appclient, value.strip())
                cmd()
        except Exception as e:
            print(" error for board data writing!:%s" % e)
        return ["STORED", raw_tag, value]
class Server:
    """
    Tiny TCP/HTTP server that dispatches parsed requests to handlers
    registered via route(); can run callback-driven (start) or blocking
    (start_foreground).
    """
    def __init__(self):
        # Maps URL path -> handler(request, response, board).
        self.handlers = {}
        self.server_socket = None
        self.client_socket = None
    def _start_server(self, port=8888, accept_handler=None):
        """
        Bind and listen on all interfaces; optionally register an accept
        callback. Returns the server socket (None on failure).
        """
        self.stop()
        gc.collect()
        try:
            s = socket.socket()
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            ai = socket.getaddrinfo("0.0.0.0", port)
            server_addr = ai[0][-1]
            s.bind(server_addr)
            s.listen(5)
            if accept_handler:
                # NOTE(review): option 20 appears to be MicroPython's way of
                # registering an accept callback on the socket — confirm on
                # the target firmware.
                s.setsockopt(socket.SOL_SOCKET, 20, accept_handler)
            self.server_socket = s
            # Announce the reachable URL on whichever WLAN interface is up.
            for i in (network.AP_IF, network.STA_IF):
                iface = network.WLAN(i)
                if iface.active():
                    suc_info = 'tinywebio started on http://'
                    print("%s%s:%d" % (suc_info, iface.ifconfig()[0], port))
        except Exception as e:
            print("appserver error occurred!:%s" % e)
        return self.server_socket
    def start(self, port=8888):
        """Start in callback mode: connections are served via the handler."""
        self._start_server(port, self.connect_client)
    def start_foreground(self, port=8888):
        """Start in blocking mode, serving connections in an endless loop."""
        # NOTE: the local name shadows the imported `socket` module here.
        socket = self._start_server(port, None)
        if socket:
            while True:
                self.connect_client(socket)
    def route(self, url):
        """Decorator registering a handler function for ``url``."""
        def wrapper(func):
            self.handlers[url] = func
            return func
        return wrapper
    def connect_client(self, socket):
        """Accept one connection, serve it, and always close the socket."""
        csock, caddr = socket.accept()
        self.client_socket = csock
        request = Request(csock)
        response = Response(csock)
        board = Board()
        try:
            self.process_data(request, response, board)
        except Exception as e:
            print("client connection failure!:%s" % e)
        finally:
            # NOTE(review): self.client_socket and csock are the same object,
            # so this closes it twice — harmless but redundant.
            self.client_socket.close()
            csock.close()
    def process_data(self, request, response, board):
        """Parse the request and dispatch it to the matching route handler."""
        request.parse()
        path = request.path
        if path:
            handler = self.get_handler(path)
            if handler:
                handler(request, response, board)
    def get_handler(self, path):
        """Return the handler registered for ``path``, or None."""
        if path in self.handlers.keys():
            return self.handlers[path]
        else:
            return None
    def stop(self):
        """Close the server and any lingering client socket."""
        if self.server_socket:
            self.server_socket.close()
        if self.client_socket:
            self.client_socket.close()
class Remote:
    """Pub/sub helper speaking the TinywebDB form-encoded HTTP protocol."""

    def __init__(self, url, lasttask):
        # ``lasttask`` maps tag -> last value seen, to suppress repeats.
        self.lasttask = lasttask
        self.method = 'POST'
        self.headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        self.puburl = '%s/storeavalue' % url
        self.suburl = '%s/getvalue' % url

    def pub(self, tag, value):
        """Publish the most recent element of ``value`` under ``tag``."""
        payload = 'tag=%s&value=%s' % (tag, value[-1])
        request(self.method, self.puburl, data=payload, headers=self.headers)

    def sub(self, tag):
        """Fetch ``tag``; return its payload only when it changed since the
        previous poll, else None."""
        payload = 'tag=%s' % tag
        resp = request(self.method, self.suburl, data=payload,
                       headers=self.headers)
        latest = json.loads(resp.text)[-1].strip('"')
        result = None
        if latest != self.lasttask[tag]:
            self.lasttask[tag] = latest
            # payload format appears to be '<value>_<suffix>' -- TODO confirm
            result = latest.split('_')[0]
        return result
class Client:
    """Polling client: periodically runs registered tasks that push/pull
    tagged values to a remote TinywebDB server.

    The task loop is a generator (``exec``); a hardware Timer ``send``s into
    it every ``interval`` ms so the loop runs without blocking.
    """
    def __init__(self):
        self.url = None                     # remote server base URL
        self.topic = {'pub': [], 'sub': []}  # comma-split topic lists
        self.interval = 1000                # timer period, ms
        self.tim = None                     # machine.Timer driving the loop
        self.lasttask = {}                  # tag -> last value seen (for sub)
        self.currtask = None                # live generator from exec()
        self.tasks = {}                     # name -> task callable
    def task(self, name):
        # Decorator: register ``func`` under ``name`` to run every tick.
        def wrapper(func):
            self.tasks[name] = func
            return func
        return wrapper
    def setup(self, url=None, pub='', sub='', interval=1000):
        """Configure server URL, pub/sub topics (comma-separated strings)
        and polling interval."""
        self.url = 'http://%s' % url
        self.topic['pub'] = pub.split(',')
        self.topic['sub'] = sub.split(',')
        for _topic in self.topic['sub']:
            self.lasttask[_topic.strip()] = None
        self.interval = interval
        print('TinywebDB server address:%s/%s' % (self.url, hexlify(unique_id()).decode()))
    def exec(self):
        """Generator task loop: one pass over all registered tasks per
        ``next``/``send`` from the timer callback."""
        _board = Board()
        _url = '%s/%s' % (self.url, _board.id)
        _remote = Remote(_url, self.lasttask)
        _topic = self.topic
        _tasks = self.tasks
        while True:
            try:
                for _id in _tasks.keys():
                    _task = _tasks[_id]
                    if _task:
                        _task(_remote, _topic, _board)
            except Exception as e:
                print('executing task failure:%s' % e)
            gc.collect()
            yield
    def start(self):
        """Prime the task generator and arm the periodic timer (only when a
        URL and at least one topic are configured)."""
        self.stop()
        try:
            # settime()
            self.tim = Timer(1)
            self.currtask = self.exec()
            # Advance to the first yield so timer callbacks can use send().
            next(self.currtask)
            if len(self.url) > 0 and (len(self.topic['pub']) > 0 or len(self.topic['sub']) > 0):
                self.tim.init(period=self.interval, mode=Timer.PERIODIC, callback=lambda t: self.currtask.send(None))
        except Exception as e:
            print('starting client failure:%s' % e)
    def stop(self):
        """Tear down the generator and timer; reclaim memory."""
        if self.currtask:
            self.currtask.close()
        if self.tim:
            self.tim.deinit()
        gc.collect()
# Module-level singletons used by the route decorators / board hooks below.
appclient = Client()
appserver = Server()
gc.collect()
@appserver.route('/')
def index(request, response, board):
    """Landing page listing the available endpoints (labels are Chinese:
    feature list / write data / read data)."""
    title = '%s' % __version__
    content = '''
<p>\u529f\u80fd\u5217\u8868
<ul>
<li><a href="/storeavalue">/storeavalue</a>: \u5199\u5165\u6570\u636e</li>
<li><a href="/getvalue">/getvalue</a>: \u8bfb\u53d6\u6570\u636e</li>
</ul>
</p>
'''
    response.make_page(title, content)
    response.send()
@appserver.route('/getvalue')
def getValue(request, response, board):
if request.method == 'POST':
tag = request.form['tag']
fmt = request.form['fmt']
result = board.read(tag)
response.data_type = fmt
response.make(result)
response.send()
elif request.method == 'GET':
title = '\u8bfb\u53d6\u6570\u636e'
content = '''
<form action="/getvalue" method="post" enctype=application/x-www-form-urlencoded target="result">
<label for="tag">\u6807\u7b7e</label><input type="text" name="tag" class="card w-100" | |
#!/usr/bin/python
import os
import sys
import copy
import logging
import subprocess
import argparse
import pprint
import shutil
from yggdrasil.constants import LANGUAGES, LANGUAGES_WITH_ALIASES
logger = logging.getLogger(__name__)
package_dir = os.path.dirname(os.path.abspath(__file__))
def githook():
    r"""Git hook to determine if the Github workflow need to be
    re-generated."""
    # Collect the staged file names for this commit.
    try:
        staged = subprocess.check_output(
            ["git", "diff-index", "--cached", "--name-only",
             "--diff-filter=ACMRTUXB", "HEAD"],
            stderr=subprocess.PIPE).decode('utf-8').splitlines()
    except subprocess.CalledProcessError:
        return 1
    # Only regenerate when the workflow template itself was modified.
    if os.path.join('utils', 'test-install-base.yml') not in staged:
        return 0
    try:
        gitdir = subprocess.check_output(
            ["git", "rev-parse", "--git-dir"],
            stderr=subprocess.PIPE).decode('utf-8').strip()
    except subprocess.CalledProcessError:
        return 1
    workflow_dir = os.path.join(gitdir, '..', '.github', 'workflows')
    generate_gha_workflow(args=[], gitdir=gitdir)
    # Stage the regenerated workflow so it rides along with the commit.
    try:
        subprocess.run(
            ['git', 'add',
             os.path.join(workflow_dir, 'test-install.yml')],
            check=True,
            stderr=subprocess.STDOUT,
            stdout=subprocess.PIPE)
    except subprocess.CalledProcessError:
        return 1
    return 0
class ArgumentTuple(tuple):
    """2-tuple ``(args, kwargs)`` destined for ``parser.add_argument``."""

    def __new__(cls, args, kwargs):
        # BUG FIX: the first parameter of __new__ is the class (``cls``),
        # not an instance, and the class must not be hard-coded -- otherwise
        # subclasses get instances of the wrong type.
        return tuple.__new__(cls, (args, kwargs))
class ConditionalArgumentTuple(ArgumentTuple):
    """ArgumentTuple carrying inclusion ``conditions`` (e.g. ``{'os': [...]}``)
    checked by ``SubCommand.add_argument_to_parser``."""

    def __new__(cls, args, kwargs, conditions=None):
        # BUG FIX: constructing via a hard-coded ``ArgumentTuple.__new__``
        # produced instances whose type was ArgumentTuple rather than
        # ConditionalArgumentTuple; build through ``cls`` instead.
        out = tuple.__new__(cls, (args, kwargs))
        out.conditions = conditions if conditions is not None else {}
        return out
class ArgumentBase(object):
    """Common holder for declarative argument specs: a list of argument
    descriptions, optional inclusion conditions, and pass-through kwargs."""

    def __init__(self, arguments=None, conditions=None, **kwargs):
        # Avoid mutable defaults by materializing fresh containers here.
        self.arguments = [] if arguments is None else arguments
        self.conditions = {} if conditions is None else conditions
        self.kwargs = kwargs
class ArgumentParser(ArgumentBase):
    """Declarative description of one sub-parser: its ``add_parser`` kwargs
    plus the arguments it should receive (see ArgumentSubparser)."""
    pass
class ArgumentGroup(ArgumentBase):
    """Argument container rendered as an argparse group; ``exclusive``
    selects a mutually-exclusive group over a plain one."""

    def __init__(self, exclusive=False, **kwargs):
        self.exclusive = exclusive
        super().__init__(**kwargs)
class ArgumentSubparser(ArgumentBase):
    """Argument container describing an argparse sub-parser collection,
    with one ArgumentParser description per sub-parser."""

    def __init__(self, parsers=None, **kwargs):
        self.parsers = [] if parsers is None else parsers
        super().__init__(**kwargs)
class SubCommandMeta(type):
    r"""Meta class for subcommands."""
    def __call__(cls, *args, **kwargs):
        # Calling the class (e.g. ``yggrun(argv)``) dispatches to the
        # classmethod ``call`` instead of constructing an instance.
        return cls.call(*args, **kwargs)
def ReplacementWarning(old, new):
    """Emit a FutureWarning telling users to migrate from *old* to *new*."""
    import warnings
    message = ("'%s' will soon be removed. Use '%s' instead.") % (old, new)
    warnings.warn(message, FutureWarning)
class SubCommand(metaclass=SubCommandMeta):
    r"""Class for handling subcommands so that they can be run
    as subcommands or individually."""
    name = None  # CLI name of the subcommand
    help = None  # help text passed to argparse
    arguments = []  # declarative specs consumed by add_argument_to_parser
    allow_unknown = False  # if True, unrecognized CLI options are collected
    @classmethod
    def parse_args(cls, parser, args=None, allow_unknown=False):
        """Parse ``args`` with ``parser`` and normalize language selections.

        ``args`` may already be a parsed ``argparse.Namespace``, in which
        case it is returned unchanged. When unknown options are allowed they
        are stashed on ``args._extra_commands``. Empty or 'all' language
        lists expand to every supported language.
        """
        # TODO: Check choices for positional arguments that can
        # have more than one element
        if isinstance(args, argparse.Namespace):
            return args
        if cls.allow_unknown or allow_unknown:
            args, extra = parser.parse_known_args(args=args)
            args._extra_commands = extra
        else:
            args = parser.parse_args(args=args)
        for k in ['language', 'languages']:
            v = getattr(args, k, None)
            if isinstance(v, list):
                # Merge values supplied via the companion '<k>_flag' option.
                v_flag = getattr(args, k + '_flag', None)
                if isinstance(v_flag, list):
                    v.extend(v_flag)
                if (len(v) == 0) or ('all' in v):
                    setattr(args, k, LANGUAGES['all'])
                    args.all_languages = True
        return args
    @classmethod
    def func(cls, args):  # pragma: debug
        """Entry point each concrete subcommand must implement."""
        raise NotImplementedError
    @classmethod
    def call(cls, args=None, **kwargs):
        """Build the parser, parse ``args`` and run this subcommand."""
        parser = cls.get_parser(args=args)
        args = cls.parse_args(parser, args=args)
        return cls.func(args, **kwargs)
    @classmethod
    def get_parser(cls, args=None):
        """Return an ``argparse.ArgumentParser`` populated with this
        subcommand's arguments."""
        parser = argparse.ArgumentParser(cls.help)
        cls.add_arguments(parser, args=args)
        return parser
    @classmethod
    def add_argument_to_parser(cls, parser, x):
        """Recursively add an argument spec ``x`` (list, tuple, group or
        subparser description) to ``parser``."""
        if hasattr(x, 'conditions') and x.conditions:
            # Skip arguments whose conditions (currently only 'os') are not
            # met on this platform.
            for k, v in x.conditions.items():
                if k == 'os':
                    from yggdrasil import platform
                    if platform._platform not in v:
                        return
                else:  # pragma: debug
                    raise NotImplementedError(k)
        if isinstance(x, list):
            for xx in x:
                cls.add_argument_to_parser(parser, xx)
        elif isinstance(x, (tuple, ArgumentTuple,
                            ConditionalArgumentTuple)):
            assert(len(x) == 2)
            args, kwargs = x[:]
            try:
                parser.add_argument(*args, **kwargs)
            except ValueError:
                # The 'extend' action is only available on newer Pythons
                # (added in argparse for 3.8); degrade to 'append'.
                if kwargs.get('action', None) == 'extend':
                    kwargs['action'] = 'append'
                    kwargs.pop('nargs', None)
                    parser.add_argument(*args, **kwargs)
                else:
                    raise
        elif isinstance(x, ArgumentGroup):
            if x.exclusive:
                group = parser.add_mutually_exclusive_group(**x.kwargs)
            else:
                group = parser.add_argument_group(**x.kwargs)
            cls.add_argument_to_parser(group, x.arguments)
        elif isinstance(x, ArgumentSubparser):
            subparsers = parser.add_subparsers(**x.kwargs)
            for xx in x.parsers:
                isubparser = subparsers.add_parser(**xx.kwargs)
                # Shared arguments first, then parser-specific ones.
                cls.add_argument_to_parser(isubparser, x.arguments)
                cls.add_argument_to_parser(isubparser, xx.arguments)
        else:
            raise NotImplementedError("type(x) = %s" % type(x))
    @classmethod
    def add_arguments(cls, parser, args=None):
        """Add this subcommand's declared arguments to ``parser``."""
        cls.add_argument_to_parser(parser, cls.arguments)
    @classmethod
    def add_subparser(cls, subparsers, args=None):
        """Register this subcommand on an argparse subparsers collection."""
        parser = subparsers.add_parser(cls.name, help=cls.help)
        cls.add_arguments(parser, args=args)
        parser.set_defaults(func=cls.func)
class main(SubCommand):
    r"""Runner for yggdrasil CLI."""
    name = "yggdrasil"
    help = (
        "Command line interface for the yggdrasil package.")
    arguments = []
    @classmethod
    def get_parser(cls, **kwargs):
        """Build the top-level parser: --version plus one subparser per
        registered subcommand (index kept on ``parser._ygg_subparsers``)."""
        from yggdrasil import __version__ as ver
        parser = super(main, cls).get_parser(**kwargs)
        parser.add_argument('--version', action='version',
                            version=('yggdrasil %s' % ver))
        subparsers = parser.add_subparsers(title='subcommands',
                                           dest='subcommand')
        parser._ygg_subparsers = {}
        for x in [yggrun, ygginfo, validate_yaml,
                  yggcc, yggcompile, yggclean,
                  ygginstall, update_config,
                  regen_metaschema, regen_schema,
                  yggmodelform, yggdevup, run_tsts,
                  timing_plots, generate_gha_workflow]:
            x.add_subparser(subparsers, args=kwargs.get('args', None))
            parser._ygg_subparsers[x.name] = x
        return parser
    @classmethod
    def parse_args(cls, parser, args=None, **kwargs):
        """Parse top-level args, then delegate to the chosen subcommand's
        own ``parse_args`` for its extra normalization."""
        if args is None:
            args = sys.argv[1:]
        if isinstance(args, list) and ('test' in args):
            # presumably the 'test' subcommand forwards arbitrary extra
            # options to the test runner -- TODO confirm
            kwargs['allow_unknown'] = True
        args = super(main, cls).parse_args(parser, args=args, **kwargs)
        if args.subcommand:
            args = parser._ygg_subparsers[args.subcommand].parse_args(
                parser, args=args, **kwargs)
        return args
    @classmethod
    def func(cls, args):
        """Dispatch to the handler installed via ``set_defaults(func=...)``."""
        args.func(args)
class yggrun(SubCommand):
    r"""Start a run."""
    name = "run"
    help = "Run an integration."
    arguments = [
        (('yamlfile', ),
         {'nargs': '+',
          'help': "One or more yaml specification files."})]

    @classmethod
    def add_arguments(cls, parser, **kwargs):
        """Add the yaml positional plus the config-file options."""
        from yggdrasil import config
        super().add_arguments(parser, **kwargs)
        config.get_config_parser(parser, skip_sections='testing')

    @classmethod
    def func(cls, args):
        """Launch the integration described by the given yaml files."""
        from yggdrasil import config, runner
        # Use the bare program name as the debug-log prefix.
        program_name = sys.argv[0].split(os.path.sep)[-1]
        with config.parser_config(args):
            runner.run(args.yamlfile, ygg_debug_prefix=program_name,
                       production_run=args.production_run)
class ygginfo(SubCommand):
r"""Print information about yggdrasil installation."""
name = 'info'
help = ('Display information about the current yggdrasil '
'installation.')
arguments = [
(('--no-languages', ),
{'action': 'store_true', 'dest': 'no_languages',
'help': ('Don\'t print information about individual '
'languages.')}),
(('--no-comms', ),
{'action': 'store_true', 'dest': 'no_comms',
'help': ('Don\'t print information about individual '
'comms.')}),
(('--verbose', '-v'),
{'action': 'store_true',
'help': ('Increase the verbosity of the printed '
'information.')}),
ArgumentSubparser(
title='tool', dest='tool',
description='Compilation tool types to get info about.',
arguments=[
(('language', ),
{'choices': LANGUAGES_WITH_ALIASES['compiled'],
'type': str.lower,
'help': 'Language to get tool information for.'}),
(('--toolname', ),
{'default': None,
'help': ('Name of tool to get information for. '
'If not provided, information for the '
'default tool will be returned.')}),
(('--flags', ),
{'action': 'store_true',
'help': ('Display the flags that yggdrasil will '
' pass to the tool when it is called.')}),
(('--fullpath', ),
{'action': 'store_true',
'help': 'Get the full path to the tool exectuable.'})],
parsers=[
ArgumentParser(
name='compiler',
help='Get information about a compiler.'),
ArgumentParser(
name='linker',
help='Get information about a linker.',
arguments=[
(('--library', ),
{'action': 'store_true',
'help': 'Get flags for linking a library.'})]),
ArgumentParser(
name='archiver',
help='Get information about a archiver.')])]
@classmethod
def func(cls, args, return_str=False):
from yggdrasil import platform
from yggdrasil.components import import_component
if args.tool:
drv = import_component('model', args.language)
if args.flags:
if args.tool == 'compiler':
flags = drv.get_compiler_flags(
for_model=True, toolname=args.toolname,
dry_run=True, dont_link=True)
if '/link' in flags: # pragma: windows
flags = flags[:flags.index('/link')]
for k in ['-c']:
if k in flags:
flags.remove(k)
else:
if args.tool == 'archiver':
libtype = 'static'
elif getattr(args, 'library', False):
libtype = 'shared'
else:
libtype = 'object'
flags = drv.get_linker_flags(
for_model=True, toolname=args.toolname,
dry_run=True, libtype=libtype)
out = ' '.join(flags)
if platform._is_win: # pragma: windows:
out = out.replace('/', '-')
out = out.replace('\\', '/')
elif args.fullpath:
out = drv.get_tool(args.tool).get_executable(full_path=True)
else:
out = drv.get_tool(args.tool, return_prop='name')
if return_str:
return out
print(out)
return
from yggdrasil import tools, config, __version__
lang_list = tools.get_installed_lang()
comm_list = tools.get_installed_comm()
comm_lang_list = []
prefix = ' '
curr_prefix = ''
vardict = [
('Location', os.path.dirname(__file__)),
('Version', __version__),
('Languages', ', '.join(lang_list)),
('Communication Mechanisms',
', '.join(tools.get_installed_comm())),
('Default Comm Mechanism', tools.get_default_comm()),
('Config File', config.usr_config_file)]
try:
# Add language information
if not args.no_languages:
# Install languages
vardict.append(('Installed Languages:', ''))
curr_prefix += prefix
for lang in sorted(lang_list):
drv = import_component('model', lang)
vardict.append(
(curr_prefix + '%s:' % lang.upper(), ''))
if not drv.comms_implicit:
comm_lang_list.append(lang)
curr_prefix += prefix
exec_name = drv.language_executable()
if exec_name:
if not os.path.isabs(exec_name):
exec_name = shutil.which(exec_name)
vardict.append((curr_prefix + 'Location',
exec_name))
vardict.append((curr_prefix + 'Version',
drv.language_version()))
curr_prefix = curr_prefix.rsplit(prefix, 1)[0]
curr_prefix = curr_prefix.rsplit(prefix, 1)[0]
# Not installed languages
vardict.append(("Languages Not Installed:", ''))
curr_prefix += prefix
for lang in tools.get_supported_lang():
if lang in lang_list:
continue
drv = import_component('model', lang)
vardict.append(
(curr_prefix + '%s:' % lang.upper(), ''))
curr_prefix += prefix
vardict.append(
(curr_prefix + "Language Installed",
drv.is_language_installed()))
if drv.executable_type == 'compiler':
curr_prefix += prefix
vardict += [
(curr_prefix
+ ("%s Installed (%s)"
% (x.title(),
getattr(drv, 'default_%s' % x, None))),
drv.is_tool_installed(x))
for x in ['compiler', 'linker', 'archiver']]
curr_prefix = curr_prefix.rsplit(prefix, 1)[0]
vardict.append(
(curr_prefix + "Base Languages Installed",
drv.are_base_languages_installed()))
missing = []
if not drv.are_base_languages_installed(
missing=missing):
vardict.append(
(curr_prefix
+ "Base Languages Not Installed",
missing))
vardict.append(
(curr_prefix + "Dependencies Installed",
drv.are_dependencies_installed()))
if not drv.are_dependencies_installed():
vardict.append(
(curr_prefix
+ "Dependencies Not Installed",
[b for b in drv.interface_dependencies if
(not drv.is_library_installed(b))]))
vardict.append(
(curr_prefix + "Interface Installed",
drv.is_interface_installed()))
vardict.append((curr_prefix + "Comm Installed",
drv.is_comm_installed()))
vardict.append((curr_prefix + "Configured",
drv.is_configured()))
vardict.append((curr_prefix + "Disabled",
drv.is_disabled()))
curr_prefix = curr_prefix.rsplit(prefix, 1)[0]
curr_prefix = curr_prefix.rsplit(prefix, 1)[0]
# Add comm information
if not args.no_comms:
# Fully installed comms
vardict.append(
('Comms Available for All Languages:', ''))
curr_prefix += prefix
for comm in sorted(comm_list):
cmm = import_component('comm', comm)
vardict.append(
(curr_prefix + '%s' % comm.upper(), ''))
curr_prefix = curr_prefix.rsplit(prefix, 1)[0]
# Partially installed comms
vardict.append(
('Comms Available for Some/No Languages:', ''))
curr_prefix += prefix
for comm in tools.get_supported_comm():
if comm in comm_list:
continue
cmm = import_component('comm', comm)
vardict.append(
(curr_prefix + '%s:' % comm.upper(), ''))
curr_prefix += prefix
avail | |
import sys
import random
import shutil
import copy
from math import ceil, floor
from os import path, remove, mkdir
from time import sleep
from tkinter import Tk
from tkinter.filedialog import askopenfilename
# the same folder where this program is stored
if getattr(sys, 'frozen', False):
    mainFolder = path.dirname(sys.executable)  # EXE (executable) file
else:
    mainFolder = path.dirname(path.realpath(__file__))  # PY (source) file
# Make AMR_support (shipped next to this file) importable.
sys.path.append(mainFolder)
# Destination folder for generated files -- presumably used by
# generateSeed; TODO confirm.
outputFolder = path.join(mainFolder, "output")
import AMR_support
"""
Ability Values:
00 - Nothing
01 - Fire
02 - Ice
03 - Burning
04 - Wheel
05 - Parasol
06 - Cutter
07 - Beam
08 - Stone
09 - Bomb
0A - Throw
0B - Sleep
0C - Cook
0D - Laser
0E - UFO
0F - Spark
10 - Tornado
11 - Hammer
12 - Sword
13 - Cupid
14 - Fighter
15 - Magic
16 - Smash
17 - Mini
18 - Crash
19 - Missile
1A - Master
The remaining values are either some sort of bug/crash, a mix (like when you inhale two abilities at once), or a duplicate.
"""
# Human-readable ability names; list index == in-game ability value
# (0x00 "Nothing" through 0x1A "Master").
abilities = [
    "Nothing",
    "Fire",
    "Ice",
    "Burning",
    "Wheel",
    "Parasol",
    "Cutter",
    "Beam",
    "Stone",
    "Bomb",
    "Throw",
    "Sleep",
    "Cook",
    "Laser",
    "UFO",
    "Spark",
    "Tornado",
    "Hammer",
    "Sword",
    "Cupid",
    "Fighter",
    "Magic",
    "Smash",
    "Mini",
    "Crash",
    "Missile",
    "Master"
]
# Enemy name -> [[ROM addresses of the ability byte], vanilla ability value].
# NOTE(review): the "<NAME>" keys look like redacted names -- confirm
# against the upstream project.
normalEnemies = {
    "Bang-Bang" : [[0x351AB6], 0x19],
    "Batty" : [[0x351A86], 0x00],
    "Big Waddle Dee" : [[0x3517E6], 0x00],
    "Blipper" : [[0x35167E], 0x00],
    "Bomber" : [[0x351A0E], 0x18],
    "Boxin" : [[0x3519C6], 0x14],
    "<NAME>" : [[0x351666], 0x00],
    "Chip" : [[0x35170E], 0x00],
    "Cookin" : [[0x3519DE], 0x0C],
    "Cupie" : [[0x35176E], 0x13],
    "Droppy" : [[0x351AFE, 0x3527D6], 0x00], # the second address is the one spawned by Wiz
    "Flamer" : [[0x351816], 0x03],
    "Foley" : [[0x35197E], 0x09],
    "<NAME>" : [[0x351A3E], 0x08],
    "Glunk" : [[0x351696], 0x00],
    # "Golem" : [[0x351966], 0x08], # mostly-unknown; this only covers the Golems spawned by King Golem; address search showed the same thing for all types of Golems
    "Haley" : [[0x35173E], 0x00],
    "Heavy Knight" : [[0x351A26], 0x12],
    "Hot Head" : [[0x35182E], 0x01],
    "Jack" : [[0x3517CE], 0x00],
    "Laser Ball" : [[0x351846], 0x0D],
    "Leap" : [[0x3517B6], 0x00],
    "Metal Guardian" : [[0x351A56], 0x0D],
    "Minny" : [[0x3519F6], 0x17],
    "Noddy" : [[0x35191E], 0x0B],
    # "Parasol Waddle Dee" : [[0x??????], 0x05], # unknown; address search showed it as Parasol object
    "Pengy" : [[0x35185E], 0x02],
    "Prank" : [[0x351B16], 0x00],
    "Rocky" : [[0x351876], 0x08],
    "Roly-Poly" : [[0x351756], 0x00],
    # "<NAME>" : [[0x??????], 0x00], # unknown
    "Shooty" : [[0x351996], 0x00],
    "<NAME>" : [[0x35188E], 0x06],
    "Snapper" : [[0x352536], 0x12],
    "Snooter" : [[0x3516F6], 0x00],
    "Soarar" : [[0x351726], 0x00],
    "Sparky" : [[0x3518A6], 0x0F],
    "Squishy" : [[0x3516AE], 0x00], # Did you know there's only one of these in the entire game? And it's really well-hidden
    "Sword Knight" : [[0x3518BE], 0x12],
    "Twister" : [[0x3518EE], 0x10],
    "UFO" : [[0x3518D6], 0x0E],
    "Waddle Dee" : [[0x35164E, 0x351B76], 0x00], # the second address is the mini-boss version
    "Waddle Doo" : [[0x3517FE], 0x07],
    "Wheelie" : [[0x351906], 0x04]
}
# Mini-boss name -> [[ROM addresses], vanilla ability value].
miniBosses = {
    "Batafire" : [[0x351BD6], 0x03],
    "Bombar" : [[0x351C36], 0x19],
    "Bonkers" : [[0x351BA6], 0x11],
    "Box Boxer" : [[0x351BEE], 0x14],
    "Boxy" : [[0x351C06], 0x15],
    "Master Hand" : [[0x351C1E], 0x16],
    "Mr. Frosty" : [[0x351B8E], 0x02],
    "Phan Phan" : [[0x351BBE], 0x0A]
}
# Inhalable projectile/object name -> [[ROM addresses], vanilla ability value].
objects = {
    "Batafire (Fireball)" : [[0x352566], 0x00],
    "Bombar (Bomb)" : [[0x3526FE], 0x09],
    "Bombar (Missile)" : [[0x352716], 0x19],
    "Bonkers (Large Rock)" : [[0x352626], 0x00],
    "Bonkers (Small Rock)" : [[0x35260E], 0x00],
    "Box Boxer (Energy Blast)" : [[0x35272E], 0x00],
    # "Boxy (Bomb)" : [[0x??????], 0x09], # unknown
    "Boxy (Present)" : [[0x3626CE], 0x00],
    "Dark Mind (Blue Star)" : [[0x35296E], 0x02],
    "Dark Mind (Bomb)" : [[0x351ACE], 0x18],
    "Dark Mind (Purple Star)" : [[0x352986], 0x0F],
    "Dark Mind (Red Star)" : [[0x352956], 0x01],
    "Enemy Star" : [[0x3525C6], 0x00], # the thing that's spawned by basically every boss/mini-boss
    "King Golem (Rock)" : [[0x3524D6], 0x00],
    "Master/Crazy Hand (Bullet)" : [[0x35290E], 0x03],
    # "Master/Crazy Hand (Star)" : [[0x??????], 0x08], # unknown; address search showed it as normal enemy star
    "Moley (Bomb)" : [[0x3528AE], 0x09],
    "Moley (Large Rock)" : [[0x3528C6], 0x08],
    "Moley (Oil Drum)" : [[0x3528DE], 0x03],
    "Moley (Screw)" : [[0x35287E], 0x00],
    "Moley (Small Rock)" : [[0x352866], 0x00],
    "Moley (Spiny)" : [[0x3528F6], 0x06],
    "Moley (Tire)" : [[0x352896], 0x04],
    "Mr. Frosty (Large Ice)" : [[0x3525F6], 0x00],
    "Mr. Frosty (Small Ice)" : [[0x3525DE], 0x00],
    "Parasol" : [[0x35257E], 0x05],
    "Phan Phan (Apple)" : [[0x35263E], 0x00],
    "Prank (Bomb)" : [[0x352686], 0x09],
    "Prank (Fireball)" : [[0x352656], 0x01],
    "Prank (Ice)" : [[0x35266E], 0x02],
    "Titan Head (Missile)" : [[0x35284E], 0x00],
    "Wiz (Balloon)" : [[0x352776], 0x00],
    "Wiz (Bomb)" : [[0x35278E], 0x09],
    "Wiz (Car)" : [[0x35275E], 0x04],
    "Wiz (Cloud)" : [[0x3527A6], 0x0F],
    "Wiz (Football)" : [[0x352746], 0x00],
    "Wiz (Poison Apple)" : [[0x3527BE], 0x0B]
}
def main():
    """Program entry point: open the GUI."""
    # open the GUI
    # vp_start_gui is presumably provided by the GUI module -- TODO confirm
    vp_start_gui()
def randomize():
    """Validate the GUI settings (read through AMR_support) and generate
    one or more randomized ROMs.

    Returns:
        list: ``[success (bool), message (str)]`` for display in the GUI.
    """
    global sourceRom
    global numSeedsFinished
    global currSeed
    global abilityDistributionType
    global basicEnemyBehaviorType
    global noneAbilityChanceBasicEnemy
    global noneAbilityChanceNonBasicEnemy
    global includeMiniBosses
    global includeMinnyAndWheelie
    global objectRandomizationType
    global noneAbilityChanceBasicObject
    global noneAbilityChanceNonBasicObject
    global generateAbilityLog
    sourceRom = AMR_support.sourceRom.get()
    try:
        # except Exception (not bare except) so Ctrl-C etc. still propagate
        assert path.isfile(sourceRom)
    except Exception:
        return [False, "Invalid ROM input."]
    generateAbilityLog = int(AMR_support.generateAbilityLog.get())
    numSeedsFinished = 0
    if int(AMR_support.useSeed.get()) == 1:
        # Explicit 10-char base-36 seed: the first 5 characters encode the
        # settings, the last 5 the RNG seed.
        try:
            assert len(AMR_support.seedInput.get()) == 10
            currSeed = int(AMR_support.seedInput.get(), 36)
            (abilityDistributionType, basicEnemyBehaviorType,
             noneAbilityChanceBasicEnemy, noneAbilityChanceNonBasicEnemy,
             includeMiniBosses, includeMinnyAndWheelie,
             objectRandomizationType, noneAbilityChanceObject) = decodeSeed(
                AMR_support.seedInput.get()[:5], [2,2,60,60,1,1,2,30], 36)
            # Decoded values are zero-based; shift to the one-based options.
            abilityDistributionType += 1
            basicEnemyBehaviorType += 1
            includeMiniBosses += 1
            includeMinnyAndWheelie += 1
            objectRandomizationType += 1
            assert 1 <= abilityDistributionType <= 3
            assert 1 <= basicEnemyBehaviorType <= 3
            assert 0 <= noneAbilityChanceBasicEnemy <= 60
            assert 0 <= noneAbilityChanceNonBasicEnemy <= 60
            assert 1 <= includeMiniBosses <= 2
            assert 1 <= includeMinnyAndWheelie <= 2
            assert 1 <= objectRandomizationType <= 3
            assert 0 <= noneAbilityChanceObject <= 30
            noneAbilityChanceBasicObject = noneAbilityChanceObject
            noneAbilityChanceNonBasicObject = noneAbilityChanceObject
        except Exception:
            return [False, "Invalid Seed."]
        if not generateSeed(currSeed):
            return [False, "Failed to generate the given seed."]
        return [True, "Successfully generated the given seed."]
    else:
        # No explicit seed: read each option from the GUI widgets.
        adtDict = {"Pure Random":1, "By Enemy Grouping":2, "By Ability Frequency":3}
        abilityDistributionType = adtDict.get(AMR_support.abilityDistributionType.get())
        if abilityDistributionType == 1:
            bebDict = {"All Random":1, "Basic Enemies Random":2, "No Random (Unchanged)":3}
            basicEnemyBehaviorType = bebDict.get(AMR_support.basicEnemyBehaviorType.get())
        else:
            basicEnemyBehaviorType = 3
        if basicEnemyBehaviorType != 3:
            noneAbilityChanceEnemy = int(AMR_support.noneAbilityChanceEnemy.get())
            noneAbilityChanceEnemy = min(ceil(noneAbilityChanceEnemy/1.67), 60) # this value rounds to increments of 1.67%; this is to reduce the length of the seed
            noneAbilityChanceBasicEnemy = noneAbilityChanceEnemy
            noneAbilityChanceNonBasicEnemy = noneAbilityChanceEnemy if basicEnemyBehaviorType == 1 else 30
        else:
            noneAbilityChanceBasicEnemy = 0
            noneAbilityChanceNonBasicEnemy = 60
        includeMiniBosses = int(AMR_support.includeMiniBosses.get())
        includeMinnyAndWheelie = int(AMR_support.includeMinnyAndWheelie.get())
        ortDict = {"Yes":1, "Basic Objects Only":2, "No":3}
        objectRandomizationType = ortDict.get(AMR_support.objectRandomizationType.get())
        if objectRandomizationType != 1:
            noneAbilityChanceObject = 30 # unused but needed for seed calculation
            noneAbilityChanceBasicObject = 0
            noneAbilityChanceNonBasicObject = 30
        else:
            noneAbilityChanceObject = int(AMR_support.noneAbilityChanceObject.get())
            noneAbilityChanceObject = min(ceil(noneAbilityChanceObject/3.34), 30) # this value rounds to increments of 3.34%; this is to reduce the length of the seed
            noneAbilityChanceBasicObject = noneAbilityChanceObject
            noneAbilityChanceNonBasicObject = noneAbilityChanceObject
        try:
            assert 1 <= abilityDistributionType <= 3
            assert 1 <= basicEnemyBehaviorType <= 3
            assert 0 <= noneAbilityChanceBasicEnemy <= 60
            assert 0 <= noneAbilityChanceNonBasicEnemy <= 60
            assert 1 <= includeMiniBosses <= 2
            assert 1 <= includeMinnyAndWheelie <= 2
            assert 1 <= objectRandomizationType <= 3
            assert 0 <= noneAbilityChanceObject <= 30
            settingsSeed = encodeSeed([abilityDistributionType-1, basicEnemyBehaviorType-1, noneAbilityChanceBasicEnemy, noneAbilityChanceNonBasicEnemy, includeMiniBosses-1, includeMinnyAndWheelie-1, objectRandomizationType-1, noneAbilityChanceObject], [2,2,60,60,1,1,2,30])[0]
            seedPluralString = " seed"
        except Exception:
            return [False, "Invalid settings."]
        try:
            numSeeds = int(AMR_support.numSeeds.get())
            # BUG FIX: this range check was previously a bare expression
            # whose result was discarded, so out-of-range counts slipped
            # through; the assert makes it actually reject them.
            assert 1 <= numSeeds <= 20
        except Exception:
            return [False, "Please select a value between 1 and 20 for # of seeds."]
        for i in range(numSeeds):
            # Full seed = settings (high digits) + random part (low 5 base-36 digits).
            maxVal = int("ZZZZZ", 36)
            genSeed = random.randint(0, maxVal)
            currSeed = (settingsSeed*(maxVal+1)) + genSeed
            if not generateSeed(currSeed):
                if numSeedsFinished > 1:
                    seedPluralString = " seeds"
                if numSeeds > 0:
                    # NOTE(review): always true here (numSeeds >= 1);
                    # probably ``numSeedsFinished > 0`` was intended --
                    # left unchanged to preserve behavior.
                    return [False, "Successfully generated "+str(numSeedsFinished)+seedPluralString+", but then something went wrong."]
                else:
                    return [False, "Failed to generate"+seedPluralString+"."]
        if numSeedsFinished > 1:
            seedPluralString = " seeds"
        return [True, "Successfully generated "+str(numSeeds)+seedPluralString+"."]
# unused
def main_cmd_line():
    """Interactive command-line (non-GUI) version of the randomizer flow.

    Prompts for a ROM and all settings, then generates one or more seeds.
    Marked unused upstream; the GUI path (``main``/``randomize``) is the
    normal entry point.
    """
    global sourceRom
    global numSeedsFinished
    global currSeed
    global abilityDistributionType
    global basicEnemyBehaviorType
    global noneAbilityChanceBasicEnemy
    global noneAbilityChanceNonBasicEnemy
    global includeMiniBosses
    global includeMinnyAndWheelie
    global objectRandomizationType
    global noneAbilityChanceBasicObject
    global noneAbilityChanceNonBasicObject
    global generateAbilityLog
    print("\n")
    print("---------------------------------------------")
    print("| Welcome to the Amazing Mirror Randomizer! |")
    print("---------------------------------------------")
    sourceRom = ""
    while sourceRom == "":
        Tk().withdraw()
        sourceRom = askopenfilename(filetypes=[("GBA ROM files", "*.gba")])
    useSeed = makeChoice("Do you already have a seed?", ["Yes", "No"])
    numSeedsFinished = 1
    if useSeed == 1:
        currSeed = verifySeed()
        generateAbilityLog = makeChoice("Generate a spoiler text file containing ability distribution?", [
            "Yes",
            "No"])
        numSeeds = 1
        generateSeed(currSeed)
    else:
        print("\nAnswer the following questions to generate a ROM.\n[R] means \"Recommended\".")
        sleep(1)
        abilityDistributionType = makeChoice("[1/6] How should abilities be distributed?", [
            "[R] Pure random (anything goes)",
            "By enemy grouping (enemies that gave matching abilities in the original game (like Sword Knight and Heavy Knight) will still give matching abilities)",
            "By ability frequency (for example, two enemies gave Ice in the original game, so two random enemies will give Ice here)"])
        if abilityDistributionType == 1:
            basicEnemyBehaviorType = makeChoice("[1a/6] How should enemies that do not give an ability be handled?", [
                "[R] All enemies may or may not give an ability",
                "Basic enemies that did not originally give an ability (like Waddle Dee) may or may not give an ability; other enemies are still guaranteed to give an ability",
                "Unchanged (basic enemies will still not give an ability, and other enemies will)"])
        else:
            basicEnemyBehaviorType = 3
        if basicEnemyBehaviorType != 3:
            # BUG FIX: prompt previously contained literal "\%" (invalid
            # escape) which displayed a stray backslash.
            noneAbilityChanceEnemy = makeChoiceNumInput("[1b/6] For these enemies that may or may not give an ability, how likely is it that they do give an ability? (0%-100%) ([R] = 90)", 0, 100)
            noneAbilityChanceEnemy = min(ceil(noneAbilityChanceEnemy/1.67), 60) # this value rounds to increments of 1.67%; this is to reduce the length of the seed
            noneAbilityChanceBasicEnemy = noneAbilityChanceEnemy
            noneAbilityChanceNonBasicEnemy = noneAbilityChanceEnemy if basicEnemyBehaviorType == 1 else 30
        else:
            noneAbilityChanceBasicEnemy = 0
            noneAbilityChanceNonBasicEnemy = 60
        includeMiniBosses = makeChoice("[2/6] Include mini-bosses?", [
            "[R] Yes (randomize mini-boss abilities)",
            "No (do not change mini-bosses)"])
        includeMinnyAndWheelie = makeChoice("[3/6] Include Minny and Wheelie? (Not recommended; you need Mini and Wheel at certain parts of the game)", [
            "Yes (randomize Minny and Wheelie's abilities)",
            "[R] No (do not change their abilities)"])
        objectRandomizationType = makeChoice("[4/6] How would you like to randomize other objects (like inhalable enemy projectiles; basically everything except star blocks)?", [
            # BUG FIX: the comma after the first option was missing, so
            # Python concatenated the first two strings into one choice and
            # this menu only offered two options instead of three.
            "[R] Randomize all objects",
            "Only randomize objects that already give abilities",
            "Do not randomize objects"])
        if objectRandomizationType != 1:
            noneAbilityChanceObject = 30 # unused but needed for seed calculation
            noneAbilityChanceBasicObject = 0
            noneAbilityChanceNonBasicObject = 30
        else:
            noneAbilityChanceObject = makeChoiceNumInput("[4a/6] For objects that may or may not give an ability, how likely is it that they do give an ability? (0%-100%) ([R] = 90)", 0, 100)
            noneAbilityChanceObject = min(ceil(noneAbilityChanceObject/3.34), 30) # this value rounds to increments of 3.34%; this is to reduce the length of the seed
            noneAbilityChanceBasicObject = noneAbilityChanceObject
            noneAbilityChanceNonBasicObject = noneAbilityChanceObject
        generateAbilityLog = makeChoice("[5/6] Generate a spoiler text file containing ability distribution?", [
            "Yes",
            "No"])
        # NOTE(review): prompt says "up to 20" but the accepted range is
        # 0-100 -- confirm intended bounds; left unchanged.
        numSeeds = int(makeChoiceNumInput("[6/6] How many seeds do you want to generate with these settings? (up to 20)", 0, 100))
        settingsSeed = encodeSeed([abilityDistributionType-1, basicEnemyBehaviorType-1, noneAbilityChanceBasicEnemy, noneAbilityChanceNonBasicEnemy, includeMiniBosses-1, includeMinnyAndWheelie-1, objectRandomizationType-1, noneAbilityChanceObject], [2,2,60,60,1,1,2,30])[0]
        for i in range(numSeeds):
            # Full seed = settings (high digits) + random part (low 5 base-36 digits).
            maxVal = int("ZZZZZ", 36)
            genSeed = random.randint(0, maxVal)
            currSeed = (settingsSeed*(maxVal+1)) + genSeed
            generateSeed(currSeed)
    input("\nPress Enter to exit.")
def generateSeed(seed):
global normalEnemies
global miniBosses
global objects
global myEnemies
global myObjects
global currSeed
global numSeedsFinished
global seedString
global abilityDistributionType
global basicEnemyBehaviorType
global noneAbilityChanceBasicEnemy
global noneAbilityChanceNonBasicEnemy
global includeMiniBosses
global includeMinnyAndWheelie
global objectRandomizationType
global noneAbilityChanceBasicObject
global noneAbilityChanceNonBasicObject
global generateAbilityLog
seedString = str(dec_to_base(currSeed, 36)).upper().zfill(10)
print("\nGenerating ROM #"+str(numSeedsFinished+1)+" with seed "+seedString+".")
random.seed(currSeed)
myEnemies = copy.deepcopy(normalEnemies)
if includeMiniBosses:
myEnemies.update(copy.deepcopy(miniBosses))
myEnemies = shuffleDict(myEnemies)
if not includeMinnyAndWheelie:
del myEnemies["Minny"]
del myEnemies["Wheelie"]
if abilityDistributionType != 3:
abilityArray = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19]
else:
abilityArray = []
for | |
import itertools
import logging
import math
import os
import sys
import time
import warnings
from multiprocessing import Pool
from numba import njit
from pytransit import QuadraticModel
import batman
import ellc
import numpy as np
import astropy.constants as ac
import astropy.units as u
import wotan
from lcbuilder.lcbuilder_class import LcBuilder
from scipy import stats
from scipy.interpolate import interp1d
from scipy.signal import argrelextrema
import matplotlib.pyplot as plt
G = 6.674e-11  # gravitational constant [m^3 kg^-1 s^-2]
AU_TO_RSUN = 215.032  # astronomical units -> solar radii
Msolar_to_kg = 2.e30  # solar mass in kg (rounded)
Mearth_to_kg = 5.972e24  # Earth mass in kg
M_earth_to_M_sun = Mearth_to_kg / Msolar_to_kg  # Earth masses -> solar masses
R_earth_to_R_sun = 0.009175  # Earth radii -> solar radii
class ExoMoonLeastSquares:
def __init__(self, object_dir, cpus, star_mass, star_radius, ab, planet_radius, planet_period, planet_t0, planet_duration, planet_semimajor_axis, planet_inc, planet_ecc,
planet_arg_periastron, planet_impact_param, min_radius, max_radius, t0s, time, flux,
period_grid_size=2000, radius_grid_size=10):
self.object_dir = object_dir
self.cpus = cpus
self.star_mass = star_mass
self.star_radius = star_radius
self.ab = ab
self.planet_radius = planet_radius
self.planet_period = planet_period
self.planet_t0 = planet_t0
self.planet_duration = planet_duration
self.planet_semimajor_axis = planet_semimajor_axis
self.planet_inc = planet_inc
self.planet_ecc = planet_ecc
self.planet_arg_periastron = planet_arg_periastron
self.planet_impact_param = planet_impact_param
self.time = time
self.flux = flux
self.t0s = t0s
self.min_radius = min_radius
self.max_radius = max_radius
self.period_grid_size = period_grid_size
self.radius_grid_size = radius_grid_size
@staticmethod
def compute_semimajor_axis(major_mass, minor_period):
period_seconds = minor_period * 24. * 3600.
mass_kg = major_mass * Msolar_to_kg
a1 = (G * mass_kg * period_seconds ** 2 / 4. / (np.pi ** 2)) ** (1. / 3.)
return a1 / 1.496e11
@staticmethod
def compute_hill_radius(major_mass, minor_mass, semimajor_axis, eccentricity=0):
"""
@param major_mass: The main body mass
@param minor_mass: The minor body mass
@param semimajor_axis: The minor body semimajor axis in AU.
@param eccentricity: the planet eccentricity
@return: the hill radius of the minor body in the same units than the semimajor_axis
"""
return AU_TO_RSUN * semimajor_axis * (1 - eccentricity) * (minor_mass / (3 * major_mass) ** (1 / 3))
@staticmethod
def au_to_period(mass, au):
"""
Calculates the orbital period for the semi-major axis assuming a circular orbit.
@param mass: the stellar mass
@param au: the semi-major axis in astronomical units.
@return: the period in days
"""
mass_kg = mass * 2.e30
a = au * 1.496e11
return ((a ** 3) * 4 * (np.pi ** 2) / G / mass_kg) ** (1. / 2.) / 3600 / 24
@staticmethod
def compute_transit_duration(star_radius,
transiting_body_semimajor_axis, transit_period, transiting_body_radius,
impact_parameter=0):
"""
@param star_radius: star radius
@param transiting_body_semimajor_axis: orbit semimajor axis
@param transit_period: in days
@param transiting_body_radius: transiting body radius
@param impact_parameter:
@return:
@rtype:
"""
return transit_period / np.pi * np.arcsin(np.sqrt((star_radius + transiting_body_radius) ** 2 - (impact_parameter * star_radius) ** 2) / transiting_body_semimajor_axis)
#return 2 * moon_semimajor_axis / (planet_semimajor_axis * 2 * np.pi) * planet_period
@staticmethod
def compute_moon_period_grid(min, max, mode="lin", samples=10000):
if "log" == mode:
return np.logspace(math.log(min, 10), math.log(max, 10), samples, base=10)
else:
return np.linspace(min, max, samples)
    def subtract_planet_transit(self, ab, star_radius, star_mass, time, flux, planet_radius, planet_t0,
                                planet_period, planet_inc=90):
        """
        Remove the known planet transit from the light curve.

        An ellc transit model for the planet is evaluated on ``time`` and
        subtracted from ``flux`` (re-normalized to 1) so that only residual
        signals — e.g. a moon transit — remain.

        :param ab: quadratic limb-darkening coefficients passed to ellc as ldc_1
        :param star_radius: star radius in solar radii
        :param star_mass: star mass in solar masses
        :param time: light curve time stamps
        :param flux: normalized light curve flux
        :param planet_radius: planet radius in Earth radii
        :param planet_t0: planet transit epoch
        :param planet_period: planet orbital period in days
        :param planet_inc: orbital inclination in degrees (90 = edge-on)
        :return: the flux with the planet transit model subtracted, centered at 1
        """
        P1 = planet_period * u.day
        # Kepler's third law: semimajor axis from stellar mass and period.
        a = np.cbrt((ac.G * star_mass * u.M_sun * P1 ** 2) / (4 * np.pi ** 2)).to(u.au)
        model = ellc.lc(
            t_obs=time,
            radius_1=(star_radius * u.R_sun).to(u.au) / a,  # star radius convert from AU to in units of a
            radius_2=(planet_radius * u.R_earth).to(u.au) / a,
            # convert from Rearth (equatorial) into AU and then into units of a
            sbratio=0,  # dark companion: no surface-brightness contribution
            incl=planet_inc,
            light_3=0,
            t_zero=planet_t0,
            period=planet_period,
            a=None,
            q=1e-6,  # negligible mass ratio: planet treated as a test mass
            f_c=None, f_s=None,
            ldc_1=ab, ldc_2=None,
            gdc_1=None, gdc_2=None,
            didt=None,
            domdt=None,
            rotfac_1=1, rotfac_2=1,
            hf_1=1.5, hf_2=1.5,
            bfac_1=None, bfac_2=None,
            heat_1=None, heat_2=None,
            lambda_1=None, lambda_2=None,
            vsini_1=None, vsini_2=None,
            t_exp=None, n_int=None,
            grid_1='default', grid_2='default',
            ld_1='quad', ld_2=None,
            shape_1='sphere', shape_2='sphere',
            spots_1=None, spots_2=None,
            exact_grav=False, verbose=1)
        # model dips below 1 in transit; subtracting it and adding 1 re-centers
        # the residual light curve at 1.
        return flux - model + 1
@staticmethod
#@njit(fastmath=True, parallel=False)
def compute_moon_transit_scenarios(time, flux, planet_t0, moon_initial_alpha, moon_period, moon_orbit_ranges,
moon_orbit_transit_length, moon_transit_duration):
#TODO need to take into account "prograde" or "retrograde" orbit
orbit_scenarios = None
for moon_orbit_range in moon_orbit_ranges:
t0 = moon_orbit_range[0]
t1 = moon_orbit_range[1]
phase_delta = (t0 - planet_t0) % moon_period * 2 * np.pi
alpha = (moon_initial_alpha + phase_delta) % (2 * np.pi)
time_alpha = np.cos(alpha) * moon_orbit_transit_length / 2
moon_t1 = t1 + time_alpha
time_args = np.argwhere((time > moon_t1) & (time < moon_t1 + moon_transit_duration)).flatten()
#TODO we'd need to fill measurement gaps (detected from the time array)
time_moon_transit = time[time_args]
flux_moon_transit = flux[time_args]
time_moon_transit = time_moon_transit - (moon_t1 + moon_transit_duration / 2)
# fig_transit, axs = plt.subplots(1, 1, figsize=(8, 8))
# axs.plot(time_moon_transit, flux_moon_transit, color='gray', alpha=1, rasterized=True,
# label="Flux Transit ")
# axs.set_title("Residuals")
# axs.set_xlabel('Time')
# axs.set_ylabel('Flux')
# fig_transit.show()
if len(time_moon_transit) > 0:
if orbit_scenarios is None:
orbit_scenarios = [[alpha, time_moon_transit, flux_moon_transit]]
orbit_scenarios.append([alpha, time_moon_transit, flux_moon_transit])
return orbit_scenarios
    def search(self, search_input, return_lc=False):
        """
        Evaluate one (moon_period, moon_alpha) hypothesis.

        Gathers the in-transit windows predicted for the trial moon, stacks them
        into a single phase-folded series, and fits the stored transit model
        (``self.model``) over a grid of moon radii.

        :param search_input: object carrying moon_period, moon_alpha and impact_param
        :param return_lc: if True, also return the stacked light curve and best model
        :return: (residuals, baseline_residuals, best_radius[, time, flux, model])
        """
        logging.info("Searching for period=%.5fd and alpha=%.2frad", search_input.moon_period, search_input.moon_alpha)
        planet_duration = self.compute_transit_duration(self.star_radius, self.planet_semimajor_axis * AU_TO_RSUN,
                                                        self.planet_period, self.planet_radius * R_earth_to_R_sun,
                                                        search_input.impact_param)
        # NOTE(review): 'planet_mass' is not defined in this method, the class, or
        # the module — this raises NameError at runtime; it likely should come from
        # an attribute set elsewhere. Confirm and fix at the source.
        moon_semimajor_axis = self.compute_semimajor_axis(planet_mass * M_earth_to_M_sun, search_input.moon_period)
        moon_orbit_transit_duration = self.compute_transit_duration(self.star_radius,
                                                                    self.planet_semimajor_axis * AU_TO_RSUN,
                                                                    self.planet_period,
                                                                    moon_semimajor_axis * AU_TO_RSUN,
                                                                    search_input.impact_param)
        # Duration for a 1 R_earth body: used as the width of each moon window.
        moon_transit_length = self.compute_transit_duration(self.star_radius, self.planet_semimajor_axis * AU_TO_RSUN,
                                                            self.planet_period, 1 * R_earth_to_R_sun)
        # TODO we probably need to define left_transit_length and right_transit_length depending on moon orbit parameters
        moon_orbit_tokens = [[t0, t0 - planet_duration / 2] for t0 in self.t0s]
        transit_scenarios = ExoMoonLeastSquares.compute_moon_transit_scenarios(self.time, self.flux, self.planet_t0, search_input.moon_alpha,
                                                                               search_input.moon_period, moon_orbit_tokens,
                                                                               moon_orbit_transit_duration, moon_transit_length)
        # Stack every per-transit window into one series, then sort by time.
        scenario_time = []
        scenario_flux = []
        for normalized_moon_transit_scenario in transit_scenarios:
            scenario_time = np.concatenate((scenario_time, normalized_moon_transit_scenario[1].flatten()))
            scenario_flux = np.concatenate((scenario_flux, normalized_moon_transit_scenario[2].flatten()))
        sorted_time_args = np.argsort(scenario_time)
        scenario_time = scenario_time[sorted_time_args]
        scenario_flux = scenario_flux[sorted_time_args]
        # Clip only upward outliers (sigma_upper=3); dips may be the moon signal.
        # NOTE(review): remove_outliers is not visible in this file excerpt — confirm
        # it returns a boolean mask of outlier positions.
        outliers_args = ExoMoonLeastSquares.remove_outliers(scenario_flux, sigma_lower=float('inf'), sigma_upper=3)
        scenario_time = scenario_time[~outliers_args].flatten()
        scenario_flux = scenario_flux[~outliers_args].flatten()
        # Resample the stored template model onto the stacked-scenario time length.
        # NOTE(review): self.model is not set in __init__ — presumably assigned
        # elsewhere before search() is called; verify.
        interpolated = interp1d(np.arange(len(self.model)), self.model, axis=0, fill_value='extrapolate')
        model_sample = interpolated(np.linspace(0, len(self.model), len(scenario_time)))
        # fig_transit, axs = plt.subplots(1, 1, figsize=(8, 8))
        # axs.scatter(scenario_time, scenario_flux, color='gray', alpha=0.4, rasterized=True, label="Flux Transit ")
        # axs.plot(scenario_time, model_sample, color='red', alpha=1, rasterized=True, label="Flux Transit ")
        # axs.set_title("Residuals")
        # axs.set_xlabel('Time')
        # axs.set_ylabel('Flux')
        # fig_transit.show()
        residual_calculation, residual_baseline, residual_radius, residual_model = self.calculate_residuals(scenario_time, scenario_flux,
                                                                                            model_sample, self.min_radius,
                                                                                            self.max_radius,
                                                                                            self.radius_grid_size)
        if return_lc:
            return residual_calculation, residual_baseline, residual_radius, scenario_time, scenario_flux, residual_model
        else:
            return residual_calculation, residual_baseline, residual_radius
@staticmethod
def spectra(chi2, oversampling_factor=1, kernel_size=30):
SR = np.min(chi2) / chi2
SDE_raw = (1 - np.mean(SR)) / np.std(SR)
# Scale SDE_power from 0 to SDE_raw
power_raw = SR - np.mean(SR) # shift down to the mean being zero
scale = SDE_raw / np.max(power_raw) # scale factor to touch max=SDE_raw
power_raw = power_raw * scale
# Detrended SDE, named "power"
kernel = oversampling_factor * kernel_size
if kernel % 2 == 0:
kernel = kernel + 1
if len(power_raw) > 2 * kernel:
my_median = ExoMoonLeastSquares.running_median(power_raw, kernel)
power = power_raw - my_median
# Re-normalize to range between median = 0 and peak = SDE
# shift down to the mean being zero
power = power - np.mean(power)
SDE = np.max(power / np.std(power))
# scale factor to touch max=SDE
scale = SDE / np.max(power)
power = power * scale
else:
power = power_raw
SDE = SDE_raw
return SR, power_raw, power, SDE_raw, SDE
@staticmethod
#@njit(fastmath=True, parallel=False)
def calculate_residuals(time, flux, model_sample, min_radius, max_radius, radius_grid_size):
# TODO adjusting model to minimum flux value this might get improved by several scalations of min_flux
best_residual = np.inf
best_radius = min_radius
best_model = model_sample
model_baseline = np.full(len(model_sample), 1)
residuals_baseline = np.sum((flux - model_baseline) ** 2) ** 0.5
#radius_from_mean = np.sqrt((1 - np.mean(flux)) * (1.3**2)) / 0.00975
for radius in np.linspace(min_radius, max_radius, radius_grid_size):
depth = ((radius * R_earth_to_R_sun) ** 2) / star_radius ** 2
flux_at_middle = 1 - depth
model_sample_scaled = np.copy(model_sample)
model_sample_scaled[model_sample_scaled < 1] = model_sample_scaled[model_sample_scaled < 1] * (
flux_at_middle / np.min(model_sample))
radius_residuals = np.sum((flux - model_sample_scaled) ** 2)
if radius_residuals < best_residual:
best_residual = radius_residuals
best_radius = radius
best_model = model_sample_scaled
# fig_transit, axs = plt.subplots(1, 1, figsize=(8, 8))
# axs.scatter(time, flux, color='gray', alpha=0.4, rasterized=True, label="Flux Transit ")
# axs.plot(time, best_model, color='red', alpha=1, rasterized=True, label="Flux Transit ")
# axs.set_title("Residuals")
# axs.set_xlabel('Time')
# axs.set_ylabel('Flux')
# bin_means, bin_edges, binnumber = stats.binned_statistic(time,
# flux,
# statistic='mean', bins=25)
# bin_stds, _, _ = stats.binned_statistic(time,
# flux, statistic='std', bins=25)
# bin_width = (bin_edges[1] - bin_edges[0])
# bin_centers = bin_edges[1:] - bin_width / 2
# axs.errorbar(bin_centers, bin_means, yerr=bin_stds / 2, xerr=bin_width / 2, marker='o', markersize=4,
# color='darkorange', alpha=1, linestyle='none')
# fig_transit.show()
return best_residual, residuals_baseline, best_radius, best_model
@staticmethod
def running_median(data, kernel):
"""Returns sliding median of width 'kernel' and same length as data """
idx = np.arange(kernel) + np.arange(len(data) - kernel + 1)[:, None]
med = np.median(data[idx], axis=1)
# Append the first/last value at the beginning/end to match the length of
# data and returned median
first_values = med[0]
last_values = med[-1]
missing_values = len(data) - len(med)
values_front = int(missing_values * | |
# -*- coding: utf-8 -*-
import matplotlib
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
# from mpl_toolkits.basemap import Basemap
import xarray as xr
import re
from collections import OrderedDict
from datetime import datetime, timedelta
from scipy.spatial import cKDTree, KDTree
from pyproj import Proj
import numpy.ma as ma
import argparse
from glob import glob
import json
import os
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
deg2rad = np.pi / 180
NRANGE = {'LPRO': 90, 'SILL': 60, 'FIST': 60, 'VILA': 60, 'PRIO': 60}
dtypes = {"TIME": 'float64',
"DEPH": 'float32',
"BEAR": 'float32',
"RNGE": 'float32',
"LONGITUDE": 'float32',
"LATITUDE": 'float32',
"XDST": 'int32',
"YDST": 'int32',
"RDVA": 'int16',
"DRVA": 'int32',
"EWCT": 'int16',
"NSCT": 'int16',
"MAXV": 'int16',
"MINV": 'int16',
"ESPC": 'int16',
"ETMP": 'int16',
"ERSC": 'int16',
"ERTC": 'int16',
"SPRC": 'int16',
"NARX": 'int8',
"NATX": 'int8',
"SLTR": 'int32',
"SLNR": 'int32',
"SLTT": 'int16',
"SLNT": 'int16',
"TIME_QC": 'int8',
"POSITION_QC": 'int8',
"DEPH_QC": 'int8',
"QCflag": 'int8',
"OWTR_QC": 'int8',
"MDFL_QC": 'int8',
"VART_QC": 'int8',
"CSPD_QC": 'int8',
"AVRB_QC": 'int8',
"RDCT_QC": 'int8'}
scale_factors = {"XDST": 0.001,
"YDST": 0.001,
"RDVA": 0.001,
"DRVA": 0.001,
"EWCT": 0.001,
"NSCT": 0.001,
"ESPC": 0.001,
"ETMP": 0.001,
"MAXV": 0.001,
"MINV": 0.001,
"ERSC": 1,
"ERTC": 1,
"XDST": 0.001,
"YDST": 0.001,
"SPRC": 1,
"NARX": 1,
"NATX": 1,
"SLTR": 0.001,
"SLNR": 0.001,
"SLTT": 0.001,
"SLNT": 0.001,
"TIME_QC": 1,
"POSITION_QC": 1,
"DEPH_QC": 1,
"QCflag": 1,
"OWTR_QC": 1,
"MDFL_QC": 1,
"VART_QC": 1,
"CSPD_QC": 1,
"AVRB_QC": 1,
"RDCT_QC": 1}
add_offsets = {}
for key, value in scale_factors.items():
if isinstance(value, float):
scale_factors[key] = np.float32(scale_factors[key])
add_offsets[key] = np.float32(0)
else:
# Generamos un conversor de tipo a partir del tipo de la variable:
conversor = np.dtype(dtypes[key])
# Utilizamos el conversor para recodificar un tipo nativo de python a un escalar tipo numpy:
scale_factors[key] = np.int_(scale_factors[key]).astype(conversor)
add_offsets[key] = np.int_(0).astype(conversor)
_FillValues = {}
for key, value in dtypes.items():
if 'float' in value:
_FillValues[key] = np.finfo(dtypes[key]).min + 1
else:
_FillValues[key] = np.iinfo(dtypes[key]).min + 1
def rotate_vector(pr, uin, vin, lons, lats, returnxy=False):
    """
    Rotate a vector field (``uin,vin``) on a rectilinear grid
    with longitudes = ``lons`` and latitudes = ``lats`` from
    geographical (lat/lon) into map projection (x/y) coordinates.

    Adapted from ``mpl_toolkits.basemap``, taking a pyproj ``Proj`` (``pr``)
    instead of a Basemap instance — TODO confirm provenance.

    Differs from transform_vector in that no interpolation is done.
    The vector is returned on the same grid, but rotated into
    x,y coordinates.
    The input vector field is defined in spherical coordinates (it
    has eastward and northward components) while the output
    vector field is rotated to map projection coordinates (relative
    to x and y). The magnitude of the vector is preserved.
    .. tabularcolumns:: |l|L|
    ==============   ====================================================
    Arguments        Description
    ==============   ====================================================
    uin, vin         input vector field on a lat/lon grid.
    lons, lats       Arrays containing longitudes and latitudes
                     (in degrees) of input data in increasing order.
                     For non-cylindrical projections (those other than
                     ``cyl``, ``merc``, ``cyl``, ``gall`` and ``mill``) lons
                     must fit within range -180 to 180.
    ==============   ====================================================
    Returns ``uout, vout`` (rotated vector field).
    If the optional keyword argument
    ``returnxy`` is True (default is False),
    returns ``uout,vout,x,y`` (where ``x,y`` are the map projection
    coordinates of the grid defined by ``lons,lats``).
    """
    # if lons,lats are 1d and uin,vin are 2d, and
    # lats describes 1st dim of uin,vin, and
    # lons describes 2nd dim of uin,vin, make lons,lats 2d
    # with meshgrid.
    if lons.ndim == lats.ndim == 1 and uin.ndim == vin.ndim == 2 and \
            uin.shape[1] == vin.shape[1] == lons.shape[0] and \
            uin.shape[0] == vin.shape[0] == lats.shape[0]:
        lons, lats = np.meshgrid(lons, lats)
    else:
        if not lons.shape == lats.shape == uin.shape == vin.shape:
            raise TypeError("shapes of lons,lats and uin,vin don't match")
    x, y = pr(lons, lats)
    # rotate from geographic to map coordinates.
    if ma.isMaskedArray(uin):
        # Remember the mask, operate on filled arrays, re-mask at the end.
        mask = ma.getmaskarray(uin)
        masked = True
        uin = uin.filled(1)
        vin = vin.filled(1)
    else:
        masked = False
    # Map the (lon, lat) vector in the complex plane.
    uvc = uin + 1j * vin
    uvmag = np.abs(uvc)
    theta = np.angle(uvc)
    # Define a displacement (dlon, dlat) that moves all
    # positions (lons, lats) a small distance in the
    # direction of the original vector.
    dc = 1E-5 * np.exp(theta * 1j)
    dlat = dc.imag * np.cos(np.radians(lats))
    dlon = dc.real
    # Deal with displacements that overshoot the North or South Pole.
    farnorth = np.abs(lats + dlat) >= 90.0
    somenorth = farnorth.any()
    if somenorth:
        # Flip the displacement for points that would cross a pole.
        dlon[farnorth] *= -1.0
        dlat[farnorth] *= -1.0
    # Add displacement to original location and find the native coordinates.
    lon1 = lons + dlon
    lat1 = lats + dlat
    xn, yn = pr(lon1, lat1)
    # Determine the angle of the displacement in the native coordinates.
    vecangle = np.arctan2(yn - y, xn - x)
    if somenorth:
        # Undo the flip applied above so the angle points the right way.
        vecangle[farnorth] += np.pi
    # Compute the x-y components of the original vector.
    uvcout = uvmag * np.exp(1j * vecangle)
    uout = uvcout.real
    vout = uvcout.imag
    if masked:
        uout = ma.array(uout, mask=mask)
        vout = ma.array(vout, mask=mask)
    if returnxy:
        return uout, vout, x, y
    else:
        return uout, vout
class Radial:
    """
    Abstraction class for reading and processing radial-current files (.ruv).

    Attributes
    ---------
    Methods
    -------
    """
    def __init__(self, fichero):
        """
        Constructor.

        Parses the header metadata and the data tables of a CODAR .ruv file.

        Parameters
        ----------
        fichero: .ruv file containing the radial velocities
        """
        # The file has to be opened as binary; '%%'-prefixed lines are comments
        # and are dropped, the leading '%' of the remaining lines is stripped.
        contenido = [linea.decode('utf-8').replace('%', '').replace('\n', '') for linea in
                     open(fichero, 'rb').readlines()
                     if '%%' not in str(linea)]
        # Key:value header lines (everything that is not part of a table).
        metadatos = [linea for linea in contenido if 'Table' not in linea]
        metadatos = dict([(linea.split(':')[0], linea.split(':')[1]) for linea in metadatos if ':' in str(linea)])
        # Parse the metadata entries we will need later:
        self.Origin = np.array(metadatos['Origin'].split(), dtype=float)
        self.RangeEnd = int(metadatos['RangeEnd'])
        self.RangeResolutionKMeters = float(metadatos['RangeResolutionKMeters'])
        self.AntennaBearing = float(metadatos['AntennaBearing'].replace('True', ''))
        self.AngularResolution = float(metadatos['AngularResolution'].replace('Deg', ''))
        self.TimeStamp = datetime.strptime(metadatos['TimeStamp'], ' %Y %m %d %H %M %S')
        # First and last line indices of each embedded table:
        starts = np.arange(len(contenido))[['TableStart' in linea for linea in contenido]]
        ends = np.arange(len(contenido))[['TableEnd' in linea for linea in contenido]]
        lengths = ends - starts - 1
        # Line that contains each table's column header:
        columns = np.arange(len(contenido))[['TableColumnTypes' in linea for linea in contenido]]
        tablas = []
        # Variable renaming is applied here (CODAR names -> CF-style names):
        headers = [contenido[indice].split(':')[1].split() for indice in columns]
        headers[0] = ['LOND', 'LATD', 'EWCT', 'NSCT', 'OWTR_QC', 'ESPC', 'ETMP', 'MAXV', 'MINV', 'ERSC', 'ERTC', 'XDST',
                      'YDST', 'RNGE', 'BEAR', 'RDVA', 'DRVA', 'SPRC']
        ## Originals: LOND LATD VELU VELV VFLG ESPC ETMP MAXV MINV ERSC ERTC XDST YDST RNGE BEAR VELO HEAD SPRC
        for i in range(3):
            if lengths[i] != 0:
                start = starts[i] + 1
                end = ends[i]
                tablas.append(pd.DataFrame(np.array([linea.split() for linea in contenido[start:end]], dtype=float),
                                           columns=headers[i]))
        # Occasionally bogus sentinel values (999.00) appear in these variables:
        tablas[0].ESPC[tablas[0].ESPC == 999.00] = np.nan
        tablas[0].ETMP[tablas[0].ETMP == 999.00] = np.nan
        # Apply the required unit-conversion factors (cm/s -> m/s; sign flips
        # follow the output convention — TODO confirm the intended sign of RDVA/MINV/MAXV):
        tablas[0].EWCT /= 100.
        tablas[0].NSCT /= 100.
        tablas[0].RDVA /= -100.
        tablas[0].MINV /= -100.
        tablas[0].MAXV /= -100.
        tablas[0].ESPC /= 100.
        tablas[0].ETMP /= 100.
        tablas[0].ERSC /= 1.
        tablas[0].ERTC /= 1.
        tablas[0].SPRC /= 1.
        self.metadatos = metadatos
        self.tablas = tablas
    def to_grid(self, grid):
        """
        Map the parsed radial table onto a regular bearing/range grid.

        Builds ``self.variables``: an OrderedDict of xarray DataArrays with
        TIME/DEPTH/BEAR/RNGE dimensions and netCDF packing encodings attached.

        Parameters
        ----------
        grid: object exposing longitud/latitud arrays, BEAR/RNGE coordinates,
              nBEAR/nRNGE sizes and X/Y distance fields
        """
        # cKDTree nearest-neighbour search from radial points to grid nodes:
        nearest = cKDTree(np.column_stack([grid.longitud.values.flatten(), grid.latitud.values.flatten()]))
        puntos = np.column_stack([self.tablas[0].LOND.values, self.tablas[0].LATD.values])
        distancias, vecinos = nearest.query(puntos)
        variables = ['EWCT', 'NSCT', 'OWTR_QC', 'MINV', 'MAXV', 'RDVA', 'DRVA', 'ESPC', 'ETMP', 'ERSC', 'ERTC', 'SPRC']
        self.variables = OrderedDict()
        # Complete list of coordinates:
        # TIME is encoded as days since 1950-01-01 (fractional days).
        delta = self.TimeStamp - datetime(1950, 1, 1)
        self.variables['TIME'] = xr.DataArray([delta.days + delta.seconds / 86400], dims={'TIME': 1})
        self.variables['DEPH'] = xr.DataArray([0.], dims={'DEPTH': 1})
        for variable in variables:
            # Create the matrix to be filled with data:
            tmp = np.ones_like(grid.longitud.values.flatten()) * np.nan
            # Set nearest neighbours:
            # NOTE(review): if two radial points map to the same grid node the
            # later one silently wins — confirm this is acceptable.
            tmp[vecinos] = self.tablas[0][variable]
            # Back to original shape:
            tmp = tmp.reshape(grid.longitud.shape)
            # Create the DataArray:
            if variable in ['EWCT', 'NSCT', 'OWTR_QC', 'MINV', 'MAXV', 'RDVA', 'DRVA', 'ESPC', 'ETMP', 'ERSC', 'ERTC',
                            'SPRC']:
                # Add a DEPTH dimension:
                tmp = np.expand_dims(tmp, axis=0)
                # Add a TIME dimension:
                tmp = np.expand_dims(tmp, axis=0)
            self.variables[variable] = xr.DataArray(tmp,
                                                    dims={'TIME': 1, 'DEPTH': 1, 'BEAR': grid.nBEAR,
                                                          'RNGE': grid.nRNGE},
                                                    coords={'TIME': self.variables['TIME'],
                                                            'DEPH': self.variables['DEPH'], 'BEAR': grid.BEAR,
                                                            'RNGE': grid.RNGE, 'LONGITUDE': grid.longitud,
                                                            'LATITUDE': grid.latitud,
                                                            'XDST': grid.X, 'YDST': grid.Y})
            # Per-variable netCDF packing encoding for the output file:
            self.variables[variable].encoding["scale_factor"] = scale_factors[variable]
            self.variables[variable].encoding["add_offset"] = add_offsets[variable]
            self.variables[variable].encoding["dtype"] = dtypes[variable]
            self.variables[variable].encoding["_FillValue"] = _FillValues[variable]
def QC_control(self):
"""
Método para el control de calidad de los datos
Parametros
----------
Utiliza la lista de variables del objeto, que deben estar ya ongrid.
"""
# Construimos alias de las variables para no tener que reescribir mucho | |
TrainingDataset):
"""
Verify that the provided train dataset is of the correct type
"""
pass
    @abstractmethod
    def _verify_inference_dataset_type(self, inference_dataset: InferenceDataset):
        """
        Verify that the provided inference dataset is of the correct type
        for this model; implementations raise if it is not.
        """
        pass
    @abstractmethod
    def _verify_predict_sample(self, predict_sample: Tuple):
        """
        Verify that the (first) sample contained in the inference dataset matches the model type and the
        data the model has been trained on; implementations raise if it does not.
        """
        pass
    @abstractmethod
    def _verify_past_future_covariates(self, past_covariates, future_covariates):
        """
        Verify that any non-None covariates comply with the model type
        (e.g. past-covariates-only models must not receive future covariates).
        """
        pass
    @abstractmethod
    def _produce_train_output(self, input_batch: Tuple) -> Tensor:
        """Run a forward pass on one training batch and return the model output tensor."""
        pass
    @abstractmethod
    def _get_batch_prediction(self, n: int, input_batch: Tuple, roll_size: int) -> Tensor:
        """
        Applies the auto-regressive (rolling) prediction logic used by
        non-recurrent models to forecast ``n`` steps from ``input_batch``.
        Should be overwritten by recurrent models.
        """
        pass
    @random_method
    def fit(self,
            series: Union[TimeSeries, Sequence[TimeSeries]],
            past_covariates: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
            future_covariates: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
            val_series: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
            val_past_covariates: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
            val_future_covariates: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
            verbose: bool = False,
            epochs: int = 0,
            max_samples_per_ts: Optional[int] = None,
            num_loader_workers: int = 0) -> None:
        """ Fit/train the model on one or multiple series.
        This method wraps around :func:`fit_from_dataset()`, constructing a default training
        dataset for this model. If you need more control on how the series are sliced for training, consider
        calling :func:`fit_from_dataset()` with a custom :class:`darts.utils.data.TrainingDataset`.
        This function can be called several times to do some extra training. If `epochs` is specified, the model
        will be trained for some (extra) `epochs` epochs.
        Below, all possible parameters are documented, but not all models support all parameters. For instance,
        all the :class:`PastCovariatesTorchModel` support only `past_covariates` and not `future_covariates`. Darts will
        complain if you try fitting a model with the wrong covariates argument.
        When handling covariates, Darts will try to use the time axes of the target and the covariates
        to come up with the right time slices. So the covariates can be longer than needed; as long as the time axes
        are correct Darts will handle them correctly. It will also complain if their time span is not sufficient.
        Parameters
        ----------
        series
            A series or sequence of series serving as target (i.e. what the model will be trained to forecast)
        past_covariates
            Optionally, a series or sequence of series specifying past-observed covariates
        future_covariates
            Optionally, a series or sequence of series specifying future-known covariates
        val_series
            Optionally, one or a sequence of validation target series, which will be used to compute the validation
            loss throughout training and keep track of the best performing models.
        val_past_covariates
            Optionally, the past covariates corresponding to the validation series (must match `covariates`)
        val_future_covariates
            Optionally, the future covariates corresponding to the validation series (must match `covariates`)
        verbose
            Optionally, whether to print progress.
        epochs
            If specified, will train the model for `epochs` (additional) epochs, irrespective of what `n_epochs`
            was provided to the model constructor.
        max_samples_per_ts
            Optionally, a maximum number of samples to use per time series. Models are trained in a supervised fashion
            by constructing slices of (input, output) examples. On long time series, this can result in unnecessarily
            large number of training samples. This parameter upper-bounds the number of training samples per time
            series (taking only the most recent samples in each series). Leaving to None does not apply any
            upper bound.
        num_loader_workers
            Optionally, an integer specifying the `num_workers` to use in PyTorch ``DataLoader`` instances,
            both for the training and validation loaders (if any).
            A larger number of workers can sometimes increase performance, but can also incur extra overheads
            and increase memory usage, as more batches are loaded in parallel.
        """
        super().fit(series=series, past_covariates=past_covariates, future_covariates=future_covariates)
        # TODO: also check the validation covariates
        self._verify_past_future_covariates(past_covariates=past_covariates, future_covariates=future_covariates)
        # Normalize every argument to a list of series (single TimeSeries -> [TimeSeries],
        # None passes through unchanged).
        wrap_fn = lambda ts: [ts] if isinstance(ts, TimeSeries) else ts
        series = wrap_fn(series)
        past_covariates = wrap_fn(past_covariates)
        future_covariates = wrap_fn(future_covariates)
        val_series = wrap_fn(val_series)
        val_past_covariates = wrap_fn(val_past_covariates)
        val_future_covariates = wrap_fn(val_future_covariates)
        # Check that dimensions of train and val set match; on first series only
        if val_series is not None:
            match = (series[0].width == val_series[0].width and
                     (past_covariates[0].width if past_covariates is not None else None) ==
                     (val_past_covariates[0].width if val_past_covariates is not None else None) and
                     (future_covariates[0].width if future_covariates is not None else None) ==
                     (val_future_covariates[0].width if val_future_covariates is not None else None))
            raise_if_not(match, 'The dimensions of the series in the training set '
                                'and the validation set do not match.')
        # Optionally let the model's configured encoders generate extra covariates.
        self.encoders = self.initialize_encoders()
        if self.encoders.encoding_available:
            past_covariates, future_covariates = self.encoders.encode_train(target=series,
                                                                            past_covariate=past_covariates,
                                                                            future_covariate=future_covariates)
        train_dataset = self._build_train_dataset(target=series,
                                                  past_covariates=past_covariates,
                                                  future_covariates=future_covariates,
                                                  max_samples_per_ts=max_samples_per_ts)
        if val_series is not None:
            # Apply the same encoding to the validation covariates.
            if self.encoders.encoding_available:
                val_past_covariates, val_future_covariates = \
                    self.encoders.encode_train(target=val_series,
                                               past_covariate=val_past_covariates,
                                               future_covariate=val_future_covariates)
            val_dataset = self._build_train_dataset(target=val_series,
                                                    past_covariates=val_past_covariates,
                                                    future_covariates=val_future_covariates,
                                                    max_samples_per_ts=max_samples_per_ts)
        else:
            val_dataset = None
        logger.info('Train dataset contains {} samples.'.format(len(train_dataset)))
        self.fit_from_dataset(train_dataset, val_dataset, verbose, epochs, num_loader_workers)
    @property
    @abstractmethod
    def _model_encoder_settings(self) -> Tuple[int, int, bool, bool]:
        """Abstract property that returns model specific encoder settings that are used to initialize the encoders.
        Must return Tuple (input_chunk_length, output_chunk_length, takes_past_covariates, takes_future_covariates)
        """
        pass
    def initialize_encoders(self) -> SequentialEncoder:
        """Build a :class:`SequentialEncoder` from this model's encoder settings
        and its ``add_encoders`` constructor argument (if any)."""
        input_chunk_length, output_chunk_length, takes_past_covariates, takes_future_covariates =\
            self._model_encoder_settings
        # NOTE(review): _model_params[1] presumably holds the kwargs the model was
        # constructed with — confirm against the base model implementation.
        return SequentialEncoder(add_encoders=self._model_params[1].get('add_encoders', None),
                                 input_chunk_length=input_chunk_length,
                                 output_chunk_length=output_chunk_length,
                                 takes_past_covariates=takes_past_covariates,
                                 takes_future_covariates=takes_future_covariates)
@random_method
def fit_from_dataset(self,
train_dataset: TrainingDataset,
val_dataset: Optional[TrainingDataset] = None,
verbose: bool = False,
epochs: int = 0,
num_loader_workers: int = 0) -> None:
"""
This method allows for training with a specific :class:`darts.utils.data.TrainingDataset` instance.
These datasets implement a PyTorch ``Dataset``, and specify how the target and covariates are sliced
for training. If you are not sure which training dataset to use, consider calling :func:`fit()` instead,
which will create a default training dataset appropriate for this model.
This function can be called several times to do some extra training. If `epochs` is specified, the model
will be trained for some (extra) `epochs` epochs.
Parameters
----------
train_dataset
A training dataset with a type matching this model (e.g. :class:`PastCovariatesTrainingDataset` for
:class:`PastCovariatesTorchModel`).
val_dataset
A training dataset with a type matching this model (e.g. :class:`PastCovariatesTrainingDataset` for
:class:`PastCovariatesTorchModel`s), representing the validation set (to track the validation loss).
verbose
Optionally, whether to print progress.
epochs
If specified, will train the model for `epochs` (additional) epochs, irrespective of what `n_epochs`
was provided to the model constructor.
num_loader_workers
Optionally, an integer specifying the `num_workers` to use in PyTorch ``DataLoader`` instances,
both for the training and validation loaders (if any).
A larger number of workers can sometimes increase performance, but can also incur extra overheads
and increase memory usage, as more batches are loaded in parallel.
"""
self._verify_train_dataset_type(train_dataset)
raise_if(len(train_dataset) == 0,
'The provided training time series dataset is too short for obtaining even one training point.',
logger)
raise_if(val_dataset is not None and len(val_dataset) == 0,
'The provided validation time series dataset is too short for obtaining even one training point.',
logger)
train_sample = train_dataset[0]
if self.model is None:
# Build model, based on the dimensions of the first series in the train set.
self.train_sample, self.output_dim = train_sample, train_sample[-1].shape[1]
self._init_model()
else:
# Check existing model has input/output dims matching what's provided in the training set.
raise_if_not(len(train_sample) == len(self.train_sample),
'The size of the training set samples (tuples) does not match what the model has been '
'previously trained on. Trained on tuples of length {}, received tuples of length {}.'.format(
len(self.train_sample), len(train_sample)
))
same_dims = (tuple(s.shape[1] if s is not None else None for s in train_sample) ==
tuple(s.shape[1] if s is not None else None for s in self.train_sample))
raise_if_not(same_dims,
'The dimensionality of the series in the training set do not match the dimensionality'
' of the series the model has previously been trained on. '
'Model input/output dimensions = {}, provided input/ouptput dimensions = {}'.format(
tuple(s.shape[1] if s is not None else None for s in self.train_sample),
tuple(s.shape[1] if s is not None else None for s in train_sample)
))
# Setting drop_last to False makes the model see each sample at least once, and guarantee the presence of at
# least one batch no matter the chosen batch size
train_loader = DataLoader(train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=num_loader_workers,
pin_memory=True,
drop_last=False,
collate_fn=self._batch_collate_fn)
# Prepare validation data
val_loader = None if val_dataset is None else | |
#!/user/bin/python
## load core library
import glob
import math
import numpy as np
import pandas as pd
### TODO list for this module:
## - consider replacing `math` calls with `np` equivalents
## - organize and split this file into smaller modules
## - remove unused packages
## load auxiliary packages
#import csv
import gzip
#import matplotlib.pyplot as plt
#import matplotlib.cm as cm
#from mpl_toolkits.mplot3d import axes3d
import os
import re
#import scipy
#import scipy.stats
#from sklearn.model_selection import KFold
#from sklearn.cluster import KMeans
import sys
import subprocess
#import httplib, urllib
#from StringIO import StringIO
#####################################################################################
### Identify continuous stretches in a list of numbers.
### Stretches are joined if they are closer than "win" positions apart.
### The input is a list of numbers.
def find_uninterruptedseq(resnums0, win=0):
    """Split a list of numbers into runs of consecutive values.

    Each maximal stretch where every value is exactly one above its
    predecessor becomes one sub-list.  When ``win`` > 0, neighbouring
    stretches separated by a gap smaller than ``win`` are merged via
    :func:`continuous`.
    """
    runs = []
    pos = 0
    n = len(resnums0)
    while pos < n:
        start = pos
        # extend the run while each value is exactly one above its predecessor
        while pos + 1 < n and resnums0[pos + 1] == resnums0[pos] + 1:
            pos += 1
        runs.append(list(resnums0[start:pos + 1]))
        pos += 1
    if win > 0:
        return continuous(runs, win)
    return runs
### Merge neighbouring segments separated by a gap smaller than `win`.
### `seglist` is a list of lists of indices; merged entries are rewritten in place.
def continuous(seglist, win):
    if len(seglist) <= 1:
        return seglist
    joined = []
    for i in range(1, len(seglist)):
        gap = min(seglist[i]) - max(seglist[i - 1])
        if gap < win:
            # absorb the previous segment (plus the gap indices) into this one
            filler = np.arange(max(seglist[i - 1]) + 1, min(seglist[i])).tolist()
            seglist[i] = seglist[i - 1] + filler + seglist[i]
        else:
            joined.append(seglist[i - 1])
    joined.append(seglist[-1])
    return joined
### Convert a boolean list into the list of indices where the value equals `b`.
def bool2ind(predicted, b=True):
    matches = np.array(predicted) == b
    return np.where(matches)[0].tolist()
### returns indices of elements of A which are present in B
def ismember(A, B):
    """Return the indices ``i`` such that ``A[i]`` occurs in ``B``.

    The original built an intermediate list (``AinB``) and then re-scanned
    ``A`` testing membership in that list -- O(len(A)*len(B)) twice over.
    A single pass with an O(1) set lookup is equivalent: index ``i`` is
    kept exactly when ``A[i]`` is in ``B``.
    """
    try:
        lookup = set(B)
    except TypeError:  # unhashable elements -- fall back to list membership
        lookup = B
    return [i for i, x in enumerate(A) if x in lookup]
############################################################################################
#### Common amino-acid residue lookup tables used by the helpers below.
# The 20 standard residues as 1-letter codes.
AAlist = list("GAVLIMFWPSTCYNQDEKRH")
# Hydrophobic vs. hydrophilic split of the 20 standard residues.
hphoAAlist = list("GAVLIMFWP") #[ "G", "A", "V", "L", "I", "M", "F", "W", "P" ]
hphiAAlist = list("STCYNQDEKRH")
# 1-letter and 3-letter residue codes, index-aligned with each other and
# with the per-residue property arrays (aatASA, aaShphobic) below.
aa1code = ['A', 'D', 'C', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
aa3code = ['ALA', 'ASP', 'CYS', 'GLU', 'PHE', 'GLY', 'HIS', 'ILE', 'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL', 'TRP', 'TYR']
#aa1code = ['A', 'D', 'C', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
# All 400 ordered residue pairs ('AA', 'AD', ...) -- used as a DataFrame index
# by pairpreference().
aapair = np.array([ [y+x for x in aa1code] for y in aa1code]).flatten()
# 1-letter <-> 3-letter translation tables ('X'/'UNK' marks an unknown residue).
aaA2AAA = {'A' :'ALA', 'C':'CYS', 'D':'ASP', 'E':'GLU', 'F':'PHE', 'G':'GLY', 'H':'HIS', 'I':'ILE', 'K':'LYS', 'L':'LEU', 'M':'MET', 'N':'ASN', 'P':'PRO', 'Q':'GLN', 'R':'ARG', 'S':'SER', 'T':'THR', 'V': 'VAL', 'W' : 'TRP', 'Y' : 'TYR', 'X': 'UNK' } #, 'B': 'MSE', 'J' : 'PCA', 'O': 'SL5', 'J': 'SWG'
aaAAA2A = {'ALA':'A', 'CYS':'C', 'ASP' : 'D', 'GLU' : 'E' , 'PHE' : 'F', 'GLY' : 'G', 'HIS' : 'H', 'ILE' : 'I', 'LYS': 'K' , 'LEU':'L', 'MET':'M' , 'ASN':'N', 'PRO':'P' , 'GLN':'Q' , 'ARG':'R' , 'SER':'S' , 'THR':'T', 'VAL':'V', 'TRP':'W', 'TYR':'Y', 'UNK' :'X'} #, 'MSE':'B', 'PCA' : 'B', 'SL5' : 'B', 'SWG' : 'B' , 'TPO' : 'B', 'MIR' : 'B', 'PTR' : 'B', 'PIA' : 'B', 'CRF' : 'B', 'CZZ' : 'B'
# Formal side-chain charge per residue at pH 7.4 and at pH 2.6.
aa2chph74 = {'A' :0, 'C':0, 'D':-1, 'E':-1, 'F':0, 'G':0, 'H':0.5, 'I':0, 'K':1, 'L':0, 'M':0, 'N':0, 'P':0, 'Q':0, 'R':1, 'S':0, 'T':0, 'V':0, 'W' : 0, 'Y' :0, 'X':0}
aa2chph26 = {'A' :0, 'C':0, 'D':0, 'E':0, 'F':0, 'G':0, 'H':1, 'I':0, 'K':1, 'L':0, 'M':0, 'N':0, 'P':0, 'Q':0, 'R':1, 'S':0, 'T':0, 'V':0, 'W' : 0, 'Y' :0, 'X':0}
## Collapse 8-state DSSP / STRIDE secondary-structure codes to 3 states
## (H = helix, E = strand, remainder turn/coil); ss1 keeps S/T as 'T',
## ss2 collapses them to coil 'C'.
dssp2ss1 = {'H':'H', 'G':'H', 'I':'H', 'E':'E', 'B':'E', 'S':'T', 'T':'T', '':'C'}
dssp2ss2 = {'H':'H', 'G':'H', 'I':'H', 'E':'E', 'B':'E', 'S':'C', 'T':'C', '':'C'}
stride2ss = {'H':'H', 'E':'E', 'T':'C', 'C':'C', 'G':'H', 'I':'H', 'b':'E', 'B':'E'}
# Total (maximum) accessible surface area per residue, aligned with aa1code.
aatASA = [110.2, 144.1, 140.4, 174.7, 200.7, 78.7, 181.9, 185, 205.7, 183.1, 200.1, 146.4, 141.9, 178.6, 229, 117.2, 138.7, 153.7, 240.5, 213.7];
# Hydrophobicity scale per residue and a shifted variant (0.77, the 'G'
# entry, subtracted -- presumably centring on glycine; TODO confirm source scale).
aaShphobic = [0.81, 0.88, 0.72, 0.46, 0.95, 0.77, 0.54, 1, 0.26, 0.92, 0.81, 0.45, 0.68, 0.43, 0, 0.6, 0.63, 0.92, 0.85, 0.71]
aaShphobic_c = [x-0.77 for x in aaShphobic]
# 3-letter code -> [1-letter code, total ASA, hydrophobicity].
aa3toaa1 = { a:[b,c,d] for a,b,c,d in zip(aa3code,aa1code,aatASA,aaShphobic)}
# Per-property lookup dicts keyed by 1-letter code.
aaA2tASA = dict(zip(aa1code, aatASA))
aaA2hpo = dict(zip(aa1code, aaShphobic))
aaA2hpo_c = dict(zip(aa1code, aaShphobic_c))
def aminoA2tASA(aa):
    """Total (max) accessible surface area for 1-letter residue ``aa``; 0 if unknown."""
    try:
        return aaA2tASA[aa]
    except (KeyError, TypeError):  # was a bare except: don't swallow SystemExit etc.
        return 0
def aminoA2hpo(aa):
    """Hydrophobicity of 1-letter residue ``aa``; 0 if unknown."""
    try:
        return aaA2hpo[aa]
    except (KeyError, TypeError):  # was a bare except: don't swallow SystemExit etc.
        return 0
def aminoA2hpo_c(aa):
    """Shifted hydrophobicity (aaShphobic_c) of residue ``aa``; 0 if unknown."""
    try:
        return aaA2hpo_c[aa]
    except (KeyError, TypeError):  # was a bare except: don't swallow SystemExit etc.
        return 0
def aminoAAA2A(res):
    """3-letter residue code -> 1-letter code; 'B' for non-standard residues."""
    try:
        return aaAAA2A[res]
    except (KeyError, TypeError):  # was a bare except: don't swallow SystemExit etc.
        return 'B'
def aminoA2AAA(res):
    """1-letter residue code -> 3-letter code; 'NSA' for non-standard residues."""
    try:
        return aaA2AAA[res]
    except (KeyError, TypeError):  # was a bare except: don't swallow SystemExit etc.
        return 'NSA'
def aminoA2chph74(res):
    """Formal side-chain charge of residue ``res`` at pH 7.4; 0 if unknown."""
    try:
        return aa2chph74[res]
    except (KeyError, TypeError):  # was a bare except: don't swallow SystemExit etc.
        return 0
def aminoA2chph26(res):
    """Formal side-chain charge of residue ``res`` at pH 2.6; 0 if unknown."""
    try:
        return aa2chph26[res]
    except (KeyError, TypeError):  # was a bare except: don't swallow SystemExit etc.
        return 0
# Replace the scalar converters above with NumPy-vectorized wrappers under the
# same names, so each can be applied element-wise to arrays/Series of residue
# codes.  NOTE: after this point the functions return np.ndarray for sequence
# inputs (0-d arrays for scalars), not plain scalars.
aminoAAA2A = np.vectorize(aminoAAA2A)
aminoA2AAA = np.vectorize(aminoA2AAA)
aminoA2hpo = np.vectorize(aminoA2hpo)
aminoA2hpo_c = np.vectorize(aminoA2hpo_c)
aminoA2tASA = np.vectorize(aminoA2tASA)
aminoA2chph74 = np.vectorize(aminoA2chph74)
aminoA2chph26 = np.vectorize(aminoA2chph26)
#### count the number of lines in a text file
def file_len(fname):
    """Return the number of lines in ``fname`` (0 for an empty file).

    Bug fix: the original started the counter at 0 and returned ``i + 1``
    unconditionally, so an empty file reported 1 line.  Enumerating from 1
    makes the count exact.  (The old header comment claiming this "checks a
    filename exists" was also wrong -- it counts lines.)
    """
    count = 0
    with open(fname) as f:
        for count, _ in enumerate(f, 1):
            pass
    return count
###########################################################################################
### Sequence-related functions
###########################################################################################
def readfasta(filename):
    """Read a FASTA file into a DataFrame with columns ['header', 'seq'].

    Fixes over the original version:
    * ``line[1:-1]`` / ``line[:-1]`` chopped the final character whenever the
      last line had no trailing newline -- now uses ``rstrip('\\n')``;
    * a file without any '>' record produced a length-mismatched DataFrame --
      it now yields an empty one.
    As before, lines appearing before the first header are discarded and
    blanks inside sequence lines are removed.
    """
    headers, seqs, chunk = [], [], []
    with open(filename, 'r') as fh:
        for line in fh:
            line = line.rstrip('\n')
            if line.startswith('>'):
                if headers:
                    # close out the previous record
                    seqs.append(''.join(chunk))
                headers.append(line[1:].strip())
                chunk = []
            else:
                chunk.append(line.replace(' ', ''))
        if headers:
            seqs.append(''.join(chunk))
    return pd.DataFrame({'header': headers, 'seq': seqs})
def writefasta(filename, header, seqlist, mode="w"):
    """Write sequences to ``filename`` in FASTA format; returns True.

    ``header`` may be empty (auto 'seq0', 'seq1', ...), a single prefix
    (a running index is appended to it), or one header per sequence.

    Bug fix: for a one-element *list* header the original computed
    ``header + str(i)`` (list + str -> TypeError); we use its sole entry.
    The file is now also closed via a context manager.
    """
    if len(header) == 0:
        header = ['seq' + str(i) for i in range(len(seqlist))]
    elif len(header) == 1:
        header = [header[0] + str(i) for i in range(len(seqlist))]
    with open(filename, mode) as fh:
        for head, seq in zip(header, seqlist):
            fh.write('>' + head + '\n' + seq + '\n')
    return True
###########################################################################################
# 'Old conservation score program for MSA sequences'
def conservation_score(list_seq):
    """Per-column entropy-style conservation score for an aligned set of sequences.

    For alignment column j: score[j] = sum over residues r of f_r * ln(f_r),
    where f_r is the frequency of residue r in that column; characters outside
    the 20 standard residues (e.g. gaps) are ignored.  Scores are <= 0, with 0
    meaning a perfectly conserved column.

    Rewritten from a triple nested loop (O(L*20*N)) to a per-column count
    (O(L*N)); results agree up to float summation order.
    """
    from collections import Counter
    standard_residues = set('GAPVLIMFYWSTCNQKHRDE')
    len_seq = len(list_seq[0])
    n_seq = len(list_seq)
    scores = []
    for j in range(len_seq):
        column = [s[j] for s in list_seq]
        counts = Counter(res for res in column if res in standard_residues)
        scores.append(sum((c / n_seq) * math.log(c / n_seq)
                          for c in counts.values()))
    return scores
def binary2indseg(binseq, cl):
    """Return the index segments where ``binseq`` equals ``cl``.

    e.g. binary2indseg([1, 1, 0, 1], 1) -> [[0, 1], [3]]
    """
    seglist = []
    current = None
    for idx, val in enumerate(binseq):
        if val == cl:
            if current is None:
                current = [idx]
            else:
                current.append(idx)
        elif current is not None:
            seglist.append(current)
            current = None
    if current is not None:
        seglist.append(current)
    return seglist
def seglist2boolind(seglist, length, sep1=',', sep2='-'):
    """Expand a segment string like '2-5,8-9' into a boolean mask of ``length``.

    Bounds are 1-based and inclusive; reversed bounds ('5-2') are accepted.
    An empty string yields an all-False mask; a malformed segment prints a
    message and returns -1 (behaviour preserved from the original).
    """
    if len(seglist) == 0:
        return np.zeros(length, bool)
    ind = []
    for token in seglist.split(sep1):
        seg = token.split(sep2)
        try:
            lo, hi = int(seg[0]), int(seg[1])
        except:
            print('seglist2boolind: Bad segment format !! ', seg)
            return -1
        # range(min-1, max) covers both orderings, matching the original
        ind.extend(range(min(lo, hi) - 1, max(lo, hi)))
    mask = np.zeros(length, bool)
    mask[ind] = True
    return mask
def ind2bool(ind, length):
    """Inverse of ``bool2ind``: a 0/1 list of ``length`` with 1 at each index in ``ind``."""
    flags = [0] * length
    for position in ind:
        flags[position] = 1
    return flags
def boolind2seglist(segboollist, sep1=',', sep2='-'):
    """Inverse of ``seglist2boolind``: encode a 0/1 mask as '2-3,5-5'
    (1-based, inclusive).  Returns '' for an empty or all-zero mask.

    Bug fix: the original joined segments with a hard-coded ',', silently
    ignoring the ``sep1`` argument; it is honoured now (default unchanged).
    """
    segments = []
    start = 0
    inside = False
    for i, val in enumerate(segboollist):
        if val == 1 and not inside:
            inside = True
            start = i + 1
        elif val == 0 and inside:
            inside = False
            segments.append(str(start) + sep2 + str(i))
    if inside:  # mask ended while still inside a run
        segments.append(str(start) + sep2 + str(len(segboollist)))
    return sep1.join(segments)
#posvec = [ [0,1,2,3,4,0,1,2,3], [1,2,3,4,5,2,3,4,5] ]
#posvec = [ [0,0], [1,2]]
def pairpreference(seqlist, mode):
    """Relative frequencies of ordered residue pairs observed in ``seqlist``.

    mode 0: counts (i, i+1) and (i, i+2) pairs over whole sequences
            (columns 'f12' / 'f13'); a pair containing a non-standard
            residue is reported and the following adjacent pair is counted
            instead, as in the original.
            NOTE(review): the last adjacent pair (len-2, len-1) is never
            counted for 'f12' (loop stops at len-2); preserved as-is.
    mode 1: counts pairs at the fixed position combinations in ``posvec``
            (one 'f<p1><p2>' column per combination; sequences must be
            at least 6 residues long).
    Each column is normalised to sum to 1.

    Fix: ``DataFrame.ix`` was removed in pandas 1.0 -- label-based
    indexing now uses ``.loc``.
    """
    aa1code = ['A', 'D', 'C', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
    aapair = np.array([[y + x for x in aa1code] for y in aa1code]).flatten()
    pairpref = pd.DataFrame(index=aapair)
    if mode == 0:
        pairpref['f12'] = 0.0
        pairpref['f13'] = 0.0
        for seq in seqlist:
            for i in range(len(seq) - 2):
                try:
                    pairpref.loc[seq[i] + seq[i + 1], 'f12'] += 1
                    pairpref.loc[seq[i] + seq[i + 2], 'f13'] += 1
                except KeyError:
                    # non-standard residue: report and count the next adjacent pair
                    print(seq, i)
                    pairpref.loc[seq[i + 1] + seq[i + 2], 'f12'] += 1
    elif mode == 1:
        posvec = [[0, 1, 2, 3, 4, 0, 1, 2, 3], [1, 2, 3, 4, 5, 2, 3, 4, 5]]
        clist = []
        for i in range(len(posvec[0])):
            clist.append('f' + str(posvec[0][i] + 1) + str(posvec[1][i] + 1))
            pairpref[clist[i]] = 0.0
        for seq in seqlist:
            for i in range(len(posvec[0])):
                pairpref.loc[seq[posvec[0][i]] + seq[posvec[1][i]], clist[i]] += 1
    pairpref /= pairpref.sum()
    return pairpref
def calscore_pairpref(seqlist, pairpref, mode):
if mode == 0 :
score = np.zeros((len(seqlist), 2))
k = -1
for seq in seqlist:
k = k + 1; l = len(seq)
score[k, 0] = np.sum(pairpref.loc[[seq[x]+seq[x+1] for x in np.arange(l-1)],'f12'])
score[k, 1] = np.sum(pairpref.loc[[seq[x]+seq[x+2] for | |
#!/usr/bin/env python
'''Parse a C source file.
To use, subclass CParser and override its handle_* methods. Then instantiate
the class with a string to parse.
Derived from ANSI C grammar:
* Lexicon: http://www.lysator.liu.se/c/ANSI-C-grammar-l.html
* Grammar: http://www.lysator.liu.se/c/ANSI-C-grammar-y.html
Reference is C99:
* http://www.open-std.org/JTC1/SC22/WG14/www/docs/n1124.pdf
'''
from __future__ import print_function
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import cPickle
import operator
import os.path
import re
import sys
import time
import warnings
import preprocessor
import yacc
# Token names handed to the PLY lexer/parser.  The PP_* entries are
# preprocessor directives; the remainder mirrors the terminals of the
# ANSI C grammar referenced in the module docstring.
tokens = (
    'PP_IF', 'PP_IFDEF', 'PP_IFNDEF', 'PP_ELIF', 'PP_ELSE',
    'PP_ENDIF', 'PP_INCLUDE', 'PP_DEFINE', 'PP_DEFINE_CONSTANT', 'PP_UNDEF',
    'PP_LINE', 'PP_ERROR', 'PP_PRAGMA',

    'IDENTIFIER', 'CONSTANT', 'CHARACTER_CONSTANT', 'STRING_LITERAL', 'SIZEOF',
    'PTR_OP', 'INC_OP', 'DEC_OP', 'LEFT_OP', 'RIGHT_OP', 'LE_OP', 'GE_OP',
    'EQ_OP', 'NE_OP', 'AND_OP', 'OR_OP', 'MUL_ASSIGN', 'DIV_ASSIGN',
    'MOD_ASSIGN', 'ADD_ASSIGN', 'SUB_ASSIGN', 'LEFT_ASSIGN', 'RIGHT_ASSIGN',
    'AND_ASSIGN', 'XOR_ASSIGN', 'OR_ASSIGN', 'HASH_HASH', 'PERIOD',
    'TYPE_NAME',

    'TYPEDEF', 'EXTERN', 'STATIC', 'AUTO', 'REGISTER',
    'CHAR', 'SHORT', 'INT', 'LONG', 'SIGNED', 'UNSIGNED', 'FLOAT', 'DOUBLE',
    'CONST', 'VOLATILE', 'VOID',
    'STRUCT', 'UNION', 'ENUM', 'ELLIPSIS',

    'CASE', 'DEFAULT', 'IF', 'ELSE', 'SWITCH', 'WHILE', 'DO', 'FOR', 'GOTO',
    'CONTINUE', 'BREAK', 'RETURN', '__ASM__'
)

# Reserved words of C89 plus the GCC '__asm__' extension; the lexer uses
# this list to distinguish keywords from plain IDENTIFIER tokens.
keywords = [
    'auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do',
    'double', 'else', 'enum', 'extern', 'float', 'for', 'goto', 'if', 'int',
    'long', 'register', 'return', 'short', 'signed', 'sizeof', 'static',
    'struct', 'switch', 'typedef', 'union', 'unsigned', 'void', 'volatile',
    'while', '__asm__'
]
# --------------------------------------------------------------------------
# C Object Model
# --------------------------------------------------------------------------
class Declaration(object):
    """A single C declaration: declarator + type (+ optional storage class)."""

    def __init__(self):
        self.declarator = None
        self.type = Type()
        self.storage = None

    def __repr__(self):
        parts = [('declarator', self.declarator), ('type', self.type)]
        if self.storage:
            parts.append(('storage', self.storage))
        body = ', '.join(['%s=%r' % pair for pair in parts])
        return 'Declaration(%s)' % body
class Declarator(object):
    """A declarator: identifier plus array/parameter/pointer decorations.

    ``pointer`` is a plain class-level default of None.  Bug fix: a stray
    ``pointer = property(lambda self: None)`` ("make pointer read-only")
    made the attribute permanently None and unassignable, so parser
    actions setting ``decl.pointer`` would raise AttributeError -- removed.
    """
    pointer = None

    def __init__(self):
        self.identifier = None
        self.initializer = None
        self.array = None
        self.parameters = None

    def __repr__(self):
        s = self.identifier or ''
        if self.array:
            s += repr(self.array)
        if self.initializer:
            s += ' = %r' % self.initializer
        if self.parameters is not None:
            s += '(' + ', '.join([repr(p) for p in self.parameters]) + ')'
        return s
class Pointer(Declarator):
    """A pointer declarator; ``qualifiers`` holds const/volatile markers."""
    pointer = None

    def __init__(self):
        super(Pointer, self).__init__()
        self.qualifiers = []

    def __repr__(self):
        quals = ''
        if self.qualifiers:
            quals = '<%s>' % ' '.join(self.qualifiers)
        base = super(Pointer, self).__repr__()
        return 'POINTER%s(%r)' % (quals, self.pointer) + base
class Array(object):
    """An array suffix; ``array`` chains nested dimensions, ``size`` may be None."""

    def __init__(self):
        self.size = None
        self.array = None

    def __repr__(self):
        suffix = '[%r]' % self.size if self.size else '[]'
        return (repr(self.array) + suffix) if self.array else suffix
class Parameter(object):
    """A function parameter: type (+ optional declarator and storage class)."""

    def __init__(self):
        self.type = Type()
        self.storage = None
        self.declarator = None

    def __repr__(self):
        parts = [('type', self.type)]
        if self.declarator:
            parts.append(('declarator', self.declarator))
        if self.storage:
            parts.append(('storage', self.storage))
        return 'Parameter(%s)' % ', '.join('%s=%r' % kv for kv in parts)
class Type(object):
    """A C type: qualifier strings followed by specifier objects."""

    def __init__(self):
        self.qualifiers = []
        self.specifiers = []

    def __repr__(self):
        words = list(self.qualifiers)
        words.extend(str(s) for s in self.specifiers)
        return ' '.join(words)
# These are used only internally.
# Marker subclasses of str: they behave as plain strings but let
# apply_specifiers() distinguish the kind of each specifier via type().
class StorageClassSpecifier(str):
    pass

class TypeSpecifier(str):
    pass
class StructTypeSpecifier(object):
    """A struct/union specifier with optional tag and member declarations."""

    def __init__(self, is_union, tag, declarations):
        self.is_union = is_union
        self.tag = tag
        self.declarations = declarations

    def __repr__(self):
        text = 'union' if self.is_union else 'struct'
        if self.tag:
            text += ' %s' % self.tag
        if self.declarations:
            text += ' {%s}' % '; '.join([repr(d) for d in self.declarations])
        return text
class EnumSpecifier(object):
    """An enum specifier with optional tag and enumerator list."""

    def __init__(self, tag, enumerators):
        self.tag = tag
        self.enumerators = enumerators

    def __repr__(self):
        text = 'enum'
        if self.tag:
            text += ' %s' % self.tag
        if self.enumerators:
            text += ' {%s}' % ', '.join([repr(e) for e in self.enumerators])
        return text
class Enumerator(object):
    """A single enum member: name with an optional value expression."""

    def __init__(self, name, expression):
        self.name = name
        self.expression = expression

    def __repr__(self):
        if self.expression:
            return '%s = %r' % (self.name, self.expression)
        return self.name
# Marker subclass of str for 'const'/'volatile' qualifiers (see apply_specifiers).
class TypeQualifier(str):
    pass
def apply_specifiers(specifiers, declaration):
    '''Apply specifiers to the declaration (declaration may be
    a Parameter instead).

    Storage classes go to ``declaration.storage``; type specifiers and
    qualifiers are appended to ``declaration.type``.
    '''
    for s in specifiers:
        if type(s) == StorageClassSpecifier:
            if declaration.storage:
                # Bug fix: the old code called p.parser.cparser.handle_error
                # here, but no ``p`` exists in this scope, so a duplicate
                # storage class crashed with NameError instead of being
                # reported.  Warn and stop applying, preserving the early
                # return of the original intent.
                warnings.warn('Declaration has more than one storage class')
                return
            declaration.storage = s
        elif type(s) in (TypeSpecifier, StructTypeSpecifier, EnumSpecifier):
            declaration.type.specifiers.append(s)
        elif type(s) == TypeQualifier:
            declaration.type.qualifiers.append(s)
# --------------------------------------------------------------------------
# Expression Object Model
# --------------------------------------------------------------------------
class EvaluationContext(object):
    '''Base interface for resolving identifiers and sizeof() while evaluating
    constant expressions; this default implementation warns and yields 0.
    '''
    def evaluate_identifier(self, name):
        message = 'Attempt to evaluate identifier "%s" failed' % name
        warnings.warn(message)
        return 0

    def evaluate_sizeof(self, type):
        message = 'Attempt to evaluate sizeof "%s" failed' % str(type)
        warnings.warn(message)
        return 0
class ExpressionNode(object):
    """Base class for expression AST nodes: evaluates to 0 and prints as ''."""

    def __str__(self):
        return ''

    def evaluate(self, context):
        return 0
class ConstantExpressionNode(ExpressionNode):
    """A literal constant; evaluates to its stored value."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)

    def evaluate(self, context):
        return self.value
class IdentifierExpressionNode(ExpressionNode):
    """A bare identifier, resolved through the EvaluationContext.

    Bug fix: ``__str__`` returned ``str(self.value)``, but this node stores
    ``name`` (copy/paste from ConstantExpressionNode), so printing any
    identifier node raised AttributeError.
    """

    def __init__(self, name):
        self.name = name

    def evaluate(self, context):
        return context.evaluate_identifier(self.name)

    def __str__(self):
        return str(self.name)
class UnaryExpressionNode(ExpressionNode):
    """Prefix operator application: op(child)."""

    def __init__(self, op, op_str, child):
        self.op = op
        self.op_str = op_str
        self.child = child

    def __str__(self):
        return '(%s %s)' % (self.op_str, self.child)

    def evaluate(self, context):
        return self.op(self.child.evaluate(context))
class SizeOfExpressionNode(ExpressionNode):
    """sizeof(type); the size itself is delegated to the EvaluationContext."""

    def __init__(self, type):
        self.type = type

    def __str__(self):
        return 'sizeof(%s)' % str(self.type)

    def evaluate(self, context):
        return context.evaluate_sizeof(self.type)
class BinaryExpressionNode(ExpressionNode):
    """Strict binary operator: both operands are always evaluated (left first)."""

    def __init__(self, op, op_str, left, right):
        self.op = op
        self.op_str = op_str
        self.left = left
        self.right = right

    def __str__(self):
        return '(%s %s %s)' % (self.left, self.op_str, self.right)

    def evaluate(self, context):
        lhs = self.left.evaluate(context)
        rhs = self.right.evaluate(context)
        return self.op(lhs, rhs)
class LogicalAndExpressionNode(ExpressionNode):
    """Short-circuit &&: the right side is only evaluated when the left is truthy."""

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def __str__(self):
        return '(%s && %s)' % (self.left, self.right)

    def evaluate(self, context):
        return self.left.evaluate(context) and self.right.evaluate(context)
class LogicalOrExpressionNode(ExpressionNode):
    """Short-circuit ||: the right side is only evaluated when the left is falsy."""

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def __str__(self):
        return '(%s || %s)' % (self.left, self.right)

    def evaluate(self, context):
        return self.left.evaluate(context) or self.right.evaluate(context)
class ConditionalExpressionNode(ExpressionNode):
    """C ternary ?: -- only the selected branch is evaluated."""

    def __init__(self, condition, left, right):
        self.condition = condition
        self.left = left
        self.right = right

    def __str__(self):
        return '(%s ? %s : %s)' % (self.condition, self.left, self.right)

    def evaluate(self, context):
        branch = self.left if self.condition.evaluate(context) else self.right
        return branch.evaluate(context)
# --------------------------------------------------------------------------
# Grammar
# --------------------------------------------------------------------------
def p_translation_unit(p):
    '''translation_unit :
                        | translation_unit external_declaration
    '''
    # PLY: the docstring above *is* the grammar rule -- do not edit casually.
    # Starting production.
    # Allow empty production so that files with no declarations are still
    # valid.
    # Intentionally empty
def p_identifier(p):
    '''identifier : IDENTIFIER'''
    # Wrap the raw token text in an AST node so it can be evaluated later.
    p[0] = IdentifierExpressionNode(p[1])
def p_constant(p):
    '''constant : CONSTANT
                | CHARACTER_CONSTANT
    '''
    # Convert a C integer literal to int; leave it as the raw string when it
    # is not an integer (e.g. a float or a character constant).
    def _to_int(s):
        s = s.rstrip('lLuU')  # strip integer-literal suffixes
        if s[:2].lower() == '0x':
            # Bug fix: only lowercase '0x' was recognised before, so '0X1F'
            # fell into the octal branch and stayed an unconverted string.
            return int(s, base=16)
        elif s.startswith('0'):
            return int(s, base=8)
        else:
            return int(s)

    value = p[1]
    try:
        value = _to_int(value)
    except ValueError:
        pass
    p[0] = ConstantExpressionNode(value)
def p_string_literal(p):
    '''string_literal : STRING_LITERAL'''
    # A string literal is a constant that evaluates to itself.
    p[0] = ConstantExpressionNode(p[1])
def p_primary_expression(p):
    '''primary_expression : identifier
                          | constant
                          | string_literal
                          | '(' expression ')'
    '''
    # A parenthesised expression reduces to its inner expression; every
    # other alternative passes straight through.
    p[0] = p[2] if p[1] == '(' else p[1]
def p_postfix_expression(p):
    '''postfix_expression : primary_expression
                          | postfix_expression '[' expression ']'
                          | postfix_expression '(' ')'
                          | postfix_expression '(' argument_expression_list ')'
                          | postfix_expression PERIOD IDENTIFIER
                          | postfix_expression PTR_OP IDENTIFIER
                          | postfix_expression INC_OP
                          | postfix_expression DEC_OP
    '''
    # XXX Largely unsupported: indexing, calls, member access and ++/-- are
    # parsed but dropped from the AST -- only the base expression survives.
    p[0] = p[1]
def p_argument_expression_list(p):
    '''argument_expression_list : assignment_expression
                        | argument_expression_list ',' assignment_expression
    '''
    # Argument lists are not captured; the production exists only so call
    # expressions parse.
def p_asm_expression(p):
    '''asm_expression : __ASM__ volatile_opt '(' string_literal ')'
        | __ASM__ volatile_opt '(' string_literal ':' str_opt_expr_pair_list ')'
        | __ASM__ volatile_opt '(' string_literal ':' str_opt_expr_pair_list ':' str_opt_expr_pair_list ')'
        | __ASM__ volatile_opt '(' string_literal ':' str_opt_expr_pair_list ':' str_opt_expr_pair_list ':' str_opt_expr_pair_list ')'
    '''
    # Definitely not ISO C, adapted from example ANTLR GCC parser at
    # http://www.antlr.org/grammar/cgram//grammars/GnuCParser.g
    # but more lenient (expressions permitted in optional final part, when
    # they shouldn't be -- avoids shift/reduce conflict with
    # str_opt_expr_pair_list).
    # XXX node not supported: the asm contents are discarded; an empty
    # ExpressionNode (evaluates to 0) is produced instead.
    p[0] = ExpressionNode()
def p_str_opt_expr_pair_list(p):
    '''str_opt_expr_pair_list :
                              | str_opt_expr_pair
                              | str_opt_expr_pair_list ',' str_opt_expr_pair
    '''
    # Helper list for asm operand clauses; contents are discarded.
def p_str_opt_expr_pair(p):
    '''str_opt_expr_pair : string_literal
                         | string_literal '(' expression ')'
    '''
    # One asm operand ("constraint" (expr)); contents are discarded.
def p_volatile_opt(p):
    '''volatile_opt :
                    | VOLATILE
    '''
    # Optional 'volatile' after __asm__; no effect on the AST.
def p_unary_expression(p):
    '''unary_expression : postfix_expression
                        | INC_OP unary_expression
                        | DEC_OP unary_expression
                        | unary_operator cast_expression
                        | SIZEOF unary_expression
                        | SIZEOF '(' type_name ')'
                        | asm_expression
    '''
    if len(p) == 2:
        # Single-symbol alternatives pass straight through.
        p[0] = p[1]
    elif p[1] == 'sizeof':
        # Both sizeof forms become a SizeOfExpressionNode.
        if p[2] == '(':
            p[0] = SizeOfExpressionNode(p[3])
        else:
            p[0] = SizeOfExpressionNode(p[2])
    elif type(p[1]) == tuple:
        # unary_operator reduces to (op, op_str)
        p[0] = UnaryExpressionNode(p[1][0], p[1][1], p[2])
    else:
        # XXX INC_OP and DEC_OP expression nodes not supported: the
        # increment/decrement is dropped and the operand passed through.
        p[0] = p[2]
def p_unary_operator(p):
    '''unary_operator : '&'
                      | '*'
                      | '+'
                      | '-'
                      | '~'
                      | '!'
    '''
    # reduces to (op, op_str)
    # NOTE: '&' and '*' map to placeholder *strings*, not callables --
    # evaluating a UnaryExpressionNode built from them would raise TypeError.
    p[0] = ({
        '+': operator.pos,
        '-': operator.neg,
        '~': operator.inv,
        '!': operator.not_,
        '&': 'AddressOfUnaryOperator',
        '*': 'DereferenceUnaryOperator'}[p[1]], p[1])
def p_cast_expression(p):
    '''cast_expression : unary_expression
                       | '(' type_name ')' cast_expression
    '''
    if len(p) == 2:
        p[0] = p[1]
    else:
        # XXX cast node not supported: the target type is discarded and the
        # operand expression passed through unchanged.
        p[0] = p[4]
def p_multiplicative_expression(p):
'''multiplicative_expression : cast_expression
| multiplicative_expression '*' cast_expression
| multiplicative_expression '/' cast_expression
| multiplicative_expression '%' cast_expression
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = BinaryExpressionNode({
'*': operator.mul,
'/': | |
calculate the attacking loss
if (atype in ['FOA', 'SPFOA']) and (M is not None) and (M == 2):
# -- [orig] ranking attack, M=2
#== configuration for FOA:M=2
M_GT = 5 # sensible choice due to SOP dataset property
XI = float(os.getenv('SP', 10.)) # balancing the "SP" and "QA" component
if 'SP' not in atype:
XI = None # override SP weight to None
#== select the M=2 candidates. note, x1 is closer to q than x2
if True:
# local sampling (default)
topmost = int(candi[0].size(0) * 0.01)
topxm = dist.topk(topmost+1, dim=1, largest=False)[1][:,1:] # [output_0, M]
sel = np.vstack([np.random.permutation(topmost) for j in range(topxm.shape[0])])
msample = th.stack([topxm[i][np.sort(sel[i,:M])] for i in range(topxm.shape[0])])
if 'SP' in atype:
mgtruth = th.stack([topxm[i][np.sort(sel[i,M:])[:M_GT]] for i in range(topxm.shape[0])])
else:
# global sampling
distsort = dist.sort(dim=1)[1] # [output_0, candi_0]
mpairs = th.randint(candi[0].shape[0], (output.shape[0], M)).sort(dim=1)[0] # [output_0, M]
msample= th.stack([distsort[i, mpairs[i]] for i in range(output.shape[0])]) # [output_0, M]
if 'SP' in atype:
mgtruth = dist.topk(M_GT+1, dim=1, largest=False)[1][:,1:] # [output_0, M_GT]
embpairs = candi[0][msample, :] # [output_0, M, output_1]
if 'SP' in atype:
embgts = candi[0][mgtruth, :] # [output_0, M_GT, output_1]
# >> compute the (ordinary) loss on selected targets
loss, acc = LossFactory('FOA2')(output, embpairs[:,1,:], embpairs[:,0,:], metric=metric)
#== Semantic preserving? (SP)
if 'SP' in atype:
loss_sp, rank_gt = LossFactory('QA+')(output, embgts, candi[0],
metric=metric, dist=dist, cidx=mgtruth)
loss = loss + XI * loss_sp
prankgt_orig = rank_gt #/ candi[0].size(0)
# >> backup and report
correct_orig = acc * output.shape[0]
loss_orig = loss.clone().detach()
if verbose:
print()
if 'SP' not in atype:
print('* Original Sample', 'loss=', loss.item(), 'FOA:Accu=', acc)
else:
print('* Original Sample', 'loss=', loss.item(), 'where loss_sp=', loss_sp.item(),
'FOA:Accu=', acc, 'GT.R@mean=', rank_gt)
# <transfer>
if transfer is not None:
embpairs_trans = transfer['candidates'][0][msample, :]
_, acc_trans = LossFactory('FOA2')(output_trans, embpairs_trans[:,1,:], embpairs_trans[:,0,:],
metric=('C' if 'C' in transfer['transfer'] else 'E'))
if 'SP' not in atype:
print('* <transfer> Original Sample', 'FOA:Accu=', acc_trans)
else:
embgts_trans = transfer['candidates'][0][mgtruth, :]
_, rank_sp_trans = LossFactory('QA')(output_trans, embgts_trans,
transfer['candidates'][0], pm='+',
metric=('C' if 'C' in transfer['transfer'] else 'E'),
dist=dist_trans, cidx=mgtruth)
print('* <transfer> Original Sample', 'FOA:Accu=', acc_trans,
'GT.R@mean=', rank_sp_trans)
elif (atype in ['FOA', 'SPFOA']) and (M is not None) and (M > 2):
# -- [orig] ranking attack, M>2
#== configuration for FOA:M>2
M_GT = 5 # sensible choice due to SOP dataset property
XI = float(os.getenv('SP', 10.)) # balancing the "SP" and "QA" component
if 'SP' not in atype:
XI = None # override SP weight to None
#== select M>2 candidates, in any order
if True:
# Just select the original top-k
topxm = dist.topk(M, dim=1, largest=False)[1]
rpm = np.stack([np.random.permutation(M) for j in range(topxm.shape[0])])
msample = th.stack([topxm[i][rpm[i]] for i in range(topxm.shape[0])])
if 'SP' in atype:
mgtruth = msample
elif False:
# local sampling (from the topmost 1% samples)
topmost = int(candi[0].size(0) * 0.01)
topxm = dist.topk(topmost+1, dim=1, largest=False)[1][:,1:] # [output_0, M]
sel = np.vstack([np.random.permutation(topmost) for j in range(topxm.shape[0])])
msample = th.stack([topxm[i][sel[i,:M]] for i in range(topxm.shape[0])])
if 'SP' in atype:
mgtruth = th.stack([topxm[i][np.sort(sel[i,M:])[:M_GT]] for i in range(topxm.shape[0])])
else:
# global sampling
msample = th.randint(candi[0].shape[0], (output.shape[0], M)) # [output_0, M]
if 'SP' in atype:
mgtruth = dist.topk(M_GT+1, dim=1, largest=False)[1][:,1:]
embpairs = candi[0][msample, :] # [output_0, M, output_1]
if 'SP' in atype:
embgts = candi[0][mgtruth, :] # [output_0, M_GT, output_1]
# >> adversarial inequalities formed by every pair of samples
loss, tau = LossFactory('FOAX')(output, embpairs, metric=metric)
#== Semantic preserving? (SP)
if 'SP' in atype:
loss_sp, rank_sp = LossFactory('QA+')(output, embgts, candi[0],
metric=metric, dist=dist, cidx=mgtruth)
loss = loss + XI * loss_sp
prankgt_orig = rank_sp #/ candi[0].size(0)
# >> backup and report
correct_orig = tau * output.shape[0] / 100.
loss_orig = loss.clone().detach()
if verbose:
print()
if 'SP' not in atype:
print('* Original Sample', 'loss=', loss.item(), 'FOA:tau=', tau)
else:
print('* Original Sample', 'loss=', loss.item(), 'where loss_sp=', loss_sp.item(),
'FOA:tau=', tau, 'GT.R@mean=', rank_sp)
# <transfer>
if transfer is not None:
embpairs_trans = transfer['candidates'][0][msample, :]
_, tau_trans = LossFactory('FOAX')(output_trans, embpairs_trans,
metric=('C' if 'C' in transfer['transfer'] else 'E'))
if 'SP' not in atype:
print('* <transfer> Original Sample', 'FOA:tau=', tau_trans)
else:
embgts_trans = transfer['candidates'][0][mgtruth, :]
_, rank_sp_trans = LossFactory('QA')(output_trans, embgts_trans,
transfer['candidates'][0], pm='+',
metric=('C' if 'C' in transfer['transfer'] else 'E'),
dist=dist_trans, cidx=mgtruth)
print('* <transfer> Original Sample', 'FOA:tau=', tau_trans,
'GT.R@mean=', rank_sp_trans)
elif (atype in ['QA', 'SPQA']) and (M is not None):
#== semantic-preserving (SP) query attack
#; the pure query attack has a downside: its semantic may be changed
#; during the attack. That would result in a very weird ranking result,
#; even if the attacking goal was achieved.
#== configuration
M_GT = 5 # sensible due to the SOP dataset property
XI = float(os.getenv('SP', 1. if ('+' == pm) else 100.)) # balancing the "SP" and "QA" loss functions
if 'SP' not in atype:
XI = None
#== first, select the attacking targets and the ground-truth vectors for
#; the SP purpose.
if '+' == pm:
# random sampling from populationfor QA+
if 'global' == os.getenv('SAMPLE', 'global'):
msample = th.randint(candi[0].shape[0], (output.shape[0], M)) # [output_0,M]
elif 'local' == os.getenv('SAMPLE', 'global'):
local_lb = int(candi[0].shape[0]*0.01)
local_ub = int(candi[0].shape[0]*0.05)
topxm = dist.topk(local_ub+1, dim=1, largest=False)[1][:,1:]
sel = np.random.randint(local_lb, local_ub, (output.shape[0], M))
msample = th.stack([topxm[i][sel[i]] for i in range(topxm.shape[0])])
if 'SP' in atype:
mgtruth = dist.topk(M_GT+1, dim=1, largest=False)[1][:,1:] # [output_0, M]
elif '-' == pm:
# random sampling from top-3M for QA-
topmost = int(candi[0].size(0) * 0.01)
if int(os.getenv('VIS', 0)) > 0:
topmost = int(candi[0].size(0) * 0.0003)
if transfer is None:
topxm = dist.topk(topmost+1, dim=1, largest=False)[1][:,1:]
else:
topxm = dist_trans.topk(topmost+1, dim=1, largest=False)[1][:,1:]
sel = np.vstack([np.random.permutation(topmost) for i in range(output.shape[0])])
msample = th.stack([topxm[i][sel[i,:M]] for i in range(topxm.shape[0])])
if 'SP' in atype:
mgtruth = th.stack([topxm[i][np.sort(sel[i,M:])[:M_GT]] for i in range(topxm.shape[0])])
embpairs = candi[0][msample, :]
if 'SP' in atype:
embgts = candi[0][mgtruth, :]
#== evaluate the SPQA loss on original samples
if 'SP' in atype:
loss_qa, rank_qa = LossFactory('QA')(output, embpairs, candi[0],
metric=metric, pm=pm, dist=dist, cidx=msample)
loss_sp, rank_sp = LossFactory('QA+')(output, embgts, candi[0],
metric=metric, dist=dist, cidx=mgtruth)
loss = loss_qa + XI * loss_sp
else:
loss_qa, rank_qa = LossFactory('QA')(output, embpairs, candi[0],
metric=metric, pm=pm, dist=dist, cidx=msample)
loss = loss_qa
#== overall loss function of the batch
mrank = rank_qa / candi[0].shape[0]
correct_orig = mrank * output.shape[0] / 100.
loss_orig = loss.clone().detach()
if 'SP' in atype:
mrankgt = rank_sp / candi[0].shape[0]
sp_orig = mrankgt * output.shape[0] / 100.
prankgt_orig = mrankgt
#== backup and report
if verbose:
print()
if 'SP' in atype:
print('* Original Sample', 'loss=', loss.item(),
f'SPQA{pm}:rank=', mrank,
f'SPQA{pm}:GTrank=', mrankgt)
else:
print('* Original Sample', 'loss=', loss.item(),
f'QA{pm}:rank=', mrank)
# <transfer>
if transfer is not None:
embpairs_trans = transfer['candidates'][0][msample, :]
_, rank_qa_trans = LossFactory('QA')(output_trans, embpairs_trans,
transfer['candidates'][0], pm=pm,
metric=('C' if 'C' in transfer['transfer'] else 'E'),
dist=dist_trans, cidx=msample)
if 'SP' in atype:
embgts_trans = transfer['candidates'][0][mgtruth, :]
_, rank_sp_trans = LossFactory('QA')(output_trans, embgts_trans,
transfer['candidates'][0], pm=pm,
metric=('C' if 'C' in transfer['transfer'] else 'E'),
dist=dist_trans, cidx=mgtruth)
if 'SP' not in atype:
print('* <transfer> Original Sample', f'QA{pm}:rank=', rank_qa_trans / candi[0].shape[0])
else:
print('* <transfer> Original Sample', f'SPQA{pm}:rank=', rank_qa_trans / candi[0].shape[0],
f'SPQA{pm}:GTrank=', rank_sp_trans / candi[0].shape[0])
else:
raise Exception("Unknown attack")
# >>>>>>>>>>>>>>>>>>>>>>>>>> ORIG >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# <<<<<<<<<<<<<<<<<<<<<<<<<< MARK: STATTACK <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# -- [attack] start attack, maxiter is the number of PGD iterations (FGSM: maxiter=1)
# -- [alpha/epsilon] PGD parameter tricks balancing yield v.s. time consumption
# Note, 1/255 \approx 0.004
if True:
alpha = 1./255.
maxiter = 24
elif eps > 1./255.:
alpha = float(min(max(eps/10., 1./255.), 0.01)) # PGD hyper-parameter
maxiter = int(min(max(10, 2*eps/alpha), 30)) # PGD hyper-parameter
elif eps < 1./255.:
maxiter = 1
alpha = eps / max(1, maxiter//2) # doesn't attack
# this ensures the best attacking result, but is very time-consuming
if int(os.getenv('PGD_UTMOST', 0)) > 0:
print('| Overriding PGD Parameter for the utmost attacking result|')
alpha = 1./255.
maxiter = int(os.getenv('PGD_UTMOST'))
# PGD with/without random init?
if int(os.getenv('RINIT', 0))>0:
if not normimg:
images = images + eps*2*(0.5-th.rand(images.shape)).to(images.device)
images = th.clamp(images, min=0., max=1.)
images = images.detach()
images.requires_grad = True
else:
images = images + (eps/IMstd[:,None,None])*2*(0.5-th.rand(images.shape)).to(images.device)
images = th.max(images, renorm(th.zeros(images.shape).to(device)))
images = th.min(images, renorm(th.ones(images.shape).to(device)))
images = images.detach()
images.requires_grad = | |
= exhibit.sort_index()
# subtract the premium to get the actual capital
try:
for m in ['EL', 'VaR', 'TVaR', 'ScaledVaR', 'ScaledTVaR', 'EqRiskVaR', 'EqRiskTVaR', 'MerPer', 'coTVaR',
'EPD', 'ScaledEPD', 'EqRiskEPD', 'covar']:
# Q as computed above gives assets not equity...so adjust
# usual calculus: P = (L + ra)/(1+r); Q = a-P, remember Q above is a (sorry)
exhibit.loc[(m, 'L'), :] = exhibit.loc[('T', 'L'), :]
exhibit.loc[(m, 'P'), :] = (exhibit.loc[('T', 'L'), :] + ROE * exhibit.loc[(m, 'Q'), :]) / (1 + ROE)
exhibit.loc[(m, 'Q'), :] -= exhibit.loc[(m, 'P'), :].values
exhibit.loc[(m, 'M'), :] = exhibit.loc[(m, 'P'), :] - exhibit.loc[('T', 'L'), :].values
exhibit.loc[(m, 'LR'), :] = exhibit.loc[('T', 'L'), :] / exhibit.loc[(m, 'P'), :]
exhibit.loc[(m, 'ROE'), :] = exhibit.loc[(m, 'M'), :] / exhibit.loc[(m, 'Q'), :]
exhibit.loc[(m, 'PQ'), :] = exhibit.loc[(m, 'P'), :] / exhibit.loc[(m, 'Q'), :]
exhibit.loc[(m, 'a'), :] = exhibit.loc[(m, 'P'), :] + exhibit.loc[(m, 'Q'), :]
except Exception as e:
logger.error(f'Exception {e} creating LR, P, Q, ROE, or PQ')
# print(ans.distortion.name)
# display(exhibit)
ans.audit_df.loc['TVaR@'] = p_t
ans.audit_df.loc['erVaR'] = pv
ans.audit_df.loc['erTVaR'] = pt
ans.audit_df.loc['EPD@'] = pe
ans.audit_df.loc['erEPD'] = p_e
ans.update(exhibit=exhibit)
return ans
    def analyze_distortion_plots(self, ans, dist, a_cal, p, a_max, ROE, LR):
        """
        Create plots from an analyze_distortion ans class
        note: this only looks at distortion related items...it doesn't use anything from the comps

        Produces five figures and attaches them to ``ans``:
        fig_distortion, fig_six_up_down, fig_trinity, fig_eight, fig_close.

        :param ans: result object from analyze_distortion; must expose ``augmented_df``
                    (mutated in place: V.* "top down" columns are added) and ``update()``
        :param dist: the distortion object being analyzed (plotted and named in titles)
        :param a_cal: calibration asset level; anchors the reference lines and the V.* series
        :param p: probability level used in calibration (titles only)
        :param a_max: right-hand asset-axis limit for the plots
        :param ROE: average return on equity (title and horizontal reference line)
        :param LR: calibrated loss ratio (titles only)
        :return: ans, updated with the five figures
        """
        augmented_df = ans.augmented_df
        # top down stats: e.g. T.P[a_cal] - T.P and zero above a_cal
        # these are only going to apply to total...will not bother with by line
        # call this series V.P with v indicating a down arrow and a letter after and close to T
        # hack out space
        for nc in ['L', 'P', 'M', 'Q', 'LR', 'ROE', 'PQ']:
            augmented_df[f'V.{nc}_total'] = 0
        augmented_df.loc[0:a_cal, 'V.L_total'] = \
            augmented_df.at[a_cal, 'T.L_total'] - augmented_df.loc[0:a_cal, 'T.L_total']
        augmented_df.loc[0:a_cal, 'V.P_total'] = \
            augmented_df.at[a_cal, 'T.P_total'] - augmented_df.loc[0:a_cal, 'T.P_total']
        augmented_df.loc[0:a_cal, 'V.M_total'] = \
            augmented_df.at[a_cal, 'T.M_total'] - augmented_df.loc[0:a_cal, 'T.M_total']
        augmented_df.loc[0:a_cal, 'V.Q_total'] = \
            augmented_df.at[a_cal, 'T.Q_total'] - augmented_df.loc[0:a_cal, 'T.Q_total']
        # ratio series are derived from the V.* differences just computed
        augmented_df.loc[0:a_cal, 'V.LR_total'] = \
            augmented_df.loc[0:a_cal, 'V.L_total'] / augmented_df.loc[0:a_cal, 'V.P_total']
        augmented_df.loc[0:a_cal, 'V.PQ_total'] = \
            augmented_df.loc[0:a_cal, 'V.P_total'] / augmented_df.loc[0:a_cal, 'V.Q_total']
        augmented_df.loc[0:a_cal, 'V.ROE_total'] = \
            augmented_df.loc[0:a_cal, 'V.M_total'] / augmented_df.loc[0:a_cal, 'V.Q_total']
        # bottom up calc is already done: it is the T. series in augmented_df
        # marginal calc also done: M. series
        # f_6_part = f_trinity = f_8_part = f_distortion = f_close = None
        # plot the distortion
        f_distortion, ax = plt.subplots(1, 1)
        dist.plot(ax=ax)
        # six part up and down plot
        def tidy(a, y=True):
            """
            function to tidy up the graphics
            (axis locators/formatters, gridlines and tick styling for one axes)
            """
            n = 6
            a.set(xlabel='Assets')
            # major tick marks exactly the calibration asset level
            a.xaxis.set_major_locator(FixedLocator([a_cal]))
            ff = f'A={a_cal:,.0f}'
            a.xaxis.set_major_formatter(FixedFormatter([ff]))
            a.xaxis.set_minor_locator(MaxNLocator(n))
            a.xaxis.set_minor_formatter(StrMethodFormatter('{x:,.0f}'))
            if y:
                a.yaxis.set_major_locator(MaxNLocator(n))
                a.yaxis.set_minor_locator(AutoMinorLocator(4))
            # gridlines with various options
            # https://matplotlib.org/3.1.0/gallery/color/named_colors.html
            a.grid(which='major', axis='x', c='cornflowerblue', alpha=1, linewidth=1)
            a.grid(which='major', axis='y', c='lightgrey', alpha=0.5, linewidth=1)
            a.grid(which='minor', axis='x', c='lightgrey', alpha=0.5, linewidth=1)
            a.grid(which='minor', axis='y', c='gainsboro', alpha=0.25, linewidth=0.5)
            # tick marks
            a.tick_params('x', which='major', labelsize=7, length=10, width=0.75, color='cornflowerblue',
                          direction='out')
            a.tick_params('y', which='major', labelsize=7, length=5, width=0.75, color='black',
                          direction='out')
            a.tick_params('both', which='minor', labelsize=7, length=2, width=0.5, color='black',
                          direction='out')
        # plots
        f_6_part, axs = plt.subplots(3, 2, figsize=(8, 10), sharex=True, constrained_layout=True)
        axi = iter(axs.flatten())
        # ONE
        ax = next(axi)
        # df[['Layer Loss', 'Layer Prem', 'Layer Capital']]
        augmented_df.filter(regex='^F|^gS|^S').rename(columns=self.renamer). \
            plot(xlim=[0, a_max], ylim=[-0.025, 1.025], logy=False, title='F, S, gS: marginal premium and loss',
                 ax=ax)
        tidy(ax)
        ax.legend(frameon=True, loc='center right')
        # TWO
        ax = next(axi)
        # df[['Layer Capital', 'Layer Margin']].plot(xlim=xlim, ax=ax)
        augmented_df.filter(regex='^M\.[QM]_total').rename(columns=self.renamer). \
            plot(xlim=[0, a_max], ylim=[-0.05, 1.05], logy=False, title='Marginal Capital and Margin', ax=ax)
        tidy(ax)
        ax.legend(frameon=True, loc='center right')
        # THREE
        ax = next(axi)
        # df[['Premium↓', 'Loss↓', 'Capital↓', 'Assets↓', 'Risk Margin↓']].plot(xlim=xlim, ax=ax)
        augmented_df.filter(regex='^V\.(L|P|Q|M)_total').rename(columns=self.renamer). \
            plot(xlim=[0, a_max], ylim=[0, a_max], logy=False, title=f'Decreasing LPMQ from {a_cal:.0f}',
                 ax=ax)
        (a_cal - augmented_df.loc[:a_cal, 'loss']).plot(ax=ax, label='Assets')
        tidy(ax)
        ax.legend(frameon=True, loc='upper right')
        # FOUR
        ax = next(axi)
        augmented_df.filter(regex='^(T|V|M)\.LR_total').rename(columns=self.renamer). \
            plot(xlim=[0, a_cal * 1.1], ylim=[-0.05, 1.05], ax=ax, title='Increasing, Decreasing and Marginal LRs')
        tidy(ax)
        ax.legend(frameon=True, loc='lower left')
        # FIVE
        ax = next(axi)
        augmented_df.filter(regex='^T\.(L|P|Q|M)_total|loss').rename(columns=self.renamer). \
            plot(xlim=[0, a_max], ylim=[0, a_max], logy=False, title=f'Increasing LPMQ to {a_cal:.0f}',
                 ax=ax)
        tidy(ax)
        ax.legend(frameon=True, loc='upper left')
        # SIX
        # could include leverage?
        ax = next(axi)
        augmented_df.filter(regex='^(M|T|V)\.ROE_(total)?$').rename(columns=self.renamer). \
            plot(xlim=[0, a_max],  # ylim=[-0.05, 1.05],
                 logy=False, title=f'Increasing, Decreasing and Marginal ROE to {a_cal:.0f}',
                 ax=ax)
        # df[['ROE↓', '*ROE↓', 'ROE↑', 'Marginal ROE', ]].plot(xlim=xlim, logy=False, ax=ax, ylim=ylim)
        # df[['ROE↓', 'ROE↑', 'Marginal ROE', 'P:S↓', 'P:S↑']].plot(xlim=xlim, logy=False, ax=a, ylim=[0,_])
        ax.plot([0, a_max], [ROE, ROE], ":", linewidth=2, alpha=0.75, label='Avg ROE')
        # print('plot 6 completed\n' * 6)
        try:
            tidy(ax)
            ax.legend(loc='upper right')
            title = f'{self.name} with {str(dist)} Distortion\nCalibrated to LR={LR:.3f} and p={p:.3f}, ' \
                    f'Assets={a_cal:,.1f}, ROE={ROE:.3f}'
            f_6_part.suptitle(title, fontsize='x-large')
        except Exception as e:
            logger.error(f'Formatting error in last plot...\n{e}\n...continuing')
        # trinity plots
        def tidy2(a, k, xloc=0.25):
            # styling for the 1x5 trinity panels; k is the asset level drawn as a horizontal line
            n = 4
            a.xaxis.set_major_locator(MultipleLocator(xloc))
            a.xaxis.set_minor_locator(AutoMinorLocator(4))
            a.xaxis.set_major_formatter(StrMethodFormatter('{x:.2f}'))
            a.yaxis.set_major_locator(MaxNLocator(2 * n))
            a.yaxis.set_minor_locator(AutoMinorLocator(4))
            a.grid(which='major', axis='x', c='lightgrey', alpha=0.5, linewidth=1)
            a.grid(which='major', axis='y', c='lightgrey', alpha=0.5, linewidth=1)
            a.grid(which='minor', axis='x', c='gainsboro', alpha=0.25, linewidth=0.5)
            a.grid(which='minor', axis='y', c='gainsboro', alpha=0.25, linewidth=0.5)
            # tick marks
            a.tick_params('both', which='major', labelsize=7, length=4, width=0.75, color='black', direction='out')
            a.tick_params('both', which='minor', labelsize=7, length=2, width=0.5, color='black', direction='out')
            # line to show where capital lies
            a.plot([0, 1], [k, k], linewidth=1, c='black', label='Assets')
        # track which of the five panels completed, for the error log below
        plots_done = []
        try:
            f_trinity, axs = plt.subplots(1, 5, figsize=(8, 3), constrained_layout=True, sharey=True)
            axi = iter(axs.flatten())
            xr = [-0.05, 1.05]
            audit = augmented_df.loc[:a_max, :]
            # ONE
            ax = next(axi)
            ax.plot(audit.gS, audit.loss, label='M.P_total')
            ax.plot(audit.S, audit.loss, label='M.L_total')
            ax.set(xlim=xr, title='Marginal Prem & Loss')
            ax.set(xlabel='Loss = S = Pr(X>a)\nPrem = g(S)', ylabel="Assets, a")
            tidy2(ax, a_cal)
            ax.legend(loc="upper right", frameon=True, edgecolor=None)
            plots_done.append(1)
            # TWO
            ax = next(axi)
            m = audit.F - audit.gF
            ax.plot(m, audit.loss, linewidth=2, label='M')
            ax.set(xlim=-0.01, title='Marginal Margin', xlabel='M = g(S) - S')
            tidy2(ax, a_cal, m.max() * 1.05 / 4)
            plots_done.append(2)
            # THREE
            ax = next(axi)
            ax.plot(1 - audit.gS, audit.loss, label='Q')
            ax.set(xlim=xr, title='Marginal Equity')
            ax.set(xlabel='Q = 1 - g(S)')
            tidy2(ax, a_cal)
            plots_done.append(3)
            # FOUR
            ax = next(axi)
            temp = audit.loc[self.q(1e-5):, :]
            r = (temp.gS - temp.S) / (1 - temp.gS)
            ax.plot(r, temp.loss, linewidth=2, label='ROE')
            ax.set(xlim=-0.05, title='Layer ROE')
            ax.set(xlabel='ROE = M / Q')
            tidy2(ax, a_cal, r.max() * 1.05 / 4)
            plots_done.append(4)
            # FIVE
            ax = next(axi)
            ax.plot(audit.S / audit.gS, audit.loss)
            ax.set(xlim=xr, title='Layer LR')
            ax.set(xlabel='LR = S / g(S)')
            tidy2(ax, a_cal)
            plots_done.append(5)
        except Exception as e:
            logger.error(f'Plotting error in trinity plots\n{e}\nPlots done {plots_done}\n...continuing')
        #
        #
        #
        # from original example_factory_sublines
        try:
            temp = augmented_df.filter(regex='exi_xgtag?_(?!sum)|^S|^gS|^(M|T)\.').copy()
            # NOTE(review): renamer is referenced again below, outside this try —
            # if this block fails before the next line, that later use would raise
            renamer = self.renamer
            augmented_df.index.name = 'Assets a'
            temp.index.name = 'Assets a'
            f_8_part, axs = plt.subplots(4, 2, figsize=(8, 10), constrained_layout=True, squeeze=False)
            ax = iter(axs.flatten())
            # ONE
            a = (1 - augmented_df.filter(regex='p_').cumsum()).rename(columns=renamer).sort_index(1). \
                plot(ylim=[0, 1], xlim=[0, a_max], title='Survival functions', ax=next(ax))
            a.grid('b')
            # TWO
            a = augmented_df.filter(regex='exi_xgtag?').rename(columns=renamer).sort_index(1). \
                plot(ylim=[0, 1], xlim=[0, a_max], title=r'$\alpha=E[X_i/X | X>a],\beta=E_Q$ by Line', ax=next(ax))
            a.grid('b')
            # THREE total margins
            a = augmented_df.filter(regex=r'^T\.M').rename(columns=renamer).sort_index(1). \
                plot(xlim=[0, a_max], title='Total Margins by Line', ax=next(ax))
            a.grid('b')
            # FOUR marginal margins was dividing by bs end of first line
            # for some reason the last entry in M.M_total can be problematic.
            a = (augmented_df.filter(regex=r'^M\.M').rename(columns=renamer).sort_index(1).iloc[:-1, :].
                 plot(xlim=[0, a_max], title='Marginal Margins by Line', ax=next(ax)))
            a.grid('b')
            # FIVE
            a = augmented_df.filter(regex=r'^M\.Q|gF').rename(columns=renamer).sort_index(1). \
                plot(xlim=[0, a_max], title='Capital = 1-gS = gF', ax=next(ax))
            a.grid('b')
            for _ in a.lines:
                if _.get_label() == 'gF':
                    _.set(linewidth=5, alpha=0.3)
            # recreate legend because changed lines
            a.legend()
            # SIX see apply distortion, line 1890 ROE is in augmented_df
            a = augmented_df.filter(regex='^ROE$|exi_xeqa').rename(columns=renamer).sort_index(1). \
                plot(xlim=[0, a_max], title='M.ROE Total and $E[X_i/X | X=a]$ by line', ax=next(ax))
            a.grid('b')
            # SEVEN improve scale selection
            a = augmented_df.filter(regex='M\.LR').rename(columns=renamer).sort_index(1). \
                plot(ylim=[-.05, 1.5], xlim=[0, a_max], title='Marginal LR',
                     ax=next(ax))
            a.grid('b')
            # EIGHT
            a = augmented_df.filter(regex='T.LR_').rename(columns=renamer).sort_index(1). \
                plot(ylim=[-.05, 1.25], xlim=[0, a_max], title='Increasing Total LR by Line',
                     ax=next(ax))
            a.grid('b')
            a.legend(loc='center right')
        except Exception as e:
            logger.error('Error', e)
        #
        # close up of plot 2
        #
        bit = augmented_df.query(f'loss < {a_max}').filter(regex='exi_xgtag?_.*(?<!sum)$')
        f_close, ax = plt.subplots(1, 1, figsize=(8, 5))
        ax = bit.rename(columns=renamer).plot(ylim=[-0.025, 1.025], ax=ax)
        ax.grid()
        # restyle alpha/beta line pairs so each beta matches its alpha's color
        nl = len(self.line_names)
        for i, l in enumerate(ax.lines[nl:]):
            ax.lines[i].set(linewidth=1, linestyle='--')
            l.set(color=ax.lines[i].get_color(), linewidth=2)
        ax.legend(loc='upper left')
        # slightly evil
        ans.update(fig_distortion=f_distortion, fig_six_up_down=f_6_part,
                   fig_trinity=f_trinity, fig_eight=f_8_part, fig_close=f_close)
        return ans
def top_down(self, distortions, A_or_p):
"""
DataFrame summary and nice plots showing marginal and average ROE, lr etc. as you write a layer from x to A
If A=0 A=q(log) is used
Not integrated into graphics format (plot)
:param distortions: list or dictionary of CDistortion objects, or a single CDist | |
#!/usr/bin/env python3
"""
Pings each possible IP address in a network to see what hosts are UP
"""
__author__ = "ScottishGuy95"
__license__ = "MIT"
import re
import subprocess
import platform
import ipaddress
import threading
from queue import Queue
import socket
import requests
import time
def get_host_ip():
    """
    Gets the computers IP address
    :return: (str) IP address of the computer
    """
    # Use a context manager so the socket is always closed, even if
    # connect() raises (the previous version leaked the socket on error).
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        # Connecting a UDP socket sends no packets; it only selects the
        # outgoing interface, whose address getsockname() then reports.
        sock.connect(("8.8.8.8", 80))
        return sock.getsockname()[0]
def get_netmask(ip):
    """
    Takes an IP address and returns the subnet mask for that IP
    :param ip: (str) The IP address of a host device
    :return: (str) The subnet mask of the given IP, or None if it cannot
             be determined
    """
    mask = ''
    # Check what OS is running, as the netmask is retrieved differently depending on OS
    if platform.system() == "Windows":
        # Run the windows command ipconfig and store results
        proc = subprocess.Popen('ipconfig', stdout=subprocess.PIPE)
        # Find the IP address in the output
        while True:
            line = proc.stdout.readline()
            if not line:
                # EOF: the IP never appeared. Previously this looped
                # forever on readline() returning b''; fall through to the
                # error message instead.
                break
            if ip.encode() in line:
                # The subnet mask is on the line directly after the IP;
                # strip the label and whitespace
                mask = proc.stdout.readline().rstrip().split(b':')[-1].replace(b' ', b'').decode()
                break
    else:
        # Run the linux command ifconfig and store resulting data
        proc = subprocess.Popen('ifconfig', stdout=subprocess.PIPE)
        # Find the line containing the IP address (guard against EOF as above)
        line = b''
        while True:
            line = proc.stdout.readline()
            if not line or ip.encode() in line:
                break
        # Remove the spacing and escape characters
        theMask = line.rstrip().split(b':')[-1].replace(b' ', b'').decode().split('broadcast')[0]
        # Loop through the string from the end to start, pulling out each number and dot
        # Until it reaches the first character, leaving only the netmask value
        for x in theMask[::-1]:
            if x.isdigit() or x == '.':
                mask += x
            else:
                break
        # Reverse the netmask so its in the correct direction
        mask = mask[::-1]
    if len(mask) > 1:
        return mask
    print('Error! Given details are invalid and the mask was not able to be defined.\nPlease ensure a valid IP is '
          'given.')
def get_cidr(mask):
    """
    Takes a given subnet mask and converts it to CIDR notation
    :param mask: (str) The subnet mask to convert
    :return: (int) The CIDR value
    """
    # CIDR is simply the count of set bits across all four octets
    prefix_length = 0
    for octet in mask.split('.'):
        prefix_length += bin(int(octet)).count('1')
    return prefix_length
def get_network_range(ip, cidr):
    """
    Creates the local network IP range and the CIDR into a single String and returns it
    :param ip: (str) The IP address of the host device
    :param cidr: (int) The CIDR prefix length for the network
    :return: (str) The local network range in IP/CIDR format
    """
    # The previous implementation zeroed whole octets, which only works for
    # /8, /16 and /24 masks (and its overlapping range checks mishandled the
    # boundaries, e.g. 24 matched two branches). ip_network(strict=False)
    # masks the host bits correctly for ANY prefix length.
    network = ipaddress.ip_network('{}/{}'.format(ip, cidr), strict=False)
    return str(network)
def check_mac(host):
    """
    Attempts to retrieve the MAC address of a given device using OS commands
    :param host: (str) The IP address of a given device on the network
    :return: (str) The MAC address of the device or 'Unknown' in case of any errors
    """
    arp = subprocess.Popen(['arp', '-a', host], stdout=subprocess.PIPE).communicate()[0].decode('utf-8')
    # Linux separates MACs using ':'. Windows separates MACs using '-'
    # The previous pattern ([0-9a-zA-Z](\:|-?)){12} accepted any alphanumeric
    # character (not just hex digits) and made the separators optional, so it
    # could match arbitrary 12-character tokens in the arp output. Require
    # six hex pairs joined by ':' or '-' instead.
    rule = re.compile(r'(?:[0-9a-fA-F]{2}[:-]){5}[0-9a-fA-F]{2}')
    result = rule.search(arp)
    # search() returns None if no MAC-shaped token was found
    if result is not None:
        return result.group()
    return 'Unknown'
def check_vendor(mac, connection):
    """
    Makes use of a free API to check a MAC addresses vendor
    Requires an internet connection (connection must be True)
    :param mac: (str) A MAC address of a given device
    :param connection: (Boolean) True if the device has an internet connection
    :return: (str) The vendor of a given host devices network card, or 'Unknown'
    """
    if not connection:
        return 'Unknown'
    try:
        time.sleep(1)  # Free API requires a delay to avoid overloading the service
        # A timeout prevents the whole scan hanging if the API stops
        # responding (no timeout was set previously)
        vendor_req = requests.get('https://api.macvendors.com/' + mac, timeout=10)
        vendor = vendor_req.content.decode()  # Decode into a readable format
        if 'Not Found' in vendor:
            return 'Unknown'
        return vendor
    except requests.exceptions.RequestException:
        # Covers ConnectionError, Timeout, HTTP protocol errors, ...
        return 'Unknown'
    except Exception:
        # Any other failure (e.g. decoding). Narrowed from a bare `except:`
        # which also swallowed KeyboardInterrupt/SystemExit.
        return 'Unknown'
def check_hostname(ip):
    """
    Attempts to get a valid hostname to represent a given IP address
    :param ip: (str) The IP address of a given device on a network
    :return: (str) The name of a given device, or Unknown for any other situations
    """
    try:
        hostname = socket.gethostbyaddr(ip)
        return hostname[0]
    except (socket.herror, socket.gaierror):
        # herror: no reverse-DNS entry for the address;
        # gaierror: malformed/unresolvable address — previously uncaught,
        # which crashed the scan on bad input.
        return 'Unknown'
def check_internet_connection():
    """
    Attempts to connect to online services most likely to be available and returns boolean if it connects
    Used as a way of detecting if the host device has an internet connection or not
    :return: (boolean) True or False depending on if theres a internet connection
    """
    test_urls = ['https://1.1.1.1/', 'https://www.google.com']
    for url in test_urls:
        try:
            # A timeout stops the probe hanging indefinitely when packets
            # are silently dropped (no timeout was set previously)
            requests.get(url, timeout=5)
            return True
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still interrupts;
            # try the next fallback URL on any request failure
            continue
    return False
class PingSweep:
thread_lock = threading.Lock()
    def __init__(self, manual=False):
        """
        Run a full network scan: discover the network range (or use the one
        supplied), ping every candidate address on worker threads, then
        enrich each live host with MAC, vendor and hostname details.
        :param manual: False to auto-detect the network from the host's IP
                       and netmask, or an explicit 'IP/CIDR' string to scan
        """
        self.start_time = time.time()
        # Work queue holding one job (index into self.network) per address
        self.pings = Queue()
        self.host_ip = get_host_ip()  # The IP of the host running the scan
        self.subnet_mask = get_netmask(self.host_ip)
        self.cidr = get_cidr(self.subnet_mask)
        if manual is False:
            self.range_to_scan = get_network_range(self.host_ip, self.cidr)
            print('Detected network: ' + str(self.range_to_scan))
        else:
            self.range_to_scan = manual
        # A list of all possible IP addresses to ping on the network
        self.network = list(ipaddress.ip_network(self.range_to_scan).hosts())
        # Filled by ping_sweep() with the addresses that answered
        self.hosts_list = []
        self.internet_con = check_internet_connection()
        if not self.internet_con:
            print('[!] No internet connection detected. Vendor information will be unavailable')
        # Blocks until every queued ping has completed
        self.start_threads()
        self.full_hosts = {i: [] for i in self.hosts_list}  # Each host IP is a key, list of values for MAC/Vendor
        # Used to add the MAC/Vendors/Hostnames before the data can be accessed
        self.add_host_details()
        self.total_hosts = len(self.get_all_hosts())
def threader(self):
"""
Used for each ping, for each thread created, running until all threads are complete
"""
while True:
worker = self.pings.get()
self.ping_sweep(worker)
self.pings.task_done()
def start_threads(self):
"""
Creates and manages the threads that help improve the ping speed
"""
# Start 100 threads that will run each ping
for x in range(100):
# Call the threader, then class them as daemon
# to ensure program does not end until threads are done
t = threading.Thread(target=self.threader)
t.daemon = True
t.start()
# Add each host ot our Queue
for host in range(len(self.network)):
self.pings.put(host)
# Wait until all the workers are done
self.pings.join()
def ping_sweep(self, host):
"""
Pings the given IP address and stores any hosts that are up
:param host: (str) The IP address to ping
"""
# Check what OS is running to run a different ping command
if platform.system().lower() == "windows":
# Windows: -n sets how many requests to send, -w handles timeout length in seconds
# Linux: -c sets how many requests to send, -w handles timeout length in seconds
cmd = ["ping", "-n", "2", "-w", "2", str(self.network[host])]
else:
cmd = ["ping", "-c", "2", "-w", "2", str(self.network[host])]
# Runs the ping command and outputs the data into a readable string
ping = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0].decode('utf-8')
# Locks this step until the entire ping sweep is complete
with self.thread_lock:
# Checks for strings that are present in successful ping scans only
# Linux: 64 bytes, Windows: bytes=32
if "bytes=32" in ping or "64 bytes" in ping:
self.hosts_list.append(str(self.network[host])) # Adds the valid IP address to the list
def get_all_hosts(self):
"""
Returns the list of IP addresses
:return: (lst) The results of the ping sweep scan in a list
"""
self.hosts_list = sorted(self.hosts_list, key=lambda ip: int(ip.split('.')[-1]))
return self.hosts_list
def add_host_details(self):
"""
Updates the host dictionary with the MAC addresses, hostnames and Vendors where possible
Doing this now means all the data is accessible for the other aspects of the program
"""
for host in self.full_hosts.keys():
self.full_hosts.setdefault(host, []).append(check_mac(host))
self.full_hosts.setdefault(host, []).append(check_vendor(self.full_hosts.get(host)[0], self.internet_con))
self.full_hosts.setdefault(host, []).append(check_hostname(host))
def get_mac(self, host):
"""
Gets the MAC address for the given host
:param host: (str) The IP address of a device on the network
:return: (str) The MAC address of a device or 'Unknown'
"""
return self.full_hosts.get(host)[0]
def get_vendor(self, host):
"""
Gets the | |
newitem.update(itemdata, type=self.type.type, hideDeprecated=hideDeprecated, hideAliases=hideAliases, showHidden=showHidden, fieldFilters=fieldFilters):
self.children.append(newitem)
elif isinstance(self.type, das.schematypes.Tuple):
self.typestr = "tuple"
self.resizable = False
self.orderable = True
if self.exists():
for i in xrange(len(self.data)):
itemname = "(%d)" % i
itemdata = self.data[i]
newitem = ModelItem(itemname, row=i, parent=self)
if newitem.update(itemdata, type=self.type.types[i], hideDeprecated=hideDeprecated, hideAliases=hideAliases, showHidden=showHidden, fieldFilters=fieldFilters):
self.children.append(newitem)
elif isinstance(self.type, das.schematypes.Set):
self.typestr = "set"
self.resizable = True
self.orderable = False
if self.exists():
i = 0
for itemdata in self.data:
itemname = "{%d}" % i
newitem = ModelItem(itemname, row=i, parent=self)
if newitem.update(itemdata, type=self.type.type, hideDeprecated=hideDeprecated, hideAliases=hideAliases, showHidden=showHidden, fieldFilters=fieldFilters):
self.children.append(newitem)
i += 1
elif isinstance(self.type, (das.schematypes.Struct, das.schematypes.StaticDict)):
self.typestr = "struct"
self.resizable = False
self.orderable = False
self.mapping = True
self.mappingkeys = {}
self.uniformmapping = False
i = 0
# for k in sorted(self.type.keys()):
for k in self.type.ordered_keys():
t = self.type[k]
optional = isinstance(t, das.schematypes.Optional)
self.mappingkeys[k] = optional
if optional:
self.resizable = True
if self.exists() and (showHidden or not t.hidden):
if das.schematypes.Alias.Check(t):
if hideAliases:
continue
v = None
elif not k in self.data:
if hideDeprecated and isinstance(t, das.schematypes.Deprecated):
continue
v = t.make_default()
else:
v = self.data[k]
newitem = ModelItem(k, row=i, key=k, parent=self)
if newitem.update(v, type=t, hideDeprecated=hideDeprecated, hideAliases=hideAliases, showHidden=showHidden, fieldFilters=fieldFilters):
self.children.append(newitem)
i += 1
elif isinstance(self.type, (das.schematypes.Dict, das.schematypes.DynamicDict)):
self.typestr = "dict"
self.resizable = True
self.orderable = False
self.mapping = True
self.mappingkeytype = self.type.ktype
self.uniformmapping = (len(self.type.vtypeOverrides) == 0)
if self.exists():
i = 0
dkeys = [x for x in self.data.iterkeys()]
for k in sorted(dkeys):
if isinstance(k, basestring):
itemname = k
elif hasattr(k, "value_to_string"):
itemname = k.value_to_string()
else:
itemname = str(k)
v = self.data[k]
vtype = self.type.vtypeOverrides.get(k, self.type.vtype)
newitem = ModelItem(itemname, row=i, key=k, parent=self)
if newitem.update(v, type=vtype, hideDeprecated=hideDeprecated, hideAliases=hideAliases, showHidden=showHidden, fieldFilters=fieldFilters):
self.children.append(newitem)
i += 1
# Never filter out root item, even when all its children are gone
if self.parent is None:
return True
else:
if len(self.children) > 0:
return True
else:
if fieldFilters and not fieldFilters.matches(self.fullname(skipRoot=True)):
return False
else:
return True
else:
if fieldFilters and not fieldFilters.matches(self.fullname(skipRoot=True)):
return False
else:
return True
class NewValueDialog(QtWidgets.QDialog):
    """Modal dialog used to author a brand new value of a given schema type.
    After exec_(), self.data holds the edited value, or None if cancelled
    (or if the value collided with one of `excludes`)."""
    def __init__(self, vtype, excludes=None, name=None, parent=None):
        super(NewValueDialog, self).__init__(parent, QtCore.Qt.WindowTitleHint|QtCore.Qt.WindowSystemMenuHint)
        self.setWindowTitle("Create new value")
        self.excludes = excludes
        self.data = vtype.make_default()
        self.editor = Editor(self.data, type=vtype, name=name, headers=None, parent=self)
        # Ok starts disabled when the default value is already excluded
        if excludes is None:
            ok_enabled = True
        else:
            ok_enabled = (self.data not in self.excludes)
        self.okbtn = QtWidgets.QPushButton("Ok", self)
        self.okbtn.setEnabled(ok_enabled)
        cancel_button = QtWidgets.QPushButton("Cancel", self)
        button_row = QtWidgets.QHBoxLayout()
        button_row.addWidget(self.okbtn, 1)
        button_row.addWidget(cancel_button, 1)
        main_layout = QtWidgets.QVBoxLayout()
        main_layout.addWidget(self.editor, 1)
        main_layout.addLayout(button_row, 0)
        self.setLayout(main_layout)
        # Wire callbacks
        self.editor.modelUpdated.connect(self.onDataChanged)
        self.okbtn.clicked.connect(self.accept)
        cancel_button.clicked.connect(self.reject)
        self.resize(400, 200)
    def onDataChanged(self, model):
        # Refresh the pending value; an excluded value is treated as invalid
        # and disables Ok so it can never be committed
        self.data = model.getData()
        if self.excludes is not None and self.data in self.excludes:
            self.data = None
        self.okbtn.setEnabled(self.data is not None)
    def accept(self):
        super(NewValueDialog, self).accept()
    def reject(self):
        # A cancelled dialog reports no data
        self.data = None
        super(NewValueDialog, self).reject()
class FieldSlider(QtWidgets.QFrame):
# (value, invalid, errmsg)
realValueChanged = QtCore.Signal(float, bool, str)
intValueChanged = QtCore.Signal(int, bool, str)
def __init__(self, vmin, vmax, real=False, decimal=1, parent=None):
super(FieldSlider, self).__init__(parent)
self._value = None
self.real = real
self.scale = 1
self.min = vmin
self.max = vmax
sldmin = int(vmin)
sldmax = int(vmax)
if self.real:
self.scale = math.pow(10, decimal)
sldmin = int(math.ceil(self.min * self.scale))
sldmax = int(math.floor(self.max * self.scale))
if self.min < (sldmin / self.scale) or (sldmax / self.scale) < self.max:
print("[das] Not enough precision in slider (%d decimal(s)) for value range [%f, %f]" % (decimal, self.min, self.max))
self.fld = QtWidgets.QLineEdit(self)
self.fld.setObjectName("field")
self.sld = QtWidgets.QSlider(QtCore.Qt.Horizontal, self)
self.sld.setObjectName("slider")
self.sld.setTracking(True)
self.sld.setMinimum(sldmin)
self.sld.setMaximum(sldmax)
lay = QtWidgets.QHBoxLayout()
lay.setContentsMargins(0, 0, 0, 0)
lay.setSpacing(2)
lay.addWidget(self.fld, 0)
lay.addWidget(self.sld, 1)
self.setLayout(lay)
self.sld.valueChanged.connect(self.sliderChanged)
self.fld.textChanged.connect(self.textChanged)
self.valueChanged = (self.realValueChanged if self.real else self.intValueChanged)
def focusInEvent(self, event):
if event.gotFocus():
self.fld.setFocus(event.reason())
self.fld.selectAll()
event.accept()
def _setValue(self, val, updateField=True, updateSlider=True):
self._value = (float(val) if self.real else int(val))
if self._value < self.min:
self._value = self.min
updateField = True
updateSlider = True
elif self._value > self.max:
self._value = self.max
updateField = True
updateSlider = True
if updateField:
self.fld.blockSignals(True)
self.fld.setText(str(self._value))
self.fld.blockSignals(False)
if updateSlider:
self.sld.blockSignals(True)
if self.real:
self.sld.setValue(int(math.floor(0.5 + self._value * self.scale)))
else:
self.sld.setValue(self._value)
self.sld.blockSignals(False)
def setValue(self, val):
self._setValue(val, updateField=True, updateSlider=True)
def value(self):
return self._value
def text(self):
return str(self._value)
def textChanged(self, txt):
invalid = False
errmsg = ""
try:
if self.real:
val = float(txt)
else:
val = int(txt)
except Exception, e:
invalid = True
errmsg = str(e)
# if text is not empty, reset field to real value
if txt:
self.fld.blockSignals(True)
self.fld.setText(str(self.value()))
self.fld.blockSignals(False)
else:
self._setValue(val, updateField=False)
self.valueChanged.emit(self.value(), invalid, errmsg)
def sliderChanged(self, val):
    """Slider moved: convert the tick position back to a value.

    Only the text field is refreshed (updateSlider=False) since the
    slider already holds the new position.
    """
    # as we round down slider min value and round up slider max value
    # we may need to adjust here
    # NOTE(review): under Python 2, 'val / self.scale' floor-divides when
    # both operands are ints -- presumably self.scale is a float (or 1)
    # in the relevant modes; confirm where self.scale is initialized.
    self._setValue(val / self.scale, updateSlider=False)
    self.valueChanged.emit(self.value(), False, "")
class ModelItemDelegate(QtWidgets.QItemDelegate):
    """Item delegate that builds schema-type-specific editor widgets.

    Each create*Editor factory stashes 'setEditorData'/'setModelData'
    callbacks as dynamic properties on the widget it returns.
    """
    def __init__(self, parent=None):
        super(ModelItemDelegate, self).__init__(parent)
def createEditor(self, parent, viewOptions, modelIndex):
    """Instantiate the editor widget appropriate for the given cell.

    Column 0 is the key (editable only for typed mapping keys),
    column 1 the value (dispatched on the schema type), column 2 the
    type selector.  Returns None when the cell is not editable.
    """
    item = modelIndex.internalPointer()
    column = modelIndex.column()
    if column == 0:
        owner = item.parent
        if owner and owner.mapping and owner.mappingkeytype is not None:
            return self.createMappingKeyEditor(parent, item)
        return None
    if column == 2:
        return self.createTypeEditor(parent, item)
    if column != 1 or not item.editable:
        return None
    if item.multi:
        return self.createOrEditor(parent, item)
    # Single-typed value: dispatch on the schema type (order preserved
    # from the original elif chain).
    factories = (
        (das.schematypes.Boolean, self.createBoolEditor),
        (das.schematypes.Integer, self.createIntEditor),
        (das.schematypes.Real, self.createFltEditor),
        (das.schematypes.String, self.createStrEditor),
        (das.schematypes.Class, self.createClassEditor),
    )
    for schema_type, factory in factories:
        if isinstance(item.type, schema_type):
            return factory(parent, item)
    # Ignore 'Empty' and 'Alias'
    return None
def createTypeEditor(self, parent, item):
    """Build a combo box listing the types this item may switch to.

    Each entry's userData carries the default value for that type.
    """
    rv = QtWidgets.QComboBox(parent=parent)
    # Present the alternatives alphabetically by type name.
    for type_name, default_value in sorted(item.get_valid_types(), key=lambda pair: pair[0]):
        rv.addItem(type_name, userData=default_value)
    rv.setProperty("setEditorData", self.setTypeEditorData)
    rv.setProperty("setModelData", self.setTypeModelData)
    return rv
def createMappingKeyEditor(self, parent, item):
    """Build a line edit for renaming a key of a typed mapping.

    The entered text is eval'd (falling back to the raw string), then
    validated by attempting to use it as a key in a scratch mapping of
    the parent's type.  The widget's 'invalidState'/'message' dynamic
    properties reflect the outcome.
    """
    rv = QtWidgets.QLineEdit(parent)
    def textChanged(txt):
        # Convert text to a python value
        try:
            val = eval(txt, {}, {})
        except:
            val = txt
        # Create a new key
        # NOTE(review): the das.copy() result is immediately overwritten
        # by 'newkey = val' -- a plain assignment that cannot raise, so
        # this copy and the surrounding try/except look like dead code;
        # confirm the original intent (perhaps a das cast was meant).
        newkey = das.copy(item.key)
        try:
            newkey = val
        except Exception as e:  # py3-compatible (was 'except Exception, e')
            rv.setProperty("invalidState", True)
            rv.setProperty("message", "Invalid key (%s)" % e)
        else:
            # Set the new key
            tmpdict = item.parent.type.make_default()
            tmpval = item.parent.type.vtype.make_default()
            try:
                tmpdict[newkey] = tmpval
            except Exception as e:  # py3-compatible (was 'except Exception, e')
                rv.setProperty("invalidState", True)
                rv.setProperty("message", "Invalid key (%s)" % e)
            else:
                rv.setProperty("invalidState", False)
    rv.textChanged.connect(textChanged)
    rv.setProperty("setEditorData", self.setMappingKeyEditorData)
    rv.setProperty("setModelData", self.setMappingKeyModelData)
    return rv
def createOrEditor(self, parent, item):
    """Build a free-text editor for a multi-type ('Or') value.

    The text is acceptable as soon as it parses as at least one of the
    alternative types.
    """
    rv = QtWidgets.QLineEdit(parent)
    def textChanged(txt):
        matching = item.get_valid_types(string=txt)
        is_invalid = (len(matching) == 0)
        rv.setProperty("invalidState", is_invalid)
        if is_invalid:
            rv.setProperty("message", "'%s' doesn't match any supported types" % txt)
    rv.textChanged.connect(textChanged)
    rv.setProperty("setEditorData", self.setOrEditorData)
    rv.setProperty("setModelData", self.setOrModelData)
    return rv
def createBoolEditor(self, parent, item):
    """Build a check box editor for Boolean values."""
    rv = QtWidgets.QCheckBox(parent)
    rv.setProperty("setEditorData", self.setBoolEditorData)
    rv.setProperty("setModelData", self.setBoolModelData)
    return rv
def createIntEditor(self, parent, item):
    """Build an editor for Integer values.

    Depending on the schema: a combo box for enums, a FieldSlider for
    min/max bounded ranges, otherwise a validating line edit that
    resets itself on unparseable (non-empty) input.
    """
    if item.type.enum is not None:
        rv = QtWidgets.QComboBox(parent)
        # Sort labels by their numeric enum value.
        for k in sorted(item.type.enum.keys(), key=lambda x: item.type.enum[x]):
            v = item.type.enum[k]
            rv.addItem(k, userData=v)
    elif item.type.min is not None and item.type.max is not None:
        rv = FieldSlider(item.type.min, item.type.max, real=False, parent=parent)
        def valueChanged(val, invalid, errmsg):
            rv.setProperty("invalidState", invalid)
            if invalid:
                rv.setProperty("message", errmsg)
        rv.intValueChanged.connect(valueChanged)
    else:
        rv = QtWidgets.QLineEdit(parent)
        def textChanged(txt):
            try:
                int(txt)
            except Exception as e:  # py3-compatible (was 'except Exception, e')
                rv.setProperty("invalidState", True)
                rv.setProperty("message", str(e))
                # if text is not empty, reset to original value
                if txt:
                    rv.setText(str(item.data))
            else:
                rv.setProperty("invalidState", False)
        rv.textChanged.connect(textChanged)
    rv.setProperty("setEditorData", self.setIntEditorData)
    rv.setProperty("setModelData", self.setIntModelData)
    return rv
def createFltEditor(self, parent, item):
    """Build an editor for Real values.

    A FieldSlider (4 decimals of slider precision) when the schema
    declares min/max bounds, otherwise a validating line edit that
    resets itself on unparseable (non-empty) input.
    """
    if item.type.min is not None and item.type.max is not None:
        rv = FieldSlider(item.type.min, item.type.max, real=True, decimal=4, parent=parent)
        def valueChanged(val, invalid, errmsg):
            rv.setProperty("invalidState", invalid)
            if invalid:
                rv.setProperty("message", errmsg)
        rv.realValueChanged.connect(valueChanged)
    else:
        rv = QtWidgets.QLineEdit(parent)
        def textChanged(txt):
            try:
                float(txt)
            except Exception as e:  # py3-compatible (was 'except Exception, e')
                rv.setProperty("invalidState", True)
                rv.setProperty("message", str(e))
                # if text is not empty, reset to original value
                if txt:
                    rv.setText(str(item.data))
            else:
                rv.setProperty("invalidState", False)
        rv.textChanged.connect(textChanged)
    rv.setProperty("setEditorData", self.setFltEditorData)
    rv.setProperty("setModelData", self.setFltModelData)
    return rv
def createStrEditor(self, parent, item):
    # String editor: a combo box when the schema declares a choice list,
    # otherwise a line edit validated against the schema's optional
    # regex pattern ('matches').
    if item.type.choices is not None:
        rv = QtWidgets.QComboBox(parent)
        rv.addItems(item.type.choices)
        # A non-strict choice list still allows free-form text.
        rv.setEditable(not item.type.strict)
    else:
        rv = QtWidgets.QLineEdit(parent)
        def textChanged(txt):
            if item.type.matches is not None:
                invalid = (not item.type.matches.match(txt))
                rv.setProperty("invalidState", invalid)
                if invalid:
                    rv.setProperty("message", "'%s' doesn't match '%s'" % (txt, item.type.matches.pattern))
            else:
                # No pattern constraint: any text is acceptable.
                rv.setProperty("invalidState", False)
        rv.textChanged.connect(textChanged)
    rv.setProperty("setEditorData", self.setStrEditorData)
    rv.setProperty("setModelData", self.setStrModelData)
    return rv
def createClassEditor(self, parent, item):
    """Build a line edit for 'Class' values.

    Input is validated by round-tripping the text through a throwaway
    copy of the current value, leaving the real value untouched.
    """
    rv = QtWidgets.QLineEdit(parent)
    def textChanged(txt):
        try:
            # Parse into a copy so a bad string cannot corrupt the value.
            item.data.copy().string_to_value(txt)
        except Exception as e:  # py3-compatible (was 'except Exception, e')
            rv.setProperty("invalidState", True)
            rv.setProperty("message", str(e))
        else:
            rv.setProperty("invalidState", False)
    rv.textChanged.connect(textChanged)
    rv.setProperty("setEditorData", self.setClassEditorData)
    rv.setProperty("setModelData", self.setClassModelData)
    return rv
def setEditorData(self, widget, modelIndex):
    """Qt hook: forward to the editor-specific callback that the
    create*Editor factory stored as a dynamic property on the widget."""
    current_item = modelIndex.internalPointer()
    handler = widget.property("setEditorData")
    if handler:
        handler(widget, current_item)
def setTypeEditorData(self, widget, item):
    """Select the item's current type name in the type combo box."""
    widget.setCurrentIndex(widget.findText(item.typestr))
def setMappingKeyEditorData(self, widget, item):
    """Populate the key editor, preferring the key's own string form."""
    key = item.key
    # das values expose value_to_string(); plain python keys fall back
    # to str().
    if hasattr(key, "value_to_string"):
        text = key.value_to_string()
    else:
        text = str(key)
    widget.setText(text)
def setOrEditorData(self, widget, item):
    """Show the current value as text.

    None and booleans are lowercased ('none', 'true', 'false') to match
    the syntax the multi-type editor accepts back.
    """
    data = item.data
    text = str(data)
    if data is None or isinstance(data, bool):
        text = text.lower()
    widget.setText(text)
def setBoolEditorData(self, widget, item):
    """Check/uncheck the box to mirror the item's truthiness."""
    widget.setCheckState(QtCore.Qt.Checked if item.data else QtCore.Qt.Unchecked)
def setIntEditorData(self, widget, item):
    """Populate the integer editor created by createIntEditor.

    The widget kind mirrors the schema: combo box for enums, slider for
    bounded ranges, plain line edit otherwise.
    """
    schema = item.type
    if schema.enum is not None:
        widget.setCurrentIndex(widget.findData(item.data))
    elif schema.min is not None and schema.max is not None:
        widget.setValue(item.data)
    else:
        widget.setText(str(item.data))
def setFltEditorData(self, widget, | |
'tan'
self.float31 = FloatText(value='15.12', step='1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [self.bool1, name_btn, self.float31, units_btn, ]
box31 = Box(children=row, layout=box_layout)
# -------------------------
div_row5 = Button(description='phenotype:motility', disabled=True, layout=divider_button_layout)
div_row5.style.button_color = 'orange'
name_btn = Button(description='speed', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float32 = FloatText(value='4', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='micron/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float32, units_btn]
box32 = Box(children=row, layout=box_layout)
name_btn = Button(description='persistence_time', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float33 = FloatText(value='5', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float33, units_btn]
box33 = Box(children=row, layout=box_layout)
name_btn = Button(description='migration_bias', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float34 = FloatText(value='0.7', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float34, units_btn]
box34 = Box(children=row, layout=box_layout)
self.bool2 = Checkbox(description='enabled', value=False,layout=name_button_layout)
self.bool3 = Checkbox(description='use_2D', value=True,layout=name_button_layout)
chemotaxis_btn = Button(description='chemotaxis', disabled=True, layout={'width':'30%'})
chemotaxis_btn.style.button_color = '#ffde6b'
self.bool4 = Checkbox(description='enabled', value=False,layout=name_button_layout)
name_btn = Button(description='substrate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.chemotaxis_substrate1 = Text(value='chemokine', disabled=False, style=style, layout=widget_layout_long)
row = [name_btn, self.chemotaxis_substrate1]
box35 = Box(children=row, layout=box_layout)
name_btn = Button(description='direction', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.chemotaxis_direction1 = Text(value='1', disabled=False, style=style, layout=widget_layout_long)
row = [name_btn, self.chemotaxis_direction1]
box36 = Box(children=row, layout=box_layout)
# -------------------------
div_row6 = Button(description='phenotype:secretion', disabled=True, layout=divider_button_layout)
div_row6.style.button_color = 'orange'
name_btn = Button(description='substrate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.text0 = Text(value='interferon 1', disabled=False, style=style, layout=widget_layout_long)
row = [name_btn, self.text0]
box37 = Box(children=row, layout=box_layout)
name_btn = Button(description='secretion_target', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float35 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless substrate concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float35, units_btn]
box38 = Box(children=row, layout=box_layout)
name_btn = Button(description='uptake_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float36 = FloatText(value='0.0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float36, units_btn]
box39 = Box(children=row, layout=box_layout)
name_btn = Button(description='substrate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.text1 = Text(value='pro-inflammatory cytokine', disabled=False, style=style, layout=widget_layout_long)
row = [name_btn, self.text1]
box40 = Box(children=row, layout=box_layout)
name_btn = Button(description='secretion_target', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float37 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless substrate concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float37, units_btn]
box41 = Box(children=row, layout=box_layout)
name_btn = Button(description='uptake_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float38 = FloatText(value='0.0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float38, units_btn]
box42 = Box(children=row, layout=box_layout)
name_btn = Button(description='substrate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.text2 = Text(value='chemokine', disabled=False, style=style, layout=widget_layout_long)
row = [name_btn, self.text2]
box43 = Box(children=row, layout=box_layout)
name_btn = Button(description='secretion_target', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float39 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless substrate concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float39, units_btn]
box44 = Box(children=row, layout=box_layout)
name_btn = Button(description='uptake_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float40 = FloatText(value='0.0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float40, units_btn]
box45 = Box(children=row, layout=box_layout)
name_btn = Button(description='substrate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.text3 = Text(value='debris', disabled=False, style=style, layout=widget_layout_long)
row = [name_btn, self.text3]
box46 = Box(children=row, layout=box_layout)
name_btn = Button(description='secretion_target', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float41 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless substrate concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float41, units_btn]
box47 = Box(children=row, layout=box_layout)
name_btn = Button(description='uptake_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float42 = FloatText(value='0.0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float42, units_btn]
box48 = Box(children=row, layout=box_layout)
name_btn = Button(description='substrate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.text4 = Text(value='anti-inflammatory cytokine', disabled=False, style=style, layout=widget_layout_long)
row = [name_btn, self.text4]
box49 = Box(children=row, layout=box_layout)
name_btn = Button(description='secretion_target', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float43 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless substrate concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float43, units_btn]
box50 = Box(children=row, layout=box_layout)
name_btn = Button(description='uptake_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float44 = FloatText(value='0.0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float44, units_btn]
box51 = Box(children=row, layout=box_layout)
name_btn = Button(description='substrate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.text5 = Text(value='collagen', disabled=False, style=style, layout=widget_layout_long)
row = [name_btn, self.text5]
box52 = Box(children=row, layout=box_layout)
name_btn = Button(description='secretion_target', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float45 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless substrate concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float45, units_btn]
box53 = Box(children=row, layout=box_layout)
name_btn = Button(description='uptake_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float46 = FloatText(value='0.0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float46, units_btn]
box54 = Box(children=row, layout=box_layout)
# -------------------------
div_row7 = Button(description='phenotype:molecular', disabled=True, layout=divider_button_layout)
div_row7.style.button_color = 'orange'
# ================== <custom_data>, if present ==================
div_row8 = Button(description='Custom Data',disabled=True, layout=divider_button_layout)
div_row8.style.button_color = 'cyan'
name_btn = Button(description='virion', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float47 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='virions', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='endocytosed virions', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float47, units_btn, description_btn]
box55 = Box(children=row, layout=box_layout)
name_btn = Button(description='uncoated_virion', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float48 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='virions', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='uncoated endocytosed virions', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float48, units_btn, description_btn]
box56 = Box(children=row, layout=box_layout)
name_btn = Button(description='viral_RNA', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float49 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='RNA', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='total (functional) viral RNA copies', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float49, units_btn, description_btn]
box57 = Box(children=row, layout=box_layout)
name_btn = Button(description='viral_protein', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float50 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='protein', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='total assembled sets of viral protein', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float50, units_btn, description_btn]
box58 = Box(children=row, layout=box_layout)
name_btn = Button(description='export_virion', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float51 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='virions', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='ready to export virion', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float51, units_btn, description_btn]
box59 = Box(children=row, layout=box_layout)
name_btn = Button(description='assembled_virion', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float52 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='virions', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='total assembled virions', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float52, units_btn, description_btn]
box60 = Box(children=row, layout=box_layout)
name_btn = Button(description='virion_uncoating_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float53 = FloatText(value='0.01', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='rate at which an internalized virion is uncoated', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float53, units_btn, description_btn]
box61 = Box(children=row, layout=box_layout)
name_btn = Button(description='uncoated_to_RNA_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float54 = FloatText(value='0.01', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='rate at which uncoated virion makes its mRNA available', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float54, units_btn, description_btn]
box62 = Box(children=row, layout=box_layout)
name_btn = Button(description='protein_synthesis_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float55 = FloatText(value='0.01', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='rate at mRNA creates complete set of proteins', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float55, units_btn, description_btn]
box63 = Box(children=row, layout=box_layout)
name_btn = Button(description='virion_assembly_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float56 = FloatText(value='0.01', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn = Button(description='rate at which viral proteins are assembled into complete virion', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float56, units_btn, description_btn]
box64 = Box(children=row, layout=box_layout)
name_btn = Button(description='virion_export_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float57 = FloatText(value='0.01', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
description_btn = Button(description='rate at which a virion is exported from a live cell', disabled=True, layout=desc_button_layout)
description_btn.style.button_color = 'tan'
row = [name_btn, self.float57, units_btn, description_btn]
box65 = Box(children=row, layout=box_layout)
name_btn = Button(description='unbound_external_ACE2', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float58 = FloatText(value='1000', step='100', style=style, layout=widget_layout)
units_btn = Button(description='receptors', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
description_btn | |
XXXX XX
XXXXX XXXXX XXXX XXXXXXXXXXXXX XXXX XXX XXXXXXX XXX XXXXXX XXXXXXX XXXX
XXXXXXXXXXXXX XXXXX XX XXXXXX XXXXXXXXX XXXXX XXXXXXXXXXXXXXX XXXXXXXXX XX
XXXXX XX XXX XXXXXXXXX XXXX XXXXXXXX XXXX XXXX XX XXXXXXXXX XX XXXX
XXXXXXXXXXXXX XXXX XXXX XXX XXXX XXXXXXXXXXXX
X XXXX XXXXXXXX XXXXXX XXXXX XX XXX XXXX XXXXXX XXX XXXX XXXXXXXX XX XXX XXX
XXXXX XXXXXXXXXX XXXX XX XXXXXXXXXXXXXXXXXXXX
X XXXXXXX XXXXXXX XXXXXXXXXX XX XXXXXXX XXXXXXXXXXXX XXXXXX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXX
XXX XXXX XX XXXXXXXXXXXX XXXXXX XXX XXX XXXXXXXXXXXX XX XXXXX XXX XXX XXX
XXXXXXX XXXXXX XXXXXXXX
XXX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX XXX XXXX XXXXXXXX
XXXXXX XXXXXXXXXX XXXXXX XXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXX XXXXXXXX XXXX XXXXXXX XX XXX XXXX XXXXX XXXXXXX XXXX XXXXXX XX
XXXXX XXXXXXXXXXX
XX XXX XXXXX XXXXX XXXXXX XXXX XXXXXXX XXX XXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXX
XXXX XXXXXXXXXX XXX XXXX XXXXXXXX XXXXXX
XX XXXXXXXXXXX XXX XXX XXXXXXXXX XXX XXXXXXX XXX XXX XX XXX XXXXXXXXXX XXXXX
XXX XXXXXXXXXXX XXXXXXX XXXXX
XX XXXXXXX XXXXXXXX
XXXXX XXXXXXX XXX XXXXXX XX XXXX XXXXXXXXXXXXX XXXXXXXXX XXX XXXXXXXX XXXX
XXXXXXX XXXX XXX XXXXXXXX XXXXXXXXX XXXXX XXXXXXXX XXXX XXX XXXXXX
XXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XXXXXXXX
XXXX XXXXXXX XXXX XXX XXXXXXX XXXXXXXXX XXXXXXXX XX XXXXXXX XX
XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXX XXXXXXXXX XX X XXXXXXX XXX XXXXXXXXXXXX XXXXXXXXXX XXXXXX
XXXXXXXXX XXXXXXX XXXXXXXXXXXXXXXXXXXXXXX XX XXXX XXXXX XXX XXXXXXXX
XXXXXXX XXXXXX XXXXXXX XXXXXXX XXXXXXX XX XXXXXX XXX XXX XX XXXX
XXXXXXXXXXXXX XXXXX XXX XXXX XXXXX XXXX XXXXXXX XXXXXXXXX XXXXXXXX XXX
XXXXXXX XXXXXX XXX XXX XXX XXXXXXX XXXXXXX XXXXXXXX
XXXXXXXX XXXXX XXXX XXXX XXXX XXXXXXX XX XXX XXX XXXXXXXX XXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXX
X XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XX XXXXXXXXXXXX XXX XXXX X
XX XX XXXX XXXXXX XX X XXXXXX XXXXX XXX XXXXXXXX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXX XXXXXXX
XXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX
X XX XXXX XXXXXXXXXXXXXX XXXX XXXX XX XXXXXX XXXX XXXXX XXXXXXX XX XXXXX XXX
XXXXXX XXXXXX XX XX XXXXXXXXXXXXXXX XXXXXXX XXXXX XX XXX XXXXXX XX
XXXXXXXXXXXXXXXXX XXXXXXXXXXXXXX XX XXXXXXXXXXXXXXXXX XXXXXXXXX XX XXXXX
XXXXXXXXXXXXX XXXXX XXXXX
X XXXX XXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXX XXXXXXX X
XXXXXXXXXXX XXXXXX XXX XXXXXX XXX X XXXXX XXXXXXXXXXX XXXXXXXXX XXXXXXXXXXX XX
XXX XXXXXX XXXXXXX XX XXXXXXXX XXXX XX XXX XXXXXXXX XXX XXX XXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXX XXX XXXXXXX XXXXX XXX XXXXX XX XXX XXXXXXXXXXXXX XXX
XXXXXXX XX XXX XXXXXXXXXXXX XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
X XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXX XXXX XXXXXXXXXX XXXXXXX XXXX
XXXXXXXXXX XXXXX XXXXXXXX XXXXXXX XXXXXXXX XX XXXXXXXXXX XXXX XX XXX XXXXXXX
X XXX XX X XXXXXX XX XXX XXXX XXXXXXXXXX XXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXXX XXXXXX XXXXXX
XXXXXX X XXXXX XXXXXXX XX XXX XXXXXXXXXXX XXXXX XXXXXXXXXX XXXX XXXXXXXXXX
XXXXXX XX XXXX XXXXX XX XXXXXX XXXX
X XXXXX XXXX XXXX XXXXXXX XX XXX XXX XXXXXXXX XXX XXXXXXX XX XXXXX XXXXXXXXX
XXXXXXXXXX XXXXXXX XX XXXXXXXXXXXXXXXX XX XXXXXX XXXXXXX XX XXXXX XXX
XXXXXXX XXXXXXXX XX XXXX XXX XXX X XXXXXXXXXXXX XXXXXXXX XXXXXXX X XXXX XXX
XXXXXXXX XXXXXXX XX XXXXXX XXXX XXX XXXXXXX XXXXXXXX XXX XXX XXXX
XXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXXXX
X XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXX XX XXX XXXXXX XXXX XXXXXXX XXX
XXXXXXXXX XXXXXXXXX XXXX XXXXXXXXXXXXX XXX XXX XXXXXXXXXX XXX XXXX XXXXXXX
XXXXXXXXXXX XX XX XXX XXXX XXXXXXXX XXXXX XXXX XXX XXXXXXX XXXXXXXXXXXXXXX
XXXXXXXXXX XXXXXXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXX
X XXX XXXXXXXXXXXXXXXXXXX XXX XXXX XXXXX XXXX XXXXXXXXXXXXXXXXXXXXXXXXX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXX XX XX XX XXXXXXXXXXXXXX
XXXXXX XX XXXXXXXXXX XXXXXX XXX XX XX XXXXXXXX XX XXXXXXXX
X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXX XXX XXXX XXXXX
XXXXXXXXX XXXXXX XX XXXXXXXX XXXXXX XXX XXXX XXXXXXXX XXXXXXXX X
XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXX XX XXXXXXX XXX XXXXXXXX XX XXX
XXXX XXXXXXX X XXXXX XXXXXXXXXX XXXXX XXXXXXX XXXXXXXXX XXXXXX XXX XXXXXX
XXXXXX XX XXXXXXXXX XXXXXXXXXXX XXXXXXX X XXXXXXXXX XXXXX XX X XXXXX XXXXX
XXX XXX XXXXX XXXXXXXX XXXXX XXXXX X XXXXXXXX XXXXXXXXX XX XXXXXXXX
X XX X XXXXXXXXXXXXXXXXXX XXXXXXXXX XX XXXXXX XXXX X XXXXXX XXXX XXXXXXXXX X
XXXXXXXXX XX XX XXX XXXXXXXXX XXX XXXXXXXX XXXX XXXX
XXXXX XXXXXXXX XXXXXXXXX XX XXXX XX XXXXXXXXXXXXXXX XXXXXX
XXXXXXXXXXXXXXXXXXX XXXXX XX XX XXXXXX XX XXX XXXXXXXBBBXXXXXX XXXX XX
XXXXXX XXXXXXXX XXXXXXXXX XX XXXX XXXX XXXXXX XXXX XXXXXXXXXXXXXXXXXX XX
XXXXXXX
X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXX XXXX XXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXX
X XXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXX XXX XXXXXXXX XX XXXXX
XXX XXXXX XXXXXXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XXX XXX XXXXX XXX
XXXXX XXXXXXX X XXXXXXXXX XXX XXXXXX XXXXXX XXXX XXXXXXXXXXXXXXX XX XXX
XXXXXXXXXXXXXXXXXXXXXXXXXX XXXX XXX XXXXXXXX XXXXXXXXXX XXX XXXXXXXXX
XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXX
XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXX X XXXXXXXXXXXXXXXX
XXXXXXXXX XXXXX XXXXXX XX XX XXXXXX XXX XXXXXX XX XXXXXXXXX XXX XXXX
X XX XXXXXX XXX XXX XXXX X XXXXX XXXXXX XXX XXXXXXXXXXXXXXX XXX XXXXXXXXXX XX
XX X XXXXX XXXXXXXXX XXXX XXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXX X XXXXX
XXXXXXXX XX XX XXXXXXXX XXXXXXXX XXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXX
XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX
XXXXXXXX XXXXXXXXX XXX XXXXX XXXXXXXXXX XXXX XXX XXXX XXXXXXXXX XX XXXX
XXXXXXXX XXXXX XXXXXXXXX XXX XXX XXXXXX
X XXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX
XXXXXXXX X XXXXXXX XXX XXXXXXXXX XX XXXXXXX XX XXX XX XX XXXXXXXXX XX X XXXXXX
XXXX XXXXXXXXX XXX XXXX XXXXXXX XX XXXXXXXXXX XX XX XXXXXXXXXX XXXX XXX XXXX
XX XXX XXXXX XXXXXX XXX XXXXXX XXXXXX XXXX XXXXXX XXXXXXXXX XX XXXX XXX XXX
XXXXXXXX XXXXXXXXX XXXXX
X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXXXXX XXXXX XXXXXXXXX
XXXX XXXXXXXXXXXXX XX XXX XXXXXXX
X XXX XXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXX XXXXXX XXXXXXXX XXXXXXXX XXX
XXXXXXXXX XXXX XXXX XXXXXXXXX XXXXXX XXXXXXXX XXX XXX XXXXX
X XXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX
XXXX XXXXXXXX XXX XXXXXXXXXXXXXXXX XXXXXXXX XX XXX XXXXXXXX XXXXXXX XXXXXXXX
XXXX XXXXXX XXX XXXX XXXXXXXX XX XXXXXX XXXXXX
X XXX XXXXXXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX XXXXXXX XXX XXX XXXXXXX XXXXXXXXXX XXXXXXXXX XX XXXX XXXXXXXXX XXXXXXXXXXXX
XXX XXXXXXX XXXXXXXX XXXXX XXXX XXX XX XXXXXX XX XXXXXXXXXXXXXXXXXXXXXX
XXXXX XXX XXXXXX XX X XXXXXXXXXXXXX XX XXX XXXXX XXXXXX XXX XXXXXXXXX XX XXXX
XXXXXX XXXXXXX
XX XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXX XXXXXXXXXX XX XXX
XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXX XXXXXXXXXX XXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXX XXXXXXXXXX XXX XXXXXXXXXX XXXXXXXXXX XX XXXXXX XXXX XXX XXX
XXXXXXX XXXX XXX XXXXXXXXXXX
X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
X XXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXX XXXXXXX XXXXXXXXX XXX XXXX XXXXXXXXXX XXX XX XX XXXXXX XXXXXXXXXX XX
XXXX XX XXXXXXXXX XX XXXXXX XXX XXX XXXX XXX XXXXXXX XX XXXXXX XXXX XXXX XXXXX
XXXX XX XXXXXX XXXXXX XXXX X XXXXXX XXXXXXXXX XX X XXXXXX XXXXXXX XXXX XXXXXXXXX
XXX XXXX XXXXXXXX XXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXX XX XXXXXX XXXXXXXXX
XX XX XXXXXXXX XXXXXXXXXXXXXX
XX XXXXXXXXXXXXXXXXXXX
XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXX XXXXXXXXXX XXXXXXXX XXXXX XXXX XXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX XXX XX XXXXXXXX XXXXXXX XXXXXXX XXX XXXXXXX XX XXXXXXXX XXXX XXX
XXXXXXXXXX XXX XXX XX XXXXXXXX XXXX XXX XXXX XX X XXXXXXXXXXXX XXXXXX XXX XXXX
XXX XX XXX XXXXXXX XXXXXXXXXX XXXXXXX XX XXXXXXXXXX XXXXXXXXX
XXXXXX XXXXXXXX XXXXXXXXXX XX XXX XXX XXXX XXXXXX XXXXXXX XX XXXXXXXXXX
XXXXXXXXXX XXXXXXX XX XXXXXXXXXXX XXXXXXXXXXXX XXX XX XXXX XXXXXXXXX XX XXX
XXXXXX XXXXXXXXX XXXXXXXXXXX XXXXXXXXX XX XXXXXXXXXX XXXX
XXXXXXX XX XXXXXXXXXXXXX XXX XXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX XXXXXXXX XXXXXX XXXXXXXXX XXXXXXX XXX XXXXXXXXX XX XXXXX XXX XXXXXXXX
XXXXXXXX XXX XX XX XXXXXXXX XX XXXXXXXX XXX XXXXXXXXXXXXX XXX XXXXXXXXXXXXXXX
XXXX XXXXXX XXXXX XXXXXXXXX XXXXXX
XXXXXX XXX XXXXXX X XXXXXXX XX XXXXXXX XXXX XXXXXXXXXXXXXX XXX XXXXXXXXXX
XXXXXXXX XXXXXXX XXXXXXXX XXXXXXXXX XXXXXXXXXXXXXXX XX XXXXXXXXXXXXX XXX
XXXXXXXXXXXXXXX XXXX XXXXXXXXXX XXXXX XXXXXXX XX XXXXXX XXXXX XXXXX XXXXX
XXXXXX XXXXXXXXXX XX XXXXXXX XXX XXXXXXXXX XXXX XX XXX XXX XX XXXX XXXXXXXXX XX
XXXXXX XXX XXX XXXXXXXXXX
BBBB BBBBB BBBB BBBBBB
XXXX
BBBB BBBBBBB BBBB BBBBBB
XXX XXXX XXXXXXXXXXXX XXX XXX XXXXXXXX XXXX XXXX XXXXXXXXXXX XXX XX XXXXXX
XXXX XXX XXX XXXXXXXX XXXX XX XXXXXXXX XXXX XXX XXX XXXXXXXXX XX XXXXXX
XXXXXXXXXXXXX XXXX XXXXXX XXXXXXXX XX XXXXXXX XXXXXXXX XXXXXXXXX XXXXXX XX
XXXXXXXX XX XXX XXX XXXXXXXXXX XXXXXXXXX
XX XXXXXXXXXX XXX XXX XXXXXXXXXXX XXXXXXX XXXXXXXXXXXXX XXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XXXXXXXBBBBBBBBBB BBB
XXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX XXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXX XX XXXXXXX X XXX XX
XXXXX XXXXXXXX XXXX XX XXXX XXXXXXX XXXX XX X XXXXXXXXX XXXXX XXXX XXXXXXXXX
XXX XXXXXXX XXXXXXXXXXX XXXXXXX XXX XXXXXXXXXX XXXXXXXXX XXXXX XXXX XXXXXXX XXX
XXXXXXX XXXXXXXX XXXX XXXXXXX XXX XXXX XXXXXX XX XXXXX XXX XX X XXXXXXX XX
XXXXXXXXXX XXXX XXX
X XXXXXXXXX XXX XXXXXXXX XX
X XXXXX XXXX XXXXXXXXXXX XXXXX XX XXXXXX XX XX XXXXXXXX XX
X XXXXX X XXXXXXXXXXX XXXXXXX XXXXX XXXX XXXXXXXX XXXX XXXXXXX XXXXXXXXXXXXX
XXXX XXXXX XXX XXXXX XXXXXXXXXXX XXXX XX X XXXXXXXXXXX XXXXX XXXXXXXXXX XX XXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXX
XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXX XX XXXXXXX XXXXX
XXXXXXXXX XX XXXXXX XXXXX XX XXXXX XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XX XXX XX
XXXXXXXXX
XXXXXXX XX XXXXXXXXXXX XXXXXXXX XXXXXXXX XXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXX XXXXXXX XXX XXXXX
XXX XXXX X XXX XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXXX XXXXXXX XX XXXX XXXXXXXX XXX XXXXXX XXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XXXX
XXXXXXXXXXXXXXXXXXXXXX XXXXXXX XXX XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXX XXXX XXXXXXXXX
XXXXXXXXXXXXXXXX XXXXXX XX XXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXX XXXXXXX XXXX XXX XXXX XXXXXXX XXX XXXXXXX XXX XXXXXXXXXXXXXXXX XXXXXX
XX X XXXXXXX XXX XXXXXX XXX XXXXXX XXXX XXXXXX XX | |
runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY)
get_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
json=self.LIST_WITH_FILTER_REQUEST_JSON,
params=None)
assert result.output == self.DETAILS_STDOUT
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_send_get_request_and_print_details_of_deployment_when_using_config_file(
self, get_patched, deployments_details_config_path):
get_patched.return_value = MockResponse(self.LIST_JSON)
command = self.COMMAND_WITH_OPTIONS_FILE[:] + [deployments_details_config_path]
runner = CliRunner()
result = runner.invoke(cli.cli, command)
get_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
json=self.LIST_WITH_FILTER_REQUEST_JSON,
params=None)
assert result.output == self.DETAILS_STDOUT
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_print_proper_message_when_wrong_api_key_was_used(self, get_patched):
get_patched.return_value = MockResponse({"status": 400, "message": "Invalid API token"}, 400)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND)
get_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.LIST_WITH_FILTER_REQUEST_JSON,
params=None)
assert result.output == "Failed to fetch data: Invalid API token\n", result.exc_info
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_print_proper_message_when_wrong_deployment_id_was_used(self, get_patched):
get_patched.return_value = MockResponse(self.LIST_WITH_FILTER_RESPONSE_JSON_WHEN_NO_DEPLOYMENTS_FOUND)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND)
get_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.LIST_WITH_FILTER_REQUEST_JSON,
params=None)
assert result.output == "Deployment not found\n", result.exc_info
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_print_proper_message_when_error_status_was_returned_by_api_without_message(self, get_patched):
get_patched.return_value = MockResponse(status_code=400)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND)
get_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.LIST_WITH_FILTER_REQUEST_JSON,
params=None)
assert result.output == "Failed to fetch data\n", result.exc_info
@mock.patch("gradient.api_sdk.clients.http_client.requests.get")
def test_should_print_proper_message_when_error_message_was_returned_by_api(self, get_patched):
get_patched.return_value = MockResponse(self.RESPONSE_WITH_ERROR_MESSAGE, status_code=404)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND)
get_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.LIST_WITH_FILTER_REQUEST_JSON,
params=None)
assert result.output == "Failed to fetch data: Some error message\n", result.exc_info
class TestDeploymentsMetricsGetCommand(object):
    """Tests for `gradient deployments metrics get`.

    Each test patches `requests.get` so the first call (deployment lookup by
    ID) and, where reached, the second call (metrics "range" query) return
    canned responses; the tests then verify both the HTTP calls that were made
    and the exact text printed to stdout.
    """
    # Endpoint used to resolve the deployment by ID before querying metrics.
    GET_DEPLOYMENTS_LIST_URL = "https://api.paperspace.io/deployments/getDeploymentList/"
    # Metrics service "range" endpoint queried once the deployment is found.
    GET_METRICS_URL = "https://aws-testing.paperspace.io/metrics/api/v1/range"
    BASIC_OPTIONS_COMMAND = [
        "deployments", "metrics", "get",
        "--id", "dev61ity7lx232",
    ]
    ALL_OPTIONS_COMMAND = [
        "deployments", "metrics", "get",
        "--id", "dev61ity7lx232",
        "--metric", "gpuMemoryFree",
        "--metric", "gpuMemoryUsed",
        "--interval", "20s",
        "--start", "2020-04-01",
        "--end", "2020-04-02 21:37:00",
        "--apiKey", "some_key",
    ]
    FULL_OPTIONS_COMMAND_WITH_OPTIONS_FILE = [
        "deployments", "metrics", "get",
        "--optionsFile",  # path added in test,
    ]
    GET_DEPLOYMENTS_LIST_REQUEST_PARAMS = {"filter": {"where": {"and": [{"id": "dev61ity7lx232"}]}}}
    # Params expected when only --id is given (CLI defaults for the rest).
    BASIC_COMMAND_GET_METRICS_REQUEST_PARAMS = {
        "start": "2019-04-04T10:53:56Z",
        "handle": "dev61ity7lx232",
        "interval": "30s",
        "charts": "cpuPercentage,memoryUsage",
        "objecttype": "modelDeployment",
    }
    # Params expected when every supported option is passed explicitly.
    ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS = {
        "start": "2020-04-01T00:00:00Z",
        "handle": "dev61ity7lx232",
        "interval": "20s",
        "charts": "gpuMemoryFree,gpuMemoryUsed",
        "objecttype": "modelDeployment",
        "end": "2020-04-02T21:37:00Z",
    }
    GET_DEPLOYMENTS_RESPONSE_JSON = example_responses.LIST_DEPLOYMENTS
    GET_METRICS_RESPONSE_JSON = example_responses.DEPLOYMENTS_METRICS_GET_RESPONSE
    # Expected stdout; the happy-path tests compare it through json.loads, so
    # only the JSON structure (not whitespace) has to match.
    EXPECTED_STDOUT = """{
"cpuPercentage": {
"desgffa3mtgepvm-0": [
{
"time_stamp": 1587340800,
"value": "0.0388702066666724"
},
{
"time_stamp": 1587370800,
"value": "0.04452898888887249"
},
{
"time_stamp": 1587400800,
"value": "0.044658617777757724"
},
{
"time_stamp": 1587430800,
"value": "0.04922275555555997"
},
{
"time_stamp": 1587460800,
"value": "0.0589409911111084"
},
{
"time_stamp": 1587490800,
"value": "0.02873176888891117"
},
{
"time_stamp": 1587520800,
"value": "0.042048226666666876"
},
{
"time_stamp": 1587550800,
"value": "0.04952780222222625"
}
],
"desgffa3mtgepvm-1": [
{
"time_stamp": 1587340800,
"value": "0.05044751111111307"
},
{
"time_stamp": 1587370800,
"value": "0.04381767555555724"
},
{
"time_stamp": 1587400800,
"value": "0.03436263111110646"
},
{
"time_stamp": 1587430800,
"value": "0.048889264444432624"
},
{
"time_stamp": 1587460800,
"value": "0.041525960000020255"
},
{
"time_stamp": 1587490800,
"value": "0.04574227333332853"
},
{
"time_stamp": 1587520800,
"value": "0.03383691777780011"
},
{
"time_stamp": 1587550800,
"value": "0.045942304444426756"
}
]
},
"memoryUsage": {
"desgffa3mtgepvm-0": [
{
"time_stamp": 1587340800,
"value": "34910208"
},
{
"time_stamp": 1587370800,
"value": "34910208"
},
{
"time_stamp": 1587400800,
"value": "34914304"
},
{
"time_stamp": 1587430800,
"value": "34914304"
},
{
"time_stamp": 1587460800,
"value": "34914304"
},
{
"time_stamp": 1587490800,
"value": "34914304"
},
{
"time_stamp": 1587520800,
"value": "34914304"
},
{
"time_stamp": 1587550800,
"value": "34914304"
}
],
"desgffa3mtgepvm-1": [
{
"time_stamp": 1587340800,
"value": "35942400"
},
{
"time_stamp": 1587370800,
"value": "35942400"
},
{
"time_stamp": 1587400800,
"value": "35942400"
},
{
"time_stamp": 1587430800,
"value": "35942400"
},
{
"time_stamp": 1587460800,
"value": "35942400"
},
{
"time_stamp": 1587490800,
"value": "35942400"
},
{
"time_stamp": 1587520800,
"value": "35942400"
},
{
"time_stamp": 1587550800,
"value": "35942400"
}
]
}
}
"""
    EXPECTED_STDOUT_WHEN_INVALID_API_KEY_WAS_USED = "Failed to fetch data: Incorrect API Key provided\nForbidden\n"
    # NOTE(review): constant names say "EXPERIMENT" but this class tests
    # deployments; kept as-is since they are internal to this test class.
    EXPECTED_STDOUT_WHEN_EXPERIMENT_WAS_NOT_FOUND = "Deployment not found\n"
    EXPECTED_STDOUT_WHEN_EXPERIMENT_WAS_NOT_STARTED = "Model deployment has not started yet\n"
    EXPECTED_STDOUT_WHEN_NO_METRICS_WERE_FOUND = "{}\n"
    EXPECTED_STDOUT_WHEN_ERROR_CODE_WAS_RETURNED_WITHOUT_ERROR_MESSAGE = "Failed to fetch data\n"

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_read_all_available_metrics_when_metrics_get_command_was_used_with_basic_options(self, get_patched):
        """Basic invocation: default interval/charts/start are sent to the metrics API."""
        # First GET resolves the deployment, second GET returns metrics.
        get_patched.side_effect = [
            MockResponse(self.GET_DEPLOYMENTS_RESPONSE_JSON),
            MockResponse(self.GET_METRICS_RESPONSE_JSON),
        ]
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.BASIC_OPTIONS_COMMAND)
        assert json.loads(result.output.strip()) == json.loads(self.EXPECTED_STDOUT.strip()), \
            str(result.output) + str(result.exc_info)
        get_patched.assert_has_calls(
            [
                mock.call(
                    self.GET_DEPLOYMENTS_LIST_URL,
                    json=self.GET_DEPLOYMENTS_LIST_REQUEST_PARAMS,
                    params=None,
                    headers=EXPECTED_HEADERS,
                ),
                mock.call(
                    self.GET_METRICS_URL,
                    json=None,
                    params=self.BASIC_COMMAND_GET_METRICS_REQUEST_PARAMS,
                    headers=EXPECTED_HEADERS,
                ),
            ]
        )
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_read_metrics_when_metrics_get_command_was_used_with_all_options(self, get_patched):
        """All options: every flag (metrics, interval, start/end, apiKey) is forwarded."""
        get_patched.side_effect = [
            MockResponse(self.GET_DEPLOYMENTS_RESPONSE_JSON),
            MockResponse(self.GET_METRICS_RESPONSE_JSON),
        ]
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)
        # comparing objects instead of strings because Py2 and Py3 produce slightly different outputs
        assert json.loads(result.output.strip()) == json.loads(self.EXPECTED_STDOUT.strip()), result.exc_info
        get_patched.assert_has_calls(
            [
                mock.call(
                    self.GET_DEPLOYMENTS_LIST_URL,
                    json=self.GET_DEPLOYMENTS_LIST_REQUEST_PARAMS,
                    params=None,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
                mock.call(
                    self.GET_METRICS_URL,
                    json=None,
                    params=self.ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
            ]
        )
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_read_metrics_when_metrics_get_was_executed_and_options_file_was_used(
            self, get_patched, deployments_metrics_get_config_path):
        """Options file: config file values should behave like the all-options command line."""
        get_patched.side_effect = [
            MockResponse(self.GET_DEPLOYMENTS_RESPONSE_JSON),
            MockResponse(self.GET_METRICS_RESPONSE_JSON),
        ]
        command = self.FULL_OPTIONS_COMMAND_WITH_OPTIONS_FILE[:] + [deployments_metrics_get_config_path]
        runner = CliRunner()
        result = runner.invoke(cli.cli, command)
        # comparing objects instead of strings because Py2 and Py3 produce slightly different outputs
        assert json.loads(result.output.strip()) == json.loads(self.EXPECTED_STDOUT.strip()), result.exc_info
        get_patched.assert_has_calls(
            [
                mock.call(
                    self.GET_DEPLOYMENTS_LIST_URL,
                    json=self.GET_DEPLOYMENTS_LIST_REQUEST_PARAMS,
                    params=None,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
                mock.call(
                    self.GET_METRICS_URL,
                    json=None,
                    params=self.ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
            ]
        )
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_print_valid_error_message_when_invalid_api_key_was_used(self, get_patched):
        """403 on deployment lookup: error details are printed and the metrics API is never called."""
        get_patched.return_value = MockResponse({"details": "Incorrect API Key provided", "error": "Forbidden"},
                                                status_code=403)
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)
        assert result.output == self.EXPECTED_STDOUT_WHEN_INVALID_API_KEY_WAS_USED, result.exc_info
        get_patched.assert_called_once_with(
            self.GET_DEPLOYMENTS_LIST_URL,
            json=self.GET_DEPLOYMENTS_LIST_REQUEST_PARAMS,
            params=None,
            headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
        )
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_print_valid_error_message_when_deployment_was_not_found(self, get_patched):
        """Empty deployment list: "not found" message, no metrics request."""
        get_patched.side_effect = [
            MockResponse({"deploymentList": [], "total": 123, "displayTotal": 0, "runningTotal": 6}),
        ]
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)
        assert result.output == self.EXPECTED_STDOUT_WHEN_EXPERIMENT_WAS_NOT_FOUND, result.exc_info
        get_patched.assert_has_calls(
            [
                mock.call(
                    self.GET_DEPLOYMENTS_LIST_URL,
                    json=self.GET_DEPLOYMENTS_LIST_REQUEST_PARAMS,
                    params=None,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
            ]
        )
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_print_valid_message_when_was_no_metrics_were_returned(self, get_patched):
        """Metrics API returns no data: the command prints an empty JSON object."""
        get_patched.side_effect = [
            MockResponse(self.GET_DEPLOYMENTS_RESPONSE_JSON),
            MockResponse(example_responses.EXPERIMENTS_METRICS_GET_RESPONSE_WHEN_NO_DATA_WAS_FOUND),
        ]
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)
        assert result.output == self.EXPECTED_STDOUT_WHEN_NO_METRICS_WERE_FOUND, result.exc_info
        get_patched.assert_has_calls(
            [
                mock.call(
                    self.GET_DEPLOYMENTS_LIST_URL,
                    json=self.GET_DEPLOYMENTS_LIST_REQUEST_PARAMS,
                    params=None,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
                mock.call(
                    self.GET_METRICS_URL,
                    json=None,
                    params=self.ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
            ]
        )
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_print_valid_error_message_when_error_code_was_returned_without_error_message(self, get_patched):
        """Metrics API 500 without a body: generic failure message is printed."""
        get_patched.side_effect = [
            MockResponse(self.GET_DEPLOYMENTS_RESPONSE_JSON),
            MockResponse(status_code=500),
        ]
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)
        assert result.output == self.EXPECTED_STDOUT_WHEN_ERROR_CODE_WAS_RETURNED_WITHOUT_ERROR_MESSAGE, result.exc_info
        get_patched.assert_has_calls(
            [
                mock.call(
                    self.GET_DEPLOYMENTS_LIST_URL,
                    json=self.GET_DEPLOYMENTS_LIST_REQUEST_PARAMS,
                    params=None,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
                mock.call(
                    self.GET_METRICS_URL,
                    json=None,
                    params=self.ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
            ]
        )
        assert result.exit_code == 0, result.exc_info
class TestDeploymentssMetricsStreamCommand(object):
    """Tests for `gradient deployments metrics stream`.

    The command first resolves the deployment over HTTP (mocked
    `requests.get`), then opens a websocket (mocked `create_connection`)
    and renders each received metrics frame as a table through a mocked
    `TerminalPrinter`.

    NOTE(review): "Deploymentss" (double "s") looks like a typo in the class
    name; left unchanged so test discovery and reporting are unaffected.
    """
    LIST_DEPLOYMENTS_URL = "https://api.paperspace.io/deployments/getDeploymentList/"
    # Metrics service "stream" (websocket) endpoint.
    GET_METRICS_URL = "https://aws-testing.paperspace.io/metrics/api/v1/stream"
    BASIC_OPTIONS_COMMAND = [
        "deployments", "metrics", "stream",
        "--id", "dev61ity7lx232",
    ]
    ALL_OPTIONS_COMMAND = [
        "deployments", "metrics", "stream",
        "--id", "dev61ity7lx232",
        "--metric", "gpuMemoryFree",
        "--metric", "gpuMemoryUsed",
        "--interval", "20s",
        "--apiKey", "some_key",
    ]
    ALL_OPTIONS_COMMAND_WITH_OPTIONS_FILE = [
        "deployments", "metrics", "stream",
        "--optionsFile",  # path added in test,
    ]
    GET_DEPLOYMENTS_LIST_REQUEST_JSON = {"filter": {"where": {"and": [{"id": "dev61ity7lx232"}]}}}
    # JSON payloads the CLI is expected to send over the websocket to
    # subscribe to charts (defaults vs. explicit options).
    BASIC_COMMAND_CHART_DESCRIPTOR = '{"chart_names": ["cpuPercentage", "memoryUsage"], "handles": ["dev61ity7lx232"' \
                                     '], "object_type": "modelDeployment", "poll_interval": "30s"}'
    ALL_COMMANDS_CHART_DESCRIPTOR = '{"chart_names": ["gpuMemoryFree", "gpuMemoryUsed"], "handles": ["dev61ity7lx232' \
                                    '"], "object_type": "modelDeployment", "poll_interval": "20s"}'
    GET_LIST_OF_DEPLOYMENTS_RESPONSE_JSON = example_responses.LIST_DEPLOYMENTS
    # Successive screens expected as websocket frames arrive (compared as
    # exact strings passed to TerminalPrinter.rewrite_screen).
    EXPECTED_TABLE_1 = """+-------------------+---------------+-------------+
| Pod | cpuPercentage | memoryUsage |
+-------------------+---------------+-------------+
| desgffa3mtgepvm-0 | | 34914304 |
| desgffa3mtgepvm-1 | | 35942400 |
+-------------------+---------------+-------------+"""
    EXPECTED_TABLE_2 = """+-------------------+----------------------+-------------+
| Pod | cpuPercentage | memoryUsage |
+-------------------+----------------------+-------------+
| desgffa3mtgepvm-0 | 0.044894188888835944 | 34914304 |
| desgffa3mtgepvm-1 | 0.048185748888916656 | 35942400 |
+-------------------+----------------------+-------------+"""
    EXPECTED_TABLE_3 = """+-------------------+----------------------+-------------+
| Pod | cpuPercentage | memoryUsage |
+-------------------+----------------------+-------------+
| desgffa3mtgepvm-0 | 0.044894188888835944 | 34914304 |
| desgffa3mtgepvm-1 | 0.048185748888916656 | 35942400 |
+-------------------+----------------------+-------------+"""
    ALL_OPTIONS_EXPECTED_TABLE_1 = """+-------------------+---------------+---------------+
| Pod | gpuMemoryFree | gpuMemoryUsed |
+-------------------+---------------+---------------+
| desgffa3mtgepvm-0 | | 0 |
| desgffa3mtgepvm-1 | | 0 |
+-------------------+---------------+---------------+"""
    ALL_OPTIONS_EXPECTED_TABLE_2 = """+-------------------+---------------+---------------+
| Pod | gpuMemoryFree | gpuMemoryUsed |
+-------------------+---------------+---------------+
| desgffa3mtgepvm-0 | | 321 |
| desgffa3mtgepvm-1 | | 432 |
+-------------------+---------------+---------------+"""
    ALL_OPTIONS_EXPECTED_TABLE_3 = """+-------------------+---------------+---------------+
| Pod | gpuMemoryFree | gpuMemoryUsed |
+-------------------+---------------+---------------+
| desgffa3mtgepvm-0 | 1234 | 321 |
| desgffa3mtgepvm-1 | 234 | 432 |
+-------------------+---------------+---------------+"""
    EXPECTED_STDOUT_WHEN_INVALID_API_KEY_WAS_USED = "Failed to fetch data: Incorrect API Key provided\nForbidden\n"
    EXPECTED_STDOUT_WHEN_DEPLOYMENT_WAS_NOT_FOUND = "Deployment not found\n"

    @mock.patch("gradient.commands.common.TerminalPrinter")
    @mock.patch("gradient.api_sdk.repositories.common.websocket.create_connection")
    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_read_all_available_metrics_when_metrics_get_command_was_used_with_basic_options(
            self, get_patched, create_ws_connection_patched, terminal_printer_cls_patched,
            basic_options_metrics_stream_websocket_connection_iterator):
        """Basic stream: default chart descriptor is sent and each frame redraws the screen."""
        get_patched.return_value = MockResponse(self.GET_LIST_OF_DEPLOYMENTS_RESPONSE_JSON)
        # The fixture iterator yields the canned websocket frames.
        ws_connection_instance_mock = mock.MagicMock()
        ws_connection_instance_mock.__iter__ = basic_options_metrics_stream_websocket_connection_iterator
        create_ws_connection_patched.return_value = ws_connection_instance_mock
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.BASIC_OPTIONS_COMMAND)
        terminal_printer_cls_patched().init.assert_called_once()
        terminal_printer_cls_patched().rewrite_screen.assert_has_calls([
            mock.call(self.EXPECTED_TABLE_1),
            mock.call(self.EXPECTED_TABLE_2),
            mock.call(self.EXPECTED_TABLE_3),
        ])
        terminal_printer_cls_patched().cleanup.assert_called_once()
        get_patched.assert_called_once_with(
            self.LIST_DEPLOYMENTS_URL,
            json=self.GET_DEPLOYMENTS_LIST_REQUEST_JSON,
            params=None,
            headers=EXPECTED_HEADERS,
        )
        ws_connection_instance_mock.send.assert_called_once_with(self.BASIC_COMMAND_CHART_DESCRIPTOR)
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.commands.common.TerminalPrinter")
    @mock.patch("gradient.api_sdk.repositories.common.websocket.create_connection")
    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_read_metrics_when_metrics_get_command_was_used_with_all_options(
            self, get_patched, create_ws_connection_patched, terminal_printer_cls_patched,
            all_options_metrics_stream_websocket_connection_iterator):
        """All options: explicit metrics/interval/apiKey are reflected in the descriptor and headers."""
        get_patched.return_value = MockResponse(self.GET_LIST_OF_DEPLOYMENTS_RESPONSE_JSON)
        ws_connection_instance_mock = mock.MagicMock()
        ws_connection_instance_mock.__iter__ = all_options_metrics_stream_websocket_connection_iterator
        create_ws_connection_patched.return_value = ws_connection_instance_mock
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)
        terminal_printer_cls_patched().init.assert_called_once()
        terminal_printer_cls_patched().rewrite_screen.assert_has_calls([
            mock.call(self.ALL_OPTIONS_EXPECTED_TABLE_1),
            mock.call(self.ALL_OPTIONS_EXPECTED_TABLE_2),
            mock.call(self.ALL_OPTIONS_EXPECTED_TABLE_3),
        ])
        terminal_printer_cls_patched().cleanup.assert_called_once()
        get_patched.assert_called_once_with(
            self.LIST_DEPLOYMENTS_URL,
            json=self.GET_DEPLOYMENTS_LIST_REQUEST_JSON,
            params=None,
            headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
        )
        ws_connection_instance_mock.send.assert_called_once_with(self.ALL_COMMANDS_CHART_DESCRIPTOR)
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.commands.common.TerminalPrinter")
    @mock.patch("gradient.api_sdk.repositories.common.websocket.create_connection")
    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_read_metrics_when_metrics_get_was_executed_and_options_file_was_used(
            self, get_patched, create_ws_connection_patched, terminal_printer_cls_patched,
            all_options_metrics_stream_websocket_connection_iterator,
            deployments_metrics_stream_config_path):
        """Options file: config values should behave like the all-options command line."""
        get_patched.return_value = MockResponse(self.GET_LIST_OF_DEPLOYMENTS_RESPONSE_JSON)
        ws_connection_instance_mock = mock.MagicMock()
        ws_connection_instance_mock.__iter__ = all_options_metrics_stream_websocket_connection_iterator
        create_ws_connection_patched.return_value = ws_connection_instance_mock
        command = self.ALL_OPTIONS_COMMAND_WITH_OPTIONS_FILE[:] + [deployments_metrics_stream_config_path]
        runner = CliRunner()
        result = runner.invoke(cli.cli, command)
        terminal_printer_cls_patched().init.assert_called_once()
        terminal_printer_cls_patched().rewrite_screen.assert_has_calls([
            mock.call(self.ALL_OPTIONS_EXPECTED_TABLE_1),
            mock.call(self.ALL_OPTIONS_EXPECTED_TABLE_2),
            mock.call(self.ALL_OPTIONS_EXPECTED_TABLE_3),
        ])
        terminal_printer_cls_patched().cleanup.assert_called_once()
        get_patched.assert_called_once_with(
            self.LIST_DEPLOYMENTS_URL,
            json=self.GET_DEPLOYMENTS_LIST_REQUEST_JSON,
            params=None,
            headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
        )
        ws_connection_instance_mock.send.assert_called_once_with(self.ALL_COMMANDS_CHART_DESCRIPTOR)
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.commands.common.TerminalPrinter")
    @mock.patch("gradient.api_sdk.repositories.common.websocket.create_connection")
    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_print_valid_error_message_when_invalid_api_key_was_used(
            self, get_patched, create_ws_connection_patched, terminal_printer_cls_patched):
        """Auth failure on deployment lookup: error is printed, websocket never opened."""
        get_patched.return_value = MockResponse({"status": 400, "message": "Invalid API token"}, 400)
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)
        assert "Failed to fetch data: Invalid API token\n" == result.output, result.exc_info
        get_patched.assert_called_once_with(
            self.LIST_DEPLOYMENTS_URL,
            json=self.GET_DEPLOYMENTS_LIST_REQUEST_JSON,
            params=None,
            headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
        )
        create_ws_connection_patched.assert_not_called()
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.commands.common.TerminalPrinter")
    @mock.patch("gradient.api_sdk.repositories.common.websocket.create_connection")
    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_print_valid_error_message_when_deployment_was_not_found(
            self, get_patched, create_ws_connection_patched, terminal_printer_cls_patched):
        """Unknown deployment ID: "not found" is printed, websocket never opened."""
        get_patched.return_value = MockResponse({"deploymentList": []})
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)
        assert result.output == self.EXPECTED_STDOUT_WHEN_DEPLOYMENT_WAS_NOT_FOUND, result.exc_info
        get_patched.assert_called_once_with(
            self.LIST_DEPLOYMENTS_URL,
            json=self.GET_DEPLOYMENTS_LIST_REQUEST_JSON,
            params=None,
            headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
        )
        create_ws_connection_patched.assert_not_called()
        assert result.exit_code == 0, result.exc_info
class TestExperimentLogs(object):
| |
"Озаску"
},
"OSLO": {
"de_DE": "Oslo",
"es_ES": "Oslo",
"fr_FR": "Oslo",
"it_IT": "Oslo",
"ja_JP": "オスロ",
"ko_KR": "오슬로",
"pl_PL": "Oslo",
"pt_BR": "Oslo",
"ru_RU": "Осло"
},
"OSTERSUND": {
"de_DE": "Östersund",
"es_ES": "Östersund",
"fr_FR": "Östersund",
"it_IT": "Östersund",
"ja_JP": "エステルスンド",
"ko_KR": "외스테르순드",
"pl_PL": "Östersund",
"pt_BR": "Östersund",
"ru_RU": "Эстерсунд"
},
"OSTIA": {
"de_DE": "Ostia",
"es_ES": "Ostia",
"fr_FR": "Ostia",
"it_IT": "Ostia",
"ja_JP": "オスティア",
"ko_KR": "오스티아",
"pl_PL": "Ostia",
"pt_BR": "Ostia",
"ru_RU": "Остия"
},
"OSTROGSKI_1": {
"de_DE": "Korez",
"es_ES": "Korets",
"fr_FR": "Korets",
"it_IT": "Korets",
"ja_JP": "コレッツ",
"ko_KR": "코레츠",
"pl_PL": "Korzec",
"pt_BR": "Korets",
"ru_RU": "Корец"
},
"OSTROGSKI_2": {
"de_DE": "Isjaslaw",
"es_ES": "Iziaslav",
"fr_FR": "Zaslav",
"it_IT": "Zaslavia",
"ja_JP": "ザスラフ",
"ko_KR": "자슬라우",
"pl_PL": "Zasław",
"pt_BR": "Zaslav",
"ru_RU": "Изяслав"
},
"OSTROGSKI_3": {
"de_DE": "Riwne",
"es_ES": "Rivne",
"fr_FR": "Rivne",
"it_IT": "Rivne",
"ja_JP": "リウネ",
"ko_KR": "리브네",
"pl_PL": "Równe",
"pt_BR": "Rivne",
"ru_RU": "Ровно"
},
"OSTROGSKI_4": {
"de_DE": "Schytomyr",
"es_ES": "Zhytómyr",
"fr_FR": "Jytomyr",
"it_IT": "Zhytomyr",
"ja_JP": "ジトームィル",
"ko_KR": "지토미르",
"pl_PL": "Żytomierz",
"pt_BR": "Jitomir",
"ru_RU": "Житомир"
},
"OSTROGSKI_5": {
"de_DE": "Kiew",
"es_ES": "Kiev",
"fr_FR": "Kiev",
"it_IT": "Kiev",
"ja_JP": "キエフ",
"ko_KR": "키예프",
"pl_PL": "Kijów",
"pt_BR": "Kiev",
"ru_RU": "Киев"
},
"OSTROGSKI_6": {
"de_DE": "Brazlaw",
"es_ES": "Bratslav",
"fr_FR": "Bratslav",
"it_IT": "Bracław",
"ja_JP": "ブラーツラウ",
"ko_KR": "브라츨라우",
"pl_PL": "Bracław",
"pt_BR": "Bratslav",
"ru_RU": "Брацлав"
},
"OSTROGSKI_7": {
"de_DE": "Terebowlja",
"es_ES": "Terebovlia",
"fr_FR": "Terebovlia",
"it_IT": "Trembowla",
"ja_JP": "テレボーウリャ",
"ko_KR": "테레보울랴",
"pl_PL": "Trembowla",
"pt_BR": "Terebovlia",
"ru_RU": "Теребовля"
},
"OSTROGSKI_8": {
"de_DE": "Kamenez-Podolsk",
"es_ES": "Kamianets-Podilskyi",
"fr_FR": "Kamianets-Podilsky",
"it_IT": "<NAME>",
"ja_JP": "カームヤネツィ=ポジーリシクィイ",
"ko_KR": "카미에니에츠 포돌스키",
"pl_PL": "Kamieniec Podolski",
"pt_BR": "Kamieniec-Podolski",
"ru_RU": "Каменец-Подольский"
},
"OSTROGSKI_9": {
"de_DE": "Olyka",
"es_ES": "Olyka",
"fr_FR": "Olyka",
"it_IT": "Olyka",
"ja_JP": "オリカ",
"ko_KR": "올리카",
"pl_PL": "Ołyka",
"pt_BR": "Olyka",
"ru_RU": "Олыка"
},
"OSTROGSKI_CAPITAL": {
"de_DE": "Ostroh",
"es_ES": "Ostroh",
"fr_FR": "Ostroh",
"it_IT": "Ostroh",
"ja_JP": "オストロフ",
"ko_KR": "오스트로흐",
"pl_PL": "Ostróg",
"pt_BR": "Ostroh",
"ru_RU": "Острог"
},
"OTA": {
"de_DE": "Ota",
"es_ES": "Ota",
"fr_FR": "Ota",
"it_IT": "Ota",
"ja_JP": "太田",
"ko_KR": "오타",
"pl_PL": "Ota",
"pt_BR": "Ota",
"ru_RU": "Ота"
},
"OTATARA_PA": {
"de_DE": "Otatara Pā",
"es_ES": "Otatara pā",
"fr_FR": "Otatara pā",
"it_IT": "Otatara pā",
"ja_JP": "オタタラ・パ",
"ko_KR": "오타타라 파",
"pl_PL": "Otatara pā",
"pt_BR": "Otatara pā",
"ru_RU": "Отатара-Па"
},
"OTSU": {
"de_DE": "Otsu",
"es_ES": "Otsu",
"fr_FR": "Otsu",
"it_IT": "Otsu",
"ja_JP": "大津",
"ko_KR": "오쓰",
"pl_PL": "Otsu",
"pt_BR": "Otsu",
"ru_RU": "Оцу"
},
"OTTAWA": {
"de_DE": "Ottawa",
"es_ES": "Ottawa",
"fr_FR": "Ottawa",
"it_IT": "Ottawa",
"ja_JP": "オタワ",
"ko_KR": "오타와",
"pl_PL": "Ottawa",
"pt_BR": "Ottawa",
"ru_RU": "Оттава"
},
"OURIQUE": {
"de_DE": "Ourique",
"es_ES": "Ourique",
"fr_FR": "Ourique",
"it_IT": "Ourique",
"ja_JP": "オウリケ",
"ko_KR": "오우리퀘",
"pl_PL": "Ourique",
"pt_BR": "Ourique",
"ru_RU": "Орике"
},
"OVIEDO": {
"de_DE": "Oviedo",
"es_ES": "Oviedo",
"fr_FR": "Oviedo",
"it_IT": "Oviedo",
"ja_JP": "オビエド",
"ko_KR": "오비에도",
"pl_PL": "Oviedo",
"pt_BR": "Oviedo",
"ru_RU": "Овьедо"
},
"OXFORD": {
"de_DE": "Oxford",
"es_ES": "Oxford",
"fr_FR": "Oxford",
"it_IT": "Oxford",
"ja_JP": "オックスフォード",
"ko_KR": "옥스퍼드",
"pl_PL": "Oxford",
"pt_BR": "Oxford",
"ru_RU": "Оксфорд"
},
"PACHAKAMAQ": {
"de_DE": "Pachacámac",
"es_ES": "Pachacamac",
"fr_FR": "Pachakamaq",
"it_IT": "Pachakamaq",
"ja_JP": "パチャカマック",
"ko_KR": "파차카막",
"pl_PL": "Pachakamaq",
"pt_BR": "Pachacámac",
"ru_RU": "Пачакамак"
},
"PAGARUYUNG": {
"de_DE": "Pagaruyung",
"es_ES": "Pagaruyung",
"fr_FR": "Pagaruyung",
"it_IT": "Pagaruyung",
"ja_JP": "パガルユン",
"ko_KR": "파가루융",
"pl_PL": "Pagarujung",
"pt_BR": "Pagaruyung",
"ru_RU": "Пагаруюнг"
},
"PALEMBANG": {
"de_DE": "Palembang",
"es_ES": "Palembang",
"fr_FR": "Palembang",
"it_IT": "Palembang",
"ja_JP": "パレンバン",
"ko_KR": "팔렘방",
"pl_PL": "Palembang",
"pt_BR": "Palimbão",
"ru_RU": "Палембанг"
},
"PALENQUE": {
"de_DE": "Mitla",
"es_ES": "Mitla",
"fr_FR": "Mitla",
"it_IT": "Mitla",
"ja_JP": "ミトラ",
"ko_KR": "미틀라",
"pl_PL": "Mitla",
"pt_BR": "Mitla",
"ru_RU": "Митла"
},
"PALENQUE_MAYA": {
"de_DE": "Palenque",
"es_ES": "Palenque",
"fr_FR": "Palenque",
"it_IT": "Palenque",
"ja_JP": "パレンケ",
"ko_KR": "팔렝케",
"pl_PL": "Palenque",
"pt_BR": "Palenque",
"ru_RU": "Паленке"
},
"PALMYRA": {
"de_DE": "Palmyra",
"es_ES": "Palmira",
"fr_FR": "Palmyra",
"it_IT": "Palmira",
"ja_JP": "パルミラ",
"ko_KR": "팔미라",
"pl_PL": "Palmyra",
"pt_BR": "Palmyra",
"ru_RU": "Пальмира"
},
"PAMPLONA": {
"de_DE": "Pamplona",
"es_ES": "Pamplona",
"fr_FR": "Pampelune",
"it_IT": "Pamplona",
"ja_JP": "パンプローナ",
"ko_KR": "팜플로나",
"pl_PL": "Pamplona",
"pt_BR": "Pamplona",
"ru_RU": "Памплона"
},
"PANAMA": {
"de_DE": "Panamá",
"es_ES": "Panamá",
"fr_FR": "Panama",
"it_IT": "Panamá",
"ja_JP": "パナマ",
"ko_KR": "파나마",
"pl_PL": "Panama",
"pt_BR": "Panamá",
"ru_RU": "Панама"
},
"PANGKALPINANG": {
"de_DE": "Pangkal Pinang ",
"es_ES": "Pangkalpinang ",
"fr_FR": "Pangkalpinang ",
"it_IT": "Pangkalpinang ",
"ja_JP": "パンカルピナン",
"ko_KR": "팡칼피낭",
"pl_PL": "Pangkalpinang",
"pt_BR": "Pangkal Pinang ",
"ru_RU": "Панкалпинанг"
},
"PANTICAPAEUM": {
"de_DE": "Panticapaeum",
"es_ES": "Panticapeo",
"fr_FR": "Panticapée",
"it_IT": "Panticapeo",
"ja_JP": "パンティカパイオン",
"ko_KR": "판티카페움",
"pl_PL": "Panticapaeum",
"pt_BR": "Panticapeu",
"ru_RU": "Пантикапей"
},
"PAPEWE": {
"de_DE": "Papewe",
"es_ES": "Papewe",
"fr_FR": "Papewe",
"it_IT": "Papewe",
"ja_JP": "パペウェ",
"ko_KR": "파페웨",
"pl_PL": "Papewe",
"pt_BR": "Papewe",
"ru_RU": "Папеве"
},
"PAPHOS": {
"de_DE": "Paphos",
"es_ES": "Pafos",
"fr_FR": "Paphos",
"it_IT": "Pafo",
"ja_JP": "パフォス",
"ko_KR": "파포스",
"pl_PL": "Pafos",
"pt_BR": "Pafos",
"ru_RU": "Пафос"
},
"PARAMONGA": {
"de_DE": "Paramonga",
"es_ES": "Paramonga",
"fr_FR": "Paramonga",
"it_IT": "Paramonga",
"ja_JP": "パラモンガ",
"ko_KR": "파라몽가",
"pl_PL": "Paramonga",
"pt_BR": "Paramonga",
"ru_RU": "Парамонга"
},
"PARIA": {
"de_DE": "Paria",
"es_ES": "Paria",
"fr_FR": "Paria",
"it_IT": "Paria",
"ja_JP": "パリア",
"ko_KR": "파리아",
"pl_PL": "Paria",
"pt_BR": "Paria",
"ru_RU": "Пария"
},
"PARIS": {
"de_DE": "Paris",
"es_ES": "París",
"fr_FR": "Paris",
"it_IT": "Parigi",
"ja_JP": "パリ",
"ko_KR": "파리",
"pl_PL": "Paryż",
"pt_BR": "Paris",
"ru_RU": "Париж"
},
"PARI_RU": {
"de_DE": "Pari-rua",
"es_ES": "Pari-ru",
"fr_FR": "Pari-ru",
"it_IT": "Pari-ru",
"ja_JP": "パリ・ル",
"ko_KR": "파리-루",
"pl_PL": "Pari-ru",
"pt_BR": "Pari-ru",
"ru_RU": "Пари-Ру"
},
"PARNAIBA": {
"de_DE": "Parnaíba",
"es_ES": "Parnaíba",
"fr_FR": "Parnaíba",
"it_IT": "Parnaíba",
"ja_JP": "パルナイバ",
"ko_KR": "파르나이바",
"pl_PL": "Parnaíba",
"pt_BR": "Parnaíba",
"ru_RU": "Парнаиба"
},
"PAROS": {
"de_DE": "Paros",
"es_ES": "Paros",
"fr_FR": "Paros",
"it_IT": "Paro",
"ja_JP": "パロス",
"ko_KR": "파로스",
"pl_PL": "Paros",
"pt_BR": "Paros",
"ru_RU": "Парос"
},
"PARSA": {
"de_DE": "Parsa",
"es_ES": "Parsa",
"fr_FR": "Parsa",
"it_IT": "Parsa",
"ja_JP": "パルサ",
"ko_KR": "파르사",
"pl_PL": "Parsa",
"pt_BR": "Parsa",
"ru_RU": "Парса"
},
"PASARGADAE": {
"de_DE": "Pasargade",
"es_ES": "Pasargada",
"fr_FR": "Pasargadae",
"it_IT": "Pasargade",
"ja_JP": "パサルガダエ",
"ko_KR": "파사르가대",
"pl_PL": "Pasargady",
"pt_BR": "Pasárgada",
"ru_RU": "Пасаргады"
},
"PASKWAW_ASKIHK": {
"de_DE": "Paskwaw-Askihk",
"es_ES": "Paskwaw-Askihk",
"fr_FR": "Paskwaw-Askihk",
"it_IT": "Paskwaw-Askihk",
"ja_JP": "パスクワウ・アスキク",
"ko_KR": "파스콰우아스키크",
"pl_PL": "Paskwaw-Askihk",
"pt_BR": "Paskwaw-Askihk",
"ru_RU": "Паскво-Аскик"
},
"PASTO": {
"de_DE": "Pasto",
"es_ES": "Pasto",
"fr_FR": "Pasto",
"it_IT": "Pasto",
"ja_JP": "パスト",
"ko_KR": "파스토",
"pl_PL": "Pasto",
"pt_BR": "Pasto",
"ru_RU": "Пасто"
},
"PATNA": {
"de_DE": "Patna",
"es_ES": "Patna",
"fr_FR": "Patna",
"it_IT": "Patna",
"ja_JP": "パトナ",
"ko_KR": "파트나",
"pl_PL": "Patna",
"pt_BR": "Patna",
"ru_RU": "Патна"
},
"PATRAS": {
"de_DE": "Patras",
"es_ES": "Patras",
"fr_FR": "Patras",
"it_IT": "Patrasso",
"ja_JP": "パトラス",
"ko_KR": "파트라스",
"pl_PL": "Patras",
"pt_BR": "Patras",
"ru_RU": "Патры"
},
"PAZURISH_DAGAN": {
"de_DE": "Pazurish-Dagan",
"es_ES": "Pazurish-Dagan",
"fr_FR": "Puzrish-Dagan",
"it_IT": "Pazurish-Dagan",
"ja_JP": "パズリシュ=ダガン",
"ko_KR": "파주리즈다강",
"pl_PL": "Pazurish-Dagan",
"pt_BR": "Pazurish-Dagan",
"ru_RU": "Пазуриш-Даган"
},
"PAZYRYK": {
"de_DE": "Pazyryk",
"es_ES": "Pazyryk",
"fr_FR": "Pazyryk",
"it_IT": "Pazyryk",
"ja_JP": "パジリク",
"ko_KR": "파지리크",
"pl_PL": "Pazyryk",
"pt_BR": "Pazyryk",
"ru_RU": "Пазырык"
},
"PEAIRT": {
"de_DE": "Perth",
"es_ES": "Peairt",
"fr_FR": "Peairt",
"it_IT": "Peairt",
"ja_JP": "パース",
"ko_KR": "페어스",
"pl_PL": "Peairt",
"pt_BR": "Peairt",
"ru_RU": "Перт"
},
"PECS": {
"de_DE": "Pécs",
"es_ES": "Pécs",
"fr_FR": "Pécs",
"it_IT": "Pécs",
"ja_JP": "ペーチ",
"ko_KR": "페치",
"pl_PL": "Pecz",
"pt_BR": "Pécs",
"ru_RU": "Печ"
},
"PEDEME": {
"de_DE": "Pedeme",
"es_ES": "Pedeme",
"fr_FR": "Pedeme",
"it_IT": "Pedeme",
"ja_JP": "ペデメ",
"ko_KR": "페데메",
"pl_PL": "Pedeme",
"pt_BR": "Pedeme",
"ru_RU": "Педеме"
},
"PEEBLES": {
"de_DE": "Peebles",
"es_ES": "Peebles",
"fr_FR": "Peebles",
"it_IT": "Peebles",
"ja_JP": "ピーブルス",
"ko_KR": "피블즈",
"pl_PL": "Peebles",
"pt_BR": "Peebles",
"ru_RU": "Пиблс"
},
"PEEPEEKISIS": {
"de_DE": "Peepeekisis",
"es_ES": "Peepeekisis",
"fr_FR": "Peepeekisis",
"it_IT": "Peepeekisis",
"ja_JP": "ペーペーキシス",
"ko_KR": "피피키시스",
"pl_PL": "Peepeekisis",
"pt_BR": "Peepeekisis",
"ru_RU": "Пипикисис"
},
"PEHUEN_MAPU": {
"de_DE": "Pehuen Mapu",
"es_ES": "Pehuen Mapu",
"fr_FR": "Pehuen Mapu",
"it_IT": "Pehuen Mapu",
"ja_JP": "ペフエン・マプ",
"ko_KR": "페우엔 마푸",
"pl_PL": "Pehuen Mapu",
"pt_BR": "Pehuen Mapu",
"ru_RU": "Пеуэн-Мапу"
},
"PEKALONGAN": {
"de_DE": "Pekalongan",
"es_ES": "Pekalongan",
"fr_FR": "Pekalongan",
"it_IT": "Pekalongan",
"ja_JP": "ペカロンガン",
"ko_KR": "페칼롱간",
"pl_PL": "Pekalongan",
"pt_BR": "Pekalongan",
"ru_RU": "Пекалонган"
},
"PELLA": {
"de_DE": "Pella",
"es_ES": "Pela",
"fr_FR": "Pella",
"it_IT": "Pella",
"ja_JP": "ペラ",
"ko_KR": "펠라",
"pl_PL": "Pella",
"pt_BR": "Pela",
"ru_RU": "Пелла"
},
"PENI_MAPU": {
"de_DE": "Peñi Mapu",
"es_ES": "Peñi Mapu",
"fr_FR": "Peñi Mapu",
"it_IT": "Peñi Mapu",
"ja_JP": "ペニ・マプ",
"ko_KR": "페니 마푸",
"pl_PL": "Peñi Mapu",
"pt_BR": "Peñi Mapu",
"ru_RU": "Пеньи-Мапу"
},
"PERGAMON": {
"de_DE": "Pergamon",
"es_ES": "Pérgamo",
"fr_FR": "Pergame",
"it_IT": "Pergamo",
"ja_JP": "ペルガモン",
"ko_KR": "페르가몬",
"pl_PL": "Pergamon",
"pt_BR": "Pergamon",
"ru_RU": "Пергам"
},
"PERM": {
"de_DE": "Perm",
"es_ES": "Perm",
"fr_FR": "Perm",
"it_IT": "Perm",
"ja_JP": "ペルミ",
"ko_KR": "페름",
"pl_PL": "Perm",
"pt_BR": "Perm",
"ru_RU": "Пермь"
},
"PERTH": {
"de_DE": "Perth",
"es_ES": "Perth",
"fr_FR": "Perth",
"it_IT": "Perth",
"ja_JP": "パース",
"ko_KR": "퍼스",
"pl_PL": "Perth",
"pt_BR": "Perth",
"ru_RU": "Перт"
},
"PER_BAST": {
"de_DE": "Per-Bast",
"es_ES": "Per-Bast",
"fr_FR": "Per-Bast",
"it_IT": "Per-Bast",
"ja_JP": "ペル・バスト",
"ko_KR": "페르바스트",
"pl_PL": "Per-Bast",
"pt_BR": "Tell Basta",
"ru_RU": "Пер-Баст"
},
"PEST": {
"de_DE": "Pest",
"es_ES": "Pest",
"fr_FR": "Pest",
| |
update=update_renderpath)
rp_greasepencil = BoolProperty(name="Grease Pencil", description="Render Grease Pencil data", default=False, update=update_renderpath)
rp_ocean = BoolProperty(name="Ocean", description="Ocean pass", default=False, update=update_renderpath)
rp_gi = EnumProperty(
items=[('Off', 'Off', 'Off'),
('Voxel GI', 'Voxel GI', 'Voxel GI'),
('Voxel AO', 'Voxel AO', 'Voxel AO')
],
name="Global Illumination", description="Dynamic global illumination", default='Off', update=update_renderpath)
rp_voxelgi_resolution = EnumProperty(
items=[('32', '32', '32'),
('64', '64', '64'),
('128', '128', '128'),
('256', '256', '256'),
('512', '512', '512')],
name="Resolution", description="3D texture resolution", default='128', update=update_renderpath)
rp_voxelgi_resolution_z = EnumProperty(
items=[('1.0', '1.0', '1.0'),
('0.5', '0.5', '0.5'),
('0.25', '0.25', '0.25')],
name="Resolution Z", description="3D texture z resolution multiplier", default='1.0', update=update_renderpath)
arm_clouds = BoolProperty(name="Clouds", default=False, update=assets.invalidate_shader_cache)
arm_soft_shadows = EnumProperty(
items=[('On', 'On', 'On'),
('Off', 'Off', 'Off'),
('Auto', 'Auto', 'Auto')],
name="Soft Shadows", description="Soft shadows with variable penumbra (spot and non-cascaded sun light supported)", default='Off', update=assets.invalidate_shader_cache)
arm_soft_shadows_penumbra = IntProperty(name="Penumbra", description="Variable penumbra scale", default=1, min=0, max=10, update=assets.invalidate_shader_cache)
arm_soft_shadows_distance = FloatProperty(name="Distance", description="Variable penumbra distance", default=1.0, min=0, max=10, update=assets.invalidate_shader_cache)
arm_shadows_cubemap = BoolProperty(name="Cubemap", description="Use cubemap to capture point light shadows", default=True)
arm_ssrs = BoolProperty(name="SSRS", description="Screen-space ray-traced shadows", default=False, update=assets.invalidate_shader_cache)
arm_texture_filter = EnumProperty(
items=[('Anisotropic', 'Anisotropic', 'Anisotropic'),
('Linear', 'Linear', 'Linear'),
('Point', 'Closest', 'Point'),
('Manual', 'Manual', 'Manual')],
name="Texture Filtering", description="Set Manual to honor interpolation setting on Image Texture node", default='Anisotropic')
arm_material_model = EnumProperty(
items=[('Full', 'Full', 'Full'),
('Mobile', 'Mobile', 'Mobile'),
('Solid', 'Solid', 'Solid'),
],
name="Materials", description="Material builder", default='Full', update=update_material_model)
arm_rp_displacement = EnumProperty(
items=[('Off', 'Off', 'Off'),
('Vertex', 'Vertex', 'Vertex'),
('Tessellation', 'Tessellation', 'Tessellation')],
name="Displacement", description="Enable material displacement", default='Vertex', update=assets.invalidate_shader_cache)
arm_tess_mesh_inner = IntProperty(name="Inner", description="Inner tessellation level", default=7)
arm_tess_mesh_outer = IntProperty(name="Outer", description="Outer tessellation level", default=7)
arm_tess_shadows_inner = IntProperty(name="Inner", description="Inner tessellation level", default=7)
arm_tess_shadows_outer = IntProperty(name="Outer", description="Outer tessellation level", default=7)
arm_rp_resolution = EnumProperty(
items=[('Display', 'Display', 'Display'),
('Custom', 'Custom', 'Custom')],
name="Resolution", description="Resolution to perform rendering at", default='Display', update=update_renderpath)
arm_rp_resolution_size = IntProperty(name="Size", description="Resolution height in pixels(for example 720p), width is auto-fit to preserve aspect ratio", default=720, min=0, update=update_renderpath)
arm_rp_resolution_filter = EnumProperty(
items=[('Linear', 'Linear', 'Linear'),
('Point', 'Closest', 'Point')],
name="Filter", description="Scaling filter", default='Linear')
rp_dynres = BoolProperty(name="Dynamic Resolution", description="Dynamic resolution scaling for performance", default=False, update=update_renderpath)
arm_ssr_half_res = BoolProperty(name="Half Res", description="Trace in half resolution", default=True, update=update_renderpath)
rp_voxelgi_relight = BoolProperty(name="Relight", description="Relight voxels when light is moved", default=True, update=update_renderpath)
arm_voxelgi_dimensions = FloatProperty(name="Dimensions", description="Voxelization bounds",default=16, update=assets.invalidate_shader_cache)
arm_voxelgi_revoxelize = BoolProperty(name="Revoxelize", description="Revoxelize scene each frame", default=False, update=assets.invalidate_shader_cache)
arm_voxelgi_temporal = BoolProperty(name="Temporal Filter", description="Use temporal filtering to stabilize voxels", default=False, update=assets.invalidate_shader_cache)
arm_voxelgi_bounces = EnumProperty(
items=[('1', '1', '1'),
('2', '2', '2')],
name="Bounces", description="Trace multiple light bounces", default='1', update=update_renderpath)
arm_voxelgi_camera = BoolProperty(name="Dynamic Camera", description="Use camera as voxelization origin", default=False, update=assets.invalidate_shader_cache)
# arm_voxelgi_anisotropic = BoolProperty(name="Anisotropic", description="Use anisotropic voxels", default=False, update=update_renderpath)
arm_voxelgi_shadows = BoolProperty(name="Trace Shadows", description="Use voxels to render shadows", default=False, update=update_renderpath)
arm_samples_per_pixel = EnumProperty(
items=[('1', '1', '1'),
('2', '2', '2'),
('4', '4', '4'),
('8', '8', '8'),
('16', '16', '16')],
name="MSAA", description="Samples per pixel usable for render paths drawing directly to framebuffer", default='1')
arm_voxelgi_diff = FloatProperty(name="Diffuse", description="", default=3.0, update=assets.invalidate_shader_cache)
arm_voxelgi_cones = EnumProperty(
items=[('9', '9', '9'),
('5', '5', '5'),
('3', '3', '3'),
('1', '1', '1'),
],
name="Cones", description="Number of cones to trace", default='5', update=assets.invalidate_shader_cache)
arm_voxelgi_spec = FloatProperty(name="Specular", description="", default=1.0, update=assets.invalidate_shader_cache)
arm_voxelgi_occ = FloatProperty(name="Occlusion", description="", default=1.0, update=assets.invalidate_shader_cache)
arm_voxelgi_env = FloatProperty(name="Env Map", description="Contribute light from environment map", default=0.0, update=assets.invalidate_shader_cache)
arm_voxelgi_step = FloatProperty(name="Step", description="Step size", default=1.0, update=assets.invalidate_shader_cache)
arm_voxelgi_offset = FloatProperty(name="Offset", description="Ray offset", default=1.0, update=assets.invalidate_shader_cache)
arm_voxelgi_range = FloatProperty(name="Range", description="Maximum range", default=2.0, update=assets.invalidate_shader_cache)
arm_sss_width = FloatProperty(name="Width", description="SSS blur strength", default=1.0, update=assets.invalidate_shader_cache)
arm_clouds_density = FloatProperty(name="Density", default=1.0, min=0.0, max=1.0, update=assets.invalidate_shader_cache)
arm_clouds_size = FloatProperty(name="Size", default=1.0, min=0.0, max=10.0, update=assets.invalidate_shader_cache)
arm_clouds_lower = FloatProperty(name="Lower", default=2.0, min=1.0, max=10.0, update=assets.invalidate_shader_cache)
arm_clouds_upper = FloatProperty(name="Upper", default=3.5, min=1.0, max=10.0, update=assets.invalidate_shader_cache)
arm_clouds_wind = FloatVectorProperty(name="Wind", default=[0.2, 0.06], size=2, update=assets.invalidate_shader_cache)
arm_clouds_secondary = FloatProperty(name="Secondary", default=0.0, min=0.0, max=10.0, update=assets.invalidate_shader_cache)
arm_clouds_precipitation = FloatProperty(name="Precipitation", default=1.0, min=0.0, max=2.0, update=assets.invalidate_shader_cache)
arm_clouds_eccentricity = FloatProperty(name="Eccentricity", default=0.6, min=0.0, max=1.0, update=assets.invalidate_shader_cache)
arm_ocean_base_color = FloatVectorProperty(name="Base Color", size=3, default=[0.1, 0.19, 0.37], subtype='COLOR', min=0, max=1, update=assets.invalidate_shader_cache)
arm_ocean_water_color = FloatVectorProperty(name="Water Color", size=3, default=[0.6, 0.7, 0.9], subtype='COLOR', min=0, max=1, update=assets.invalidate_shader_cache)
arm_ocean_level = FloatProperty(name="Level", default=0.0, update=assets.invalidate_shader_cache)
arm_ocean_amplitude = FloatProperty(name="Amplitude", default=2.5, update=assets.invalidate_shader_cache)
arm_ocean_height = FloatProperty(name="Height", default=0.6, update=assets.invalidate_shader_cache)
arm_ocean_choppy = FloatProperty(name="Choppy", default=4.0, update=assets.invalidate_shader_cache)
arm_ocean_speed = FloatProperty(name="Speed", default=1.5, update=assets.invalidate_shader_cache)
arm_ocean_freq = FloatProperty(name="Freq", default=0.16, update=assets.invalidate_shader_cache)
arm_ocean_fade = FloatProperty(name="Fade", default=1.8, update=assets.invalidate_shader_cache)
arm_ssgi_strength = FloatProperty(name="Strength", default=1.0, update=assets.invalidate_shader_cache)
arm_ssgi_radius = FloatProperty(name="Radius", default=1.0, update=assets.invalidate_shader_cache)
arm_ssgi_step = FloatProperty(name="Step", default=2.0, update=assets.invalidate_shader_cache)
arm_ssgi_max_steps = IntProperty(name="Max Steps", default=8, update=assets.invalidate_shader_cache)
arm_ssgi_rays = EnumProperty(
items=[('9', '9', '9'),
('5', '5', '5'),
],
name="Rays", description="Number of rays to trace for RTAO", default='5', update=assets.invalidate_shader_cache)
arm_ssgi_half_res = BoolProperty(name="Half Res", description="Trace in half resolution", default=False, update=assets.invalidate_shader_cache)
arm_bloom_threshold = FloatProperty(name="Threshold", default=1.0, update=assets.invalidate_shader_cache)
arm_bloom_strength = FloatProperty(name="Strength", default=3.5, update=assets.invalidate_shader_cache)
arm_bloom_radius = FloatProperty(name="Radius", default=3.0, update=assets.invalidate_shader_cache)
arm_motion_blur_intensity = FloatProperty(name="Intensity", default=1.0, update=assets.invalidate_shader_cache)
arm_ssr_ray_step = FloatProperty(name="Step", default=0.04, update=assets.invalidate_shader_cache)
arm_ssr_min_ray_step = FloatProperty(name="Step Min", default=0.05, update=assets.invalidate_shader_cache)
arm_ssr_search_dist = FloatProperty(name="Search", default=5.0, update=assets.invalidate_shader_cache)
arm_ssr_falloff_exp = FloatProperty(name="Falloff", default=5.0, update=assets.invalidate_shader_cache)
arm_ssr_jitter = FloatProperty(name="Jitter", default=0.6, update=assets.invalidate_shader_cache)
arm_volumetric_light_air_turbidity = FloatProperty(name="Air Turbidity", default=1.0, update=assets.invalidate_shader_cache)
arm_volumetric_light_air_color = FloatVectorProperty(name="Air Color", size=3, default=[1.0, 1.0, 1.0], subtype='COLOR', min=0, max=1, update=assets.invalidate_shader_cache)
arm_volumetric_light_steps = IntProperty(name="Steps", default=20, min=0, update=assets.invalidate_shader_cache)
arm_shadowmap_split = FloatProperty(name="Cascade Split", description="Split factor for cascaded shadow maps, higher factor favors detail on close surfaces", default=0.8, update=assets.invalidate_shader_cache)
arm_shadowmap_bounds = FloatProperty(name="Cascade Bounds", description="Multiply cascade bounds to capture bigger area", default=1.0, update=assets.invalidate_compiled_data)
arm_autoexposure_strength = FloatProperty(name="Auto Exposure Strength", default=0.7, update=assets.invalidate_shader_cache)
arm_ssrs_ray_step = FloatProperty(name="Step", default=0.01, update=assets.invalidate_shader_cache)
# Compositor
arm_letterbox = BoolProperty(name="Letterbox", default=False, update=assets.invalidate_shader_cache)
arm_letterbox_size = FloatProperty(name="Size", default=0.1, update=assets.invalidate_shader_cache)
arm_grain = BoolProperty(name="Film Grain", default=False, update=assets.invalidate_shader_cache)
arm_grain_strength = FloatProperty(name="Strength", default=2.0, update=assets.invalidate_shader_cache)
arm_sharpen = BoolProperty(name="Sharpen", default=False, update=assets.invalidate_shader_cache)
arm_sharpen_strength = FloatProperty(name="Strength", default=0.25, update=assets.invalidate_shader_cache)
arm_fog = BoolProperty(name="Volumetric Fog", default=False, update=assets.invalidate_shader_cache)
arm_fog_color = FloatVectorProperty(name="Color", size=3, subtype='COLOR', default=[0.5, 0.6, 0.7], min=0, max=1, update=assets.invalidate_shader_cache)
arm_fog_amounta = FloatProperty(name="Amount A", default=0.25, update=assets.invalidate_shader_cache)
arm_fog_amountb = FloatProperty(name="Amount B", default=0.5, update=assets.invalidate_shader_cache)
arm_tonemap = EnumProperty(
items=[('Off', 'Off', 'Off'),
('Filmic', 'Filmic', 'Filmic'),
('Filmic2', 'Filmic2', 'Filmic2'),
('Reinhard', 'Reinhard', 'Reinhard'),
('Uncharted', 'Uncharted', 'Uncharted')],
name='Tonemap', description='Tonemapping operator', default='Filmic', update=assets.invalidate_shader_cache)
arm_lens_texture = StringProperty(name="Lens Texture", default="")
arm_fisheye = BoolProperty(name="Fish Eye", default=False, update=assets.invalidate_shader_cache)
arm_vignette = BoolProperty(name="Vignette", default=False, update=assets.invalidate_shader_cache)
arm_lensflare = BoolProperty(name="Lens Flare", default=False, update=assets.invalidate_shader_cache)
arm_lut_texture = StringProperty(name="LUT Texture", description="Color Grading", default="", update=assets.invalidate_shader_cache)
arm_skin = EnumProperty(
items=[('GPU (Dual-Quat)', 'GPU (Dual-Quat)', 'GPU (Dual-Quat)'),
('GPU (Matrix)', 'GPU (Matrix)', 'GPU (Matrix)'),
('CPU', 'CPU', 'CPU'),
('Off', 'Off', 'Off')],
name='Skinning', description='Skinning method', default='GPU (Dual-Quat)', update=assets.invalidate_shader_cache)
arm_skin_max_bones_auto = BoolProperty(name="Auto Bones", description="Calculate amount of maximum bones based on armatures", default=True, update=assets.invalidate_compiled_data)
arm_skin_max_bones = IntProperty(name="Max Bones", default=50, min=1, max=3000, update=assets.invalidate_shader_cache)
arm_particles = EnumProperty(
items=[('GPU', 'GPU', 'GPU'),
('CPU', 'CPU', 'CPU'),
('Off', 'Off', 'Off')],
name='Particles', description='Simulation method', default='GPU', update=assets.invalidate_shader_cache)
# Material override flags
arm_culling = BoolProperty(name="Culling", default=True)
arm_two_sided_area_light = BoolProperty(name="Two-Sided Area Light", description="Emit light from both faces of area plane", default=False, update=assets.invalidate_shader_cache)
class ArmRPList(bpy.types.UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
custom_icon = 'OBJECT_DATAMODE'
if self.layout_type in {'DEFAULT', 'COMPACT'}:
row = layout.row()
row.prop(item, "name", text="", emboss=False, icon=custom_icon)
elif self.layout_type in {'GRID'}:
layout.alignment = 'CENTER'
layout.label(text="", icon = custom_icon)
class ArmRPListNewItem(bpy.types.Operator):
# Add a new item to the list
bl_idname = "arm_rplist.new_item"
bl_label = "New"
def invoke(self, context, event):
wm = context.window_manager
return wm.invoke_props_dialog(self)
def draw(self,context):
layout = self.layout
layout.prop(bpy.data.worlds['Arm'], 'rp_preset', expand=True)
def execute(self, context):
wrd = bpy.data.worlds['Arm']
wrd.arm_rplist.add()
wrd.arm_rplist_index = len(wrd.arm_rplist) - 1
update_preset(wrd, context)
return{'FINISHED'}
class ArmRPListDeleteItem(bpy.types.Operator):
    """Delete the selected render path preset from the list."""
    bl_idname = "arm_rplist.delete_item"
    bl_label = "Deletes an item"

    @classmethod
    def poll(cls, context):
        """Enable the operator only when the list is non-empty."""
        # Fixed: classmethod's first parameter is conventionally 'cls', not 'self'.
        mdata = bpy.data.worlds['Arm']
        return len(mdata.arm_rplist) > 0

    def execute(self, context):
        mdata = bpy.data.worlds['Arm']
        rplist = mdata.arm_rplist  # renamed: do not shadow the builtin 'list'
        index = mdata.arm_rplist_index
        rplist.remove(index)
        # Keep the selection on a valid entry after the removal.
        if index > 0:
            index = index - 1
        mdata.arm_rplist_index = index
        return {'FINISHED'}
class ArmRPListMoveItem(bpy.types.Operator):
    """Move the selected render path preset up or down in the list."""
    bl_idname = "arm_rplist.move_item"
    bl_label = "Move an item in the list"

    direction = bpy.props.EnumProperty(
        items=(
            ('UP', 'Up', ""),
            ('DOWN', 'Down', ""),))

    def move_index(self):
        """Shift the active index one step in self.direction, clamped to list bounds."""
        mdata = bpy.data.worlds['Arm']
        index = mdata.arm_rplist_index
        list_length = len(mdata.arm_rplist) - 1
        new_index = 0
        if self.direction == 'UP':
            new_index = index - 1
        elif self.direction == 'DOWN':
            new_index = index + 1
        new_index = max(0, min(new_index, list_length))
        mdata.arm_rplist.move(index, new_index)
        mdata.arm_rplist_index = new_index

    def execute(self, context):
        # Fixed: removed unused 'neighbor' locals and the builtin-shadowing
        # 'list'/'index' locals that execute() never actually used.
        if self.direction in ('UP', 'DOWN'):
            self.move_index()
            return {'FINISHED'}
        return {'CANCELLED'}
def register():
bpy.utils.register_class(ArmRPListItem)
bpy.utils.register_class(ArmRPList)
| |
"001F0E": "Japan Kyastem Co., Ltd",
"001F0F": "Select Engineered Systems",
"001F10": "TOLEDO DO BRASIL INDUSTRIA DE BALANCAS LTDA",
"001F11": "OPENMOKO, INC.",
"001F12": "Juniper Networks",
"001F13": "S.& A.S. Ltd.",
"001F14": "NexG",
"001F15": "Bioscrypt Inc",
"001F16": "Wistron Corporation",
"001F17": "IDX Company, Ltd.",
"001F18": "Hakusan.Mfg.Co,.Ltd",
"001F19": "BEN-RI ELECTRONICA S.A.",
"001F1A": "Prominvest",
"001F1B": "RoyalTek Company Ltd.",
"001F1C": "KOBISHI ELECTRIC Co.,Ltd.",
"001F1D": "Atlas Material Testing Technology LLC",
"001F1E": "Astec Technology Co., Ltd",
"001F1F": "Edimax Technology Co. Ltd.",
"001F20": "Logitech Europe SA",
"001F21": "Inner Mongolia Yin An Science & Technology Development Co.,L",
"001F22": "Source Photonics, Inc.",
"001F23": "Interacoustics",
"001F24": "DIGITVIEW TECHNOLOGY CO., LTD.",
"001F25": "MBS GmbH",
"001F26": "CISCO SYSTEMS, INC.",
"001F27": "CISCO SYSTEMS, INC.",
"001F28": "HPN Supply Chain",
"001F29": "Hewlett-Packard Company",
"001F2A": "ACCM",
"001F2B": "Orange Logic",
"001F2C": "Starbridge Networks",
"001F2D": "Electro-Optical Imaging, Inc.",
"001F2E": "Triangle Research Int'l Pte Ltd",
"001F2F": "Berker GmbH & Co. KG",
"001F30": "Travelping",
"001F31": "Radiocomp",
"001F32": "Nintendo Co., Ltd.",
"001F33": "Netgear Inc.",
"001F34": "Lung Hwa Electronics Co., Ltd.",
"001F35": "AIR802 LLC",
"001F36": "Bellwin Information Co. Ltd.,",
"001F37": "Genesis I&C",
"001F38": "POSITRON",
"001F39": "Construcciones y Auxiliar de Ferrocarriles, S.A.",
"001F3A": "Hon Hai Precision Ind.Co., Ltd.",
"001F3B": "Intel Corporate",
"001F3C": "Intel Corporate",
"001F3D": "Qbit GmbH",
"001F3E": "RP-Technik e.K.",
"001F3F": "AVM GmbH",
"001F40": "Speakercraft Inc.",
"001F41": "Ruckus Wireless",
"001F42": "Etherstack plc",
"001F43": "ENTES ELEKTRONIK",
"001F44": "GE Transportation Systems",
"001F45": "Enterasys",
"001F46": "Nortel",
"001F47": "MCS Logic Inc.",
"001F48": "Mojix Inc.",
"001F49": "Eurosat Distribution Ltd",
"001F4A": "Albentia Systems S.A.",
"001F4B": "Lineage Power",
"001F4C": "Roseman Engineering Ltd",
"001F4D": "Segnetics LLC",
"001F4E": "ConMed Linvatec",
"001F4F": "Thinkware Co. Ltd.",
"001F50": "Swissdis AG",
"001F51": "HD Communications Corp",
"001F52": "UVT Unternehmensberatung fur Verkehr und Technik GmbH",
"001F53": "GEMAC Gesellschaft f\u00fcr Mikroelektronikanwendung Chemnitz mbH",
"001F54": "Lorex Technology Inc.",
"001F55": "Honeywell Security (China) Co., Ltd.",
"001F56": "DIGITAL FORECAST",
"001F57": "Phonik Innovation Co.,LTD",
"001F58": "EMH Energiemesstechnik GmbH",
"001F59": "Kronback Tracers",
"001F5A": "Beckwith Electric Co.",
"001F5B": "Apple",
"001F5C": "Nokia Danmark A/S",
"001F5D": "Nokia Danmark A/S",
"001F5E": "Dyna Technology Co.,Ltd.",
"001F5F": "Blatand GmbH",
"001F60": "COMPASS SYSTEMS CORP.",
"001F61": "Talent Communication Networks Inc.",
"001F62": "JSC \"Stilsoft\"",
"001F63": "JSC Goodwin-Europa",
"001F64": "Beijing Autelan Technology Inc.",
"001F65": "KOREA ELECTRIC TERMINAL CO., LTD.",
"001F66": "PLANAR LLC",
"001F67": "Hitachi,Ltd.",
"001F68": "Martinsson Elektronik AB",
"001F69": "Pingood Technology Co., Ltd.",
"001F6A": "PacketFlux Technologies, Inc.",
"001F6B": "LG Electronics",
"001F6C": "CISCO SYSTEMS, INC.",
"001F6D": "CISCO SYSTEMS, INC.",
"001F6E": "Vtech Engineering Corporation",
"001F6F": "Fujian Sunnada Communication Co.,Ltd.",
"001F70": "Botik Technologies LTD",
"001F71": "xG Technology, Inc.",
"001F72": "QingDao Hiphone Technology Co,.Ltd",
"001F73": "Teraview Technology Co., Ltd.",
"001F74": "Eigen Development",
"001F75": "GiBahn Media",
"001F76": "AirLogic Systems Inc.",
"001F77": "HEOL DESIGN",
"001F78": "Blue Fox Porini Textile",
"001F79": "Lodam Electronics A/S",
"001F7A": "WiWide Inc.",
"001F7B": "TechNexion Ltd.",
"001F7C": "Witelcom AS",
"001F7D": "embedded wireless GmbH",
"001F7E": "ARRIS Group, Inc.",
"001F7F": "Phabrix Limited",
"001F80": "Lucas Holding bv",
"001F81": "Accel Semiconductor Corp",
"001F82": "Cal-Comp Electronics & Communications Co., Ltd",
"001F83": "Teleplan Technology Services Sdn Bhd",
"001F84": "Gigle Semiconductor",
"001F85": "Apriva ISS, LLC",
"001F86": "digEcor",
"001F87": "Skydigital Inc.",
"001F88": "FMS Force Measuring Systems AG",
"001F89": "Signalion GmbH",
"001F8A": "Ellion Digital Inc.",
"001F8B": "Cache IQ",
"001F8C": "CCS Inc.",
"001F8D": "Ingenieurbuero Stark GmbH und Ko. KG",
"001F8E": "Metris USA Inc.",
"001F8F": "Shanghai Bellmann Digital Source Co.,Ltd.",
"001F90": "Actiontec Electronics, Inc",
"001F91": "DBS Lodging Technologies, LLC",
"001F92": "VideoIQ, Inc.",
"001F93": "Xiotech Corporation",
"001F94": "Lascar Electronics Ltd",
"001F95": "SAGEM COMMUNICATION",
"001F96": "APROTECH CO.LTD",
"001F97": "BERTANA SRL",
"001F98": "DAIICHI-DENTSU LTD.",
"001F99": "SERONICS co.ltd",
"001F9A": "Nortel Networks",
"001F9B": "POSBRO",
"001F9C": "LEDCO",
"001F9D": "CISCO SYSTEMS, INC.",
"001F9E": "CISCO SYSTEMS, INC.",
"001F9F": "Thomson Telecom Belgium",
"001FA0": "A10 Networks",
"001FA1": "Gtran Inc",
"001FA2": "Datron World Communications, Inc.",
"001FA3": "T&W Electronics(Shenzhen)Co.,Ltd.",
"001FA4": "ShenZhen Gongjin Electronics Co.,Ltd",
"001FA5": "Blue-White Industries",
"001FA6": "Stilo srl",
"001FA7": "Sony Computer Entertainment Inc.",
"001FA8": "Smart Energy Instruments Inc.",
"001FA9": "Atlanta DTH, Inc.",
"001FAA": "Taseon, Inc.",
"001FAB": "I.S HIGH TECH.INC",
"001FAC": "Goodmill Systems Ltd",
"001FAD": "Brown Innovations, Inc",
"001FAE": "Blick South Africa (Pty) Ltd",
"001FAF": "NextIO, Inc.",
"001FB0": "TimeIPS, Inc.",
"001FB1": "Cybertech Inc.",
"001FB2": "Sontheim Industrie Elektronik GmbH",
"001FB3": "2Wire",
"001FB4": "SmartShare Systems",
"001FB5": "I/O Interconnect Inc.",
"001FB6": "Chi Lin Technology Co., Ltd.",
"001FB7": "WiMate Technologies Corp.",
"001FB8": "Universal Remote Control, Inc.",
"001FB9": "Paltronics",
"001FBA": "BoYoung Tech. & Marketing, Inc.",
"001FBB": "Xenatech Co.,LTD",
"001FBC": "EVGA Corporation",
"001FBD": "Kyocera Wireless Corp.",
"001FBE": "Shenzhen Mopnet Industrial Co.,Ltd",
"001FBF": "Fulhua Microelectronics Corp. Taiwan Branch",
"001FC0": "Control Express Finland Oy",
"001FC1": "Hanlong Technology Co.,LTD",
"001FC2": "Jow Tong Technology Co Ltd",
"001FC3": "SmartSynch, Inc",
"001FC4": "ARRIS Group, Inc.",
"001FC5": "Nintendo Co., Ltd.",
"001FC6": "ASUSTek COMPUTER INC.",
"001FC7": "Casio Hitachi Mobile Comunications Co., Ltd.",
"001FC8": "Up-Today Industrial Co., Ltd.",
"001FC9": "CISCO SYSTEMS, INC.",
"001FCA": "CISCO SYSTEMS, INC.",
"001FCB": "NIW Solutions",
"001FCC": "Samsung Electronics Co.,Ltd",
"001FCD": "Samsung Electronics",
"001FCE": "QTECH LLC",
"001FCF": "MSI Technology GmbH",
"001FD0": "GIGA-BYTE TECHNOLOGY CO.,LTD.",
"001FD1": "OPTEX CO.,LTD.",
"001FD2": "COMMTECH TECHNOLOGY MACAO COMMERCIAL OFFSHORE LTD.",
"001FD3": "RIVA Networks Inc.",
"001FD4": "4IPNET, INC.",
"001FD5": "MICRORISC s.r.o.",
"001FD6": "<NAME>",
"001FD7": "TELERAD SA",
"001FD8": "A-TRUST COMPUTER CORPORATION",
"001FD9": "RSD Communications Ltd",
"001FDA": "Nortel Networks",
"001FDB": "Network Supply Corp.,",
"001FDC": "Mobile Safe Track Ltd",
"001FDD": "GDI LLC",
"001FDE": "Nokia Danmark A/S",
"001FDF": "Nokia Danmark A/S",
"001FE0": "EdgeVelocity Corp",
"001FE1": "Hon Hai Precision Ind. Co., Ltd.",
"001FE2": "Hon Hai Precision Ind. Co., Ltd.",
"001FE3": "LG Electronics",
"001FE4": "Sony Ericsson Mobile Communications",
"001FE5": "In-Circuit GmbH",
"001FE6": "Alphion Corporation",
"001FE7": "Simet",
"001FE8": "KURUSUGAWA Electronics Industry Inc,.",
"001FE9": "Printrex, Inc.",
"001FEA": "Applied Media Technologies Corporation",
"001FEB": "Trio Datacom Pty Ltd",
"001FEC": "Synapse \u00c9lectronique",
"001FED": "Tecan Systems Inc.",
"001FEE": "ubisys technologies GmbH",
"001FEF": "SHINSEI INDUSTRIES CO.,LTD",
"001FF0": "Audio Partnership",
"001FF1": "Paradox Hellas S.A.",
"001FF2": "VIA Technologies, Inc.",
"001FF3": "Apple",
"001FF4": "Power Monitors, Inc.",
"001FF5": "Kongsberg Defence & Aerospace",
"001FF6": "PS Audio International",
"001FF7": "Nakajima All Precision Co., Ltd.",
"001FF8": "Siemens AG, Sector Industry, Drive Technologies, Motion Control Systems",
"001FF9": "Advanced Knowledge Associates",
"001FFA": "Coretree, Co, Ltd",
"001FFB": "Green Packet Bhd",
"001FFC": "Riccius+Sohn GmbH",
"001FFD": "Indigo Mobile Technologies Corp.",
"001FFE": "HPN Supply Chain",
"001FFF": "Respironics, Inc.",
"002000": "LEXMARK INTERNATIONAL, INC.",
"002001": "DSP SOLUTIONS, INC.",
"002002": "SERITECH ENTERPRISE CO., LTD.",
"002003": "PIXEL POWER LTD.",
"002004": "YAMATAKE-HONEYWELL CO., LTD.",
"002005": "SIMPLE TECHNOLOGY",
"002006": "GARRETT COMMUNICATIONS, INC.",
"002007": "SFA, INC.",
"002008": "CABLE & COMPUTER TECHNOLOGY",
"002009": "PACKARD BELL ELEC., INC.",
"00200A": "SOURCE-COMM CORP.",
"00200B": "OCTAGON SYSTEMS CORP.",
"00200C": "ADASTRA SYSTEMS CORP.",
"00200D": "<NAME>",
"00200E": "SATELLITE TECHNOLOGY MGMT, INC",
"00200F": "TANBAC CO., LTD.",
"002010": "JEOL SYSTEM TECHNOLOGY CO. LTD",
"002011": "CANOPUS | |
import discord
import re
import csv
import random
import os
import datetime
from operator import itemgetter, attrgetter
from .utils import chat_formatting as chat
from .utils.dataIO import dataIO
from cogs.utils import checks
from discord.ext import commands
from . import hook as hook
class MCOCTools:
'''Tools for Marvel Contest of Champions'''
lookup_links = {
'event': (
'<http://simians.tk/MCOC-Sched>',
'[Tiny MCoC Schedule](https://docs.google.com/spreadsheets/d/e/2PACX-1vT5A1MOwm3CvOGjn7fMvYaiTKDuIdvKMnH5XHRcgzi3eqLikm9SdwfkrSuilnZ1VQt8aSfAFJzZ02zM/pubhtml?gid=390226786)',
'<NAME> Schedule',
'https://d2jixqqjqj5d23.cloudfront.net/assets/developer/imgs/icons/google-spreadsheet-icon.png'),
'rttl':(
'<https://drive.google.com/file/d/0B4ozoShtX2kFcDV4R3lQb1hnVnc/view>',
'[Road to the Labyrinth Opponent List](https://drive.google.com/file/d/0B4ozoShtX2kFcDV4R3lQb1hnVnc/view)',
'by Regal Empire {OG Wolvz}',
'http://svgur.com/s/48'),
'hook': (
'<http://hook.github.io/champions>',
'[hook/Champions by gabriel](http://hook.github.io/champions)',
'hook/champions for Collector',
'https://assets-cdn.github.com/favicon.ico'),
'spotlight': (
'<http://simians.tk/MCoCspotlight>',
'[MCOC Spotlight Dataset](http://simians.tk/MCoCspotlight)\nIf you would like to donate prestige, signatures or stats, join us at \n[CollectorDevTeam](https://discord.gg/BwhgZxk)'),
# 'marvelsynergy': (
# '<http://www.marvelsynergy.com/team-builder>',
# '[Marvel Synergy Team Builder](http://www.marvelsynergy.com/team-builder)',
# 'Marvel Synergy',
# 'http://www.marvelsynergy.com/images/marvelsynergy.png'),
'alsciende':(
'<https://alsciende.github.io/masteries/v10.0.1/#>',
'[Alsciende Mastery Tool](https://alsciende.github.io/masteries/v17.0.2/#)',
'by u/alsciende',
'https://images-ext-2.discordapp.net/external/ymdMNrkhO9L5tUDupbFSEmu-JK0X2bpV0ZE-VYTBICc/%3Fsize%3D1024/https/cdn.discordapp.com/avatars/268829380262756357/b55ae7fc51d9b741450f949accd15fbe.webp?width=80&height=80'),
'simulator': (
'<http://simians.tk/msimSDF>',
'[-SDF- Mastery Simulator](http://simians.tk/msimSDF)'),
# 'streak': (
# '<http://simians.tk/-sdf-streak>'
# '[Infinite Streak](http://simians.tk/-sdf-streak)'),
# #'http://simians.tk/SDFstreak')
}
mcolor = discord.Color.red()
COLLECTOR_ICON='https://raw.githubusercontent.com/JasonJW/mcoc-cogs/master/mcoc/data/cdt_icon.png'
icon_sdf = 'https://raw.githubusercontent.com/JasonJW/mcoc-cogs/master/mcoc/data/sdf_icon.png'
dataset = 'data/mcoc/masteries.csv'
def __init__(self, bot):
self.bot = bot
def present(self, lookup):
em=discord.Embed(color=self.mcolor,title='',description=lookup[1])
print(len(lookup))
if len(lookup) > 2:
em.set_footer(text=lookup[2],icon_url=lookup[3])
else:
em.set_footer(text='CollectorDevTeam',icon_url=self.COLLECTOR_ICON)
return em
@commands.command(pass_context=True,aliases={'collector','infocollector','about'})
async def aboutcollector(self,ctx):
"""Shows info about Collector"""
author_repo = "https://github.com/Twentysix26"
red_repo = author_repo + "/Red-DiscordBot"
server_url = "https://discord.gg/wJqpYGS"
dpy_repo = "https://github.com/Rapptz/discord.py"
python_url = "https://www.python.org/"
collectorpatreon = 'https://patreon.com/collectorbot'
since = datetime.datetime(2016, 1, 2, 0, 0)
days_since = (datetime.datetime.utcnow() - since).days
dpy_version = "[{}]({})".format(discord.__version__, dpy_repo)
py_version = "[{}.{}.{}]({})".format(*os.sys.version_info[:3],
python_url)
owner_set = self.bot.settings.owner is not None
owner = self.bot.settings.owner if owner_set else None
if owner:
owner = discord.utils.get(self.bot.get_all_members(), id=owner)
if not owner:
try:
owner = await self.bot.get_user_info(self.bot.settings.owner)
except:
owner = None
if not owner:
owner = "Unknown"
about = (
"Collector is an instance of [Red, an open source Discord bot]({0}) "
"created by [Twentysix]({1}) and improved by many.\n\n"
"The Collector Dev Team is backed by a passionate community who contributes and "
"creates content for everyone to enjoy. [Join us today]({2}) "
"and help us improve!\n\n"
"★ If you would like to support the Collector, please visit {3}.\n"
"★ Patrons and Collaborators recieve priority support and secrety stuff.\n\n~ JJW"
"".format(red_repo, author_repo, server_url, collectorpatreon))
devteam = ( "DeltaSigma#8530\n"
"JJW#8071\n"
)
supportteam=('phil_wo#3733\nSpiderSebas#9910\nsuprmatt#2753\ntaoness#5565\nOtriux#9964')
embed = discord.Embed(colour=discord.Colour.red(), title="Collector", url=collectorpatreon)
embed.add_field(name="Instance owned by", value=str(owner))
embed.add_field(name="Python", value=py_version)
embed.add_field(name="discord.py", value=dpy_version)
embed.add_field(name="About", value=about, inline=False)
embed.add_field(name="PrestigePartner",value='mutamatt#4704',inline=True)
embed.add_field(name='DuelsPartners',value='2OO2RC51#4587',inline=True)
embed.add_field(name='MapsPartners',value='jpags#5202\nBlooregarde#5848 ',inline=True)
embed.add_field(name='LabyrinthTeam',value='Kiryu#5755\nre-1#7595',inline=True)
embed.add_field(name='CollectorSupportTeam', value=supportteam,inline=True)
embed.add_field(name="CollectorDevTeam",value=devteam,inline=True)
embed.set_footer(text="Bringing joy since 02 Jan 2016 (over "
"{} days ago!)".format(days_since))
try:
await self.bot.say(embed=embed)
except discord.HTTPException:
await self.bot.say("I need the `Embed links` permission "
"to send this")
# @checks.admin_or_permissions(manage_server=True)
# @commands.command()
# async def tickets(self):
# ticketsjson = 'data/tickets/tickets.json'
# tickets = dataIO.load_json(ticketsjson)
# em = discord.Embed(title='Tickets')
# cnt = 0
# ids = tickets.keys()
#
# for ticket in :
# em.add_field(name='{} - filed by {}'.format(cnt, ticket['name'],value='{}\n id: {}'.format(ticket['message'],ticket)))
# await self.bot.say(embed=em)
@commands.command(help=lookup_links['event'][0], aliases=['events','schedule',])
async def event(self):
x = 'event'
lookup = self.lookup_links[x]
await self.bot.say(embed=self.present(lookup))
# await self.bot.say('iOS dumblink:\n{}'.format(lookup[0]))
@commands.command(help=lookup_links['spotlight'][0],)
async def spotlight(self):
x = 'spotlight'
lookup = self.lookup_links[x]
await self.bot.say(embed=self.present(lookup))
# await self.bot.say('iOS dumblink:\n{}'.format(lookup[0]))
@commands.command(help=lookup_links['rttl'][0],)
async def rttl(self):
x = 'rttl'
lookup = self.lookup_links[x]
await self.bot.say(embed=self.present(lookup))
# await self.bot.say('iOS dumblink:\n{}'.format(lookup[0]))
@commands.command(help=lookup_links['simulator'][0],aliases=['msim'])
async def simulator(self):
x = 'simulator'
lookup = self.lookup_links[x]
await self.bot.say(embed=self.present(lookup))
# await self.bot.say('iOS dumblink:\n{}'.format(lookup[0]))
@commands.command(help=lookup_links['alsciende'][0], aliases=('mrig',))
async def alsciende(self):
x = 'alsciende'
lookup = self.lookup_links[x]
await self.bot.say(embed=self.present(lookup))
# await self.bot.say('iOS dumblink:\n{}'.format(lookup[0]))
@commands.command(help=lookup_links['hook'][0])
async def hook(self):
x = 'hook'
lookup = self.lookup_links[x]
await self.bot.say(embed=self.present(lookup))
# await self.bot.say('iOS dumblink:\n{}'.format(lookup[0]))
# @commands.command()
# async def keygen(self, prefix='SDCC17'):
# '''SDCC Code Generator
# No warranty :)'''
# letters='ABCDEFGHIJKLMNOPQURSTUVWXYZ'
# numbers='0123456789'
# package = []
# for i in range(0,9):
# lets='{}{}{}{}{}{}'.format(random.choice(letters),random.choice(letters),random.choice(numbers),random.choice(numbers),random.choice(letters),random.choice(letters))
# package.append(prefix+lets)
# em=discord.Embed(color=discord.Color.gold(),title='Email Code Generator',description='\n'.join(package))
# await self.bot.say(embed=em)
def _get_text(self, mastery, rank):
rows = csv_get_rows(self.dataset,'Mastery',mastery)
for row in rows:
text.append(row['Text'].format(row[str(rank)]))
return text
@checks.admin_or_permissions(manage_server=True, manage_roles=True)
@commands.command(name='gaps', pass_context=True, hidden=True)
async def _alliance_popup(self, ctx, *args):
'''Guild | Alliance Popup System'''
warning_msg =('The G.A.P.S. System will configure your server for basic Alliance Operations.'
'Roles will be added for summoners, alliance, officers, bg1, bg2, bg3'
'Channels will be added for announcements, alliance, & battlegroups.'
'Channel permissions will be configured.'
'After the G.A.P.S. system prepares your server, there will be additional instructions.'
'If you consent, press OK')
em = discord.Embed(color=ctx.message.author.color, title='G.A.P.S. Warning Message', description=warning_msg)
message = await self.bot.say(embed=em)
await self.bot.add_reaction(message, '❌')
await self.bot.add_reaction(message, '🆗')
react = await self.bot.wait_for_reaction(message=message, user=ctx.message.author, timeout=30, emoji=['❌', '🆗'])
if react is not None:
if react.reaction.emoji == '❌':
await self.bot.say('G.A.P.S. canceled.')
return
elif react.reaction.emoji == '🆗':
message2 = await self.bot.say('G.A.P.S. in progess.')
else:
await self.bot.say('Ambiguous response. G.A.P.S. canceled')
return
server = ctx.message.server
adminpermissions = discord.PermissionOverwrite(administrator=True)
moderatorpermissions = discord.PermissionOverwrite(manage_roles=True)
moderatorpermissions.manage_server=True
moderatorpermissions.kick_members=True
moderatorpermissions.ban_members=True
moderatorpermissions.manage_channels=True
moderatorpermissions.manage_server=True
moderatorpermissions.manage_messages=True
moderatorpermissions.view_audit_logs=True
moderatorpermissions.read_messages=True
moderatorpermissions.create_instant_invite=True
roles = server.roles
rolenames = []
for r in roles:
rolenames.append('{}'.format(r.name))
aroles = ['officers', 'bg1', 'bg2', 'bg3', 'alliance', 'summoners']
# message = await self.bot.say('Stage 1: Creating roles')
if 'admin' not in rolenames:
admin = await self.bot.create_role(server=server, name='admin', color=discord.Color.gold(), hoist=False, mentionable=False)
if 'officers' not in rolenames:
officers = await self.bot.create_role(server=server, name='officers', color=discord.Color.light_grey(), hoist=False, mentionable=True)
if 'bg1' not in rolenames:
bg1 = await self.bot.create_role(server=server, name='bg1', color=discord.Color.blue(), hoist=False, mentionable=True)
if 'bg2' not in rolenames:
bg2 = await self.bot.create_role(server=server, name='bg2', color=discord.Color.purple(), hoist=False, mentionable=True)
if 'bg3' not in rolenames:
bg3 = await self.bot.create_role(server=server, name='bg3', color=discord.Color.orange(), hoist=False, mentionable=True)
if 'alliance' not in rolenames:
alliance = await self.bot.create_role(server=server, name='alliance', color=discord.Color.teal(), hoist=True, mentionable=True)
if 'summoners' not in rolenames:
summoners = await self.bot.create_role(server=server, name='summoners', color=discord.Color.lighter_grey(), hoist=True, mentionable=True)
roles = sorted(server.roles, key=lambda roles:roles.position, reverse=True)
em = discord.Embed(color=discord.Color.red(), title='Guild Alliance Popup System', description='')
positions = []
for r in roles:
positions.append('{} = {}'.format(r.position, r.mention))
if r.name == 'officers':
officers = r
elif r.name == 'bg1':
bg1 = r
elif r.name == 'bg2':
bg2 = r
elif r.name == 'bg3':
bg3 = r
elif r.name == 'alliance':
alliance = r
elif r.name == 'summoners':
summoners = r
elif r.name == 'admin':
admin = r
elif r.name=='everyone':
everyone = r
em.add_field(name='Stage 1 Role Creation',value='\n'.join(positions),inline=False)
await self.bot.say(embed=em)
everyone_perms = discord.PermissionOverwrite(read_messages = False)
everyoneperms = discord.ChannelPermissions(target=server.default_role, overwrite=everyone_perms)
readperm = discord.PermissionOverwrite(read_messages = True)
officerperms = discord.ChannelPermissions(target=officers, overwrite=readperm)
allianceperms = discord.ChannelPermissions(target=alliance, overwrite=readperm)
summonerperms = discord.ChannelPermissions(target=summoners, overwrite=readperm)
bg1perms = discord.ChannelPermissions(target=bg1, overwrite=readperm)
bg2perms = discord.ChannelPermissions(target=bg2, overwrite=readperm)
bg3perms = discord.ChannelPermissions(target=bg3, overwrite=readperm)
channellist = []
for c in server.channels:
channellist.append(c.name)
if 'announcements' not in channellist:
await self.bot.create_channel(server, 'announcements', everyoneperms, allianceperms, summonerperms)
# if 'alliance' not in channellist:
# await self.bot.create_channel(server, 'alliance', everyoneperms, allianceperms)
if 'alliance-chatter' not in channellist:
await self.bot.create_channel(server, 'alliance-chatter', everyoneperms, allianceperms)
if 'officers' not in channellist:
await self.bot.create_channel(server, 'officers', everyoneperms, officerperms)
if 'bg1aq' not in channellist:
await self.bot.create_channel(server, 'bg1aq', everyoneperms, officerperms, bg1perms)
if 'bg1aw' not in channellist:
await self.bot.create_channel(server, 'bg1aw', everyoneperms, officerperms, bg1perms)
if 'bg2aq' not in channellist:
await self.bot.create_channel(server, 'bg2aq', everyoneperms, officerperms, bg2perms)
if 'bg2aw' not in channellist:
await self.bot.create_channel(server, 'bg2aw', everyoneperms, officerperms, bg2perms)
if 'bg3aq' not in channellist:
await self.bot.create_channel(server, 'bg3aq', everyoneperms, officerperms, bg3perms)
if 'bg3aw' not in channellist:
await self.bot.create_channel(server, 'bg3aw', everyoneperms, officerperms, bg2perms)
channels= sorted(server.channels, key=lambda channels:channels.position, reverse=False)
channelnames=[]
for c in channels:
channelnames.append('{} = {} '.format(c.position, c.mention))
em = discord.Embed(color=discord.Color.red(), title='Guild Alliance Popup System', description='')
em.add_field(name='Stage 2 Create Channels',value='\n'.join(channelnames),inline=False)
await self.bot.say(embed=em)
em = discord.Embed(color=discord.Color.red(), titel= 'Guild Alliance Popup System', descritpion='')
# fixNotifcations = await self.bot.say('Stage 3: Attempting to set Default Notification to Direct Message Only')
try:
# mentions only
await self.bot.http.request(discord.http.Route('PATCH', '/guilds/{guild_id}', guild_id=server.id), json={'default_message_notifications': 1})
em.add_field(name='Stage 3: Notification Settings', value='I have modified the servers to use better notification settings.')
except Exception as e:
await self.bot.edit_message(fixNotifcations, "An exception occurred. check your log.")
await self.bot.say(embed=em)
em = discord.Embed(color=ctx.message.author.color, titel= 'Guild Alliance Popup System', descritpion='Server Owner Instructions')
em.add_field(name='Enroll for Collector announcements', value='Enroll a channel for Collector announcements\n```/addchan #announcements```\n', inline=False)
em.add_field(name='Set up Autorole', value='Default Role should be {}\n```/autorole role summoners```\n```/autorole toggle``` '.format(summoners.mention), inline=False)
await self.bot.say(embed=em)
await self.bot.delete_message(message2)
# @checks.is_owner()
# @commands.group(pass_context=True, hidden=True)
# async def inspect(self, ctx):
# @checks.is_owner()
@commands.command(pass_context=True, hidden=True, name='inspectroles', aliases=['inspectrole', 'ir',])
async def _inspect_roles(self, ctx):
server = ctx.message.server
roles = sorted(server.roles, key=lambda roles:roles.position, reverse=True)
positions = []
for r in roles:
positions.append('{} = {}'.format(r.position, r.name))
desc = '\n'.join(positions)
em = discord.Embed(color=discord.Color.red(), title='Collector Inspector: ROLES', description=desc)
await self.bot.say(embed=em)
@checks.admin_or_permissions(manage_roles=True)
@commands.command(name='norole',pass_context=True,hidden=True)
async def _no_role(self, ctx, role : discord.Role):
members = ctx.message.server.members
missing = []
print(str(len(missing)))
for | |
from functools import reduce
from typing import List, Tuple, Any, Dict, Optional
from copy import deepcopy
from time import time
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import gym
import regym
from regym.rl_algorithms.agents import Agent
from regym.environments.tasks import RegymAsyncVectorEnv
from regym.rl_loops.utils import update_parallel_sequential_trajectories, update_finished_trajectories
from regym.rl_loops.utils import agents_to_update_finished_trajectory_sequential_env
from regym.rl_loops.utils import extract_latest_experience_sequential_trajectory
from regym.rl_loops.trajectory import Trajectory
def async_run_episode(env: RegymAsyncVectorEnv,
                      agent_vector: List[Agent],
                      training: bool,
                      num_episodes: int,
                      show_progress: bool = False,
                      summary_writer: Optional[SummaryWriter] = None,
                      initial_episode: int = 0,
                      store_extra_information: bool = False) \
                      -> List[Trajectory]:
    '''
    Runs :param: num_episodes of asynchronous environment :param: env
    with agents specified in :param: agent_vector.
    For model-free agents, observations are batched, to be easily
    managed by neural networks. For model-based agents, an array of environment
    copies is handed over to the agent.
    NOTES:
    - Currently the :param: env runs `env.num_envs` asynchronous environments.
      Because more than one trajectory can finish at the same time,
      this function can return a number of trajectories in the range:
      $[num_episodes, num_episodes + (env.num_envs - 1)]$
    - Because some environments will be in the middle of an episode when
      this function returns, those trajectories won't appear in the output
      of this function, even though they have been processed by agents in
      :param: agent_vector.
    :param env: RegymAsyncVectorEnv where agents will play
    :param agent_vector: Vector containing agent policies
    :param training: Whether to propagate experiences to agents
                     in :param: agent_vector
    :param num_episodes: Number of target episodes to run environment for
    :param show_progress: Whether to output a progress bar to stdout
    :param summary_writer: Summary writer to which log various metrics
    :param initial_episode: Initial episode
    :param store_extra_information: Whether to have each Timestep in output
                                    trajectory store information about the
                                    agents that acted on it.
    :returns: List of environment trajectories experienced during simulation.
    '''
    # Initialize trajectories
    # One sequential-action trajectory per parallel environment.
    ongoing_trajectories = [Trajectory(env_type=regym.environments.EnvType.MULTIAGENT_SEQUENTIAL_ACTION,
                                       num_agents=len(agent_vector))
                            for _ in range(env.num_envs)]
    finished_trajectories: List[Trajectory] = []
    # NOTE(review): create_environment_variables also returns a (possibly
    # overridden) store_extra_information flag — confirm that shadowing the
    # parameter here is intentional.
    (store_extra_information,
     current_players,
     legal_actions,
     num_agents,
     obs) = create_environment_variables(env, agent_vector, store_extra_information)
    if show_progress:
        progress_bar = create_progress_bar(env, agent_vector, training, num_episodes, initial_episode)
    if summary_writer:
        # Timing accumulators are only maintained when metrics are requested.
        # TODO: not sure these actually do what they claim
        logged_trajectories = 0
        action_time, handling_experience_time, env_step_time = 0., 0., 0.
        start_time = time()
    # Main loop: one iteration = one (batched) action + one env step across
    # all parallel environments, until enough episodes have completed.
    while len(finished_trajectories) < num_episodes:
        # Take action
        if summary_writer: action_time_start = time()
        action_vector = multienv_choose_action(
            agent_vector, env, obs, current_players, legal_actions)
        if summary_writer: action_time += time() - action_time_start
        # Environment step
        if summary_writer: env_step_time_start = time()
        succ_obs, rewards, dones, infos = env.step(action_vector)
        if summary_writer: env_step_time += time() - env_step_time_start
        # Update trajectories:
        if summary_writer:
            handling_experience_time_start = time()
        update_parallel_sequential_trajectories(ongoing_trajectories,
                                                agent_vector, action_vector,
                                                obs, rewards, succ_obs, dones,
                                                current_players, store_extra_information)
        if summary_writer:
            handling_experience_time += time() - handling_experience_time_start
        # Update agents
        if training:
            propagate_experiences(agent_vector, ongoing_trajectories, store_extra_information)
        # Update observation
        obs = succ_obs
        # Update current players and legal actions
        # Fallback assumes clockwise turn taking when the env does not
        # report 'current_player' in its info dict.
        legal_actions = [info.get('legal_actions', None) for info in infos]
        current_players = [info.get('current_player',
                                    (current_players[e_i] + 1) % num_agents)
                           for e_i, info in enumerate(infos)]
        # Handle with episode termination
        done_envs = [i for i in range(len(dones)) if dones[i]]
        if len(done_envs) > 0:
            # TODO: Figure out a way of nicely refactoring this
            if summary_writer: handling_experience_time_start = time()
            finished_trajectories, ongoing_trajectories, current_players = \
                handle_finished_episodes(
                    training,
                    agent_vector,
                    ongoing_trajectories,
                    done_envs,
                    finished_trajectories,
                    current_players,
                    store_extra_information
                )
            if summary_writer: handling_experience_time += time() - handling_experience_time_start
            if show_progress: progress_bar.update(len(done_envs))
            if summary_writer:
                logged_trajectories = log_end_of_episodes(
                    summary_writer,
                    finished_trajectories,
                    logged_trajectories,
                    initial_episode,
                    start_time,
                    action_time,
                    handling_experience_time,
                    env_step_time,
                )
                # Reset per-episode timing accumulators after logging.
                action_time, handling_experience_time, env_step_time = 0., 0., 0.
    if show_progress: progress_bar.close()
    return finished_trajectories
def log_end_of_episodes(summary_writer: SummaryWriter,
                        finished_trajectories: List[Trajectory],
                        logged_trajectories: int,
                        initial_episode: int,
                        start_time: float,
                        action_time: float,
                        handling_experience_time: float,
                        env_step_time: float):
    '''
    Writes per-episode length statistics and timing metrics to
    :param: summary_writer for every trajectory not yet logged.
    :param logged_trajectories: More than 1 trajectory can be finished
                                concurrently, but we want one datapoint
                                to log for each one, so we have to keep
                                track of how many we've logged.
    :returns: Total number of trajectories logged so far.
    '''
    lengths = [len(trajectory) for trajectory in finished_trajectories]
    for i in range(logged_trajectories, len(finished_trajectories)):
        step = initial_episode + (i + 1)
        seen = lengths[:(i + 1)]
        summary_writer.add_scalar('PerEpisode/Episode_length', lengths[i], step)
        summary_writer.add_scalar('PerEpisode/Mean_episode_length', np.mean(seen), step)
        summary_writer.add_scalar('PerEpisode/Std_episode_length', np.std(seen), step)
        # Not sure if calculation is correct
        mean_episode_time = (time() - start_time) / len(finished_trajectories)
        summary_writer.add_scalar('Timing/Mean_time_per_episode', mean_episode_time, step)
        summary_writer.add_scalar('Timing/Take_action_time_taken', action_time, step)
        summary_writer.add_scalar('Timing/Handling_experience_time_taken', handling_experience_time, step)
        summary_writer.add_scalar('Timing/Env_step_time_taken', env_step_time, step)
    return len(finished_trajectories)
def multienv_choose_action(agent_vector: List[Agent],
                           env: RegymAsyncVectorEnv, obs,
                           current_players: List[int],
                           legal_actions: Dict[int, List[int]]) -> List[int]:
    '''
    Chooses one action per environment in (multienv) :param: env, asking the
    agent designated by :param: current_players and respecting
    :param: legal_actions.
    Observations and legal actions destined for the same agent across
    multiple environments are batched into a single `Agent.take_action`
    call to reduce computational overhead.
    :param agent_vector: Vector containing agent policies
    :param env: RegymAsyncVectorEnv where agents are acting
    :param obs: TODO
    :param current_players: List indicating which agent should act on
                            each environment
    :param legal_actions: Dict indicating which actions are allowed on each
                          environment
    :returns: Vector containing one action to be executed on each environment
    '''
    action_vector: List[int] = [None] * env.num_envs
    agent_signals = extract_signals_for_acting_agents(
        agent_vector, obs, current_players, legal_actions)
    for agent_index, signals in agent_signals.items():
        acting_agent = agent_vector[agent_index]
        partial_actions = compute_partial_action_vector(
            acting_agent, signals, env, agent_index)
        for env_id, action in zip(signals['env_ids'], partial_actions):
            assert action_vector[env_id] is None, 'Attempt to override an action'
            action_vector[env_id] = action
    return action_vector
def compute_partial_action_vector(agent: Agent,
                                  signals: Dict[str, Any],
                                  env: RegymAsyncVectorEnv,
                                  agent_index: int) -> List[int]:
    '''
    Asks :param: agent for one action per environment listed in
    :param: signals. Model-free agents receive batched observations and
    legal actions; model-based agents additionally receive the copies of
    the environments they act upon.
    :param signals: Environment signals per agent required to take actions
    :param env: RegymAsyncVectorEnv where agents are acting
    :returns: Actions to be taken by :param: agent
    '''
    if agent.requires_environment_model:
        envs = env.get_envs()
        relevant_envs = {e_i: envs[e_i] for e_i in signals['env_ids']}
        observations = dict(zip(signals['env_ids'], signals['obs']))
        return agent.model_based_take_action(
            relevant_envs, observations, agent_index, multi_action=True)
    return agent.model_free_take_action(
        signals['obs'], legal_actions=signals['legal_actions'],
        multi_action=True)
def handle_finished_episodes(training: bool, agent_vector: List[Agent],
                             ongoing_trajectories: List[Trajectory],
                             done_envs: List[int],
                             finished_trajectories: List[Trajectory],
                             current_players: List[int],
                             store_extra_information: bool) \
                             -> Tuple[List[Trajectory], List[Trajectory], List[int]]:
    '''
    Processes the environments in :param: done_envs whose episode just
    terminated: propagates the final experiences to training agents, moves
    the completed trajectories from :param: ongoing_trajectories into
    :param: finished_trajectories, and resets the acting player of each
    finished environment back to agent 0.

    :returns: Tuple of (finished trajectories, ongoing trajectories,
              current players). Note the trajectories come back in the
              opposite order to the parameters.
    '''
    # BUG FIX: the return annotation previously declared a 2-tuple while the
    # function returns 3 values.
    if training:
        propagate_last_experiences(agent_vector, ongoing_trajectories,
                                   done_envs, store_extra_information)
    # Reset players and trajectories
    ongoing_trajectories, finished_trajectories = update_finished_trajectories(
        ongoing_trajectories, finished_trajectories, done_envs)
    current_players = reset_current_players(done_envs, current_players)
    return finished_trajectories, ongoing_trajectories, current_players
def reset_current_players(done_envs: List[int],
                          current_players: List[int]) -> List[int]:
    '''
    Resets the acting player to agent 0 for every environment whose episode
    just finished.

    :param done_envs: Indices of environments whose episode terminated
    :param current_players: Current acting player per environment (mutated
                            in place)
    :returns: The same :param: current_players list
    '''
    # Iterate directly over the indices; the old `enumerate` index was unused.
    for e_i in done_envs:
        current_players[e_i] = 0
    return current_players
def propagate_experiences(agent_vector: List[Agent],
                          trajectories: List[Trajectory],
                          store_extra_information: bool = False):
    '''
    Batch propagates experiences from :param: trajectories to each
    corresponding agent in :param: agent_vector.
    ASSUMES that turns are taken in clockwise fashion:
        - Player 1 acts, player 2 acts..., player n acts, player 1 acts...
        - where n is the length of :param: agent_vector
    '''
    num_agents = len(agent_vector)
    # Under clockwise turn taking, the agent due for an update in env `i`
    # is fixed by the trajectory length modulo the number of agents.
    agent_to_update_per_env = {env_i: len(trajectory) % num_agents
                               for env_i, trajectory in enumerate(trajectories)
                               if len(trajectory) >= num_agents}
    if not agent_to_update_per_env:  # No agents to update
        return
    agents_to_update = set(agent_to_update_per_env.values())
    environment_per_agents = {}
    for a_i in agents_to_update:
        environment_per_agents[a_i] = [
            env_i for env_i, a_j in agent_to_update_per_env.items()
            if a_j == a_i]
    agent_experiences = collect_agent_experiences_from_trajectories(
        agents_to_update,
        agent_to_update_per_env,
        trajectories,
        agent_vector,
        store_extra_information)
    propagate_batched_experiences(agent_experiences,
                                  agent_vector,
                                  environment_per_agents)
def propagate_batched_experiences(agent_experiences: Dict[int, List[Tuple]],
                                  agent_vector: List[Agent],
                                  environment_per_agents: Dict[int, List[int]]):
    '''
    Propagates :param: agent_experiences to the corresponding agents in
    :param: agent_vector, as dictated by :param: environment_per_agents
    '''
    for agent_index, experiences in agent_experiences.items():
        agent = agent_vector[agent_index]
        if not agent.training:
            continue  # Only training agents consume experiences
        agent.handle_multiple_experiences(
            experiences, environment_per_agents[agent_index])
def propagate_last_experiences(agent_vector: List[Agent],
                               trajectories: List[Trajectory],
                               done_envs: List[int],
                               store_extra_information: bool):
    '''
    Propagates the terminal experience of each finished episode to every
    agent that still needs it: in a sequential-action environment all agents
    must observe the final transition, not only the one that acted last.

    :param agent_vector: Vector containing agent policies
    :param trajectories: Ongoing trajectories, indexed by environment
    :param done_envs: Indices of environments whose episode terminated
    :param store_extra_information: Whether experiences carry extra info
    '''
    agents_to_update_per_env = compute_agents_to_update_per_env(
        trajectories, done_envs, agent_vector)
    agents_to_update = set(reduce(lambda acc, x: acc + x,
                           agents_to_update_per_env.values(), []))
    environment_per_agents = {a_i: [env_i
                                    for env_i, agent_ids in agents_to_update_per_env.items()
                                    if a_i in agent_ids]
                              for a_i in agents_to_update}
    agent_experiences = {a_i: [] for a_i in agents_to_update}
    # Potential refactoring by using `collect_agent_experiences_from_trajectories`
    for a_i, envs in environment_per_agents.items():
        for e_i in envs:
            (o, a, r, succ_o, d, extra_info) = extract_latest_experience_sequential_trajectory(
                a_i, trajectories[e_i], store_extra_information)
            # BUG FIX: assert message was garbled ("Episode should in
            # environment ... should be finished").
            assert d, f'Episode in environment {e_i} should be finished'
            agent_experiences[a_i] += [(o, a, r, succ_o, True, extra_info)]
    propagate_batched_experiences(agent_experiences,
                                  agent_vector, environment_per_agents)
def compute_agents_to_update_per_env(trajectories: List[Trajectory], done_envs, agent_vector):
    '''
    For each finished environment, computes which agents still require the
    episode's final experience (only environments whose trajectory is at
    least one full round long are considered).
    '''
    num_agents = len(agent_vector)
    agents_per_env = {}
    for done_e_i in done_envs:
        trajectory_length = len(trajectories[done_e_i])
        if trajectory_length < num_agents:  # is this check necessary?
            continue
        agents_per_env[done_e_i] = agents_to_update_finished_trajectory_sequential_env(
            trajectory_length, num_agents)
    return agents_per_env
def collect_agent_experiences_from_trajectories(agents_to_update: List[int],
agent_to_update_per_env: Dict[int, int],
trajectories: List[List],
agent_vector: List[Agent],
store_extra_information: bool) \
-> Dict[int, Any]:
'''
Collects the latests experience from :param: trajectories, for each
:param: agents_to_update. Each agent collects experiences according to
:param: agent_to_update_per_env.
:param agents_to_update: List of all agents that need to | |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from a2c_ppo_acktr.distributions import Bernoulli, Categorical, DiagGaussian
from a2c_ppo_acktr.utils import init
class Flatten(nn.Module):
    """Collapses every dimension after the batch dimension into one."""

    def forward(self, x):
        batch_size = x.size(0)
        return x.view(batch_size, -1)
class Policy(nn.Module):
    """Actor-critic policy: a feature-extracting base network topped with an
    action-distribution head chosen according to the action space type."""

    def __init__(self, obs_shape, action_space, base=None, base_kwargs=None, use_pnn=False):
        super(Policy, self).__init__()
        if base_kwargs is None:
            base_kwargs = {}
        if base is None:
            # Default base selection: PNN when requested, CNN for image-like
            # (3D) observations, MLP for flat (1D) observations.
            if use_pnn:
                base = PNNConvBase
            elif len(obs_shape) == 3:
                base = CNNBase
            elif len(obs_shape) == 1:
                base = MLPBase
            else:
                raise NotImplementedError
        self.base = base(obs_shape[0], **base_kwargs)

        space_kind = action_space.__class__.__name__
        if space_kind == "Discrete":
            self.dist = Categorical(self.base.output_size, action_space.n)
        elif space_kind == "Box":
            self.dist = DiagGaussian(self.base.output_size, action_space.shape[0])
        elif space_kind == "MultiBinary":
            self.dist = Bernoulli(self.base.output_size, action_space.shape[0])
        else:
            raise NotImplementedError

    @property
    def is_recurrent(self):
        return self.base.is_recurrent

    @property
    def recurrent_hidden_state_size(self):
        """Size of rnn_hx."""
        return self.base.recurrent_hidden_state_size

    def forward(self, inputs, rnn_hxs, masks):
        raise NotImplementedError

    def act(self, inputs, rnn_hxs, masks, deterministic=False):
        """Sample (or take the modal) action and return value estimate,
        action, its log-probability and the new recurrent state."""
        value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
        dist = self.dist(actor_features)
        # Greedy action at evaluation time, stochastic sample otherwise.
        action = dist.mode() if deterministic else dist.sample()
        action_log_probs = dist.log_probs(action)
        dist_entropy = dist.entropy().mean()
        return value, action, action_log_probs, rnn_hxs

    def get_value(self, inputs, rnn_hxs, masks):
        """Return only the critic's value estimate."""
        value, _, _ = self.base(inputs, rnn_hxs, masks)
        return value

    def evaluate_actions(self, inputs, rnn_hxs, masks, action):
        """Score externally provided actions: value, log-probs, entropy."""
        value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
        dist = self.dist(actor_features)
        action_log_probs = dist.log_probs(action)
        dist_entropy = dist.entropy().mean()
        return value, action_log_probs, dist_entropy, rnn_hxs
class NNBase(nn.Module):
    """Common base for policy feature extractors, optionally wrapping a GRU.

    Subclasses call ``_forward_gru`` to thread features through the GRU,
    which handles both single-step inference and flattened (T * N) rollout
    batches with episode-boundary masks.
    """
    def __init__(self, recurrent, recurrent_input_size, hidden_size):
        super(NNBase, self).__init__()
        self._hidden_size = hidden_size
        self._recurrent = recurrent
        if recurrent:
            self.gru = nn.GRU(recurrent_input_size, hidden_size)
            # Orthogonal weights / zero biases for the GRU parameters.
            for name, param in self.gru.named_parameters():
                if 'bias' in name:
                    nn.init.constant_(param, 0)
                elif 'weight' in name:
                    nn.init.orthogonal_(param)
    @property
    def is_recurrent(self):
        return self._recurrent
    @property
    def recurrent_hidden_state_size(self):
        # Non-recurrent bases still report size 1 so callers can allocate
        # a (dummy) hidden-state tensor uniformly.
        if self._recurrent:
            return self._hidden_size
        return 1
    @property
    def output_size(self):
        return self._hidden_size
    def _forward_gru(self, x, hxs, masks):
        # Single-step case: one hidden state per input row; masks zero the
        # hidden state at episode starts.
        if x.size(0) == hxs.size(0):
            x, hxs = self.gru(x.unsqueeze(0), (hxs * masks).unsqueeze(0))
            x = x.squeeze(0)
            hxs = hxs.squeeze(0)
        else:
            # x is a (T, N, -1) tensor that has been flatten to (T * N, -1)
            N = hxs.size(0)
            T = int(x.size(0) / N)
            # unflatten
            x = x.view(T, N, x.size(1))
            # Same deal with masks
            masks = masks.view(T, N)
            # Let's figure out which steps in the sequence have a zero for any agent
            # We will always assume t=0 has a zero in it as that makes the logic cleaner
            has_zeros = ((masks[1:] == 0.0) \
                            .any(dim=-1)
                            .nonzero()
                            .squeeze()
                            .cpu())
            # +1 to correct the masks[1:]
            if has_zeros.dim() == 0:
                # Deal with scalar
                has_zeros = [has_zeros.item() + 1]
            else:
                has_zeros = (has_zeros + 1).numpy().tolist()
            # add t=0 and t=T to the list
            has_zeros = [0] + has_zeros + [T]
            hxs = hxs.unsqueeze(0)
            outputs = []
            # Process maximal segments with no episode boundary in one GRU
            # call; the mask at each segment start resets finished episodes.
            for i in range(len(has_zeros) - 1):
                # We can now process steps that don't have any zeros in masks together!
                # This is much faster
                start_idx = has_zeros[i]
                end_idx = has_zeros[i + 1]
                rnn_scores, hxs = self.gru(
                    x[start_idx:end_idx],
                    hxs * masks[start_idx].view(1, -1, 1))
                outputs.append(rnn_scores)
            # assert len(outputs) == T
            # x is a (T, N, -1) tensor
            x = torch.cat(outputs, dim=0)
            # flatten
            x = x.view(T * N, -1)
            hxs = hxs.squeeze(0)
        return x, hxs
class CNNBase(NNBase):
    """Convolutional actor-critic trunk for image observations (Nature-CNN shape)."""

    def __init__(self, num_inputs, recurrent=False, hidden_size=512):
        super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)

        def conv_init(module):
            # Orthogonal weights, zero bias, ReLU gain.
            return init(module, nn.init.orthogonal_,
                        lambda x: nn.init.constant_(x, 0),
                        nn.init.calculate_gain('relu'))

        self.main = nn.Sequential(
            conv_init(nn.Conv2d(num_inputs, 32, 8, stride=4)), nn.ReLU(),
            conv_init(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(),
            conv_init(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(), Flatten(),
            conv_init(nn.Linear(32 * 7 * 7, hidden_size)), nn.ReLU())

        def head_init(module):
            # Orthogonal weights, zero bias, default gain for the value head.
            return init(module, nn.init.orthogonal_,
                        lambda x: nn.init.constant_(x, 0))

        self.critic_linear = head_init(nn.Linear(hidden_size, 1))

        self.train()

    def forward(self, inputs, rnn_hxs, masks):
        """Return (value, features, rnn_hxs); pixel inputs are scaled to [0, 1]."""
        features = self.main(inputs / 255.0)

        if self.is_recurrent:
            features, rnn_hxs = self._forward_gru(features, rnn_hxs, masks)

        return self.critic_linear(features), features, rnn_hxs
class MLPBase(NNBase):
    """Two-tower MLP trunk with separate actor and critic networks."""

    def __init__(self, num_inputs, recurrent=False, hidden_size=64):
        super(MLPBase, self).__init__(recurrent, num_inputs, hidden_size)

        if recurrent:
            # The GRU output (hidden_size) feeds the towers instead of raw input.
            num_inputs = hidden_size

        def linear_init(module):
            # Orthogonal weights (gain sqrt(2)), zero bias.
            return init(module, nn.init.orthogonal_,
                        lambda x: nn.init.constant_(x, 0), np.sqrt(2))

        self.actor = nn.Sequential(
            linear_init(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),
            linear_init(nn.Linear(hidden_size, hidden_size)), nn.Tanh())

        self.critic = nn.Sequential(
            linear_init(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),
            linear_init(nn.Linear(hidden_size, hidden_size)), nn.Tanh())

        self.critic_linear = linear_init(nn.Linear(hidden_size, 1))

        self.train()

    def forward(self, inputs, rnn_hxs, masks):
        """Return (value, actor features, rnn_hxs)."""
        features = inputs

        if self.is_recurrent:
            features, rnn_hxs = self._forward_gru(features, rnn_hxs, masks)

        hidden_critic = self.critic(features)
        hidden_actor = self.actor(features)

        return self.critic_linear(hidden_critic), hidden_actor, rnn_hxs
##### Added for CCM #######
class ScaleLayer(nn.Module):
    """Multiplies its input elementwise by a single learnable scalar."""

    def __init__(self, init_value=1e-3):
        super().__init__()
        # One-element parameter tensor holding the scale factor.
        self.scale = nn.Parameter(torch.FloatTensor([init_value]))

    def forward(self, x):
        # Broadcasts over any input shape.
        return self.scale * x
class PNNBase(NNBase):
    """One column of a Progressive Neural Network: three convs plus an FC head.

    ``t`` describes the topology: three ``[in, out, kernel, stride]`` conv
    specs followed by one ``[in, out]`` FC spec (see PNNColumnAtari/Grid).
    """

    def __init__(self, t, recurrent=False, hidden_size=512):
        super(PNNBase, self).__init__(recurrent, hidden_size, hidden_size)

        # Orthogonal init, zero bias, ReLU gain for every layer of the column.
        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0), nn.init.calculate_gain('relu'))

        self.conv1 = init_(nn.Conv2d(t[0][0], t[0][1], t[0][2], stride=t[0][3]))
        self.conv2 = init_(nn.Conv2d(t[1][0], t[1][1], t[1][2], stride=t[1][3]))
        self.conv3 = init_(nn.Conv2d(t[2][0], t[2][1], t[2][2], stride=t[2][3]))
        self.fc = init_(nn.Linear(t[3][0], t[3][1]))

        # Optional max-pool after conv1; set by subclasses (e.g. PNNColumnGrid).
        self.mp = None
        self.relu = nn.ReLU()
        self.flatten = Flatten()

        # (kernel, stride) of conv2/conv3 plus FC width; consumed by
        # PNNConvBase.new_task when building lateral adapters.
        self.topology = [
            [t[1][2], t[1][3]],
            [t[2][2], t[2][3]],
            t[3][1]
        ]
        self.output_shapes = [x[1] for x in t]
        self.input_shapes = [x[0] for x in t]

    def layers(self, i, x):
        """Apply layer ``i`` (0..3) of the column to ``x`` and return the result."""
        if i == 0:
            if not self.mp:
                return self.relu(self.conv1(x))
            else:
                return self.mp(self.relu(self.conv1(x)))
        elif i == 1:
            return self.relu(self.conv2(x))
        elif i == 2:
            return self.relu(self.conv3(x))
        elif i == 3:
            # Final layer: flatten conv features into the FC head (no ReLU).
            return self.fc(self.flatten(x))

    def forward(self, x):
        """Run all four layers, returning every intermediate activation."""
        outs = []
        for i in range(4):
            x = self.layers(i, x)
            outs.append(x)
        return outs
class PNNColumnAtari(PNNBase):  # Use this for atari environments
    """PNN column with the standard Atari (Nature-CNN) topology."""

    def __init__(self, num_inputs, recurrent=False, hidden_size=512):
        # [n_input, n_output, fsize, stride] for c1, c2, c3 and [n_input, n_output] for FC
        topology = [
            [num_inputs, 32, 8, 4],
            [32, 64, 4, 2],
            [64, 32, 3, 1],
            [32 * 7 * 7, hidden_size],
        ]
        super(PNNColumnAtari, self).__init__(topology, recurrent, hidden_size)
class PNNColumnGrid(PNNBase):  # Use this for grid environments
    """PNN column with a small conv topology for grid-world observations."""

    def __init__(self, num_inputs, recurrent=False, hidden_size=64):
        # [in, out, kernel, stride] per conv; [in, out] for the FC head.
        t = [[num_inputs, 16, 2, 1], [16, 32, 2, 1], [32, 64, 2, 1], [64, 64]]
        super(PNNColumnGrid, self).__init__(t, recurrent, hidden_size)

        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0), nn.init.calculate_gain('relu'))

        # Grid columns max-pool after the first conv (see PNNBase.layers).
        self.mp = nn.MaxPool2d((2, 2))

        # Prepend an extra Linear+Tanh in front of the base FC head.
        # NOTE(review): the extra Linear maps hidden_size -> 64 and the base fc
        # expects 64 inputs; this only lines up because hidden_size defaults to
        # 64 here — confirm no caller passes a different hidden_size.
        self.fc = nn.Sequential(
            init_(nn.Linear(hidden_size, 64)),
            nn.Tanh(),
            self.fc
        )
class PNNConvBase(NNBase):
def __init__(self, num_inputs, recurrent=False, grid=False, hidden_size=512):
    """Progressive-NN trunk that manages columns plus their lateral adapters.

    Args:
        num_inputs: channel count of the input observation.
        recurrent: whether a GRU core is used on top of the trunk.
        grid: selects PNNColumnGrid (True) vs PNNColumnAtari (False) columns.
        hidden_size: feature size of each column's FC head.
    """
    super(PNNConvBase, self).__init__(recurrent, hidden_size, hidden_size)

    self.columns = nn.ModuleList([])
    self.num_inputs = num_inputs
    self.hidden_size = hidden_size
    self.recurrent = recurrent
    # Per previous column: lateral adapters (alpha scale, V reduction, U projection).
    self.alpha = nn.ModuleList([])
    self.V = nn.ModuleList([])
    self.U = nn.ModuleList([])
    self.flatten = Flatten()
    self.grid = grid

    init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0))

    if grid:
        self.critic_linear = nn.Sequential(
            init_(nn.Linear(self.hidden_size, 64)),
            nn.Tanh(),
            init_(nn.Linear(64, 1))
        )
    else:
        # BUG FIX: this previously read `nn.linear`, which does not exist and
        # raised AttributeError for every non-grid (Atari) instantiation.
        self.critic_linear = init_(nn.Linear(self.hidden_size, 1))

    self.train()
    # Three convs + one FC per column.
    self.n_layers = 4
def forward(self, x, rnn_hxs, masks):
    """Run all columns layer by layer, feeding lateral connections forward.

    Each column c > 0 receives, at layer l, its own layer output plus a
    projected version of the previous column's layer-(l-1) activation:
    ReLU(cur_out + U(ReLU(V(ReLU(alpha * prev))))).

    NOTE(review): only the immediately previous column feeds the lateral
    path here (inputs[c - 1]); the original PNN formulation sums laterals
    from *all* earlier columns — confirm this simplification is intended.
    """
    assert self.columns, 'PNN should at least have one column (missing call to `new_task` ?)'
    # x = (x / 255.0)

    # Layer 0 has no lateral connections: run it for every column.
    inputs = [self.columns[i].layers(0, x) for i in range(len(self.columns))]
    for l in range(1, self.n_layers):
        # First column never receives lateral input.
        outputs = [self.columns[0].layers(l, inputs[0])]
        for c in range(1, len(self.columns)):
            pre_col = inputs[c - 1]
            cur_out = self.columns[c].layers(l, inputs[c])
            a = self.alpha[c - 1][l - 1]
            a_h = F.relu(a(pre_col))
            V = self.V[c - 1][l - 1]
            V_a_h = F.relu(V(a_h))
            U = self.U[c - 1][l - 1]
            if l == self.n_layers - 1:  # FC layer
                # Flatten conv features before the Linear lateral projection.
                V_a_h = self.flatten(V_a_h)
                U_V_a_h = U(V_a_h)
                out = F.relu(cur_out + U_V_a_h)
                outputs.append(out)
            else:
                U_V_a_h = U(V_a_h)  # conv layers
                out = F.relu(cur_out + U_V_a_h)
                outputs.append(out)
        inputs = outputs

    # The newest column's final activation is the trunk output.
    x = inputs[-1]

    if self.is_recurrent:
        x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)

    return self.critic_linear(x), x, rnn_hxs
def new_task(self):  # adds a new column to pnn
    """Append a fresh column; from the second column on, also build its
    lateral adapters (alpha scale, V 1x1 reduction, U projection) for
    layers 1..n_layers-1."""
    if self.grid:
        new_column = PNNColumnGrid(self.num_inputs, self.recurrent, self.hidden_size)
    else:
        new_column = PNNColumnAtari(self.num_inputs, self.recurrent, self.hidden_size)
    self.columns.append(new_column)

    init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0))

    if len(self.columns) > 1:
        pre_col, col = self.columns[-2], self.columns[-1]
        a_list = []
        V_list = []
        U_list = []
        for l in range(1, self.n_layers):
            # alpha: learnable scalar applied to the previous column's activation.
            a = ScaleLayer(0.01)
            # V: 1x1 conv halving the channel count of the lateral input.
            map_in = pre_col.output_shapes[l - 1]
            map_out = int(map_in / 2)
            v = init_(nn.Conv2d(map_in, map_out, 1))
            if l != self.n_layers - 1:  # conv -> conv, last layer
                # U mirrors the receiving layer's kernel/stride so shapes match.
                cur_out = col.output_shapes[l]
                size, stride = pre_col.topology[l - 1]
                u = init_(nn.Conv2d(map_out, cur_out, size, stride=stride))
            else:
                # conv -> FC: U is a Linear over the flattened, halved features.
                input_size = int(col.input_shapes[-1] / 2)
                hidden_size = self.hidden_size
                u = init_(nn.Linear(input_size, hidden_size))
            a_list.append(a)
            V_list.append(v)
            U_list.append(u)
        # Register adapters as submodules so they are trained and saved.
        a_list = nn.ModuleList(a_list)
        V_list = nn.ModuleList(V_list)
        U_list = nn.ModuleList(U_list)
        self.alpha.append(a_list)
        self.V.append(V_list)
        self.U.append(U_list)
def freeze_columns(self, skip=None):  # freezes the weights of previous columns
    """Disable gradients for every existing column except those whose index
    appears in ``skip``."""
    skip = set(skip) if skip is not None else set()
    for idx, column in enumerate(self.columns):
        if idx in skip:
            continue
        for param in column.parameters():
            param.requires_grad = False
def parameters(self, col=None):
if col is None:
return super(PNNConvBase, self).parameters()
| |
import csv
import time
from datetime import timedelta
from decimal import Decimal
from io import BytesIO, StringIO
import tracker.models.fields
from django.contrib import messages
from django.contrib.admin import register
from django.contrib.auth import models as auth
from django.contrib.auth.decorators import permission_required, user_passes_test
from django.core.files.storage import DefaultStorage
from django.core.validators import EmailValidator
from django.db.models import Sum
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render, redirect
from django.urls import reverse, path
from django.utils.html import format_html
from django.views.decorators.csrf import csrf_protect
from tracker import models, search_filters, forms
from .filters import RunListFilter
from .forms import (
EventForm,
PostbackURLForm,
RunnerAdminForm,
SpeedRunAdminForm,
StartRunForm,
TestEmailForm,
)
from .util import CustomModelAdmin
from ..auth import send_registration_mail
@register(models.Event)
class EventAdmin(CustomModelAdmin):
    """Admin for tracker events: declarative form/list configuration plus
    CSV-export actions and custom views (volunteer emails, UI, diagnostics)."""

    form = EventForm
    search_fields = ('short', 'name')
    list_display = ['name', 'locked', 'allow_donations']
    list_editable = ['locked', 'allow_donations']
    # Computed/link-only fields, rendered read-only on the change form.
    readonly_fields = ['scheduleid', 'bids']
    fieldsets = [
        (
            None,
            {
                'fields': [
                    'short',
                    'name',
                    'hashtag',
                    'receivername',
                    'targetamount',
                    'use_one_step_screening',
                    'minimumdonation',
                    'auto_approve_threshold',
                    'datetime',
                    'timezone',
                    'locked',
                    'allow_donations',
                ]
            },
        ),
        (
            'Paypal',
            {
                'classes': ['collapse'],
                'fields': ['paypalemail', 'paypalcurrency', 'paypalimgurl'],
            },
        ),
        (
            'Donation Autoreply',
            {
                'classes': ['collapse',],
                'fields': [
                    'donationemailsender',
                    'donationemailtemplate',
                    'pendingdonationemailtemplate',
                ],
            },
        ),
        (
            'Prize Management',
            {
                'classes': ['collapse',],
                'fields': [
                    'prize_accept_deadline_delta',
                    'prize_drawing_date',
                    'prizecoordinator',
                    'allowed_prize_countries',
                    'disallowed_prize_regions',
                    'prizecontributoremailtemplate',
                    'prizewinneremailtemplate',
                    'prizewinneracceptemailtemplate',
                    'prizeshippedemailtemplate',
                ],
            },
        ),
        ('Google Document', {'classes': ['collapse'], 'fields': ['scheduleid']}),
        ('Bids', {'fields': ('bids',)}),
    ]
def bids(self, instance):
    """Render a link to the bid changelist filtered to this event
    (read-only field on the change form)."""
    if instance.id is None:
        return 'Not Saved Yet'
    changelist_url = reverse('admin:tracker_bid_changelist',)
    return format_html(
        '<a href="{u}?event={id}">View</a>',
        u=changelist_url,
        id=instance.id,
    )
def get_urls(self):
    """Prepend the tracker's custom admin views to the default ModelAdmin URLs."""
    return [
        path(
            'send_volunteer_emails/<int:pk>',
            self.admin_site.admin_view(self.send_volunteer_emails_view),
            name='send_volunteer_emails',
        ),
        # Two routes share the name 'tracker_ui': with and without extra path.
        path('ui/', self.admin_site.admin_view(self.ui_view), name='tracker_ui',),
        path(
            'ui/<path:extra>',
            self.admin_site.admin_view(self.ui_view),
            name='tracker_ui',
        ),
        path(
            'diagnostics',
            self.admin_site.admin_view(self.diagnostics),
            name='diagnostics',
        ),
    ] + super(EventAdmin, self).get_urls()
def send_volunteer_emails(self, request, queryset):
    """Admin action: redirect to the volunteer-email form for a single event."""
    if queryset.count() != 1:
        self.message_user(
            request, 'Select exactly one event.', level=messages.ERROR,
        )
        return None
    event = queryset.first()
    url = reverse('admin:send_volunteer_emails', args=(event.id,))
    return HttpResponseRedirect(url)
@staticmethod
@permission_required('auth.change_user', raise_exception=True)
def send_volunteer_emails_view(request, pk):
    """Bulk-register volunteers for an unlocked event from an uploaded CSV.

    Expects a CSV with columns: name, position, email, username. Each row is
    turned into an (inactive) staff user placed into the 'Bid Admin' group
    (position contains 'head') or 'Bid Tracker' group (everyone else), and a
    registration email is sent. Row-level failures are reported per row and
    do not abort the rest of the import.
    """
    event = models.Event.objects.filter(pk=pk, locked=False).first()
    if event is None:
        raise Http404
    if request.method == 'POST':
        form = forms.SendVolunteerEmailsForm(request.POST, request.FILES)
        if form.is_valid():
            volunteers = csv.DictReader(
                StringIO(request.FILES['volunteers'].read().decode('utf-8'))
            )
            # Ensure both groups exist and carry exactly the expected permissions.
            tracker_group = auth.Group.objects.get_or_create(name='Bid Tracker')[0]
            tracker_codenames = [
                'change_donation',
                'view_donation',
                'view_comments',
            ]
            tracker_permissions = auth.Permission.objects.filter(
                content_type__app_label='tracker', codename__in=tracker_codenames,
            )
            assert tracker_permissions.count() == len(
                tracker_codenames
            ), 'some permissions were missing, check tracker_codenames or that all migrations have run'
            tracker_group.permissions.set(tracker_permissions)
            admin_group = auth.Group.objects.get_or_create(name='Bid Admin')[0]
            admin_codenames = [
                # bid screening/assignment
                'add_donationbid',
                'change_donationbid',
                'delete_donationbid',
                'view_donationbid',
                'add_bid',
                'change_bid',
                'view_bid',
                'view_hidden_bid',
                # donations
                'change_donation',
                'view_donation',
                'view_comments',
                'view_pending_donation',
                'send_to_reader',
                # donors
                'add_donor',
                'change_donor',
                'view_donor',
                'view_emails',
                'view_usernames',
                # needed for 'Start Run'
                'change_speedrun',
                'view_speedrun',
            ]
            admin_permissions = auth.Permission.objects.filter(
                content_type__app_label='tracker', codename__in=admin_codenames,
            )
            assert admin_permissions.count() == len(
                admin_codenames
            ), 'some permissions were missing, check admin_codenames or that all migrations have run'
            admin_group.permissions.set(admin_permissions)
            successful = 0
            # start=2: row 1 of the file is the CSV header.
            for row, volunteer in enumerate(volunteers, start=2):
                try:
                    firstname, space, lastname = (
                        volunteer['name'].strip().partition(' ')
                    )
                    is_head = 'head' in volunteer['position'].strip().lower()
                    is_host = 'host' in volunteer['position'].strip().lower()
                    email = volunteer['email'].strip()
                    # Raises ValidationError on a bad address -> caught below.
                    EmailValidator()(email)
                    username = volunteer['username'].strip()
                    if not username:
                        raise ValueError('username cannot be blank')
                    user, created = auth.User.objects.get_or_create(
                        email__iexact=volunteer['email'],
                        defaults=dict(
                            username=username,
                            first_name=firstname.strip(),
                            last_name=lastname.strip(),
                            email=email,
                            is_active=False,
                        ),
                    )
                    user.is_staff = True
                    # Membership is exclusive: heads get admin, others tracker.
                    if is_head:
                        user.groups.add(admin_group)
                        user.groups.remove(tracker_group)
                    else:
                        user.groups.remove(admin_group)
                        user.groups.add(tracker_group)
                    user.save()
                    if created:
                        messages.add_message(
                            request,
                            messages.INFO,
                            f'Created user {volunteer["username"]} with email {volunteer["email"]}',
                        )
                    else:
                        messages.add_message(
                            request,
                            messages.INFO,
                            f'Found existing user {volunteer["username"]} with email {volunteer["email"]}',
                        )
                    context = dict(
                        event=event,
                        is_head=is_head,
                        is_host=is_host,
                        password_reset_url=request.build_absolute_uri(
                            reverse('tracker:password_reset')
                        ),
                        admin_url=request.build_absolute_uri(
                            reverse('admin:index')
                        ),
                    )
                    send_registration_mail(
                        request,
                        user,
                        template=form.cleaned_data['template'],
                        sender=form.cleaned_data['sender'],
                        extra_context=context,
                    )
                    successful += 1
                except Exception as e:
                    # Report the row but keep processing the remainder.
                    messages.add_message(
                        request,
                        messages.ERROR,
                        f'Could not process row #{row}: {repr(e)}',
                    )
            if successful:
                messages.add_message(
                    request, messages.INFO, f'Sent {successful} email(s)'
                )
            return redirect('admin:tracker_event_changelist')
    else:
        form = forms.SendVolunteerEmailsForm()
    # GET, or POST with an invalid form: (re-)render the upload form.
    return render(
        request,
        'admin/tracker/generic_form.html',
        {
            'form': form,
            'site_header': 'Send Volunteer Emails',
            'title': 'Send Volunteer Emails',
            'breadcrumbs': (
                (
                    reverse('admin:app_list', kwargs=dict(app_label='tracker')),
                    'Tracker',
                ),
                (reverse('admin:tracker_event_changelist'), 'Events'),
                (
                    reverse('admin:tracker_event_change', args=(event.id,)),
                    str(event),
                ),
                (None, 'Send Volunteer Emails'),
            ),
            'action': request.path,
        },
    )
@staticmethod
@csrf_protect
@user_passes_test(lambda u: u.is_superuser)
def diagnostics(request):
    """Superuser-only page that sanity-checks email, websockets, and storage."""
    from django.conf import settings
    from post_office import mail

    # Derive websocket endpoints from the tracker index URL.
    ping_socket_url = (
        request.build_absolute_uri(f'{reverse("tracker:index_all")}ws/ping/')
        .replace('https:', 'wss:')
        .replace('http:', 'ws:')
    )
    celery_socket_url = (
        request.build_absolute_uri(f'{reverse("tracker:index_all")}ws/celery/')
        .replace('https:', 'wss:')
        .replace('http:', 'ws:')
    )
    if request.method == 'POST':
        test_email_form = TestEmailForm(data=request.POST)
        if test_email_form.is_valid():
            mail.send(
                [test_email_form.cleaned_data['email']],
                f'webmaster@{request.get_host().split(":")[0]}',
                subject='Test Email',
                message='If you got this, email is set up correctly.',
            )
            messages.info(
                request, 'Test email queued. Check Post Office models for status.'
            )
    else:
        test_email_form = TestEmailForm()
    # Round-trip a small file through the default storage backend.
    try:
        storage = DefaultStorage()
        output = storage.save(f'testfile_{int(time.time())}', BytesIO(b'test file'))
        storage.open(output).read()
        assert storage.exists(output)
        storage.delete(output)
        storage_works = True
    except Exception as e:
        # Surface the failure in the template rather than crashing the page.
        storage_works = e
    return render(
        request,
        'admin/tracker/diagnostics.html',
        {
            'is_secure': request.is_secure(),
            'test_email_form': test_email_form,
            'ping_socket_url': ping_socket_url,
            'celery_socket_url': celery_socket_url,
            'storage_works': storage_works,
            'HAS_CELERY': getattr(settings, 'HAS_CELERY', False),
        },
    )
@staticmethod
def ui_view(request, **kwargs):
    """Delegate to the tracker UI's admin view, rooted at the admin UI URL."""
    # TODO: just move this here
    import tracker.ui.views

    return tracker.ui.views.admin(
        request, ROOT_PATH=reverse('admin:tracker_ui'), **kwargs
    )
def donor_report(self, request, queryset):
    """Admin action: export a donor CSV (one aggregate anonymous row, then
    one row per non-anonymous donor) for exactly one selected event."""
    if queryset.count() != 1:
        self.message_user(
            request, 'Select exactly one event.', level=messages.ERROR,
        )
        return
    event = queryset.first()
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = (
        'attachment; filename="donor-report-%s.csv"' % event.short
    )
    writer = csv.writer(response)
    writer.writerow(['Name', 'Donation Sum', 'Donation Count'])
    anon = tracker.models.Donation.objects.filter(
        donor__visibility='ANON', transactionstate='COMPLETED', event=event
    )
    # BUG FIX: aggregate() yields None (not 0) when the queryset is empty,
    # which used to crash on .quantize(); default to zero in that case.
    anon_total = anon.aggregate(Sum('amount'))['amount__sum'] or Decimal('0.00')
    writer.writerow(
        [
            'All Anonymous Donations',
            anon_total.quantize(Decimal('1.00')),
            anon.count(),
        ]
    )
    donors = (
        tracker.models.DonorCache.objects.filter(event=event)
        .exclude(donor__visibility='ANON')
        .select_related('donor')
        .iterator()
    )
    for d in donors:
        writer.writerow([d.visible_name(), d.donation_total, d.donation_count])
    return response

donor_report.short_description = 'Export donor CSV'
def run_report(self, request, queryset):
    """Admin action: export a CSV of all ordered speedruns for one event."""
    if queryset.count() != 1:
        self.message_user(
            request, 'Select exactly one event.', level=messages.ERROR,
        )
        return
    event = queryset.first()
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = (
        'attachment; filename="run-report-%s.csv"' % event.short
    )
    writer = csv.writer(response)
    writer.writerow(
        ['Run', 'Event', 'Start Time', 'End Time', 'Runners', 'Runner Twitters']
    )
    # Unordered runs (order=None) are not on the schedule, so exclude them.
    runs = (
        tracker.models.SpeedRun.objects.filter(event=event)
        .exclude(order=None)
        .select_related('event')
        .prefetch_related('runners')
    )
    for r in runs:
        writer.writerow(
            [
                str(r),
                r.event.short,
                # Times are reported in the event's own timezone.
                r.starttime.astimezone(r.event.timezone).isoformat(),
                r.endtime.astimezone(r.event.timezone).isoformat(),
                ','.join(str(ru) for ru in r.runners.all()),
                ','.join(ru.twitter for ru in r.runners.all() if ru.twitter),
            ]
        )
    return response

run_report.short_description = 'Export run CSV'
def donation_report(self, request, queryset):
    """Admin action: export a CSV of completed donations for one event."""
    if queryset.count() != 1:
        self.message_user(
            request, 'Select exactly one event.', level=messages.ERROR,
        )
        return None
    event = queryset.first()
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    filename = 'donation-report-%s.csv' % event.short
    response['Content-Disposition'] = 'attachment; filename="%s"' % filename
    writer = csv.writer(response)
    writer.writerow(['Donor', 'Event', 'Amount', 'Time Received'])
    donations = (
        tracker.models.Donation.objects.filter(
            transactionstate='COMPLETED', event=event
        )
        .select_related('donor', 'event')
        .iterator()
    )
    for donation in donations:
        # Report times in the event's own timezone.
        local_time = donation.timereceived.astimezone(donation.event.timezone)
        writer.writerow(
            [
                donation.donor.visible_name(),
                donation.event.short,
                donation.amount,
                local_time.isoformat(),
            ]
        )
    return response

donation_report.short_description = 'Export donation CSV'
def bid_report(self, request, queryset):
    """Admin action: export a CSV of open/closed bids for one event."""
    if queryset.count() != 1:
        self.message_user(
            request, 'Select exactly one event.', level=messages.ERROR,
        )
        return
    event = queryset.first()
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = (
        'attachment; filename="bid-report-%s.csv"' % event.short
    )
    writer = csv.writer(response)
    writer.writerow(['Id', 'Bid', 'Event', 'Target', 'Goal', 'Amount', 'Count'])
    # Only bids that were actually visible/used (hidden/denied are excluded).
    bids = (
        tracker.models.Bid.objects.filter(
            state__in=['CLOSED', 'OPENED'], event=event
        )
        .order_by('event__datetime', 'speedrun__order', 'parent__name', '-total')
        .select_related('event', 'speedrun', 'parent')
        .iterator()
    )
    for b in bids:
        writer.writerow(
            [b.id, str(b), b.event.short, b.istarget, b.goal, b.total, b.count]
        )
    return response

bid_report.short_description = 'Export bid CSV'
def donationbid_report(self, request, queryset):
    """Admin action: export a CSV of donation-to-bid allocations for one event."""
    if queryset.count() != 1:
        self.message_user(
            request, 'Select exactly one event.', level=messages.ERROR,
        )
        return None
    event = queryset.first()
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    filename = 'donationbid-report-%s.csv' % event.short
    response['Content-Disposition'] = 'attachment; filename="%s"' % filename
    writer = csv.writer(response)
    writer.writerow(['Bid', 'Amount', 'Time'])
    donation_bids = (
        tracker.models.DonationBid.objects.filter(
            bid__state__in=['CLOSED', 'OPENED'],
            bid__event=event,
            donation__transactionstate='COMPLETED',
        )
        .order_by('donation__timereceived')
        .select_related('donation')
        .iterator()
    )
    for allocation in donation_bids:
        writer.writerow(
            [allocation.bid_id, allocation.amount, allocation.donation.timereceived]
        )
    return response

donationbid_report.short_description = 'Export donation bid CSV'
def prize_report(self, request, queryset):
    """Admin action: export a CSV of accepted prizes with eligibility counts."""
    if queryset.count() != 1:
        self.message_user(
            request, 'Select exactly one event.', level=messages.ERROR,
        )
        return
    event = queryset.first()
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = (
        'attachment; filename="prize-report-%s.csv"' % event.short
    )
    writer = csv.writer(response)
    writer.writerow(
        [
            'Event',
            'Name',
            'Eligible Donors',
            'Exact Donors',
            'Start Time',
            'End Time',
        ]
    )
    prizes = tracker.models.Prize.objects.filter(
        state='ACCEPTED', event=event
    ).iterator()
    for p in prizes:
        # NOTE(review): eligible_donors() runs once per prize — potentially
        # expensive on large events; confirm this is acceptable here.
        eligible = p.eligible_donors()
        writer.writerow(
            [
                p.event.short,
                p.name,
                len(eligible),
                # Donors who gave exactly the minimum bid amount.
                len([d for d in eligible if d['amount'] == p.minimumbid]),
                p.start_draw_time(),
                p.end_draw_time(),
            ]
        )
    return response

prize_report.short_description = 'Export prize CSV'
def email_report(self, request, queryset):
    """Admin action: export a CSV of donors who opted in to email contact."""
    if queryset.count() != 1:
        self.message_user(
            request, 'Select exactly one event.', level=messages.ERROR,
        )
        return None
    event = queryset.first()
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    filename = 'email-report-%s.csv' % event.short
    response['Content-Disposition'] = 'attachment; filename="%s"' % filename
    writer = csv.writer(response)
    writer.writerow(['Email', 'Name', 'Anonymous', 'Donation Sum', 'Country'])
    donors = (
        tracker.models.DonorCache.objects.filter(
            event=event, donor__solicitemail='OPTIN',
        )
        .select_related('donor')
        .iterator()
    )
    for d in donors:
        # "Last, First" when both parts exist; degrade gracefully otherwise.
        if d.firstname and d.lastname:
            name = u'%s, %s' % (d.lastname, d.firstname)
        elif d.firstname:
            name = d.firstname
        else:
            name = '(No Name Supplied)'
        writer.writerow(
            [
                d.email,
                name,
                d.visibility == 'ANON',
                d.donation_total,
                d.addresscountry,
            ]
        )
    return response

email_report.short_description = 'Export email opt-in CSV'
# Changelist action dropdown: each action requires exactly one selected
# event and either redirects (volunteer emails) or streams back a CSV.
actions = [
    send_volunteer_emails,
    donor_report,
    run_report,
    donation_report,
    bid_report,
    donationbid_report,
    prize_report,
    email_report,
]
@register(models.PostbackURL)
class PostbackURLAdmin(CustomModelAdmin):
form = PostbackURLForm
search_fields = ('url',)
list_filter = ('event',)
list_display = ('url', | |
transform the test documents (ignore unknown words)
features_test = vectorizer.transform(words_test).toarray()
# NOTE: Remember to convert the features using .toarray() for a compact representation
# Write to cache file for future runs (store vocabulary as well)
if cache_file is not None:
vocabulary = vectorizer.vocabulary_
cache_data = dict(features_train=features_train, features_test=features_test,
vocabulary=vocabulary)
with open(os.path.join(cache_dir, cache_file), "wb") as f:
joblib.dump(cache_data, f)
print("Wrote features to cache file:", cache_file)
else:
# Unpack data loaded from cache file
features_train, features_test, vocabulary = (cache_data['features_train'],
cache_data['features_test'], cache_data['vocabulary'])
# Return both the extracted features as well as the vocabulary
return features_train, features_test, vocabulary
# %%
# Extract Bag of Words features for both training and test datasets
train_X, test_X, vocabulary = extract_BoW_features(train_X, test_X)

# %%
# Inspect the length of a single feature vector (equals the vocabulary size).
len(train_X[100])

# %% [markdown]
# ## Step 4: Classification using XGBoost
#
# Now that we have created the feature representation of our training (and testing) data, it is time to start setting up and using the XGBoost classifier provided by SageMaker.
#
# ### Writing the dataset
#
# The XGBoost classifier that we will be using requires the dataset to be written to a file and stored using Amazon S3. To do this, we will start by splitting the training dataset into two parts, the data we will train the model with and a validation set. Then, we will write those datasets to a file and upload the files to S3. In addition, we will write the test set input to a file and upload the file to S3. This is so that we can use SageMaker's Batch Transform functionality to test our model once we've fit it.
# %%
import pandas as pd

# Earlier we shuffled the training dataset so to make things simple we can just assign
# the first 10 000 reviews to the validation set and use the remaining reviews for training.
val_X = pd.DataFrame(train_X[:10000])
train_X = pd.DataFrame(train_X[10000:])

val_y = pd.DataFrame(train_y[:10000])
train_y = pd.DataFrame(train_y[10000:])

# %% [markdown]
# The documentation for the XGBoost algorithm in SageMaker requires that the saved datasets should contain no headers or index and that for the training and validation data, the label should occur first for each sample.
#
# For more information about this and other algorithms, the SageMaker developer documentation can be found on __[Amazon's website.](https://docs.aws.amazon.com/sagemaker/latest/dg/)__
# %%
# First we make sure that the local directory in which we'd like to store the training and validation csv files exists.
data_dir = '../data/sentiment_update'
if not os.path.exists(data_dir):
    os.makedirs(data_dir)

# %%
# Label column first, no header/index — required by SageMaker's XGBoost.
pd.DataFrame(test_X).to_csv(os.path.join(data_dir, 'test.csv'), header=False, index=False)
pd.concat([val_y, val_X], axis=1).to_csv(os.path.join(data_dir, 'validation.csv'), header=False, index=False)
pd.concat([train_y, train_X], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)

# %%
# To save a bit of memory we can set test_X, train_X, val_X, train_y and val_y to None.
test_X = train_X = val_X = train_y = val_y = None

# %% [markdown]
# ### Uploading Training / Validation files to S3
#
# Amazon's S3 service allows us to store files that can be access by both the built-in training models such as the XGBoost model we will be using as well as custom models such as the one we will see a little later.
#
# For this, and most other tasks we will be doing using SageMaker, there are two methods we could use. The first is to use the low level functionality of SageMaker which requires knowing each of the objects involved in the SageMaker environment. The second is to use the high level functionality in which certain choices have been made on the user's behalf. The low level approach benefits from allowing the user a great deal of flexibility while the high level approach makes development much quicker. For our purposes we will opt to use the high level approach although using the low-level approach is certainly an option.
#
# Recall the method `upload_data()` which is a member of object representing our current SageMaker session. What this method does is upload the data to the default bucket (which is created if it does not exist) into the path described by the key_prefix variable. To see this for yourself, once you have uploaded the data files, go to the S3 console and look to see where the files have been uploaded.
#
# For additional resources, see the __[SageMaker API documentation](http://sagemaker.readthedocs.io/en/latest/)__ and in addition the __[SageMaker Developer Guide.](https://docs.aws.amazon.com/sagemaker/latest/dg/)__
# %%
import sagemaker

session = sagemaker.Session() # Store the current SageMaker session

# S3 prefix (which folder will we use)
prefix = 'sentiment-update'

test_location = session.upload_data(os.path.join(data_dir, 'test.csv'), key_prefix=prefix)
val_location = session.upload_data(os.path.join(data_dir, 'validation.csv'), key_prefix=prefix)
train_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix)
# %% [markdown]
# ### Creating the XGBoost model
#
# Now that the data has been uploaded it is time to create the XGBoost model. To begin with, we need to do some setup. At this point it is worth discussing what a model is in SageMaker. It is easiest to think of a model of comprising three different objects in the SageMaker ecosystem, which interact with one another.
#
# - Model Artifacts
# - Training Code (Container)
# - Inference Code (Container)
#
# The Model Artifacts are what you might think of as the actual model itself. For example, if you were building a neural network, the model artifacts would be the weights of the various layers. In our case, for an XGBoost model, the artifacts are the actual trees that are created during training.
#
# The other two objects, the training code and the inference code are then used the manipulate the training artifacts. More precisely, the training code uses the training data that is provided and creates the model artifacts, while the inference code uses the model artifacts to make predictions on new data.
#
# The way that SageMaker runs the training and inference code is by making use of Docker containers. For now, think of a container as being a way of packaging code up so that dependencies aren't an issue.
# %%
from sagemaker import get_execution_role

# Our current execution role is required when creating the model as the training
# and inference code will need to access the model artifacts.
role = get_execution_role()

# %%
# We need to retrieve the location of the container which is provided by Amazon for using XGBoost.
# As a matter of convenience, the training and inference code both use the same container.
from sagemaker.amazon.amazon_estimator import get_image_uri

# NOTE(review): get_image_uri / s3_input / train_instance_* are SageMaker SDK v1
# APIs (renamed in SDK v2 to image_uris.retrieve / TrainingInput / instance_*).
# Keep the SDK pinned to v1 or migrate — confirm the target environment.
container = get_image_uri(session.boto_region_name, 'xgboost')

# %%
# First we create a SageMaker estimator object for our model.
xgb = sagemaker.estimator.Estimator(container, # The location of the container we wish to use
                                    role, # What is our current IAM Role
                                    train_instance_count=1, # How many compute instances
                                    train_instance_type='ml.m4.xlarge', # What kind of compute instances
                                    output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix),
                                    sagemaker_session=session)

# And then set the algorithm specific parameters.
xgb.set_hyperparameters(max_depth=5,
                        eta=0.2,
                        gamma=4,
                        min_child_weight=6,
                        subsample=0.8,
                        silent=0,
                        objective='binary:logistic',
                        early_stopping_rounds=10,
                        num_round=500)

# %% [markdown]
# ### Fit the XGBoost model
#
# Now that our model has been set up we simply need to attach the training and validation datasets and then ask SageMaker to set up the computation.
# %%
s3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='csv')
s3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='csv')

# %%
xgb.fit({'train': s3_input_train, 'validation': s3_input_validation})

# %% [markdown]
# ### Testing the model
#
# Now that we've fit our XGBoost model, it's time to see how well it performs. To do this we will use SageMaker's Batch Transform functionality. Batch Transform is a convenient way to perform inference on a large dataset in a way that is not realtime. That is, we don't necessarily need to use our model's results immediately and instead we can peform inference on a large number of samples. An example of this in industry might be peforming an end of month report. This method of inference can also be useful to us as it means to can perform inference on our entire test set.
#
# To perform a Batch Transformation we need to first create a transformer objects from our trained estimator object.
# %%
xgb_transformer = xgb.transformer(instance_count = 1, instance_type = 'ml.m4.xlarge')
# %% [markdown]
# Next we actually perform the transform job. When doing so we need to make sure to specify the type of data we are sending so that it is serialized correctly in the background. In our case we are providing our model with csv data so we specify `text/csv`. Also, if the test data that | |
<gh_stars>0
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import transaction
from django.utils.functional import cached_property
from django.utils.translation import gettext as _
from django_countries.serializers import CountryFieldMixin
from hierarkey.proxy import HierarkeyProxy
from pytz import common_timezones
from rest_framework import serializers
from rest_framework.fields import ChoiceField, Field
from rest_framework.relations import SlugRelatedField
from pretix.api.serializers.i18n import I18nAwareModelSerializer
from pretix.base.models import Event, TaxRule
from pretix.base.models.event import SubEvent
from pretix.base.models.items import SubEventItem, SubEventItemVariation
from pretix.base.services.seating import (
SeatProtected, generate_seats, validate_plan_change,
)
from pretix.base.settings import DEFAULTS, validate_settings
from pretix.base.signals import api_event_settings_fields
class MetaDataField(Field):
    """Expose an object's meta values as a flat ``{property_name: value}`` dict."""

    def to_representation(self, value):
        result = {}
        for meta_value in value.meta_values.all():
            result[meta_value.property.name] = meta_value.value
        return result

    def to_internal_value(self, data):
        # Wrap under the key that validate_meta_data()/create()/update() expect.
        return {'meta_data': data}
class MetaPropertyField(Field):
    """Expose item meta properties as a flat ``{name: default}`` dict."""

    def to_representation(self, value):
        result = {}
        for prop in value.item_meta_properties.all():
            result[prop.name] = prop.default
        return result

    def to_internal_value(self, data):
        # Wrap under the key the serializer's create()/update() expect.
        return {'item_meta_properties': data}
class SeatCategoryMappingField(Field):
    """Serialize seat category mappings as ``{layout_category: product_id}``."""

    def to_representation(self, value):
        mappings = value.seat_category_mappings.all()
        # At event level, only the event-wide mappings (no subevent) are shown.
        if isinstance(value, Event):
            mappings = mappings.filter(subevent=None)
        return {m.layout_category: m.product_id for m in mappings}

    def to_internal_value(self, data):
        # Normalize a missing/None payload to an empty mapping.
        return {'seat_category_mapping': data or {}}
class PluginsField(Field):
    """Expose the event's active plugins as a sorted list of module names."""

    def to_representation(self, obj):
        from pretix.base.plugins import get_all_plugins

        # Only list plugins that are visible, not hidden ('.'-prefixed)
        # and actually enabled on this event.
        return sorted(
            plugin.module
            for plugin in get_all_plugins()
            if not plugin.name.startswith('.')
            and getattr(plugin, 'visible', True)
            and plugin.module in obj.get_plugins()
        )

    def to_internal_value(self, data):
        return {'plugins': data}
class TimeZoneField(ChoiceField):
    """Read the event's timezone setting through its cache (1 hour TTL)."""

    def get_attribute(self, instance):
        def _read_timezone():
            return instance.settings.timezone

        return instance.cache.get_or_set('timezone_name', _read_timezone, 3600)
class EventSerializer(I18nAwareModelSerializer):
    """Serializer for pretix events.

    Besides plain model columns it exposes several virtual fields that are
    backed by related objects or event settings. All of them are declared
    with ``source='*'`` so they receive/emit the whole Event instance:
    ``meta_data``, ``item_meta_properties``, ``plugins``,
    ``seat_category_mapping`` and ``timezone``.
    """
    meta_data = MetaDataField(required=False, source='*')
    item_meta_properties = MetaPropertyField(required=False, source='*')
    plugins = PluginsField(required=False, source='*')
    seat_category_mapping = SeatCategoryMappingField(source='*', required=False)
    timezone = TimeZoneField(required=False, choices=[(a, a) for a in common_timezones])

    class Meta:
        model = Event
        fields = ('name', 'slug', 'live', 'testmode', 'currency', 'date_from',
                  'date_to', 'date_admission', 'is_public', 'presale_start',
                  'presale_end', 'location', 'geo_lat', 'geo_lon', 'has_subevents', 'meta_data', 'seating_plan',
                  'plugins', 'seat_category_mapping', 'timezone', 'item_meta_properties')

    def validate(self, data):
        """Cross-field validation over the merged (existing + incoming) data."""
        data = super().validate(data)
        # For partial updates, overlay the incoming changes on the current
        # state so cross-field checks see the complete picture.
        full_data = self.to_internal_value(self.to_representation(self.instance)) if self.instance else {}
        full_data.update(data)
        Event.clean_dates(data.get('date_from'), data.get('date_to'))
        Event.clean_presale(data.get('presale_start'), data.get('presale_end'))
        # Seating plans belong on the individual dates of an event series,
        # not on the series itself.
        if full_data.get('has_subevents') and full_data.get('seating_plan'):
            raise ValidationError('Event series should not directly be assigned a seating plan.')
        return data

    def validate_has_subevents(self, value):
        # Delegate to the model-level check (e.g. existing orders/dates).
        Event.clean_has_subevents(self.instance, value)
        return value

    def validate_slug(self, value):
        # Slug must be unique within the requesting organizer.
        Event.clean_slug(self.context['request'].organizer, self.instance, value)
        return value

    def validate_live(self, value):
        # Going live is only allowed on existing events that pass clean_live().
        if value:
            if self.instance is None:
                raise ValidationError(_('Events cannot be created as \'live\'. Quotas and payment must be added to the '
                                        'event before sales can go live.'))
            else:
                self.instance.clean_live()
        return value

    @cached_property
    def meta_properties(self):
        # Organizer-level meta property objects, keyed by name.
        return {
            p.name: p for p in self.context['request'].organizer.meta_properties.all()
        }

    def validate_meta_data(self, value):
        # Every submitted meta data key must match an organizer property.
        for key in value['meta_data'].keys():
            if key not in self.meta_properties:
                raise ValidationError(_('Meta data property \'{name}\' does not exist.').format(name=key))
        return value

    @cached_property
    def item_meta_props(self):
        # Event-level item meta property objects, keyed by name.
        return {
            p.name: p for p in self.context['request'].event.item_meta_properties.all()
        }

    def validate_seating_plan(self, value):
        if value and value.organizer != self.context['request'].organizer:
            raise ValidationError('Invalid seating plan.')
        if self.instance and self.instance.pk:
            # Refuse plan changes that would invalidate seats already in use.
            try:
                validate_plan_change(self.instance, None, value)
            except SeatProtected as e:
                raise ValidationError(str(e))
        return value

    def validate_seat_category_mapping(self, value):
        if not self.instance or not self.instance.pk:
            # On creation there are no items yet that could be mapped.
            if value and value['seat_category_mapping']:
                raise ValidationError('You cannot specify seat category mappings on event creation.')
            else:
                return {'seat_category_mapping': {}}
        # Resolve submitted item IDs to item instances of this event.
        item_cache = {i.pk: i for i in self.instance.items.all()}
        result = {}
        for k, item in value['seat_category_mapping'].items():
            if item not in item_cache:
                raise ValidationError('Item \'{id}\' does not exist.'.format(id=item))
            result[k] = item_cache[item]
        return {'seat_category_mapping': result}

    def validate_plugins(self, value):
        from pretix.base.plugins import get_all_plugins

        # Only visible, non-hidden plugins may be enabled through the API.
        plugins_available = {
            p.module for p in get_all_plugins(self.instance)
            if not p.name.startswith('.') and getattr(p, 'visible', True)
        }
        for plugin in value.get('plugins'):
            if plugin not in plugins_available:
                raise ValidationError(_('Unknown plugin: \'{name}\'.').format(name=plugin))
        return value

    @transaction.atomic
    def create(self, validated_data):
        """Create the event and all of its virtual-field extras atomically."""
        meta_data = validated_data.pop('meta_data', None)
        item_meta_properties = validated_data.pop('item_meta_properties', None)
        # Always empty on creation (enforced by the validator above).
        validated_data.pop('seat_category_mapping', None)
        plugins = validated_data.pop('plugins', settings.PRETIX_PLUGINS_DEFAULT.split(','))
        tz = validated_data.pop('timezone', None)
        event = super().create(validated_data)

        if tz:
            event.settings.timezone = tz

        # Meta data
        if meta_data is not None:
            for key, value in meta_data.items():
                event.meta_values.create(
                    property=self.meta_properties.get(key),
                    value=value
                )

        # Item Meta properties
        if item_meta_properties is not None:
            for key, value in item_meta_properties.items():
                event.item_meta_properties.create(
                    name=key,
                    default=value,
                    event=event
                )

        # Seats: a plan assigned at creation starts with an empty mapping.
        if event.seating_plan:
            generate_seats(event, None, event.seating_plan, {})

        # Plugins
        if plugins is not None:
            event.set_active_plugins(plugins)
            event.save(update_fields=['plugins'])
        return event

    @transaction.atomic
    def update(self, instance, validated_data):
        """Update the event and synchronize all virtual-field extras."""
        meta_data = validated_data.pop('meta_data', None)
        item_meta_properties = validated_data.pop('item_meta_properties', None)
        plugins = validated_data.pop('plugins', None)
        seat_category_mapping = validated_data.pop('seat_category_mapping', None)
        tz = validated_data.pop('timezone', None)
        event = super().update(instance, validated_data)

        if tz:
            event.settings.timezone = tz

        # Meta data: upsert submitted keys, delete keys that were omitted.
        if meta_data is not None:
            current = {mv.property: mv for mv in event.meta_values.select_related('property')}
            for key, value in meta_data.items():
                prop = self.meta_properties.get(key)
                if prop in current:
                    current[prop].value = value
                    current[prop].save()
                else:
                    event.meta_values.create(
                        property=self.meta_properties.get(key),
                        value=value
                    )
            for prop, current_object in current.items():
                if prop.name not in meta_data:
                    current_object.delete()

        # Item Meta properties: same upsert-then-prune pattern as above.
        if item_meta_properties is not None:
            current = [imp for imp in event.item_meta_properties.all()]
            for key, value in item_meta_properties.items():
                prop = self.item_meta_props.get(key)
                if prop in current:
                    prop.default = value
                    prop.save()
                else:
                    prop = event.item_meta_properties.create(
                        name=key,
                        default=value,
                        event=event
                    )
                    current.append(prop)
            for prop in current:
                if prop.name not in list(item_meta_properties.keys()):
                    prop.delete()

        # Seats: reconcile mappings if they changed or the plan was removed.
        if seat_category_mapping is not None or ('seating_plan' in validated_data and validated_data['seating_plan'] is None):
            current_mappings = {
                m.layout_category: m
                for m in event.seat_category_mappings.filter(subevent=None)
            }
            # Without a plan there is nothing to map; drop all mappings.
            if not event.seating_plan:
                seat_category_mapping = {}
            for key, value in seat_category_mapping.items():
                if key in current_mappings:
                    m = current_mappings.pop(key)
                    m.product = value
                    m.save()
                else:
                    event.seat_category_mappings.create(product=value, layout_category=key)
            for m in current_mappings.values():
                m.delete()
        # Regenerate seat objects whenever plan or mapping may have changed.
        if 'seating_plan' in validated_data or seat_category_mapping is not None:
            generate_seats(event, None, event.seating_plan, {
                m.layout_category: m.product
                for m in event.seat_category_mappings.select_related('product').filter(subevent=None)
            })

        # Plugins
        if plugins is not None:
            event.set_active_plugins(plugins)
            event.save()
        return event
class CloneEventSerializer(EventSerializer):
    """Create a new event as a copy of an existing one (``context['event']``)."""

    @transaction.atomic
    def create(self, validated_data):
        # Pop the fields that must survive the copy: copy_data_from() below
        # would otherwise overwrite them with the source event's values.
        plugins = validated_data.pop('plugins', None)
        is_public = validated_data.pop('is_public', None)
        testmode = validated_data.pop('testmode', None)
        has_subevents = validated_data.pop('has_subevents', None)
        tz = validated_data.pop('timezone', None)
        new_event = super().create(validated_data)

        # Look up the source event named in the URL/context and copy its data.
        event = Event.objects.filter(slug=self.context['event'], organizer=self.context['organizer'].pk).first()
        new_event.copy_data_from(event)

        # Re-apply the explicitly requested overrides on top of the copy.
        if plugins is not None:
            new_event.set_active_plugins(plugins)
        if is_public is not None:
            new_event.is_public = is_public
        if testmode is not None:
            new_event.testmode = testmode
        if has_subevents is not None:
            new_event.has_subevents = has_subevents
        new_event.save()
        if tz:
            new_event.settings.timezone = tz
        return new_event
class SubEventItemSerializer(I18nAwareModelSerializer):
    """Per-date price override for a single item."""

    class Meta:
        model = SubEventItem
        fields = ('item', 'price')
class SubEventItemVariationSerializer(I18nAwareModelSerializer):
    """Per-date price override for a single item variation."""

    class Meta:
        model = SubEventItemVariation
        fields = ('variation', 'price')
class SubEventSerializer(I18nAwareModelSerializer):
    """Serializer for the individual dates (SubEvent) of an event series."""
    # Price overrides are exposed through the reverse relation sets.
    item_price_overrides = SubEventItemSerializer(source='subeventitem_set', many=True, required=False)
    variation_price_overrides = SubEventItemVariationSerializer(source='subeventitemvariation_set', many=True, required=False)
    seat_category_mapping = SeatCategoryMappingField(source='*', required=False)
    # The parent event is read-only and referenced by slug.
    event = SlugRelatedField(slug_field='slug', read_only=True)
    meta_data = MetaDataField(source='*')

    class Meta:
        model = SubEvent
        fields = ('id', 'name', 'date_from', 'date_to', 'active', 'date_admission',
                  'presale_start', 'presale_end', 'location', 'geo_lat', 'geo_lon', 'event', 'is_public',
                  'seating_plan', 'item_price_overrides', 'variation_price_overrides', 'meta_data',
                  'seat_category_mapping')
    def validate(self, data):
        """Cross-field validation over the merged (existing + incoming) data."""
        data = super().validate(data)
        event = self.context['request'].event
        # Overlay incoming changes on the current state for partial updates.
        full_data = self.to_internal_value(self.to_representation(self.instance)) if self.instance else {}
        full_data.update(data)
        Event.clean_dates(data.get('date_from'), data.get('date_to'))
        Event.clean_presale(data.get('presale_start'), data.get('presale_end'))
        # Overridden items/variations must belong to the parent event.
        SubEvent.clean_items(event, [item['item'] for item in full_data.get('subeventitem_set', [])])
        SubEvent.clean_variations(event, [item['variation'] for item in full_data.get('subeventitemvariation_set', [])])
        return data
def validate_item_price_overrides(self, data):
return list(filter(lambda i: 'item' in i, data))
def validate_variation_price_overrides(self, data):
return list(filter(lambda i: 'variation' in i, data))
    def validate_seating_plan(self, value):
        if value and value.organizer != self.context['request'].organizer:
            raise ValidationError('Invalid seating plan.')
        if self.instance and self.instance.pk:
            # Refuse plan changes that would invalidate seats already in use.
            try:
                validate_plan_change(self.context['request'].event, self.instance, value)
            except SeatProtected as e:
                raise ValidationError(str(e))
        return value
    def validate_seat_category_mapping(self, value):
        # Resolve submitted item IDs to item instances of the parent event.
        item_cache = {i.pk: i for i in self.context['request'].event.items.all()}
        result = {}
        for k, item in value['seat_category_mapping'].items():
            if item not in item_cache:
                raise ValidationError('Item \'{id}\' does not exist.'.format(id=item))
            result[k] = item_cache[item]
        return {'seat_category_mapping': result}
    @cached_property
    def meta_properties(self):
        # Organizer-level meta property objects, keyed by name.
        return {
            p.name: p for p in self.context['request'].organizer.meta_properties.all()
        }
def validate_meta_data(self, value):
for key in value['meta_data'].keys():
if key not in self.meta_properties:
raise ValidationError(_('Meta data property \'{name}\' does not exist.').format(name=key))
return value
    @transaction.atomic
    def create(self, validated_data):
        """Create the subevent plus its overrides, meta data and seats."""
        item_price_overrides_data = validated_data.pop('subeventitem_set') if 'subeventitem_set' in validated_data else {}
        variation_price_overrides_data = validated_data.pop('subeventitemvariation_set') if 'subeventitemvariation_set' in validated_data else {}
        meta_data = validated_data.pop('meta_data', None)
        seat_category_mapping = validated_data.pop('seat_category_mapping', None)
        subevent = super().create(validated_data)

        # Per-date price overrides
        for item_price_override_data in item_price_overrides_data:
            SubEventItem.objects.create(subevent=subevent, **item_price_override_data)
        for variation_price_override_data in variation_price_overrides_data:
            SubEventItemVariation.objects.create(subevent=subevent, **variation_price_override_data)

        # Meta data
        if meta_data is not None:
            for key, value in meta_data.items():
                subevent.meta_values.create(
                    property=self.meta_properties.get(key),
                    value=value
                )

        # Seats: store the mappings, then generate seat objects from them.
        if subevent.seating_plan:
            if seat_category_mapping is not None:
                for key, value in seat_category_mapping.items():
                    self.context['request'].event.seat_category_mappings.create(
                        product=value, layout_category=key, subevent=subevent
                    )
            generate_seats(self.context['request'].event, subevent, subevent.seating_plan, {
                m.layout_category: m.product
                for m in self.context['request'].event.seat_category_mappings.select_related('product').filter(subevent=subevent)
            })
        return subevent
@transaction.atomic
def update(self, instance, validated_data):
item_price_overrides_data = validated_data.pop('subeventitem_set') if 'subeventitem_set' in validated_data else {}
variation_price_overrides_data = validated_data.pop('subeventitemvariation_set') if 'subeventitemvariation_set' in validated_data else {}
meta_data = validated_data.pop('meta_data', None)
seat_category_mapping = validated_data.pop('seat_category_mapping', None)
subevent = super().update(instance, validated_data)
existing_item_overrides = {item.item: item.id for item in SubEventItem.objects.filter(subevent=subevent)}
for item_price_override_data in item_price_overrides_data:
id = existing_item_overrides.pop(item_price_override_data['item'], None)
SubEventItem(id=id, subevent=subevent, **item_price_override_data).save()
SubEventItem.objects.filter(id__in=existing_item_overrides.values()).delete()
existing_variation_overrides = {item.variation: item.id for item in SubEventItemVariation.objects.filter(subevent=subevent)}
for variation_price_override_data in variation_price_overrides_data:
id = existing_variation_overrides.pop(variation_price_override_data['variation'], None)
SubEventItemVariation(id=id, subevent=subevent, **variation_price_override_data).save()
SubEventItemVariation.objects.filter(id__in=existing_variation_overrides.values()).delete()
# Meta data
if meta_data is not None:
current = {mv.property: mv for mv in subevent.meta_values.select_related('property')}
for | |
# <reponame>gbacco5/fluid
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 5 21:49:54 2018
@author: Giacomo
"""
import numpy as np
import scipy as sp
from matplotlib import pyplot as plt
# I define a dummy class for Matlab structure like objects
class structtype:
    """Empty mutable container mimicking a Matlab ``struct``.

    Fields are attached as plain attributes after instantiation.
    """
    pass
def psi_fluid(rho, xi, rho0):
    """Stream function psi of the flow around a circle of radius rho0."""
    radial_term = (rho * rho - rho0 * rho0) / rho
    return radial_term * np.sin(xi)
def phi_fluid(rho, xi, rho0):
    """Potential function phi of the flow around a circle of radius rho0."""
    radial_term = (rho * rho + rho0 * rho0) / rho
    return radial_term * np.cos(xi)
def xi_fluid(psi, rho, rho0):
    """Invert the stream function: angle xi for a given psi at fixed rho."""
    return np.arcsin(psi * rho / (rho * rho - rho0 * rho0))
def rho_fluid(psi, xi, rho0):
    """Invert the stream function: radius rho for a given psi at fixed xi.

    Positive root of the quadratic rho**2 - psi*rho/sin(xi) - rho0**2 = 0.
    """
    s = np.sin(xi)
    discriminant = psi * psi + 4.0 * s * s * rho0 * rho0
    return (psi + np.sqrt(discriminant)) / (2.0 * s)
def r_map(rho):
    """Map conformal radius rho back to physical radius (uses global p)."""
    return np.power(rho, 1.0 / p)
def th_map(xi):
    """Map conformal angle xi back to physical angle (uses global p)."""
    return xi / p
def rho_map(r):
    """Map physical radius r to conformal radius (uses global p)."""
    return r ** p
def xi_map(th):
    """Map physical angle th to conformal angle (uses global p)."""
    return p * th
def vr(r, th, R0):
    """Radial velocity component of the flow (uses global p)."""
    radial = np.power(r, p - 1) - R0 ** (2 * p) / np.power(r, p + 1)
    return p * radial * np.cos(p * th)
def vt(r, th, R0):
    """Tangential velocity component of the flow (uses global p)."""
    radial = np.power(r, p - 1) + R0 ** (2 * p) / np.power(r, p + 1)
    return -p * radial * np.sin(p * th)
def vx(vr_v, vth_v, th):
    """Cartesian x-velocity from polar components (vr, vtheta) at angle th."""
    c, s = np.cos(th), np.sin(th)
    return vr_v * c - vth_v * s
def vy(vr_v, vth_v, th):
    """Cartesian y-velocity from polar components (vr, vtheta) at angle th."""
    c, s = np.cos(th), np.sin(th)
    return vr_v * s + vth_v * c
def CentralPt_Eq(th, *args):
    """Residual used by fsolve to locate the central base points.

    Finds th such that the streamline point of value psiCentralPt lies on
    the straight line y = mCentral*x + qCentral (the rib/magnet offset line
    near the q-axis). args = (psiCentralPt, rho0, mCentral, qCentral).
    """
    psiCentralPt, rho0, mCentral, qCentral = args;
    # r*(sin(th) - m*cos(th)) - q == 0  <=>  the point is on the line.
    return np.multiply( r_map( rho_fluid(psiCentralPt, xi_map(th), rho0) ),
                        ( np.sin(th) - mCentral*np.cos(th) ) ) - qCentral;
def BarrierEndSystem(X, *args):
    """Nonlinear system (6 equations per barrier) for the barrier-end fillet.

    Unknowns per barrier, packed into X of length 6*Nb:
    th (angle of the tangency point), (xd, yd) point on the streamline,
    (xo, yo) fillet circle center, R fillet radius.
    args = (psiA, rho0, xE, yE) with (xE, yE) the barrier end point on the
    outer diameter. Uses module globals p and Nb.
    """
    # th,xd,yd,xo,yo,R = X;
    th = X[0:Nb];
    xd = X[1*Nb:2*Nb];
    yd = X[2*Nb:3*Nb];
    xo = X[3*Nb:4*Nb];
    yo = X[4*Nb:5*Nb];
    R = X[5*Nb:6*Nb];
    psiA, rho0, xE, yE = args;
    R0 = r_map(rho0);
    # (xd, yd) must lie on the psiA streamline at angle th.
    firstEq = xd - np.multiply( r_map(rho_fluid(psiA, p*th, rho0)), np.cos(th) );
    seconEq = yd - np.multiply( r_map(rho_fluid(psiA, p*th, rho0)), np.sin(th) );
    # Both (xd, yd) and (xE, yE) must be at distance R from the center.
    thirdEq = (xd - xo)**2 + (yd - yo)**2 - R**2;
    # thirdEq = (xE - xo)**2 + (yE - yo)**2 - R**2;
    fourtEq = (xE - xo)**2 + (yE - yo)**2 - R**2;
    # Tangency: the radius (xo-xd, yo-yd) is orthogonal to the local
    # flow velocity (vx, vy) at (xd, yd).
    fifthEq = np.multiply( (xo - xd), vx( vr( r_map(rho_fluid(psiA, p*th, rho0)),th,R0 ), vt( r_map(rho_fluid(psiA, p*th, rho0)) ,th,R0 ), th) ) + np.multiply( (yo - yd), vy( vr( r_map(rho_fluid(psiA, p*th, rho0)),th,R0 ), vt( r_map(rho_fluid(psiA, p*th, rho0)) ,th,R0 ), th) );
    # The center (xo, yo) lies on the line through the origin and (xE, yE).
    sixthEq = np.multiply(xo - xE,yE) - np.multiply(yo - yE,xE);
    return np.concatenate([firstEq,
                           seconEq,
                           thirdEq,
                           fourtEq,
                           fifthEq,
                           sixthEq])
def PsiPhi(X, *args):
    """Residuals matching target (psi, phi) values on the fluid field.

    X packs N radii followed by N angles; args = (psi, phi, rho0, N).
    """
    psi, phi, rho0, N = args
    rho = X[0:N]
    xi = X[N:2 * N]
    psi_residual = psi - psi_fluid(rho, xi, rho0)
    phi_residual = phi - phi_fluid(rho, xi, rho0)
    return np.concatenate([psi_residual, phi_residual])
# MAIN function definition
def calc_fluid_barrier(r, deb):
"CALC_FLUID_BARRIER computes the flux-barrier points along the streamline function."
Dr = r.De; # [m], rotor outer diameter
ScalingFactor = 1/( 10**(round(np.log10(Dr))) );
# ScalingFactor = 1;
Dr = Dr*ScalingFactor;
pi = np.pi;
global p, Nb # I have been lazy here...
p = r.p; # number of pole pairs
Nb = r.Nb; # number of flux-barriers
tb = r.tb*ScalingFactor; # flux-barrier widths
wc = r.wc*ScalingFactor; # flux-carrier widths
Nstep = r.Nstep; # number of steps to draw the flux-barrier side
wrib_t = r.wrib_t*ScalingFactor; # [m], tangential iron rib width
if hasattr(r,'barrier_angles_el'):
barrier_angles_el = r.barrier_angles_el; # [deg], electrical flux-barrier angles
AutoBarrierEndCalc = 0;
else:
barrier_angles_el = np.zeros(Nb);
AutoBarrierEndCalc = 1;
if hasattr(r,'barrier_end_wf'):
wf = r.barrier_end_wf;
else:
wf = 0.5*np.ones(Nb);
if hasattr(r,'wm'):
wm = r.wm*ScalingFactor;
else:
wm = 0;
if hasattr(r,'wrib'):
wrib = r.wrib*ScalingFactor + wm; # [m], radial iron rib widths
else:
wrib = np.zeros([1,Nb]) + wm;
Dend = Dr - 2*wrib_t; # [m], flux-barrier end diameter
Dsh = Dend - 2*( np.sum(tb) + np.sum(wc) ); # [m], shaft diameter
R0 = Dsh/2; # [m], shaft radius
barrier_angles = barrier_angles_el/p; # [deg], flux-barrier angles
if hasattr(r,'barrier_end'):
barrier_end = r.barrier_end;
else:
barrier_end = '';
## Precomputations
rho0 = rho_map(R0);
## Central base points
RAprime = Dend/2 - np.concatenate( (np.array([0]), np.cumsum( tb[0:-1]) ) ) - np.cumsum( wc[0:-1] ); # top
RBprime = RAprime - tb; # bottom
te_qAxis = pi/(2*p); # q-axis angle in rotor reference frame
# get A' and B' considering rib and magnet widths
mCentral = np.tan(te_qAxis); # slope
qCentral = np.tile( -wrib/2/np.cos(te_qAxis), 2); # intercept
psiCentralPtA = psi_fluid(rho_map(RAprime), xi_map(te_qAxis), rho0);
psiCentralPtB = psi_fluid(rho_map(RBprime), xi_map(te_qAxis), rho0);
psiCentralPt = np.array( np.concatenate( (psiCentralPtA, psiCentralPtB) ) );
psiA = psiCentralPtA;
psiB = psiCentralPtB;
FunctionTolerance = 10*np.spacing(1);
StepTolerance = 1e4*np.spacing(1);
X0 = np.repeat(te_qAxis, 2*Nb);
data = (psiCentralPt,rho0,mCentral,qCentral);
# test function
# print( CentralPt_Eq(X0, *data ) )
teAB = sp.optimize.fsolve(CentralPt_Eq, X0, args=data, xtol=StepTolerance, epsfcn=FunctionTolerance);
teA = teAB[0:Nb];
teB = teAB[Nb:];
RA = r_map( rho_fluid(psiA, xi_map(teA), rho0) );
RB = r_map( rho_fluid(psiB, xi_map(teB), rho0) );
# central base points
zA = RA*np.exp(1j*teA);
zB = RB*np.exp(1j*teB);
xA = zA.real;
yA = zA.imag;
xB = zB.real;
yB = zB.imag;
# magnet central base point radius computation
RAsecond = RA*np.cos(te_qAxis - teA);
RBsecond = RB*np.cos(te_qAxis - teB);
Rmag = (RAprime + RAsecond + RBprime + RBsecond)/4;
# 1st test --> OK!
# print(RA,teA,RB,teB)
# print(xA,yA,xB,yB)
# Outer base points C,D preparation
RCprime = Dend/2;
teCprime = th_map( xi_fluid(psiA, rho_map(RCprime), rho0) );
xCprime = Dend/2*np.cos(teCprime);
yCprime = Dend/2*np.sin(teCprime);
RDprime = Dend/2;
teDprime = th_map( xi_fluid(psiB, rho_map(RDprime), rho0) );
xDprime = Dend/2*np.cos(teDprime);
yDprime = Dend/2*np.sin(teDprime);
if AutoBarrierEndCalc:
teE = ( teCprime*(1 - wf) + teDprime*wf );
aphE = pi/2/p - teE;
barrier_angles = 180/np.pi*aphE;
barrier_angles_el = p*barrier_angles;
else:
aphE = barrier_angles*pi/180;
teE = pi/2/p - aphE;
xE = Dend/2*np.cos(teE);
yE = Dend/2*np.sin(teE);
# 2nd test --> OK!
# print(xE,yE)
## Outer base points C (top)
if barrier_end == 'rect':
RC = RCprime;
teC = teCprime;
xC = xCprime;
yC = yCprime;
xOC = xC;
yOC = yC;
else:
# 1st try
# X0 = [ 1.5*teE, 0.9*xE, 0.9*yE, 0.8*xE, 0.8*yE, 0.25*xE];
# best try
xC0 = ( xE + xCprime + 0.1*xA )/(2 + 0.1);
yC0 = ( yE + yCprime )/2;
thC0 = np.arctan(yC0/xC0);
xOC0 = ( xE + xC0 + 0 )/3;
yOC0 = ( yE + yC0 + 0 )/3;
RCOCE0 = np.sqrt( (xOC0 - xE)**2 + (yOC0 - yE)**2 );
X0 = [ thC0, xC0, yC0, xOC0, yOC0, RCOCE0];
X0 = np.reshape(X0, Nb*6);
data = (psiA, rho0, xE, yE);
X = sp.optimize.fsolve( BarrierEndSystem, X0, args=data);
xOC = X[3*Nb:4*Nb];
yOC = X[4*Nb:5*Nb];
xC = X[1*Nb:2*Nb];
yC = X[2*Nb:3*Nb];
RC = np.sqrt(xC**2 + yC**2);
teC = np.arctan2(yC, xC);
# 3rd test --> OK!
# print(xOC)
# print(yOC)
# print(xC)
# print(yC)
# print(RC)
# print(teC)
## Outer base points D (bottom)
if barrier_end == 'rect':
RD = RDprime;
teD = teDprime;
xD = xDprime;
yD = yDprime;
xOD = xD;
yOD = yD;
else:
# 1st try
# X0 = [ 0.8*teE, 0.8*xE, 0.8*yE, 0.9*xE, 0.9*yE, 0.2*xE];
# best try
xD0 = ( xE + xDprime )/2;
yD0 = ( yE + yDprime )/2;
thD0 = np.arctan(yD0/xD0);
xOD0 = ( xE + xD0 + xC )/3;
yOD0 = ( yE + yD0 + yC )/3;
RDODE0 = np.sqrt( (xOD0 - xE)**2 + (yOD0 - yE)**2 );
X0 = [ thD0, xD0, yD0, xOD0, yOD0, RDODE0];
X0 = np.reshape(X0, Nb*6);
data = (psiB, rho0, xE, yE);
X = sp.optimize.fsolve( BarrierEndSystem, X0, args=data);
xOD = X[3*Nb:4*Nb];
yOD = X[4*Nb:5*Nb];
xD = X[1*Nb:2*Nb];
yD = X[2*Nb:3*Nb];
RD = np.sqrt(xD**2 + yD**2);
teD = np.arctan2(yD, xD);
# 4th test --> OK!
# print(xOD)
# print(yOD)
# print(xD)
# print(yD)
# print(RD)
# print(teD)
## Flux-barrier points
# We already have the potentials of the two flux-barrier sidelines
phiA = phi_fluid( rho_map(RA), xi_map(teA), rho0);
phiB = phi_fluid( rho_map(RB), xi_map(teB), rho0);
phiC = phi_fluid( rho_map(RC), xi_map(teC), rho0);
phiD = phi_fluid( rho_map(RD), xi_map(teD), rho0);
barrier = structtype();
XX = [];
YY = [];
Rm = [];
for bkk in range(0,Nb):
dphiAC = np.divide(phiC[bkk] - phiA[bkk], Nstep[bkk]);
dphiBD = np.divide(phiD[bkk] - phiB[bkk], Nstep[bkk]);
# we create the matrix of potentials phi needed for points intersections
PhiAC = phiA[bkk] + np.cumsum( np.tile(dphiAC, Nstep[bkk] - 1) );
PhiBD = phiB[bkk] + np.cumsum( np.tile(dphiBD, Nstep[bkk] - 1) );
PsiAC = np.tile( psiA[bkk], Nstep[bkk]-1);
PsiBD = np.tile( psiB[bkk], Nstep[bkk]-1);
X0 = np.concatenate( [np.linspace(rho0, Dend/2, np.size(PhiAC)), np.linspace(pi/4, xi_map(teE[bkk]), np.size(PhiAC))] );
data = (PsiAC, PhiAC, rho0, Nstep[bkk]-1);
RhoXi_AC = sp.optimize.fsolve( PsiPhi, X0, args=data );
data = (PsiBD, PhiBD, rho0, Nstep[bkk]-1);
RhoXi_BD = sp.optimize.fsolve( PsiPhi, X0, args=data );
R_AC = r_map( RhoXi_AC[0:Nstep[bkk]-1] );
te_AC = th_map( RhoXi_AC[Nstep[bkk]-1:] );
R_BD = r_map( RhoXi_BD[0:Nstep[bkk]-1] );
te_BD = th_map( RhoXi_BD[Nstep[bkk]-1:] );
# 5th test --> OK!
# print(R_AC, te_AC)
# print(R_BD, te_BD)
Zeta = np.concatenate( [[
# top side
| |
from logisimpy.circuit import Circuit, Wire
from logisimpy.logic import AND, AND3, NOT, OR
class MUX2x1(Circuit):
    """2-to-1 multiplexer.

    Inputs: d0, d1 (data) and select. Output: out.
    If select is 0 the output follows d0, otherwise it follows d1:

        out = (d1 AND select) OR (d0 AND NOT select)
    """

    def __init__(self, *a, **kw):
        # Input wires
        self.d0 = Wire()
        self.d1 = Wire()
        self.select = Wire()

        # Gates
        gate_d1 = AND()      # passes d1 when select is high
        gate_d0 = AND()      # passes d0 when select is low
        inverter = NOT()
        combiner = OR()

        # d1 path, gated by select
        self.d1.connect(gate_d1.a)
        self.select.connect(gate_d1.b)
        # d0 path, gated by NOT select
        self.select.connect(inverter.a)
        inverter.out.connect(gate_d0.b)
        self.d0.connect(gate_d0.a)
        # Combine both gated paths
        gate_d1.out.connect(combiner.b)
        gate_d0.out.connect(combiner.a)

        # Output wire
        self.out = combiner.out

        super(MUX2x1, self).__init__(inputs=[self.d0, self.d1, self.select], *a, **kw)
class MUX4x1(Circuit):
"""
4x1 Multiplexer logic.
Truth Table:
| S1 | S0 | D3 | D2 | D1 | D0 | O |
| - | - | - | - | - | - | - |
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| - | - | 0 | 0 | 0 | 0 | 1 |
| - | - | 0 | 0 | 1 | 0 | 0 |
| - | - | 0 | 0 | 1 | 1 | 1 |
| - | - | 0 | 1 | 0 | 0 | 0 |
| - | - | 0 | 1 | 0 | 1 | 1 |
| - | - | 0 | 1 | 1 | 0 | 0 |
| - | - | 0 | 1 | 1 | 1 | 1 |
| - | - | 1 | 0 | 0 | 0 | 0 |
| - | - | 1 | 0 | 0 | 1 | 1 |
| - | - | 1 | 0 | 1 | 0 | 0 |
| - | - | 1 | 0 | 1 | 1 | 1 |
| - | - | 1 | 1 | 0 | 0 | 0 |
| - | - | 1 | 1 | 0 | 1 | 1 |
| - | - | 1 | 1 | 1 | 0 | 0 |
| - | - | 1 | 1 | 1 | 1 | 1 |
| - | - | - | - | - | - | - |
| 0 | 1 | 0 | 0 | 0 | 0 | 0 |
| - | - | 0 | 0 | 0 | 0 | 0 |
| - | - | 0 | 0 | 1 | 0 | 1 |
| - | - | 0 | 0 | 1 | 1 | 1 |
| - | - | 0 | 1 | 0 | 0 | 0 |
| - | - | 0 | 1 | 0 | 1 | 0 |
| - | - | 0 | 1 | 1 | 0 | 1 |
| - | - | 0 | 1 | 1 | 1 | 1 |
| - | - | 1 | 0 | 0 | 0 | 0 |
| - | - | 1 | 0 | 0 | 1 | 0 |
| - | - | 1 | 0 | 1 | 0 | 1 |
| - | - | 1 | 0 | 1 | 1 | 1 |
| - | - | 1 | 1 | 0 | 0 | 0 |
| - | - | 1 | 1 | 0 | 1 | 0 |
| - | - | 1 | 1 | 1 | 0 | 1 |
| - | - | 1 | 1 | 1 | 1 | 1 |
| - | - | - | - | - | - | - |
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
| - | - | 0 | 0 | 0 | 0 | 0 |
| - | - | 0 | 0 | 1 | 0 | 0 |
| - | - | 0 | 0 | 1 | 1 | 0 |
| - | - | 0 | 1 | 0 | 0 | 1 |
| - | - | 0 | 1 | 0 | 1 | 1 |
| - | - | 0 | 1 | 1 | 0 | 1 |
| - | - | 0 | 1 | 1 | 1 | 1 |
| - | - | 1 | 0 | 0 | 0 | 0 |
| - | - | 1 | 0 | 0 | 1 | 0 |
| - | - | 1 | 0 | 1 | 0 | 0 |
| - | - | 1 | 0 | 1 | 1 | 0 |
| - | - | 1 | 1 | 0 | 0 | 1 |
| - | - | 1 | 1 | 0 | 1 | 1 |
| - | - | 1 | 1 | 1 | 0 | 1 |
| - | - | 1 | 1 | 1 | 1 | 1 |
| - | - | - | - | - | - | - |
| 1 | 1 | 0 | 0 | 0 | 0 | 0 |
| - | - | 0 | 0 | 0 | 0 | 0 |
| - | - | 0 | 0 | 1 | 0 | 0 |
| - | - | 0 | 0 | 1 | 1 | 0 |
| - | - | 0 | 1 | 0 | 0 | 0 |
| - | - | 0 | 1 | 0 | 1 | 0 |
| - | - | 0 | 1 | 1 | 0 | 0 |
| - | - | 0 | 1 | 1 | 1 | 0 |
| - | - | 1 | 0 | 0 | 0 | 1 |
| - | - | 1 | 0 | 0 | 1 | 1 |
| - | - | 1 | 0 | 1 | 0 | 1 |
| - | - | 1 | 0 | 1 | 1 | 1 |
| - | - | 1 | 1 | 0 | 0 | 1 |
| - | - | 1 | 1 | 0 | 1 | 1 |
| - | - | 1 | 1 | 1 | 0 | 1 |
| - | - | 1 | 1 | 1 | 1 | 1 |
"""
def __init__(self, *a, **kw):
# Input
self.d0 = Wire()
self.d1 = Wire()
self.d2 = Wire()
self.d3 = Wire()
self.s0 = Wire()
self.s1 = Wire()
# Logic Circuits
and30 = AND3()
and31 = AND3()
and32 = | |
# <reponame>Arnukk/FP_RFP_LH <gh_stars>1-10
# vim:fileencoding=utf8
#
# Project: Implementation of the Lemke-Howson algorithm for finding MNE
# Author: <NAME> <<EMAIL>>, 2009
#
"""This module contains a Lemke-Howson algorithm implementation
and various functions that are used in that algorithm.
"""
import matrix
import rational
def normalizeMatrices(m1, m2):
    """Returns normalized selected matrices in a tuple.

    Normalized matrix does not have any row with all zeros, nor
    any column with all zeros. Also any element will contain positive
    number.

    m1 - first matrix to be normalized (Matrix)
    m2 - second matrix to be normalized (Matrix)

    The normalization is done by adding a proper constant to all
    item of both matrices (the least possible constant + 1 is chosen).
    If both matrices do not have any negative items, nor any items
    equal to zero, no constant is added.
    """
    # NOTE: Python 2 code (xrange); Matrix uses 1-based indexing.
    ms = (m1, m2)
    # Check for the least value in both matrices
    lowestVal = m1.getItem(1, 1)
    for m in ms:
        for i in xrange(1, m.getNumRows() + 1):
            for j in xrange(1, m.getNumCols() + 1):
                if m.getItem(i, j) < lowestVal:
                    lowestVal = m.getItem(i, j)
    # Fresh result matrices with the same dimensions as the inputs.
    normMs = (matrix.Matrix(m1.getNumRows(), m1.getNumCols()),
              matrix.Matrix(m2.getNumRows(), m2.getNumCols()))
    # Copy all items from both matrices and add a proper constant
    # to all values; cnst = 0 means the inputs were already positive.
    cnst = 0 if lowestVal > 0 else abs(lowestVal) + 1
    for k in xrange(0, len(normMs)):
        for i in xrange(1, ms[k].getNumRows() + 1):
            for j in xrange(1, ms[k].getNumCols() + 1):
                normMs[k].setItem(i, j, ms[k].getItem(i, j) + cnst)
    return normMs
def createTableaux(m1, m2):
    """Creates a tableaux from the two selected matrices.

    m1 - first matrix (Matrix instance)
    m2 - second matrix (Matrix instance)

    Preconditions:
    - m1 must have the same number of rows and columns as m2

    Raises ValueError if some of the preconditions are not met.
    """
    if m1.getNumRows() != m2.getNumRows() or m1.getNumCols() != m2.getNumCols():
        raise ValueError, 'Selected matrices does not have the same number ' +\
            'of rows and columns'
    # The total number of strategies of both players
    S = m1.getNumRows() + m1.getNumCols()
    # The tableaux will have S rows, because there are S slack variables
    # and S + 2 columns, because the first column is the index of the basis
    # in the current column and the second column is initially all 1s
    t = matrix.Matrix(S, S + 2)
    # Initialize the first column (index of the currect basis variable).
    # Because there are only slack variables at the beginning, initialize
    # it to a sequence of negative numbers starting from -1.
    for i in xrange(1, t.getNumRows() + 1):
        t.setItem(i, 1, -i)
    # Initialize the second column to all 1s (current value of all basis)
    for i in xrange(1, t.getNumRows() + 1):
        t.setItem(i, 2, 1)
    # Initialize indices from the first matrix; payoffs are negated so the
    # pivoting steps can work with "coefficient < 0" tests.
    for i in xrange(1, m1.getNumRows() + 1):
        for j in xrange(1, m1.getNumCols() + 1):
            t.setItem(i, m1.getNumRows() + j + 2, -m1.getItem(i, j))
    # Initialize indices from the second matrix (transposed placement:
    # player 2's payoffs fill the lower-left part of the tableaux).
    for i in xrange(1, m2.getNumRows() + 1):
        for j in xrange(1, m2.getNumCols() + 1):
            t.setItem(m1.getNumRows() + j, i + 2, -m2.getItem(i, j))
    return t
def makePivotingStep(t, p1SCount, ebVar):
    """Makes a single pivoting step in the selected tableaux by
    bringing the selected variable into the basis. All changes are done
    in the original tableaux. Returns the variable that left the basis.

    t - tableaux (Matrix)
    p1SCount - number of strategies of player 1 (number)
    ebVar - variable that will enter the basis (number)

    Preconditions:
    - 0 < abs(ebVar) <= t.getNumRows()
    - 0 < p1SCount < t.getNumRows()

    Raises ValueError if some of the preconditions are not met.
    """
    # 1st precondition
    if abs(ebVar) <= 0 or abs(ebVar) > t.getNumRows():
        raise ValueError, 'Selected variable index is invalid.'
    # 2nd precondition
    # NOTE(review): the docstring requires 0 < p1SCount but this check also
    # admits p1SCount == 0 — confirm which is intended.
    if p1SCount < 0 or t.getNumRows() <= p1SCount:
        raise ValueError, 'Invalid number of strategies of player 1.'

    # Returns the column corresponding to the selected variable
    def varToCol(var):
        # Apart from players matrices values, there are 2 additional
        # columns in the tableaux
        return 2 + abs(var)

    # Returns the list of row numbers which corresponds
    # to the selected variable
    def getRowNums(var):
        # Example (for a game 3x3):
        # -1,-2,-3,4,5,6 corresponds to the first part of the tableaux
        # 1,2,3,-4,-5,-6 corresponds to the second part of the tableaux
        if -p1SCount <= var < 0 or var > p1SCount:
            return xrange(1, p1SCount + 1)
        else:
            return xrange(p1SCount + 1, t.getNumRows() + 1)

    # Check which variable should leave the basis using the min-ratio rule
    # (it will have the lowest ratio)
    lbVar = None
    minRatio = None
    # Check only rows in the appropriate part of the tableaux
    # NOTE(review): assumes at least one row has a negative coefficient for
    # ebVar; otherwise lbVarRow below is unbound — confirm caller guarantees it.
    for i in getRowNums(ebVar):
        if t.getItem(i, varToCol(ebVar)) < 0:
            # Exact rational arithmetic avoids float rounding in the ratio test.
            ratio = -rational.Rational(t.getItem(i, 2)) /\
                t.getItem(i, varToCol(ebVar))
            if minRatio == None or ratio < minRatio:
                minRatio = ratio
                lbVar = t.getItem(i, 1)
                lbVarRow = i
                lbVarCoeff = t.getItem(i, varToCol(ebVar))

    # Update the row in which the variable that will leave the basis was
    # found in the previous step: swap the basis label and normalize the row.
    t.setItem(lbVarRow, 1, ebVar)
    t.setItem(lbVarRow, varToCol(ebVar), 0)
    t.setItem(lbVarRow, varToCol(lbVar), -1)
    for j in xrange(2, t.getNumCols() + 1):
        newVal = rational.Rational(t.getItem(lbVarRow, j)) / abs(lbVarCoeff)
        t.setItem(lbVarRow, j, newVal)

    # Update other rows in the appropriate part of the tableaux by
    # eliminating the entering variable from them.
    for i in getRowNums(ebVar):
        if t.getItem(i, varToCol(ebVar)) != 0:
            for j in xrange(2, t.getNumCols() + 1):
                newVal = t.getItem(i, j) + t.getItem(i, varToCol(ebVar)) *\
                    t.getItem(lbVarRow, j)
                t.setItem(i, j, newVal)
            t.setItem(i, varToCol(ebVar), 0)

    return lbVar
def getEquilibrium(t, p1SCount):
"""Returns the equilibrium from the given tableaux. The returned result
might contain mixed strategies like (1/3, 0/1), so normalization is need to
be performed on the result.
t - tableaux (Matrix)
p1SCount - number of strategies of player 1 (number)
Preconditions:
- 0 < p1SCount < t.getNumRows()
- first column of the matrix must contain each number from 1 to
t.getNumRows (inclusive, in absolute value)
Raises ValueError if some of the preconditions are not met.
"""
# 1st precondition
if p1SCount < 0 or t.getNumRows() <= p1SCount:
raise ValueError, 'Invalid number of strategies of player 1.'
# 2nd precondition
firstColNums = []
for i in xrange(1, t.getNumRows() + 1):
firstColNums.append(abs(t.getItem(i, 1)))
for i in xrange(1, t.getNumRows() + 1):
if not i in firstColNums:
raise ValueError, 'Invalid indices in the first column of the tableaux.'
# I decided to use a list instead of a tuple, because I need
# to modify it (tuples are immutable)
eqs = t.getNumRows() * [0]
# Equilibrium is in the second column of the tableaux
for i in xrange(1, t.getNumRows() + 1):
# Strategy
strat = t.getItem(i, 1)
# Strategy probability
prob = t.getItem(i, 2)
# If the strategy index or the probability is lower than zero,
# set it to zero instead
eqs[abs(strat) - 1] = rational.Rational(0) if (strat < 0 or prob < 0) else prob
# Convert the found equilibrium into a tuple
return (tuple(eqs[0:p1SCount]), tuple(eqs[p1SCount:]))
def normalizeEquilibrium(eq):
"""Normalizes and returns the selected equilibrium (every probability
in a players mixed strategy will have the same denominator).
eq - equilibrium to be normalized (tuple of two tuples of Rationals)
Preconditions:
- len(eq) == 2 and len(eq[0] > 0) and len(eq[1]) > 0
- eq[x] must contain a non-empty tuple of Rationals for x in {1,2}
Raises ValueError if some of the preconditions are not met.
"""
# 1st precondition
if len(eq) != 2 or (len(eq[0]) == 0 or len(eq[1]) == 0):
raise ValueError, 'Selected equilibrium is not valid.'
# 2nd precondition
for i in xrange(0, 2):
for j in xrange(0, len(eq[i])):
if not isinstance(eq[i][j], rational.Rational):
raise ValueError, 'Selected equilibrium contains a ' +\
'non-rational number.'
# Normalizes a single part of the equilibrium (the normalization
# procedure is the same as with vectors)
def normalizeEqPart(eqPart):
probSum = reduce(lambda x, y: x + y, eqPart, 0)
return tuple(map(lambda x: x * probSum.recip(), eqPart))
return (normalizeEqPart(eq[0]), normalizeEqPart(eq[1]))
def lemkeHowson(m1, m2):
"""Runs the Lemke-Howson algorithm on the selected two matrices and
returns the found equilibrium in mixed strategies. The equilibrium
will be normalized | |
from prompt_toolkit.history import InMemoryHistory
from prompt_toolkit.contrib.completers import WordCompleter
from prompt_toolkit import prompt
import networkx as nx
import sys
import os
import re
from configurator import Configuration
from configurator import PREFIX_COMMAND_HANDLER, EXIT_COMMANDS, SINGLE_WORD_COMMANDS
from configurator import handlers
from configurator.configuration import IntrahostModel, FitnessModel, TransmissionModel
from configurator.validators import DirExistsValidator, StatementValidator
from configurator.sequence_generator import multiple_host_random_fasta as multihost
# Text shown at the start of every shell input line.
PROMPT = 'contagion> '
# Commands that terminate the shell loop.
# NOTE(review): 'COMANNDS' is a typo for 'COMMANDS'; kept as-is because the
# name is referenced below (and possibly elsewhere in this module).
EXIT_COMANNDS = ['exit', 'exit()', 'quit', 'quit()', ]
# Commands that consist of a single word (take no arguments).
SINGLEVALUE_COMMANDS = EXIT_COMANNDS + ['configure',]
# Commands followed by a positional argument (e.g. "create fitness_model").
SPECIAL_COMMANDS = ['create', 'reset']
# Commands of the form "<op> <name>=<value>".
OP_COMMANDS = ['set', ]
# Every recognized command keyword.
COMMANDS = OP_COMMANDS + EXIT_COMANNDS
# Names of the configurable properties, taken from a default Configuration.
CONFIG_PROPERTIES = list(Configuration().__dict__.keys())
def prompt_config_path():
    """Ask the user for a configuration file path.

    Falls back to ``<cwd>/config.toml`` when the user submits an empty
    answer. The answer is validated with DirExistsValidator.
    """
    fallback = os.path.join(os.getcwd(), 'config.toml')
    answer = prompt(
        'Configuration file path [{}]: '.format(fallback),
        validator=DirExistsValidator()
    )
    return answer if answer else fallback
def configuration_wizard(config_obj, history=None):
    """Interactively collect a full simulation configuration.

    Prompts for global settings, optionally generates pathogen sequences
    and a host contact network, then creates intrahost/fitness/transmission
    models and finally offers to save the configuration.

    config_obj - Configuration instance to populate (mutated in place)
    history - optional prompt_toolkit history shared across prompts

    Bug fix: yes/no answers are now interpreted from their text. The
    original relied on string truthiness (``if prompt(...)`` and
    ``bool(coinfection)``), which treated any non-empty answer —
    including "n" or "N" — as a yes.
    """
    def _is_yes(answer):
        # prompt() returns the typed text (or the prefilled default), so an
        # affirmative answer is anything starting with 'y'/'Y'.
        return str(answer).strip().lower().startswith('y')

    num_generations = prompt('Number of generations: ')
    num_instances = prompt('Number of simulation trials: ')
    host_popsize = prompt('Host population size: ')
    host_popsize = int(host_popsize)
    epidemic_model_completer = WordCompleter([
        'si', 'sir', 'sirs',
        'sei', 'seis', 'seirs',
        'endtrans', 'exchange'
    ])
    epidemic_model = prompt('Epidemic model: ', completer=epidemic_model_completer)
    coinfection = prompt('Allow coinfection [y/N]: ', default='N')
    # Generate pathogens
    if _is_yes(prompt('Do you want to generate pathogen sequences? [Y/n]: ', default='Y')):
        pathogen_path = prompt('Pathogen sequences save path: ', history=history, validator=None)
        num_sites = prompt('Length of pathogen sequence: ')
        chars = prompt('Character states: ')
        popsize = prompt('Number of pathogens per host: ')
        char_dist = prompt('Probability of picking a character (ENTER if uniform probability): ')
        # Extract the numeric weights from the free-form answer
        char_dist = [float(f) for f in re.findall(r'\d*\.?\d+', char_dist)]
        host_ids = prompt('Infected host IDs: ', history=history, validator=None)
        host_ids = parse_host_ids(host_ids)
        clonal_completer = WordCompleter([
            'all', 'host', 'random',
        ])
        clonal = prompt('Pathogen sequence identity (all|host|random): ', completer=clonal_completer)
        # Generate the multi-host FASTA text and write it out
        fasta_text = multihost(host_ids, int(popsize), chars,
                               int(num_sites), clonal, char_dist)
        with open(pathogen_path, 'w') as f:
            print(fasta_text, file=f)
    else:
        pathogen_path = prompt('Pathogen sequences path: ', history=history, validator=None)
    # Generate network
    if _is_yes(prompt('Do you want to generate a random network? [Y/n]: ', default='Y')):
        host_network_path = prompt('Host network save path: ', history=history, validator=None)
        network_completer = WordCompleter([
            'gnp', 'binomial', 'erdos-renyi',
            'barabasi-albert', 'scale-free',
            'holme-kim', 'powerlaw_cluster',
            'complete',
        ])

        def _write_network(network):
            # Write each undirected edge as "a b 1.0" (unit edge weight).
            with open(host_network_path, 'w') as f:
                for a, row in network.adjacency():
                    for b in row:
                        print(a, b, 1.0, file=f)

        while True:
            network_type = prompt('Network type: ', completer=network_completer)
            n = host_popsize
            if str(network_type).lower() in ['gnp', 'binomial', 'erdos-renyi']:
                # G(n, p): every possible edge appears with probability p.
                p = prompt('Probability of an edge between two nodes p: ')
                _write_network(nx.fast_gnp_random_graph(n, float(p), directed=False))
                break
            elif str(network_type).lower() in ['barabasi-albert', 'scale-free']:
                # Preferential attachment; nx requires 1 <= m < n.
                m = prompt('Number of edges to attach from a new node to existing nodes m (1 <= m < n): ')
                _write_network(nx.barabasi_albert_graph(n, int(m)))
                break
            elif str(network_type).lower() in ['holme-kim', 'powerlaw_cluster']:
                # Powerlaw cluster graph; nx requires 1 <= m <= n and 0 <= p <= 1.
                m = prompt('Number of random edges to add for each new node m (1 <= m <= n): ')
                p = prompt('Probability of adding a triangle after adding a random edge p (0 <= p <= 1): ')
                _write_network(nx.powerlaw_cluster_graph(n, int(m), float(p)))
                break
            elif str(network_type).lower() == 'complete':
                _write_network(nx.complete_graph(n))
                break
            else:
                print('unrecognized network type')
    else:
        host_network_path = prompt('Host network path: ', history=history, validator=None)
    config_obj.num_generations = int(num_generations)
    config_obj.num_instances = int(num_instances)
    config_obj.host_popsize = int(host_popsize)
    config_obj.epidemic_model = epidemic_model
    # Bug fix: bool('N') is True — interpret the answer text instead.
    config_obj.coinfection = _is_yes(coinfection)
    config_obj.pathogen_path = pathogen_path
    config_obj.host_network_path = host_network_path
    num_intrahost_model = prompt('How many intrahost models do you want to create: ')
    for i in range(int(num_intrahost_model)):
        create_intrahost_model(epidemic_model, history=history)
    num_fitness_model = prompt('How many fitness models do you want to create: ')
    for i in range(int(num_fitness_model)):
        create_fitness_model(history=history)
    num_transmission_model = prompt('How many transmission models do you want to create: ')
    for i in range(int(num_transmission_model)):
        create_transmission_model(history=history)
    if _is_yes(prompt('Do you want to save this configuration? [Y/n]: ', default='Y')):
        config_obj.save()
def create_model(config_obj, text, history=None):
    """Build the model named in a ``create <model_type>`` command and
    register it on *config_obj* under its model name.

    config_obj - Configuration to register the new model on
    text - the full command line, e.g. "create fitness_model"
    history - optional prompt_toolkit history forwarded to the wizards
    """
    model_type = text.split()[1]
    if model_type == 'transmission_model':
        new_model = create_transmission_model(history)
        config_obj.transmission_model_dict[new_model.model_name] = new_model
    elif model_type == 'fitness_model':
        new_model = create_fitness_model(history)
        config_obj.fitness_model_dict[new_model.model_name] = new_model
    elif model_type == 'intrahost_model':
        # An intrahost model depends on the epidemic model; prompt for one
        # first if it has not been configured yet.
        epidemic_model = config_obj.epidemic_model
        if not config_obj.epidemic_model:
            epidemic_model = set_epidemic_model(config_obj, history=history)
        new_model = create_intrahost_model(epidemic_model, history)
        # NOTE(review): the intrahost model is stored in fitness_model_dict —
        # this looks like it should be a dedicated intrahost registry; confirm.
        config_obj.fitness_model_dict[new_model.model_name] = new_model
def create_fitness_model(history=None):
    """Interactively gather the parameters of a fitness model, optionally
    generate its fitness matrix file, and return a populated FitnessModel.

    history - optional prompt_toolkit history shared across prompts
    """
    fitness_model_completer = WordCompleter(['multiplicative', 'additive'])
    model_name = prompt('Model name: ', history=history, validator=None)
    host_ids = parse_host_ids(prompt('Host IDs: ', history=history, validator=None))
    fitness_model = prompt('Fitness model: ', history=history, validator=None, completer=fitness_model_completer)
    # Either generate a fitness matrix file now, or point at an existing one
    generate_model = prompt('Do you want to generate a fitness model? Y/n :', default='Y')
    if str(generate_model).lower() == 'y':
        fitness_model_path = prompt('Fitness model save path: ', history=history, validator=None)
        # TODO: add validator for path
        if str(prompt('Create neutral model [Y/n]: ', default='Y')).lower() == 'y':
            # Neutral model: identical fitness at every site
            num_sites = prompt('Number of sites: ')
            num_variants = prompt('Number of potential states per site: ')
            if fitness_model == 'multiplicative':
                generate_neutral_fitness(int(num_sites), int(num_variants), fitness_model_path)
            else:
                growth_rate = prompt('Growth rate: ')
                generate_additive_neutral_fitness(int(num_sites), int(num_variants), float(growth_rate), fitness_model_path)
        else:
            # Single-preference model: user supplies explicit values
            num_sites = prompt('Number of sites: ')
            fitnesses = prompt('Enter list of fitness values: ')
            if fitness_model == 'multiplicative':
                generate_unipreference_fitness(int(num_sites), fitnesses, fitness_model_path)
            else:
                growth_rates = prompt('Enter list of growth rates: ')
                generate_additive_unipreference_fitness(int(num_sites), growth_rates, fitness_model_path)
    else:
        fitness_model_path = prompt('Fitness model path: ', history=history, validator=None)
    # Assemble the model object
    new_model = FitnessModel()
    new_model.model_name = model_name
    new_model.host_ids = host_ids
    new_model.fitness_model = fitness_model
    new_model.fitness_model_path = fitness_model_path
    return new_model
def create_transmission_model(history=None):
    """Prompt for the fields of a transmission model and return a populated
    TransmissionModel instance.

    history - optional prompt_toolkit history shared across prompts
    """
    name = prompt('Model name: ', history=history, validator=None)
    hosts = parse_host_ids(prompt('Host IDs: ', history=history, validator=None))
    probability = prompt('Transmission probability: ', history=history, validator=None)
    size = prompt('Transmission size: ', history=history, validator=None)
    # Assemble the model object
    new_model = TransmissionModel()
    new_model.model_name = name
    new_model.host_ids = hosts
    new_model.transmission_prob = probability
    new_model.transmission_size = size
    return new_model
def set_epidemic_model(config_obj, history=None):
    """Prompt for an epidemic model name and store it on *config_obj*.

    Returns the entered model name; an empty answer leaves the
    configuration untouched.
    """
    model_completer = WordCompleter([
        'si', 'sis', 'sir', 'sirs', 'sei', 'seir', 'seirs',
        'endtrans', 'exchange',
    ])
    answer = prompt('Epidemic model: ', history=history, validator=None, completer=model_completer)
    if answer:
        config_obj.epidemic_model = answer
        return answer
def set_property(config_obj, text):
    """Apply a ``set <name> = <value>`` statement to *config_obj*.

    The value is always stored as a string, with any surrounding single or
    double quotes removed (first leading ', then leading ", then the same
    on the right).
    """
    _, assignment = text.split(None, 1)
    name, raw_value = re.split(r'\s*\=\s*', assignment)
    value = raw_value.lstrip("'").lstrip('"').rstrip("'").rstrip('"')
    setattr(config_obj, name, value)
    # TODO: return a message confirming that the value was set
def return_property(config_obj, text):
    """Return the value of configuration property *text* in a printable form.

    None becomes the string 'None' and an empty string becomes "''" so the
    shell always has something visible to display. Unknown names yield an
    explanatory message.
    """
    if text not in config_obj.__dict__:
        return 'unknown configuration parameter'
    value = getattr(config_obj, text)
    if value is None:
        return 'None'
    if isinstance(value, str) and value == '':
        return "''"
    return value
def parse_host_ids(text):
    """Parse host IDs from user input and return them as a list of ints.

    Two forms are accepted:
    - generator syntax ``![start, end, skip]`` -> list(range(start, end, skip))
    - free text: every run of digits becomes one ID.

    Bug fix: the generator branch used to return ``repr([...])`` (a string)
    while the free-text branch returned a lazy ``map`` object; both now
    consistently return a list of ints.
    """
    generate_match = re.search(r'^\!\[\s*(\d+)\s*\,\s*(\d+)\s*\,?\s*(\d+)\s*?\,?\]$', text)
    if generate_match:
        start, end, skip = generate_match.groups()
        return list(range(int(start), int(end), int(skip)))
    return [int(token) for token in re.findall(r'\d+', text)]
def main(config_path=None, contagion_path='contagion'):
    """Run the interactive contagion shell until EOF (Ctrl+D) or an exit
    command is entered.

    config_path - optional path of an existing configuration to load
    contagion_path - path of the contagion executable

    Fixes:
    - exit/quit commands now actually leave the loop (they fell through to
      ``pass`` and were no-ops),
    - prefix commands are dispatched on the first word of the statement;
      the original compared the *list* returned by ``text.split(None, 1)``
      against the handler dict keys, which never matched.
    """
    # Create configuration object
    config_obj = Configuration(config_path=config_path, contagion_path=contagion_path)
    # Instantiate history shared by all prompts
    history = InMemoryHistory()
    # Shell interface loop
    while True:
        try:
            # Statement grammar (enforced by StatementValidator):
            #   configure
            #   run logger=<csv|sqlite> threads=<int>
            #   create|append intrahost_model|fitness_model|transmission_model <model_name>
            #   generate pathogens|network|fitness_matrix
            #   set <configuration property>=<value>
            #   get <configuration property>
            #   reset <configuration property>
            #   load|save configuration <path>
            #   todb|tocsv <basepath> <outpath>
            #   exit|exit()|quit|quit()|q
            #   clear
            text = prompt(PROMPT, history=history, validator=StatementValidator())
        except KeyboardInterrupt:  # Ctrl+C: discard the current line
            continue
        except EOFError:           # Ctrl+D: leave the shell
            break
        if not text:
            continue
        # Match single-word commands first
        if text in SINGLE_WORD_COMMANDS:
            if text in EXIT_COMMANDS:
                break  # fixed: used to fall through to `pass`
            elif text == 'configure':
                pass   # TODO: launch the configuration wizard
            elif text == 'clear':
                pass   # TODO: clear the screen
            continue
        # Dispatch "<command> <args...>" statements on their first word
        command = text.split(None, 1)[0]
        if command in PREFIX_COMMAND_HANDLER:
            tokens = text.split(None)[1:]
            args = [tok for tok in tokens if '=' not in tok]
            kwargs = dict([tok.split('=') for tok in tokens if '=' in tok])
            PREFIX_COMMAND_HANDLER[command](*args, config_obj=config_obj, **kwargs)
if __name__ == '__main__':
# TODO: Use click to get contagion path
contagion_path = '/Volumes/Data/golang/src/github.com/kentwait/contagiongo/contagion'
config_path = sys.argv[1] | |
#from numba import jit
from typing import List, Dict
from skdecide.builders.discrete_optimization.generic_tools.mip.pymip_tools import MyModelMilp
from skdecide.builders.discrete_optimization.vrp.vrp_toolbox import length
import networkx as nx
import os
import random
this_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(this_path)
import matplotlib.pyplot as plt
from mip import Model, CBC, MINIMIZE, xsum, BINARY, Var, GRB
from skdecide.builders.discrete_optimization.generic_tools.do_solver import SolverDO, ResultStorage
from skdecide.builders.discrete_optimization.generic_tools.do_problem import build_aggreg_function_and_params_objective, \
ParamsObjectiveFunction
from skdecide.builders.discrete_optimization.vrp.vrp_model import VrpProblem, VrpSolution, compute_length
from copy import deepcopy
from skdecide.builders.discrete_optimization.vrp.solver.lp_vrp_iterative import build_graph_pruned_vrp, compute_start_end_flows_info
def init_model_lp(g,
                  edges,
                  edges_in_customers,
                  edges_out_customers,
                  edges_in_merged_graph,
                  edges_out_merged_graph,
                  edges_warm_set,
                  do_lns: bool,
                  fraction: float,
                  start_indexes: List[int],
                  end_indexes: List[int],
                  vehicle_count: int,
                  vehicle_capacity: List[float],
                  include_backward=True,
                  include_triangle=False, solver_name=CBC):
    """Build the master MIP for the iterative VRP solver.

    One binary variable per directed (vehicle, customer) edge, flow
    conservation at every node, one departure/arrival per vehicle at its
    start/end node, and per-vehicle capacity constraints. Warm-start values
    come from edges_warm_set; when do_lns is True a random fraction of the
    edge variables is frozen to those values.

    Fixes relative to the original:
    - do_lns was annotated ``: False`` (a value, not a type); it is now bool,
    - removed a dead loop that iterated over the just-created (and therefore
      always empty) constraint_on_edge dict.

    Returns (model, x_var, constraint_flow_in, constraint_flow_out,
    constraint_on_edge).
    """
    tsp_model = MyModelMilp("VRP-master", sense=MINIMIZE, solver_name=solver_name)
    x_var = {}  # decision variables on edges
    constraint_on_edge = {}
    edges_to_constraint = set()
    if do_lns:
        # LNS: freeze a random fraction of the edge variables to their
        # warm-start values via equality constraints added below.
        edges_to_constraint = set(random.sample(list(edges),
                                                int(fraction*len(edges))))
    iedge = 0
    start = []
    for e in edges:
        x_var[e] = tsp_model.add_var(var_type=BINARY,
                                     obj=g[e[0]][e[1]]["weight"],
                                     name="x_"+str(e))
        # Warm-start value: 1 if the edge is part of the warm solution
        val = 0
        if e in edges_warm_set:
            start += [(x_var[e], 1)]
            val = 1
        else:
            start += [(x_var[e], 0)]
        if e in edges_to_constraint:
            constraint_on_edge[iedge] = tsp_model.add_constr(x_var[e] == val,
                                                             name=str((e, iedge)))
            iedge += 1
    tsp_model.start = start
    constraint_tour_2length = {}
    cnt_tour = 0
    if include_backward:
        # Forbid immediate back-and-forth subtours (u->v followed by v->u),
        # except around a vehicle's start/end nodes.
        for edge in edges:
            if (edge[1], edge[0]) in edges:
                if (edge[1], edge[0]) == edge:
                    continue
                if edge[0][1] == start_indexes[edge[0][0]] or edge[1][1] == start_indexes[edge[0][0]]:
                    continue
                if edge[0][1] == end_indexes[edge[0][0]] or edge[1][1] == end_indexes[edge[0][0]]:
                    continue
                constraint_tour_2length[cnt_tour] = \
                    tsp_model.add_constr(x_var[edge]+x_var[(edge[1], edge[0])] <= 1,
                                         name="tour_"+str(edge))
                cnt_tour += 1
    if include_triangle:
        # Forbid 3-cycles: at most two of the three edges of any triangle.
        constraint_triangle = {}
        for node in g.nodes():
            neigh = set([n for n in nx.neighbors(g, node)])
            neigh_2 = {nn: neigh.intersection([n for n in nx.neighbors(g, nn)]) for nn in neigh}
            for node_neigh in neigh_2:
                if len(neigh_2[node_neigh]) >= 1:
                    for node_neigh_neigh in neigh_2[node_neigh]:
                        constraint_triangle[cnt_tour] = tsp_model.add_constr(
                            x_var[(node, node_neigh)]+x_var[(node_neigh, node_neigh_neigh)]
                            + x_var[(node_neigh_neigh, node)] <= 2
                        )
    constraint_flow_in = {}
    constraint_flow_out = {}
    start_to_i, end_to_i = compute_start_end_flows_info(start_indexes, end_indexes)
    # Each vehicle leaves its start node exactly once...
    for s in start_to_i:
        for vehicle in start_to_i[s]["vehicle"]:
            constraint_flow_out["start_v_"+str(vehicle)] = \
                tsp_model.add_constr(xsum([x_var[e]
                                           for e in edges_out_customers[s]
                                           if e[0][0] == vehicle]) == 1,
                                     name="start_v_" + str(vehicle))
    # ...and arrives at its end node exactly once.
    for s in end_to_i:
        for vehicle in end_to_i[s]["vehicle"]:
            constraint_flow_in["end_v_"+str(vehicle)] = \
                tsp_model.add_constr(xsum([x_var[e]
                                           for e in edges_in_customers[s]
                                           if e[0][0] == vehicle]) == 1,
                                     name="end_v_" + str(vehicle))
    # Every other customer is visited exactly once (one incoming edge).
    for customer in edges_in_customers:
        if customer in end_to_i or customer in start_to_i:
            # Already dealt with by the previous constraints
            continue
        else:
            constraint_flow_in[customer] = \
                tsp_model.add_constr(xsum([x_var[e]
                                           for e in edges_in_customers[customer]]) == 1,
                                     name="in_"+str(customer))
    # Flow conservation per (vehicle, node): in-degree equals out-degree,
    # except at that vehicle's start/end nodes.
    c_flow = {}
    for n in edges_in_merged_graph:
        if start_indexes[n[0]] == end_indexes[n[0]] \
                or n[1] not in [start_indexes[n[0]], end_indexes[n[0]]]:
            c_flow[n] = tsp_model.add_constr(xsum([x_var[e] for e in edges_in_merged_graph[n]]
                                                  + [-x_var[e] for e in edges_out_merged_graph[n]]) == 0,
                                             name="flow_"+str(n))
    # Per-vehicle capacity: total demand on a vehicle's edges is bounded.
    for v in range(vehicle_count):
        tsp_model.add_constr(xsum([g[e[0]][e[1]]["demand"]*x_var[e]
                                   for e in edges if e[0][0] == v])
                             <= vehicle_capacity[v],
                             name="capa_"+str(v))
    return tsp_model, x_var, constraint_flow_in, constraint_flow_out, constraint_on_edge
def update_graph(g, edges,
                 edges_in_customers,
                 edges_out_customers,
                 edges_in_merged_graph,
                 edges_out_merged_graph,
                 missing_edge,
                 customers):
    """Insert the missing (vehicle, customer) edges into the pruned VRP
    graph, in both directions, keeping every adjacency index in sync.

    All containers are mutated in place and also returned, so callers can
    either rely on the side effects or rebind the results.
    """
    def _register(u, v, w):
        # One directed edge u -> v plus all of its bookkeeping entries.
        # The edge carries the travel weight and the demand of its head.
        g.add_edge(u, v, weight=w, demand=customers[v[1]].demand)
        edges_in_merged_graph[v].add((u, v))
        edges_out_merged_graph[u].add((u, v))
        edges_in_customers[v[1]].add((u, v))
        edges_out_customers[u[1]].add((u, v))
        edges.add((u, v))

    for edge in missing_edge:
        # Both directions share the same symmetric travel weight.
        w = length(customers[edge[0][1]], customers[edge[1][1]])
        _register(edge[0], edge[1], w)
        _register(edge[1], edge[0], w)
    return g, edges, edges_in_customers, edges_out_customers, edges_in_merged_graph, edges_out_merged_graph
def build_warm_edges_and_update_graph(vrp_problem: VrpProblem,
                                      vrp_solution: VrpSolution,
                                      graph: nx.DiGraph,
                                      edges: set,
                                      edges_in_merged_graph,
                                      edges_out_merged_graph,
                                      edges_in_customers,
                                      edges_out_customers):
    """Derive warm-start edges from *vrp_solution* and ensure they all exist
    in the pruned graph, updating every adjacency index in place.

    Returns (edges_warm, edges_warm_set): a per-vehicle list of edge lists
    and the union of all those edges as a set.
    """
    vehicle_paths = deepcopy(vrp_solution.list_paths)
    edges_warm = []
    edges_warm_set = set()
    for i in range(len(vehicle_paths)):
        # Close each vehicle path with its start and end depot indexes.
        vehicle_paths[i] = [vrp_problem.start_indexes[i]]\
            + vehicle_paths[i]+[vrp_problem.end_indexes[i]]
        # Edge keys are ((vehicle, node), (vehicle, node)) pairs built from
        # consecutive nodes of the closed path.
        edges_warm += [[((i, v1), (i, v2)) for v1, v2 in zip(vehicle_paths[i][:-1],
                                                             vehicle_paths[i][1:])]]
        edges_warm_set.update(set(edges_warm[i]))
        # Warm edges absent from the pruned graph must be added back, in
        # both directions (mirrors update_graph(), but weights come from
        # vrp_problem.evaluate_function_indexes instead of length()).
        missing_edge = [e for e in set(edges_warm[i]) if e not in edges]
        for edge in missing_edge:
            graph.add_edge(edge[0], edge[1],
                           weight=vrp_problem.evaluate_function_indexes(edge[0][1],
                                                                        edge[1][1]),
                           demand=vrp_problem.customers[edge[1][1]].demand)
            graph.add_edge(edge[1], edge[0],
                           weight=vrp_problem.evaluate_function_indexes(edge[1][1],
                                                                        edge[0][1]),
                           demand=vrp_problem.customers[edge[0][1]].demand)
            edges_in_merged_graph[edge[1]].add((edge[0], edge[1]))
            edges_out_merged_graph[edge[0]].add((edge[0], edge[1]))
            edges_in_customers[edge[1][1]].add((edge[0], edge[1]))
            edges_out_customers[edge[0][1]].add((edge[0], edge[1]))
            edges_in_merged_graph[edge[0]].add((edge[1], edge[0]))
            edges_out_merged_graph[edge[1]].add((edge[1], edge[0]))
            edges_in_customers[edge[0][1]].add((edge[1], edge[0]))
            edges_out_customers[edge[1][1]].add((edge[1], edge[0]))
            edges.add((edge[0], edge[1]))
            edges.add((edge[1], edge[0]))
    return edges_warm, edges_warm_set
class VRPIterativeLP(SolverDO):
def __init__(self, problem: VrpProblem, params_objective_function: ParamsObjectiveFunction):
self.problem = problem
self.model = None
self.x_var = None
self.constraint_on_edge = None
self.aggreg_sol, self.aggreg_dict, self.params_objective_function = \
build_aggreg_function_and_params_objective(problem=self.problem,
params_objective_function=params_objective_function)
def init_model(self, **kwargs):
g, g_empty, edges_in_customers, edges_out_customers, \
edges_in_merged_graph, edges_out_merged_graph = \
build_graph_pruned_vrp(self.problem)
initial_solution = kwargs.get("initial_solution", None)
if initial_solution is None:
# solver = VrpORToolsSolver(self.problem)
# solver.init_model()
# solution, fit = solver.solve()
solution = self.problem.get_dummy_solution()
else:
vehicle_tours_b = initial_solution
solution = VrpSolution(problem=self.problem, list_start_index=self.problem.start_indexes,
list_end_index=self.problem.end_indexes,
list_paths=vehicle_tours_b,
length=None,
lengths=None,
capacities=None)
edges = set(g.edges())
edges_warm, edges_warm_set = build_warm_edges_and_update_graph(vrp_problem=self.problem,
vrp_solution=solution,
graph=g,
edges=edges,
edges_in_merged_graph=edges_in_merged_graph,
edges_out_merged_graph=edges_out_merged_graph,
edges_in_customers=edges_in_customers,
edges_out_customers=edges_out_customers)
print(edges_warm, edges_warm_set)
do_lns = kwargs.get("do_lns", False)
fraction = kwargs.get("fraction_lns", 0.9)
solver_name = kwargs.get("solver_name", CBC)
tsp_model, x_var, constraint_flow_in, constraint_flow_out, constraint_on_edge = \
init_model_lp(g=g,
edges=edges,
edges_in_customers=edges_in_customers,
edges_out_customers=edges_out_customers,
edges_in_merged_graph=edges_in_merged_graph,
edges_out_merged_graph=edges_out_merged_graph,
edges_warm_set=edges_warm_set,
start_indexes=self.problem.start_indexes,
end_indexes=self.problem.end_indexes,
do_lns=do_lns,
fraction=fraction,
vehicle_count=self.problem.vehicle_count,
vehicle_capacity=self.problem.vehicle_capacities,
solver_name=solver_name)
self.model = tsp_model
self.x_var = x_var
self.constraint_on_edge = constraint_on_edge
self.graph = g
self.graph_infos = {"edges": edges,
"edges_in_customers": edges_in_customers,
"edges_out_customers": edges_out_customers,
"edges_in_merged_graph": edges_in_merged_graph,
"edges_out_merged_graph": edges_out_merged_graph,
"edges_warm_set": edges_warm_set}
def solve(self, **kwargs):
solver_name = kwargs.get("solver_name", CBC)
do_lns = kwargs.get("do_lns", False)
fraction = kwargs.get("fraction_lns", 0.9)
nb_iteration_max = kwargs.get("nb_iteration_max", 20)
if self.model is None:
kwargs["solver_name"] = solver_name
kwargs["do_lns"] = do_lns
kwargs["fraction_lns"] = fraction
self.init_model(**kwargs)
print("optimizing...")
limit_time_s = kwargs.get("limit_time_s", 10)
self.model.optimize(max_seconds=limit_time_s)
objective = self.model.objective_value
# "C5t0ynWADsH8TEiH"
# Query number of multiple objectives, and number of solutions
finished = False
solutions = []
cost = []
nb_components = []
iteration = 0
rebuilt_solution = []
rebuilt_obj = []
best_solution_rebuilt_index = 0
best_solution_objective_rebuilt = float('inf')
vehicle_count = self.problem.vehicle_count
customers = self.problem.customers
customer_count = self.problem.customer_count
edges_in_customers = self.graph_infos["edges_in_customers"]
edges_out_customers = self.graph_infos["edges_out_customers"]
edges_in_merged_graph = self.graph_infos["edges_in_merged_graph"]
edges_out_merged_graph = self.graph_infos["edges_out_merged_graph"]
edges = self.graph_infos["edges"]
edges_warm_set = self.graph_infos["edges_warm_set"]
g = self.graph
while not finished:
solutions_ll = retreve_solutions(self.model,
self.x_var,
vehicle_count,
g)
solutions += [solutions_ll[0]["x_solution"]]
cost += [objective]
# print(solutions)
x_solution, rebuilt_dict, \
obj, components, components_global, component_all, component_global_all = \
reevaluate_solutions(solutions_ll, vehicle_count, g,
vrp_problem=self.problem)
# for components_per_vehicle in component_all:
# update_model(self.problem,
# self.model,
# self.x_var,
# components_per_vehicle,
# edges_in_customers,
# edges_out_customers)
for comp in component_global_all:
update_model_2(self.problem,
self.model,
self.x_var,
comp,
edges_in_customers,
edges_out_customers)
nb_components += [len(components_global)]
rebuilt_solution += [rebuilt_dict]
rebuilt_obj += [obj]
print('Objective rebuilt : ', rebuilt_obj[-1])
if obj < best_solution_objective_rebuilt:
best_solution_objective_rebuilt = obj
best_solution_rebuilt_index = iteration
iteration += 1
if len(component_global_all[0]) > 1 or True:
edges_to_add = set()
for v in rebuilt_dict:
edges_to_add.update({(e0, e1) for e0, e1 in zip(rebuilt_dict[v][:-1], rebuilt_dict[v][1:])})
# print("len rebuilt : ", len(rebuilt_dict[v]))
print("edges to add , ", edges_to_add)
edges_missing = {e for e in edges_to_add if e not in edges}
print("missing : ", edges_missing)
if len(edges_missing) > 0:
g, edges, edges_in_customers, edges_out_customers, edges_in_merged_graph, edges_out_merged_graph = \
update_graph(g, edges, edges_in_customers,
edges_out_customers,
edges_in_merged_graph,
edges_out_merged_graph,
edges_missing,
customers)
# self.model.reset()
self.model = None
tsp_model, x_var, constraint_flow_in, constraint_flow_out, constraint_on_edge = \
init_model_lp(g=g,
edges=edges,
edges_in_customers=edges_in_customers,
edges_out_customers=edges_out_customers,
edges_in_merged_graph=edges_in_merged_graph,
edges_out_merged_graph=edges_out_merged_graph,
edges_warm_set=edges_warm_set,
start_indexes=self.problem.start_indexes,
end_indexes=self.problem.end_indexes,
do_lns=do_lns,
fraction=fraction,
vehicle_count=self.problem.vehicle_count,
vehicle_capacity=self.problem.vehicle_capacities,
solver_name=solver_name)
self.model = tsp_model
self.x_var = x_var
self.constraint_on_edge = constraint_on_edge
self.graph = g
self.graph_infos = {"edges": edges,
"edges_in_customers": edges_in_customers,
"edges_out_customers": edges_out_customers,
"edges_in_merged_graph": edges_in_merged_graph,
"edges_out_merged_graph": edges_out_merged_graph,
"edges_warm_set": edges_warm_set}
edges_in_customers = self.graph_infos["edges_in_customers"]
edges_out_customers = self.graph_infos["edges_out_customers"]
edges_in_merged_graph = self.graph_infos["edges_in_merged_graph"]
edges_out_merged_graph = self.graph_infos["edges_out_merged_graph"]
edges = self.graph_infos["edges"]
edges_warm_set = self.graph_infos["edges_warm_set"]
for iedge in self.constraint_on_edge:
self.model.remove(self.constraint_on_edge[iedge])
self.model.update()
self.constraint_on_edge = {}
edges_to_constraint = set(self.x_var.keys())
if do_lns:
edges_to_constraint = set(random.sample(list(self.x_var.keys()),
int(fraction * len(self.x_var))))
for iedge in self.constraint_on_edge:
self.model.remove(self.constraint_on_edge[iedge])
self.model.update()
self.constraint_on_edge = {}
edges_to_constraint = set()
vehicle = set(random.sample(range(vehicle_count), min(4, vehicle_count)))
edges_to_constraint.update(set([e for e in edges if e[0][0] not in vehicle]))
# customers_to_constraint = set(random.sample(range(1, customer_count),
# int(fraction * customer_count)))
# edges_to_constraint.update(set([edge for edge in edges
# if (edge[0][1] in customers_to_constraint
# or edge[1][1] in customers_to_constraint) ]))
print(len(edges_to_constraint), " edges constraint over ", len(edges))
# print(rebuilt[0], rebuilt[-1])
# print("len set rebuilt (debug) ", len(set(rebuilt_dict[v])))
iedge = 0
x_var = self.x_var
start = []
if all((e in edges) for e in edges_to_add):
# print("setting default value")
for e in x_var:
val = 0
if e in edges_to_add:
start += [(x_var[e], 1)]
val = 1
else:
start += [(x_var[e], 0)]
if e in edges_to_constraint:
if do_lns:
self.constraint_on_edge[iedge] = self.model.add_constr(x_var[e] == val,
name=str((e, iedge)))
iedge += 1
self.model.update()
else:
pass
# print([e for e in edges_to_add if e not in edges])
self.model.start = start
self.model.optimize(max_seconds=limit_time_s)
objective = self.model.objective_value
else:
finished = True
finished = finished or iteration >= nb_iteration_max
plot = kwargs.get("plot", True)
if plot:
fig, ax = plt.subplots(2)
for i in range(len(solutions)):
ll = []
for v in solutions[i]:
| |
networks: An array of one or more networks to attach to the
instance. The network object structure is documented below. Changing this
creates a new server.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstancePersonalityArgs']]]] personalities: Customize the personality of an instance by
defining one or more files and their contents. The personality structure
is described below.
:param pulumi.Input[str] power_state: Provide the VM state. Only 'active' and 'shutoff'
are supported values. *Note*: If the initial power_state is the shutoff
the VM will be stopped immediately after build and the provisioners like
remote-exec or files are not supported.
:param pulumi.Input[str] region: The region in which to create the server instance. If
omitted, the `region` argument of the provider is used. Changing this
creates a new server.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceSchedulerHintArgs']]]] scheduler_hints: Provide the Nova scheduler with hints on how
the instance should be launched. The available hints are described below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] security_groups: An array of one or more security group names
to associate with the server. Changing this results in adding/removing
security groups from the existing server. *Note*: When attaching the
instance to networks using Ports, place the security groups on the Port
and not the instance. *Note*: Names should be used and not ids, as ids
trigger unnecessary updates.
:param pulumi.Input[bool] stop_before_destroy: Whether to try stop instance gracefully
before destroying it, thus giving chance for guest OS daemons to stop correctly.
If instance doesn't stop within timeout, it will be destroyed anyway.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A set of string tags for the instance. Changing this
updates the existing instance tags.
:param pulumi.Input[str] user_data: The user data to provide when launching the instance.
Changing this creates a new server.
:param pulumi.Input[pulumi.InputType['InstanceVendorOptionsArgs']] vendor_options: Map of additional vendor-specific options.
Supported options are described below.
"""
...
@overload
def __init__(__self__,
             resource_name: str,
             args: Optional[InstanceArgs] = None,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Create an Instance resource with the given unique name, props, and options.

    :param str resource_name: The name of the resource.
    :param InstanceArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    # Dispatch between the two overloads above: split the caller-supplied
    # positional/keyword arguments into an optional typed args object plus
    # resource options, then forward everything to the real initializer.
    parsed_args, parsed_opts = _utilities.get_resource_args_opts(InstanceArgs, pulumi.ResourceOptions, *args, **kwargs)
    if parsed_args is None:
        # Plain keyword-argument form: pass everything through unchanged.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Typed-args form: unpack the InstanceArgs fields as keyword arguments.
        __self__._internal_init(resource_name, parsed_opts, **parsed_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   access_ip_v4: Optional[pulumi.Input[str]] = None,
                   access_ip_v6: Optional[pulumi.Input[str]] = None,
                   admin_pass: Optional[pulumi.Input[str]] = None,
                   availability_zone: Optional[pulumi.Input[str]] = None,
                   availability_zone_hints: Optional[pulumi.Input[str]] = None,
                   block_devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceBlockDeviceArgs']]]]] = None,
                   config_drive: Optional[pulumi.Input[bool]] = None,
                   flavor_id: Optional[pulumi.Input[str]] = None,
                   flavor_name: Optional[pulumi.Input[str]] = None,
                   floating_ip: Optional[pulumi.Input[str]] = None,
                   force_delete: Optional[pulumi.Input[bool]] = None,
                   image_id: Optional[pulumi.Input[str]] = None,
                   image_name: Optional[pulumi.Input[str]] = None,
                   key_pair: Optional[pulumi.Input[str]] = None,
                   metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   network_mode: Optional[pulumi.Input[str]] = None,
                   networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceNetworkArgs']]]]] = None,
                   personalities: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstancePersonalityArgs']]]]] = None,
                   power_state: Optional[pulumi.Input[str]] = None,
                   region: Optional[pulumi.Input[str]] = None,
                   scheduler_hints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceSchedulerHintArgs']]]]] = None,
                   security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   stop_before_destroy: Optional[pulumi.Input[bool]] = None,
                   tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   user_data: Optional[pulumi.Input[str]] = None,
                   vendor_options: Optional[pulumi.Input[pulumi.InputType['InstanceVendorOptionsArgs']]] = None,
                   volumes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceVolumeArgs']]]]] = None,
                   __props__=None):
    """Shared initializer backing both __init__ overloads: validates the
    resource options, copies every input into the props dictionary and
    registers the resource with the Pulumi engine.
    """
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = InstanceArgs.__new__(InstanceArgs)

        __props__.__dict__["access_ip_v4"] = access_ip_v4
        __props__.__dict__["access_ip_v6"] = access_ip_v6
        # BUGFIX: this assignment contained a sanitizer placeholder
        # ("<PASSWORD>") instead of the admin_pass parameter.
        __props__.__dict__["admin_pass"] = admin_pass
        __props__.__dict__["availability_zone"] = availability_zone
        __props__.__dict__["availability_zone_hints"] = availability_zone_hints
        __props__.__dict__["block_devices"] = block_devices
        __props__.__dict__["config_drive"] = config_drive
        __props__.__dict__["flavor_id"] = flavor_id
        __props__.__dict__["flavor_name"] = flavor_name
        # floating_ip is deprecated; warn unless this is a refresh of an
        # already-provisioned resource (opts.urn set).
        if floating_ip is not None and not opts.urn:
            warnings.warn("""Use the openstack_compute_floatingip_associate_v2 resource instead""", DeprecationWarning)
            pulumi.log.warn("""floating_ip is deprecated: Use the openstack_compute_floatingip_associate_v2 resource instead""")
        __props__.__dict__["floating_ip"] = floating_ip
        __props__.__dict__["force_delete"] = force_delete
        __props__.__dict__["image_id"] = image_id
        __props__.__dict__["image_name"] = image_name
        __props__.__dict__["key_pair"] = key_pair
        __props__.__dict__["metadata"] = metadata
        __props__.__dict__["name"] = name
        __props__.__dict__["network_mode"] = network_mode
        __props__.__dict__["networks"] = networks
        __props__.__dict__["personalities"] = personalities
        __props__.__dict__["power_state"] = power_state
        __props__.__dict__["region"] = region
        __props__.__dict__["scheduler_hints"] = scheduler_hints
        __props__.__dict__["security_groups"] = security_groups
        __props__.__dict__["stop_before_destroy"] = stop_before_destroy
        __props__.__dict__["tags"] = tags
        __props__.__dict__["user_data"] = user_data
        __props__.__dict__["vendor_options"] = vendor_options
        # volumes is deprecated; same urn guard as floating_ip above.
        if volumes is not None and not opts.urn:
            warnings.warn("""Use block_device or openstack_compute_volume_attach_v2 instead""", DeprecationWarning)
            pulumi.log.warn("""volumes is deprecated: Use block_device or openstack_compute_volume_attach_v2 instead""")
        __props__.__dict__["volumes"] = volumes
        # output-only properties start unset
        __props__.__dict__["all_metadata"] = None
        __props__.__dict__["all_tags"] = None
    super(Instance, __self__).__init__(
        'openstack:compute/instance:Instance',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
access_ip_v4: Optional[pulumi.Input[str]] = None,
access_ip_v6: Optional[pulumi.Input[str]] = None,
admin_pass: Optional[pulumi.Input[str]] = None,
all_metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
all_tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
availability_zone: Optional[pulumi.Input[str]] = None,
availability_zone_hints: Optional[pulumi.Input[str]] = None,
block_devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceBlockDeviceArgs']]]]] = None,
config_drive: Optional[pulumi.Input[bool]] = None,
flavor_id: Optional[pulumi.Input[str]] = None,
flavor_name: Optional[pulumi.Input[str]] = None,
floating_ip: Optional[pulumi.Input[str]] = None,
force_delete: Optional[pulumi.Input[bool]] = None,
image_id: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None,
key_pair: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
network_mode: Optional[pulumi.Input[str]] = None,
networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceNetworkArgs']]]]] = None,
personalities: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstancePersonalityArgs']]]]] = None,
power_state: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
scheduler_hints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceSchedulerHintArgs']]]]] = None,
security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
stop_before_destroy: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
user_data: Optional[pulumi.Input[str]] = None,
vendor_options: Optional[pulumi.Input[pulumi.InputType['InstanceVendorOptionsArgs']]] = None,
volumes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceVolumeArgs']]]]] = None) -> 'Instance':
"""
Get an existing Instance resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] access_ip_v4: The first detected Fixed IPv4 address.
:param pulumi.Input[str] access_ip_v6: The first detected Fixed IPv6 address.
:param pulumi.Input[str] admin_pass: The administrative password to assign to the server.
Changing this changes the root password on the existing server.
:param pulumi.Input[Sequence[pulumi.Input[str]]] all_tags: The collection of tags assigned on the instance, which have
been explicitly and implicitly added.
:param pulumi.Input[str] availability_zone: The availability zone in which to create
the server. Conflicts with `availability_zone_hints`. Changing this creates
a new server.
:param pulumi.Input[str] availability_zone_hints: The availability zone in which to
create the server. This argument is preferred to `availability_zone`, when
scheduling the server on a
[particular](https://docs.openstack.org/nova/latest/admin/availability-zones.html)
host or node. Conflicts with `availability_zone`. Changing this creates a
new server.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceBlockDeviceArgs']]]] block_devices: Configuration of block devices. The block_device
structure is documented below. Changing this creates a new server.
You can specify multiple block devices which will create an instance with
multiple disks. This configuration is very flexible, so please see the
following [reference](https://docs.openstack.org/nova/latest/user/block-device-mapping.html)
for more information.
:param pulumi.Input[bool] config_drive: Whether to use the config_drive feature to
configure the instance. Changing this creates a new server.
:param pulumi.Input[str] flavor_id: The flavor ID of
the desired flavor for the server. Changing this resizes the existing server.
:param pulumi.Input[str] flavor_name: The name of the
desired flavor for the server. Changing this resizes the existing server.
:param pulumi.Input[bool] force_delete: Whether to force the OpenStack instance to be
forcefully deleted. This is useful for environments that have reclaim / soft
deletion enabled.
:param pulumi.Input[str] image_id: (Optional; Required if `image_name` is empty and not booting
from a volume. Do not specify if booting from a volume.) The image ID of
the desired image for the server. Changing this creates a new server.
:param pulumi.Input[str] image_name: (Optional; Required if `image_id` is empty and not booting
from a volume. Do not specify if booting from a volume.) The name of the
desired image for the server. Changing this creates a new server.
:param pulumi.Input[str] key_pair: The name of a key pair to put on the server. The key
pair must already be created and associated with the tenant's account.
Changing this creates a new server.
:param pulumi.Input[Mapping[str, Any]] metadata: Metadata key/value pairs to make available from
within the instance. Changing this updates the existing server metadata.
:param pulumi.Input[str] name: The human-readable
name of the network. Changing this creates a new server.
:param pulumi.Input[str] network_mode: Special string for `network` option to create
the server. `network_mode` can be `"auto"` or `"none"`.
Please see the following [reference](https://docs.openstack.org/api-ref/compute/?expanded=create-server-detail#id11) | |
<gh_stars>1000+
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from sg2im.utils import timeit
"""
Functions for performing differentiable bilinear cropping of images, for use in
the object discriminator
"""
def crop_bbox_batch(feats, bbox, bbox_to_feats, HH, WW=None, backend='cudnn'):
    """
    Crop a batch of bounding boxes out of a batch of feature maps.

    Inputs:
    - feats: FloatTensor of shape (N, C, H, W)
    - bbox: FloatTensor of shape (B, 4) giving bounding box coordinates
    - bbox_to_feats: LongTensor of shape (B,) mapping boxes to feature maps;
      each element is in the range [0, N) and bbox_to_feats[b] = i means that
      bbox[b] will be cropped from feats[i].
    - HH, WW: Size of the output crops (WW defaults to HH)
    - backend: 'cudnn' dispatches to the grid_sample-based implementation

    Returns:
    - crops: FloatTensor of shape (B, C, HH, WW) where crops[i] uses bbox[i] to
      crop from feats[bbox_to_feats[i]].
    """
    if backend == 'cudnn':
        return crop_bbox_batch_cudnn(feats, bbox, bbox_to_feats, HH, WW)
    N, C, H, W = feats.size()
    B = bbox.size(0)
    if WW is None:
        WW = HH
    dtype, device = feats.dtype, feats.device
    crops = torch.zeros(B, C, HH, WW, dtype=dtype, device=device)
    for i in range(N):
        idx = (bbox_to_feats.data == i).nonzero()
        # BUGFIX: on modern PyTorch, nonzero() of an all-False mask returns an
        # empty 2-D tensor (dim() == 2, numel() == 0), so the old
        # `idx.dim() == 0` guard never fired. numel() == 0 is correct on both
        # old and new PyTorch.
        if idx.numel() == 0:
            continue
        idx = idx.view(-1)
        n = idx.size(0)
        # replicate feature map i once per box assigned to it
        cur_feats = feats[i].view(1, C, H, W).expand(n, C, H, W).contiguous()
        cur_bbox = bbox[idx]
        cur_crops = crop_bbox(cur_feats, cur_bbox, HH, WW)
        crops[idx] = cur_crops
    return crops
def _invperm(p):
N = p.size(0)
eye = torch.arange(0, N).type_as(p)
pp = (eye[:, None] == p).nonzero()[:, 1]
return pp
def crop_bbox_batch_cudnn(feats, bbox, bbox_to_feats, HH, WW=None):
    """
    grid_sample-based implementation of crop_bbox_batch: group boxes by the
    feature map they crop from, run one batched crop_bbox call, then restore
    the original box order.
    """
    N, C, H, W = feats.size()
    B = bbox.size(0)
    if WW is None:
        WW = HH

    feats_flat, bbox_flat, all_idx = [], [], []
    for i in range(N):
        idx = (bbox_to_feats.data == i).nonzero()
        # BUGFIX: nonzero() of an all-False mask is an empty 2-D tensor on
        # modern PyTorch, so `idx.dim() == 0` never detected the empty case;
        # numel() == 0 works on both old and new PyTorch.
        if idx.numel() == 0:
            continue
        idx = idx.view(-1)
        n = idx.size(0)
        cur_feats = feats[i].view(1, C, H, W).expand(n, C, H, W).contiguous()
        cur_bbox = bbox[idx]
        feats_flat.append(cur_feats)
        bbox_flat.append(cur_bbox)
        all_idx.append(idx)

    feats_flat = torch.cat(feats_flat, dim=0)
    bbox_flat = torch.cat(bbox_flat, dim=0)
    crops = crop_bbox(feats_flat, bbox_flat, HH, WW, backend='cudnn')

    # If the crops were sequential (all_idx is identity permutation) then we can
    # simply return them; otherwise we need to permute crops by the inverse
    # permutation from all_idx.
    all_idx = torch.cat(all_idx, dim=0)
    eye = torch.arange(0, B).type_as(all_idx)
    if (all_idx == eye).all():
        return crops
    return crops[_invperm(all_idx)]
def crop_bbox(feats, bbox, HH, WW=None, backend='cudnn'):
    """
    Take differentiable crops of feats specified by bbox.

    Inputs:
    - feats: Tensor of shape (N, C, H, W)
    - bbox: Bounding box coordinates of shape (N, 4) in the format
      [x0, y0, x1, y1] in the [0, 1] coordinate space.
    - HH, WW: Size of the output crops (WW defaults to HH).
    - backend: 'cudnn' uses F.grid_sample; 'jj' uses the hand-written
      bilinear_sample below.

    Returns:
    - crops: Tensor of shape (N, C, HH, WW) where crops[i] is the portion of
      feats[i] specified by bbox[i], reshaped to (HH, WW) using bilinear sampling.
    """
    N = feats.size(0)
    assert bbox.size(0) == N
    assert bbox.size(1) == 4
    if WW is None:
        WW = HH
    if backend == 'cudnn':
        # Change box from [0, 1] to the [-1, 1] coordinate system that
        # F.grid_sample expects.
        bbox = 2 * bbox - 1
    x0, y0 = bbox[:, 0], bbox[:, 1]
    x1, y1 = bbox[:, 2], bbox[:, 3]
    X = tensor_linspace(x0, x1, steps=WW).view(N, 1, WW).expand(N, HH, WW)
    Y = tensor_linspace(y0, y1, steps=HH).view(N, HH, 1).expand(N, HH, WW)
    if backend == 'jj':
        return bilinear_sample(feats, X, Y)
    elif backend == 'cudnn':
        grid = torch.stack([X, Y], dim=3)
        return F.grid_sample(feats, grid)
    else:
        # BUGFIX: an unknown backend previously fell off the end of the
        # function and silently returned None.
        raise ValueError('Unrecognized backend "%s"' % backend)
def uncrop_bbox(feats, bbox, H, W=None, fill_value=0):
    """
    Inverse operation to crop_bbox; construct output images where the feature maps
    from feats have been reshaped and placed into the positions specified by bbox.

    Inputs:
    - feats: Tensor of shape (N, C, HH, WW)
    - bbox: Bounding box coordinates of shape (N, 4) in the format
      [x0, y0, x1, y1] in the [0, 1] coordinate space.
    - H, W: Size of output (W defaults to H).
    - fill_value: Portions of the output image that are outside the bounding box
      will be filled with this value.

    Returns:
    - out: Tensor of shape (N, C, H, W) where the portion of out[i] given by
      bbox[i] contains feats[i], reshaped using bilinear sampling.
    """
    N, C = feats.size(0), feats.size(1)
    assert bbox.size(0) == N
    assert bbox.size(1) == 4
    # BUGFIX: was `H = W`, which set H to None whenever W was omitted and
    # crashed the expand() calls below; W is the value being defaulted.
    if W is None:
        W = H
    x0, y0 = bbox[:, 0], bbox[:, 1]
    x1, y1 = bbox[:, 2], bbox[:, 3]
    ww = x1 - x0
    hh = y1 - y0

    x0 = x0.contiguous().view(N, 1).expand(N, H)
    x1 = x1.contiguous().view(N, 1).expand(N, H)
    ww = ww.view(N, 1).expand(N, H)

    y0 = y0.contiguous().view(N, 1).expand(N, W)
    y1 = y1.contiguous().view(N, 1).expand(N, W)
    hh = hh.view(N, 1).expand(N, W)

    X = torch.linspace(0, 1, steps=W).view(1, W).expand(N, W).to(feats)
    Y = torch.linspace(0, 1, steps=H).view(1, H).expand(N, H).to(feats)

    # Map output pixel coordinates back into the box's local [0, 1] space;
    # values outside [0, 1] fall outside the box and get fill_value below.
    X = (X - x0) / ww
    Y = (Y - y0) / hh

    # For ByteTensors, (x + y).clamp(max=1) gives logical_or
    X_out_mask = ((X < 0) + (X > 1)).view(N, 1, W).expand(N, H, W)
    Y_out_mask = ((Y < 0) + (Y > 1)).view(N, H, 1).expand(N, H, W)
    out_mask = (X_out_mask + Y_out_mask).clamp(max=1)
    out_mask = out_mask.view(N, 1, H, W).expand(N, C, H, W)

    X = X.view(N, 1, W).expand(N, H, W)
    Y = Y.view(N, H, 1).expand(N, H, W)

    out = bilinear_sample(feats, X, Y)
    out[out_mask] = fill_value
    return out
def bilinear_sample(feats, X, Y):
"""
Perform bilinear sampling on the features in feats using the sampling grid
given by X and Y.
Inputs:
- feats: Tensor holding input feature map, of shape (N, C, H, W)
- X, Y: Tensors holding x and y coordinates of the sampling
grids; both have shape shape (N, HH, WW) and have elements in the range [0, 1].
Returns:
- out: Tensor of shape (B, C, HH, WW) where out[i] is computed
by sampling from feats[idx[i]] using the sampling grid (X[i], Y[i]).
"""
N, C, H, W = feats.size()
assert X.size() == Y.size()
assert X.size(0) == N
_, HH, WW = X.size()
X = X.mul(W)
Y = Y.mul(H)
# Get the x and y coordinates for the four samples
x0 = X.floor().clamp(min=0, max=W-1)
x1 = (x0 + 1).clamp(min=0, max=W-1)
y0 = Y.floor().clamp(min=0, max=H-1)
y1 = (y0 + 1).clamp(min=0, max=H-1)
# In numpy we could do something like feats[i, :, y0, x0] to pull out
# the elements of feats at coordinates y0 and x0, but PyTorch doesn't
# yet support this style of indexing. Instead we have to use the gather
# method, which only allows us to index along one dimension at a time;
# therefore we will collapse the features (BB, C, H, W) into (BB, C, H * W)
# and index along the last dimension. Below we generate linear indices into
# the collapsed last dimension for each of the four combinations we need.
y0x0_idx = (W * y0 + x0).view(N, 1, HH * WW).expand(N, C, HH * WW)
y1x0_idx = (W * y1 + x0).view(N, 1, HH * WW).expand(N, C, HH * WW)
y0x1_idx = (W * y0 + x1).view(N, 1, HH * WW).expand(N, C, HH * WW)
y1x1_idx = (W * y1 + x1).view(N, 1, HH * WW).expand(N, C, HH * WW)
# Actually use gather to pull out the values from feats corresponding
# to our four samples, then reshape them to (BB, C, HH, WW)
feats_flat = feats.view(N, C, H * W)
v1 = feats_flat.gather(2, y0x0_idx.long()).view(N, C, HH, WW)
v2 = feats_flat.gather(2, y1x0_idx.long()).view(N, C, HH, WW)
v3 = feats_flat.gather(2, y0x1_idx.long()).view(N, C, HH, WW)
v4 = feats_flat.gather(2, y1x1_idx.long()).view(N, C, HH, WW)
# Compute the weights for the four samples
w1 = ((x1 - X) * (y1 - Y)).view(N, 1, HH, WW).expand(N, C, HH, WW)
w2 = ((x1 - X) * (Y - y0)).view(N, 1, HH, WW).expand(N, C, HH, WW)
w3 = ((X - | |
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
"""
This module transforms data-parallel operations such as Numpy calls into
'Parfor' nodes, which are nested loops that can be parallelized.
It also implements optimizations such as loop fusion, and extends the rest of
compiler analysis and optimizations to support Parfors.
This is similar to ParallelAccelerator package in Julia:
https://github.com/IntelLabs/ParallelAccelerator.jl
'Parallelizing Julia with a Non-invasive DSL', <NAME> et al., ECOOP'17.
"""
from __future__ import print_function, division, absolute_import
import types as pytypes # avoid confusion with numba.types
import sys
from numba import ir, ir_utils, types, typing, rewrites, config, analysis
from numba import array_analysis, postproc, typeinfer
from numba.ir_utils import (
mk_unique_var,
next_label,
mk_alloc,
get_np_ufunc_typ,
mk_range_block,
mk_loop_header,
find_op_typ,
get_name_var_table,
replace_vars,
visit_vars,
visit_vars_inner,
remove_dels,
remove_dead,
copy_propagate,
get_block_copies,
apply_copy_propagate,
dprint_func_ir,
find_topo_order,
get_stmt_writes,
rename_labels,
get_call_table,
simplify_CFG)
from numba.analysis import (compute_use_defs, compute_live_map,
compute_dead_maps, compute_cfg_from_blocks)
from numba.controlflow import CFGraph
from numba.typing import npydecl, signature
from numba.types.functions import Function
import copy
import numpy
# circular dependency: import numba.npyufunc.dufunc.DUFunc
sequential_parfor_lowering = False
class prange(object):
    """Marker for explicitly parallel loops. At the Python level it simply
    behaves like ``range``; the parfor pass below recognizes calls to it
    (see ``_is_prange``) and converts the loop into a Parfor node."""

    def __new__(cls, *args):
        return range(*args)
# Supported reduction operations, mapped to
# (in-place operator, binary operator, identity/initial value).
_reduction_ops = {
    'sum': ('+=', '+', 0),
    'dot': ('+=', '+', 0),
    'prod': ('*=', '*', 1),
}
class LoopNest(object):
    '''The LoopNest class holds information of a single loop including
    the index variable (of a non-negative integer value), and the
    range variable, e.g. range(r) is 0 to r-1 with step size 1.
    '''

    def __init__(self, index_variable, start, stop, step, correlation=-1):
        # loop index variable (an ir.Var)
        self.index_variable = index_variable
        # loop bounds and step, with range(start, stop, step) semantics
        self.start = start
        self.stop = stop
        self.step = step
        # shape-equivalence class from array analysis; -1 means unknown
        self.correlation = correlation

    def __repr__(self):
        return ("LoopNest(index_variable={}, range={},{},{} correlation={})".
                format(self.index_variable, self.start, self.stop, self.step,
                       self.correlation))
class Parfor(ir.Expr, ir.Stmt):
    """IR node for a parallelizable loop nest ("parfor"): a list of LoopNest
    dimensions, an init block executed once before the loop, and a dict of
    body blocks. Subclasses both ir.Expr and ir.Stmt so it can appear in
    either position in the Numba IR.
    """

    # class-level counter that assigns each Parfor a unique id for debugging
    id_counter = 0

    def __init__(
            self,
            loop_nests,
            init_block,
            loop_body,
            loc,
            array_analysis,
            index_var):
        super(Parfor, self).__init__(
            op='parfor',
            loc=loc
        )

        self.id = type(self).id_counter
        type(self).id_counter += 1
        #self.input_info = input_info
        #self.output_info = output_info
        self.loop_nests = loop_nests
        self.init_block = init_block
        self.loop_body = loop_body
        self.array_analysis = array_analysis
        self.index_var = index_var
        self.params = None  # filled right before parallel lowering

    def __repr__(self):
        return repr(self.loop_nests) + \
            repr(self.loop_body) + repr(self.index_var)

    def list_vars(self):
        """list variables used (read/written) in this parfor by
        traversing the body and combining block uses.
        """
        all_uses = []
        for l, b in self.loop_body.items():
            for stmt in b.body:
                all_uses += stmt.list_vars()

        for loop in self.loop_nests:
            all_uses.append(loop.index_variable)
            if isinstance(loop.start, ir.Var):
                all_uses.append(loop.start)
            if isinstance(loop.stop, ir.Var):
                all_uses.append(loop.stop)
            if isinstance(loop.step, ir.Var):
                all_uses.append(loop.step)

        for stmt in self.init_block.body:
            all_uses += stmt.list_vars()

        return all_uses

    def dump(self, file=None):
        """Pretty-print the parfor (loop nests, init block, body) to `file`
        (defaults to stdout)."""
        file = file or sys.stdout
        print(("begin parfor {}".format(self.id)).center(20, '-'), file=file)
        # BUGFIX: this line previously printed to stdout unconditionally,
        # ignoring the `file` argument used by every other print here.
        print("index_var = ", self.index_var, file=file)
        for loopnest in self.loop_nests:
            print(loopnest, file=file)
        print("init block:", file=file)
        # BUGFIX: forward `file` to the block dump, consistent with the
        # `block.dump(file)` call for the loop-body blocks below.
        self.init_block.dump(file)
        for offset, block in sorted(self.loop_body.items()):
            print('label %s:' % (offset,), file=file)
            block.dump(file)
        print(("end parfor {}".format(self.id)).center(20, '-'), file=file)
class ParforPass(object):
"""ParforPass class is responsible for converting Numpy
calls in Numba intermediate representation to Parfors, which
will lower into either sequential or parallel loops during lowering
stage.
"""
def __init__(self, func_ir, typemap, calltypes, return_type):
    """Capture the IR and typing state the pass operates on.

    :param func_ir: Numba IR of the function being compiled
    :param typemap: mapping of variable names to Numba types
    :param calltypes: mapping of call sites to their signatures
    :param return_type: the function's return type
    """
    self.func_ir = func_ir
    self.typemap = typemap
    self.calltypes = calltypes
    self.return_type = return_type
    # array analysis provides the shape equivalence classes used to decide
    # which array operations can become parfors
    self.array_analysis = array_analysis.ArrayAnalysis(func_ir, typemap,
                                                       calltypes)
    # make sure labels generated by this pass don't collide with existing ones
    ir_utils._max_label = max(func_ir.blocks.keys())
def _has_known_shape(self, var):
    """Return True if the given variable has fully known shape in array_analysis.
    """
    if not isinstance(var, ir.Var):
        return False
    shape_classes = self.array_analysis.array_shape_classes
    if var.name not in shape_classes:
        return False
    dims = shape_classes[var.name]
    # 0-dimensional arrays (shape []) and any unknown dimension (-1)
    # disqualify the variable from parallelization
    return len(dims) > 0 and -1 not in dims
def run(self):
    """run parfor conversion pass: replace Numpy calls
    with Parfors when possible and optimize the IR.

    Pipeline (order matters): array analysis -> prange/numpy conversion ->
    copy propagation -> dead-code elimination -> fusion -> lowering prep.
    """
    # remove Del statements for easier optimization
    remove_dels(self.func_ir.blocks)
    self.array_analysis.run()
    simplify_CFG(self.func_ir.blocks)
    # convert explicit prange() loops first, then implicit numpy operations
    self._convert_prange(self.func_ir.blocks)
    self._convert_numpy(self.func_ir.blocks)
    dprint_func_ir(self.func_ir, "after parfor pass")

    # get copies in to blocks and out from blocks
    in_cps, out_cps = copy_propagate(self.func_ir.blocks, self.typemap)
    # table mapping variable names to ir.Var objects to help replacement
    name_var_table = get_name_var_table(self.func_ir.blocks)
    apply_copy_propagate(
        self.func_ir.blocks,
        in_cps,
        name_var_table,
        array_analysis.copy_propagate_update_analysis,
        self.array_analysis,
        self.typemap,
        self.calltypes)
    # remove dead code to enable fusion
    remove_dead(self.func_ir.blocks, self.func_ir.arg_names)
    #dprint_func_ir(self.func_ir, "after remove_dead")
    # reorder statements to maximize fusion
    maximize_fusion(self.func_ir.blocks)
    fuse_parfors(self.func_ir.blocks)
    # remove dead code after fusion to remove extra arrays and variables
    remove_dead(self.func_ir.blocks, self.func_ir.arg_names)
    #dprint_func_ir(self.func_ir, "after second remove_dead")
    # push function call variables inside parfors so gufunc function
    # wouldn't need function variables as argument
    push_call_vars(self.func_ir.blocks, {}, {})
    remove_dead(self.func_ir.blocks, self.func_ir.arg_names)
    # after optimization, some size variables are not available anymore
    remove_dead_class_sizes(self.func_ir.blocks, self.array_analysis)
    dprint_func_ir(self.func_ir, "after optimization")
    if config.DEBUG_ARRAY_OPT == 1:
        print("variable types: ", sorted(self.typemap.items()))
        print("call types: ", self.calltypes)
    # run post processor again to generate Del nodes
    post_proc = postproc.PostProcessor(self.func_ir)
    post_proc.run()
    if self.func_ir.is_generator:
        fix_generator_types(self.func_ir.generator_info, self.return_type,
                            self.typemap)
    if sequential_parfor_lowering:
        lower_parfor_sequential(self.func_ir, self.typemap, self.calltypes)
    else:
        # prepare for parallel lowering
        # add parfor params to parfors here since lowering is destructive
        # changing the IR after this is not allowed
        get_parfor_params(self.func_ir.blocks)
    return
def _convert_numpy(self, blocks):
    """Walk blocks in topological order and replace supported Numpy
    operations (library calls, array expressions, reductions) assigned to
    arrays of known shape with Parfor nodes, in place.
    """
    topo_order = find_topo_order(blocks)
    # variables available in the program so far (used for finding map
    # functions in array_expr lowering)
    avail_vars = []
    for label in topo_order:
        block = blocks[label]
        new_body = []
        for instr in block.body:
            if isinstance(instr, ir.Assign):
                expr = instr.value
                lhs = instr.target
                # only translate C order since we can't allocate F
                if self._has_known_shape(
                        lhs) and self._is_C_order(lhs.name):
                    if self._is_supported_npycall(expr):
                        # e.g. np.zeros/ones/... call becomes a parfor
                        instr = self._numpy_to_parfor(lhs, expr)
                    elif isinstance(expr, ir.Expr) and expr.op == 'arrayexpr':
                        instr = self._arrayexpr_to_parfor(
                            lhs, expr, avail_vars)
                    elif self._is_supported_npyreduction(expr):
                        instr = self._reduction_to_parfor(lhs, expr)
                avail_vars.append(lhs.name)
            new_body.append(instr)
        block.body = new_body
def _convert_prange(self, blocks):
    """Find single-entry/single-exit loops driven by a prange() call and
    rewrite each one into a Parfor node, removing the original loop blocks.
    Restarts itself after each conversion since `blocks` is mutated.
    """
    call_table, _ = get_call_table(blocks)
    cfg = compute_cfg_from_blocks(blocks)
    for loop in cfg.loops().values():
        # only simple loops with one entry and one exit can be converted
        if len(loop.entries) != 1 or len(loop.exits) != 1:
            continue
        entry = list(loop.entries)[0]
        for inst in blocks[entry].body:
            # if prange call
            if (isinstance(inst, ir.Assign) and isinstance(inst.value, ir.Expr)
                    and inst.value.op == 'call'
                    and self._is_prange(inst.value.func.name, call_table)):
                body_labels = list(loop.body - {loop.header})
                args = inst.value.args
                # find loop index variable (pair_first in header block)
                for stmt in blocks[loop.header].body:
                    if (isinstance(stmt, ir.Assign)
                            and isinstance(stmt.value, ir.Expr)
                            and stmt.value.op == 'pair_first'):
                        loop_index = stmt.target.name
                        break
                # loop_index may be assigned to other vars
                # get header copies to find all of them
                cps, _ = get_block_copies({0: blocks[loop.header]},
                                          self.typemap)
                cps = cps[0]
                loop_index_vars = set(t for t, v in cps if v == loop_index)
                loop_index_vars.add(loop_index)

                # decode prange(n) / prange(start, n) / prange(start, n, step)
                start = 0
                step = 1
                size_var = args[0]
                if len(args) == 2:
                    start = args[0]
                    size_var = args[1]
                if len(args) == 3:
                    start = args[0]
                    size_var = args[1]
                    try:
                        step = self.func_ir.get_definition(args[2])
                    except KeyError:
                        raise NotImplementedError(
                            "Only known step size is supported for prange")
                    if not isinstance(step, ir.Const):
                        raise NotImplementedError(
                            "Only constant step size is supported for prange")
                    step = step.value
                    if step != 1:
                        raise NotImplementedError(
                            "Only constant step size of 1 is supported for prange")
                # set l=l for dead remove
                inst.value = inst.target
                scope = blocks[entry].scope
                loc = inst.loc
                init_block = ir.Block(scope, loc)
                body = {l: blocks[l] for l in body_labels}
                index_var = ir.Var(
                    scope, mk_unique_var("parfor_index"), loc)
                self.typemap[index_var.name] = types.intp
                # rename every alias of the loop index to the parfor index
                index_var_map = {v: index_var for v in loop_index_vars}
                replace_vars(body, index_var_map)
                # TODO: find correlation
                parfor_loop = LoopNest(
                    index_var, start, size_var, step, -1)
                parfor = Parfor([parfor_loop], init_block, body, loc,
                                self.array_analysis, index_var)
                # add parfor to entry block, change jump target to exit
                jump = blocks[entry].body.pop()
                blocks[entry].body.append(parfor)
                jump.target = list(loop.exits)[0]
                blocks[entry].body.append(jump)
                # remove jumps back to header block
                for l in body_labels:
                    last_inst = body[l].body[-1]
                    if isinstance(
                            last_inst,
                            ir.Jump) and last_inst.target == loop.header:
                        body[l].body.pop()
                # remove loop blocks from top level dict
                blocks.pop(loop.header)
                for l in body_labels:
                    blocks.pop(l)
                # run on parfor body (handles nested prange/numpy inside)
                parfor_blocks = wrap_parfor_blocks(parfor)
                self._convert_prange(parfor_blocks)
                self._convert_numpy(parfor_blocks)
                unwrap_parfor_blocks(parfor, parfor_blocks)
                # run convert again to handle other prange loops
                return self._convert_prange(blocks)
def _is_prange(self, func_var, call_table):
    """Return True if `func_var` resolves to a prange call, whether written
    as a getattr (numba.prange) or as a bare global (prange)."""
    try:
        call = call_table[func_var]
    except KeyError:
        return False
    target = call[0]
    return target == 'prange' or target == prange
def _is_C_order(self, arr_name):
    """Return True if the array variable named `arr_name` is typed with
    C (row-major) layout."""
    arr_typ = self.typemap[arr_name]
    assert isinstance(arr_typ, types.npytypes.Array)
    layout = arr_typ.layout
    return layout == 'C'
def _make_index_var(self, scope, index_vars, body_block):
    """Combine per-dimension parfor index variables into one index value.

    For a 1D parfor the single index variable is returned directly; for
    multi-dimensional parfors a fresh tuple variable is created, a
    build_tuple assignment is appended to `body_block`, and the tuple
    variable is returned. Returns (index_var, index_type) in both cases.
    """
    ndims = len(index_vars)
    if ndims == 1:
        return index_vars[0], types.intp
    loc = body_block.loc
    tuple_typ = types.containers.UniTuple(types.intp, ndims)
    tuple_var = ir.Var(scope, mk_unique_var(
        "$parfor_index_tuple_var"), loc)
    self.typemap[tuple_var.name] = tuple_typ
    tuple_call = ir.Expr.build_tuple(list(index_vars), loc)
    body_block.body.append(ir.Assign(tuple_call, tuple_var, loc))
    return tuple_var, tuple_typ
def _arrayexpr_to_parfor(self, lhs, arrayexpr, avail_vars):
"""generate parfor from arrayexpr node, which is essentially a
map with recursive tree.
"""
scope = lhs.scope
loc = lhs.loc
expr = arrayexpr.expr
arr_typ = self.typemap[lhs.name]
el_typ = arr_typ.dtype
# generate loopnests and size variables from lhs correlations
loopnests = []
size_vars = []
index_vars = []
for | |
'''
From <NAME>
No particular license or rights, you can change it as you feel, just be honest. :)
For Python purists: sorry if this script is not "pythonic".
Significant changes made by <NAME>, August 23, 2020
'''
'''
This script picks up the magnitudes and the spectral type from Simbad website.
*How to use it:
***In variable "path", put the path of the repo where you have the XMLs.
***Run the script
*Structure:
***HTMLparser class to extract information from a webpage.
***Two main functions : magnitude : pick up magnitudes from Simbad
spectralType : pick up spectral type from Simbad, it is currently commented because I don't need to run it at the moment.
***A list generator function : create a file containing the name of the XML files in "path".
*Logs:
***Log_planet.txt has all files for which there was a 404 error. This file is not reset
when the script is rerun. It works for both functions.
*Troubleshooting:
***If Simbad doesn't recognize a name, either search manually or create a list with the
other names for the system (Kepler, 2MASS...) and rename the file with one of those names so the script
can write into it.
*Improvements:
***You could improve this script with multi-name recognition: for a system, if there is a 404 error on the Simbad web page,
the script could pick up another name from the XMLs and try that instead.
This would avoid manual research or renaming the files, recreating the list and rerunning the script.
***There can be a problem with binary systems. Simbad always has only the SP (spectral type) and mag for one star (unknown which)
or for the whole system; if this information exists for each star of a binary system, this script doesn't deal with it.
***Adapt it for other kind of extraction or for other website.
'''
from html.parser import HTMLParser
from urllib.request import urlopen
from urllib.parse import quote_plus
import xml.etree.ElementTree as ET
import re
import os
import glob
import time
def indent(elem, level=0):
    """Pretty-print helper: recursively set .text/.tail of *elem* in place
    so the serialized XML is newline/tab indented (one tab per level)."""
    pad = "\n" + level * "\t"
    if len(elem):
        # Parent opens a new indented line for its first child
        if not (elem.text and elem.text.strip()):
            elem.text = pad + "\t"
        if not (elem.tail and elem.tail.strip()):
            elem.tail = pad
        child = None
        for child in elem:
            indent(child, level + 1)
        # The last child's tail closes back at the parent's indentation level
        if not (child.tail and child.tail.strip()):
            child.tail = pad
    elif level and not (elem.tail and elem.tail.strip()):
        elem.tail = pad
class MyHTMLParser(HTMLParser):
    """HTML parser that scrapes a Simbad object page.

    State machine driven by module-level globals (set/reset by the main loop
    before each request):
      section       -- which part of the page we are currently in
                       ("mag", "identifiers", "spectraltype", "ICRS", ...)
      boolean       -- 1 when `data2` holds a value ready to be stored
      data2         -- accumulator for text collected across data events
      inname        -- 1 while inside an <a> tag in the identifiers section
      dictio_mags / dictio_ident / dictio_distance / dictio_coord /
      dictio_spectral -- results collected for the current star
    """
    def handle_starttag(self, tag, attrs):
        # A new tag means any accumulated value in data2 is complete:
        # flush it into the appropriate result list, then reset.
        global boolean, dictio_mags, data2, dictio_ident, inname
        if tag =="a" and section=="identifiers":
            # Identifier names are the link texts in the identifiers table
            inname = 1
        if boolean == 1 and section == "mag":
            dictio_mags.append(data2)
            boolean = 0
        if boolean == 1 and section == "identifiers":
            if len(data2):
                # Only keep identifiers from catalogues we care about,
                # and skip binary-star designations (containing "**")
                worthyCats = ["HD", "GJ", "Gaia DR2", "NAME", "HIP", "KOI", "Kepler", "KIC", "TYC"]
                for wc in worthyCats:
                    if wc in data2 and not "**" in data2:
                        data2 = data2.replace("NAME","").strip()
                        dictio_ident.append(data2)
            boolean = 0
            inname = 0
            data2 = ""
    def handle_endtag(self, tag):
        # </tt> closes an identifier link text
        global inname
        if tag=="tt":
            inname = 0
        pass
    def handle_data(self, data):
        """Accumulate text and switch `section` based on page landmarks."""
        global data2, boolean, section, inname, dictio_distance, dictio_coord, dictio_spectral
        # Magnitude rows look like e.g. "V 12.34 [0.05]"
        if section=="mag" and re.findall("[A-Z] +\d*\.?\d*? *\[+.+\]", data):
            data2 = data
            data2 = data2.replace("\n", "").replace(" ","")
            boolean = 1
        if section=="identifiers" and inname==1:
            # Identifier text may arrive in several chunks; concatenate
            data2 = data2+data
            data2 = data2.replace("\n", "").replace("\"", "").strip()
            boolean = 1
        # Section landmarks (order matters: checked on every data event)
        if re.findall("Identifiers \(\d+\) :", data):
            section = "identifiers"
            data2 = ""
        if re.findall("Spectral type:", data):
            section = "spectraltype"
        if section=="spectraltype" and re.findall("[OBAFGKM]",data):
            dictio_spectral = data.strip()
            section = "spectral done"
        if re.findall("Plots and Images", data):
            section = "plotsandimages"
        if re.findall("ICRS", data):
            section = "ICRS"
        if section=="ICRS" and re.findall("coord.",data):
            section = "ICRScoord"
        if section=="ICRScoord":
            # RA/Dec as "hh mm ss.ssss... +dd mm ss.ssss..." (truncated to 4 decimals)
            res = re.search(r"\s+(\d\d \d\d \d\d\.\d{4})\d+ ([\+\-]\d\d \d\d \d\d\.\d{4})\d+",data)
            if res:
                dictio_coord = [res.group(1), res.group(2)]
                section = "coords done"
        if re.findall("distance Q unit", data):
            section = "distance"
        # Distance table row: "| 12.34 pc | -0.56 +0.78 |"
        # NOTE(review): this search runs on every data event, not only in the
        # "distance" section -- presumably the pattern is specific enough; confirm.
        res = re.search(r"\s+\|\s*(\d+\.\d+)\s+pc\s+\|\s+\-(\d+\.\d+)\s+\+(\d+\.\d+)\s+\|",data)
        if res:
            dictio_distance = [res.group(1), res.group(2), res.group(3)]
#Another script exists for that. Splitting the two functions lets me to control
#the list is in correct format and won't bring any troubles.
#However, as it is a copy/paste of the script, it should work.
def generateList(path):
    """Write 'list.txt' (in the CWD) containing one line per XML file in
    *path*: the file's basename with the '.xml' suffix removed."""
    with open("list.txt", "w") as out:
        for xml_path in glob.glob(path + "/*.xml"):
            basename = os.path.split(xml_path)[1]
            out.write(basename.replace(".xml", "") + "\n")
#****************************MAIN*********************************
parser = MyHTMLParser()
path = "systems" # systems or systems_kepler
generateList(path)
system_list = open("list.txt","r") #list of the systems to process
lines = system_list.readlines()
lines = [line.replace('\n','') for line in lines]
try:
willskip = open("simbad_skip.txt","r").readlines() #list of the systems to process
willskip = [s.strip() for s in willskip]
except:
willskip = []
nummax = 10000
for line in lines:#read all the list of systems and run the parser class and the magnitude function for each one
filename = path+"/"+line+".xml"
f = open(filename, 'rt')
root = ET.parse(f).getroot()
stars = root.findall(".//star")
binaries = root.findall(".//binary")
systemname = root.findtext("./name")
if line in willskip:
continue
if len(binaries):
continue
#if root.findall(".//spectraltype"):
# continue
## One request per star
for stari, star in enumerate(stars):
starnames = star.findall("./name")
# do request
dictio_mags = []
dictio_ident = []
dictio_distance = []
dictio_coord = []
dictio_spectral = []
section = "mag"
boolean = 0
data2 = ""
starname = starnames[0].text
try:
print('Requesting: http://simbad.u-strasbg.fr/simbad/sim-basic?Ident='+quote_plus(starname))
code_source = urlopen('http://simbad.u-strasbg.fr/simbad/sim-basic?Ident='+quote_plus(starname)).read()
code_source = code_source.decode('utf-8')
except IOError:
print('Lookup failed for {} - skipping'.format(starname))
continue
if re.findall("Identifier not found in the database", code_source):
print('Identifier not found in the database. - skipping')
continue
if re.findall("Extra-solar Confirmed Planet", code_source):
print('Got planet, not star. - skipping')
continue
parser.feed(code_source)
dictio_mags.sort()
# Work on new star names
lastnameindex = -1
for ind, child in enumerate(star):
if child.text == starnames[-1].text:
lastnameindex = ind
starnames = [n.text for n in starnames]
for newstarname in dictio_ident:
if newstarname not in starnames:
nsn = ET.Element("name")
nsn.text = newstarname
star.insert(lastnameindex+1,nsn)
print("New star name added: ", newstarname)
for key in dictio_mags:#concatenate magnitudes in the string from XML
expr = key
if not "[~]" in expr:
sigma = re.findall('\[+.+\]', expr)
sigma = str(sigma[0].replace('[','').replace(']',''))
else:
sigma = ""
expr = re.sub('\[+.+\]', '', expr)#Remove uncertainty from string
expr2 = re.sub('[A-Z]', '', expr)#Remove letters from string, just mag left.
magletters = ["J", "H","K","V","B","R","I"]
#find location to insert (after current mags, after names)
maginsertindex = -1
for magletter in magletters:
mags = star.findall("./mag"+magletter)
for mag in mags:
for ind, child in enumerate(star):
if child.text == mag.text:
maginsertindex = max(maginsertindex,ind)
names = star.findall("./name")
for name in names:
for ind, child in enumerate(star):
if child.text == name.text:
maginsertindex = max(maginsertindex,ind)
for magletter in magletters:
if magletter in expr:
if not star.findtext("./mag"+magletter):
nmag = ET.Element("mag"+magletter)
nmag.text = expr2
if sigma:
nmag.attrib['errorminus'] = sigma
nmag.attrib['errorplus'] = sigma
star.insert(maginsertindex+1,nmag)
print("New mag",magletter,"added: ",expr2,sigma)
if len(dictio_spectral):
if not star.findtext("./spectraltype"):
spectraltype = ET.Element("spectraltype")
spectraltype.text = dictio_spectral
star.insert(maginsertindex+1,spectraltype)
print("New spectraltype added: ",dictio_spectral)
## Planet Names
planets = star.findall("./planet")
for planet in planets:
planetname = planet.findtext("./name")
planetsuffix = planetname.replace(starname,"")
if planetsuffix in [" b"," c"," d"," e"," f"," g"," h"," i"," j"]:
# will attempt to add other names
planetnames = planet.findall("./name")
lastnameindex = -1
for ind, child in enumerate(planet):
if child.text == planetnames[-1].text:
lastnameindex = ind
planetnames = [n.text for n in planetnames]
for starname in dictio_ident:
newplanetname = starname + planetsuffix
if newplanetname not in planetnames:
nne = ET.Element("name")
nne.text = newplanetname
planet.insert(lastnameindex+1,nne)
print("New planet name added: ", newplanetname)
## System parameters based on last star in system
systemnames = root.findall("./name")
lastnameindex = -1
for ind, child in enumerate(root):
if child.text == systemnames[-1].text:
lastnameindex = ind
if not root.findtext("./distance") and len(dictio_distance):
distance = ET.Element("distance")
distance.text = dictio_distance[0]
distance.attrib['errorminus'] = dictio_distance[1]
distance.attrib['errorplus'] = dictio_distance[2]
print("New distance added: ", dictio_distance)
root.insert(lastnameindex+1,distance)
if len(dictio_coord):
coord = root.findtext("./declination")
if coord:
if coord[:6] in dictio_coord[1] and len(coord)<len(dictio_coord[1]):
for ind, child in enumerate(root):
if child.tag == "declination":
lastnameindex = ind-1
print("Old declination removed: ", coord)
root.remove(child)
coord = None
break
if not coord:
declination = ET.Element("declination")
declination.text = dictio_coord[1]
print("New declination added: ", dictio_coord[1])
root.insert(lastnameindex+1,declination)
coord = root.findtext("./rightascension")
if coord:
if coord[:5] in dictio_coord[0] and len(coord)<len(dictio_coord[0]):
for ind, child in enumerate(root):
if child.tag == "rightascension":
lastnameindex = ind-1
print("Old rightascension removed: ", coord)
root.remove(child)
| |
boundary_to_smoothing_ratio,
extract_box = extract_box)
def extract_all_maps_around_unique(self,
     resolution = None,
     solvent_content = None,
     sequence = None,
     molecular_mass = None,
     soft_mask = True,
     chain_type = 'PROTEIN',
     box_cushion = 5,
     target_ncs_au_model = None,
     regions_to_keep = None,
     keep_low_density = True,
     symmetry = None,
     boundary_to_smoothing_ratio = 2.,
     soft_mask_around_edges = None,
     keep_this_region_only = None,
     residues_per_region = None,
     soft_mask_radius = None,
     mask_expand_ratio = 1):
    '''
    Convenience wrapper: runs box_all_maps_around_unique_and_shift_origin
    with extract_box = True, i.e. returns a new deep-copied
    map_model_manager boxed around the unique part of the map instead of
    modifying this object in place.  All parameters are forwarded unchanged.
    '''
    forwarded = dict(
        resolution = resolution,
        solvent_content = solvent_content,
        sequence = sequence,
        molecular_mass = molecular_mass,
        soft_mask = soft_mask,
        chain_type = chain_type,
        box_cushion = box_cushion,
        target_ncs_au_model = target_ncs_au_model,
        regions_to_keep = regions_to_keep,
        keep_low_density = keep_low_density,
        keep_this_region_only = keep_this_region_only,
        residues_per_region = residues_per_region,
        symmetry = symmetry,
        mask_expand_ratio = mask_expand_ratio,
        soft_mask_radius = soft_mask_radius,
        soft_mask_around_edges = soft_mask_around_edges,
        boundary_to_smoothing_ratio = boundary_to_smoothing_ratio,
        extract_box = True)
    return self.box_all_maps_around_unique_and_shift_origin(**forwarded)
def box_all_maps_around_unique_and_shift_origin(self,
     resolution = None,
     solvent_content = None,
     sequence = None,
     molecular_mass = None,
     soft_mask = True,
     chain_type = 'PROTEIN',
     box_cushion = 5,
     target_ncs_au_model = None,
     regions_to_keep = None,
     keep_low_density = True,
     symmetry = None,
     mask_expand_ratio = 1,
     soft_mask_radius = None,
     soft_mask_around_edges = None,
     boundary_to_smoothing_ratio = 2.,
     keep_this_region_only = None,
     residues_per_region = None,
     extract_box = False):
    '''
    Box all maps using bounds obtained with around_unique,
    shift origin of maps, model, and mask around unique region

    If extract_box=True: Creates new object with deep_copies.
    Otherwise: replaces existing map_managers and shifts model in place

    NOTE: This changes the gridding and shift_cart of the maps and model
    and masks the map

    Normally supply just sequence; resolution will be taken from
    map_manager resolution if present.  other options match
    all possible ways that segment_and_split_map can estimate solvent_content

    Must supply one of (sequence, solvent_content, molecular_mass)

    Symmetry is optional symmetry (i.e., D7 or C1). Used as alternative to
    ncs_object supplied in map_manager

    if soft_mask_around_edges, makes a bigger box and makes a soft mask around
    the edges.  Use this option if you are going to calculate a FT of
    the map or otherwise manipulate it in reciprocal space.

    Additional parameters:
      mask_expand_ratio:   allows increasing masking radius beyond default at
                           final stage of masking
      solvent_content:  fraction of cell not occupied by macromolecule
      sequence:        one-letter code of sequence of unique part of molecule
      chain_type:       PROTEIN or RNA or DNA. Used with sequence to estimate
                        molecular_mass
      molecular_mass:    Molecular mass (Da) of entire molecule used to
                         estimate solvent_content
      target_ncs_au_model: model marking center of location to choose as
                           unique
      box_cushion:        buffer around unique region to be boxed
      soft_mask:  use soft mask
      keep_low_density:  keep low density regions
      regions_to_keep:   Allows choosing just highest-density contiguous
                         region (regions_to_keep=1) or a few
      residues_per_region:  Try to segment with this many residues per region
      keep_this_region_only:  Keep just this region (first one is 0 not 1)
    '''
    from cctbx.maptbx.box import around_unique

    # Boxing is driven by the default map and model; remaining maps/models
    # are brought along afterwards.
    map_info=self._get_map_info()
    map_manager = self._map_dict[map_info.map_id]
    assert isinstance(map_manager, MapManager)
    if not resolution:
        resolution = self.resolution()
    assert resolution is not None

    # Exactly one of (sequence, solvent_content, molecular_mass) must be set
    assert (sequence, solvent_content, molecular_mass).count(None) == 2

    model_info=self._get_model_info()
    model = self._model_dict[model_info.model_id]
    if extract_box: # make sure everything is deep_copy
        model = model.deep_copy()

    if soft_mask_around_edges: # make the cushion bigger
        box_cushion += boundary_to_smoothing_ratio * self.resolution()

    # Make box with around_unique and apply it to model, first map
    box = around_unique(
        map_manager = map_manager,
        model = model,
        wrapping = self._force_wrapping,
        target_ncs_au_model = target_ncs_au_model,
        regions_to_keep = regions_to_keep,
        residues_per_region = residues_per_region,
        keep_this_region_only = keep_this_region_only,
        solvent_content = solvent_content,
        resolution = resolution,
        sequence = sequence,
        molecular_mass = molecular_mass,
        symmetry = symmetry,
        chain_type = chain_type,
        box_cushion = box_cushion,
        soft_mask = soft_mask,
        mask_expand_ratio = mask_expand_ratio,
        log = self.log)

    # Keep segmentation info (available regions) if the box produced any
    info = box.info()
    if info and hasattr(info, 'available_selected_regions'):
        self.set_info(info) # save this information

    # Now box is a copy of map_manager and model that is boxed

    # Now apply boxing to other maps and models and then insert them into
    # either this map_model_manager object, replacing what is there (extract_box=False)
    # or create and return a new map_model_manager object (extract_box=True)
    other = self._finish_boxing(box = box, model_info = model_info,
        map_info = map_info,
        soft_mask_radius = soft_mask_radius,
        soft_mask_around_edges = soft_mask_around_edges,
        boundary_to_smoothing_ratio = boundary_to_smoothing_ratio,
        extract_box = extract_box)

    if not extract_box:
        other = self # modifying this object

    # Now apply masking to all other maps (not done in _finish_boxing)
    for id in map_info.other_map_id_list:
        box.apply_around_unique_mask(
            other._map_dict[id],
            resolution = resolution,
            soft_mask = soft_mask)

    if extract_box:
        return other
def _finish_boxing(self, box, model_info, map_info,
    soft_mask_radius = None,
    soft_mask_around_edges = None,
    boundary_to_smoothing_ratio = None,
    extract_box = False):
    '''
    Finish copying information to boxed map_model_manager.

    If extract_box is False, modify this object in place.
    If extract_box is True , create a new object of same type and return it.

    `box` is the boxing object holding the already-boxed default map and
    model; `map_info`/`model_info` identify the default entries and list the
    remaining map/model ids that still need the box applied to them.
    '''
    # Propagate any warning generated during boxing
    if box.warning_message():
        self._warning_message = box.warning_message()
        self._print("%s" %(box.warning_message()))

    if extract_box:
        other = self._empty_copy() # making a new object
    else:
        other = self # modifying this object

    # The already-boxed default map and model replace the existing entries
    other._map_dict[map_info.map_id] = box.map_manager()
    other._model_dict[model_info.model_id] = box.model()

    # Apply the box to all the other maps
    for id in map_info.other_map_id_list:
        other._map_dict[id] = box.apply_to_map(self._map_dict[id])

    # Apply the box to all the other models (deep-copied so originals survive)
    for id in model_info.other_model_id_list:
        other._model_dict[id] = box.apply_to_model(
            self._model_dict[id].deep_copy())

    # Copy default information over; mark the result's name as boxed
    name = '%s_boxed' %(self.name)
    self._set_default_parameters(other, name = name)

    if soft_mask_around_edges: # apply a soft mask at the new box edges
        other.mask_all_maps_around_edges(
            soft_mask_radius = soft_mask_radius,
            boundary_to_smoothing_ratio = boundary_to_smoothing_ratio)

    if extract_box:
        return other
def merge_split_maps_and_models(self,
    model_id = None,
    box_info = None,
    replace_coordinates = True,
    replace_u_aniso = False,
    allow_changes_in_hierarchy = False,
    output_model_id = None):
    '''
    Replaces coordinates in working model with those from the
    map_model_managers in box_info.  The box_info object should
    come from running split_up_map_and_model in this instance
    of the map_model_manager.

    If allow_changes_in_hierarchy is set, create a new working model where
    the hierarchy has N "models", one from each box.  This allows changing
    the hierarchy structure. This will create one model in a new hierarchy
    for each box, numbered by box number.  The new hierarchy will
    be placed in a model with id output_model_id (default is
    model_id, replacing existing model specified by model_id; usually
    this is just 'model', the default model.)

    replace_coordinates: copy sites from each boxed model back into the
      working model (selection-wise).
    replace_u_aniso: recompute anisotropic U values from the per-box TLS
      info (box_info.tlso_list), if present.
    '''
    # Default to the standard 'model' entry
    if model_id is None:
        model_id = 'model'
    if allow_changes_in_hierarchy and output_model_id is None:
        output_model_id = 'model'

    if allow_changes_in_hierarchy:
        # Build a brand-new empty hierarchy; each box will contribute one
        # "model" (numbered by box) appended to it below.
        print(
            "\nModels from %s boxed models will be 'models' in new hierarchy" %(
            len(box_info.selection_list)), file = self.log)
        print("New model id will be: %s" %(output_model_id),
            file = self.log)
        if output_model_id in self.model_id_list():
            print("NOTE: Replacing model %s with new composite model" %(
                output_model_id), file = self.log)

        # Set up a new empty model and hierarchy
        import iotbx.pdb
        pdb_inp = iotbx.pdb.input(source_info='text', lines=[""])
        ph = pdb_inp.construct_hierarchy()

        # Make a new model and save it as output_model_id
        self.model_from_hierarchy(ph,
            model_id = output_model_id)

        # Get this hierarchy so we can add models to it:
        working_model = self.get_model_by_id(
            model_id = output_model_id)
    else:
        print(
            "\nMerging coordinates from %s boxed models into working model" %(
            len(box_info.selection_list)), file = self.log)
        print("Working model id is : %s" %(model_id),
            file = self.log)

    i = 0
    # Older box_info objects may lack tlso_list; pad with None per box
    if not hasattr(box_info,'tlso_list'):
        box_info.tlso_list = len(box_info.mmm_list) * [None]
    for selection, mmm, tlso_value in zip(
        box_info.selection_list, box_info.mmm_list, box_info.tlso_list):
        i += 1  # box number (1-based)
        model_to_merge = self.get_model_from_other(mmm,
            other_model_id=model_id)
        if allow_changes_in_hierarchy: # Add a model to the hierarchy
            ph_models_in_box = 0
            for m in model_to_merge.get_hierarchy().models():
                ph_models_in_box += 1
                assert ph_models_in_box <= 1 # cannot have multiple models in box
                mm = m.detached_copy()
                mm.id = "%s" %(i) # model number is box number as string
                working_model.get_hierarchy().append_model(mm)
        else: # replace sites and/or u_aniso values in existing model
            if replace_coordinates: # all sites
                sites_cart = self.get_model_by_id(model_id).get_sites_cart()
                # Sites to merge from this model
                new_coords=model_to_merge.get_sites_cart()
                original_coords=sites_cart.select(selection)
                rmsd=new_coords.rms_difference(original_coords)
                print("RMSD for %s coordinates in model %s: %.3f A" %(
                    original_coords.size(), i, rmsd), file = self.log)
                sites_cart.set_selected(selection, new_coords)
                self.get_model_by_id(model_id).set_crystal_symmetry_and_sites_cart(
                    sites_cart = sites_cart,
                    crystal_symmetry = self.get_model_by_id(
                        model_id).crystal_symmetry())
            if replace_u_aniso and tlso_value: # calculate aniso U from TLS
                print("Replacing u_cart values based on TLS info",file = self.log)
                xrs=self.get_model_by_id(model_id).get_xray_structure()
                xrs.convert_to_anisotropic()
                uc = xrs.unit_cell()
                sites_cart = xrs.sites_cart()
                u_cart=xrs.scatterers().extract_u_cart(uc)
                new_anisos= uaniso_from_tls_one_group(tlso = tlso_value,
                    sites_cart = sites_cart.select(selection),
                    zeroize_trace=False)
                u_cart.set_selected(selection, new_anisos)
                xrs.set_u_cart(u_cart)
                self.get_model_by_id(model_id).set_xray_structure(xrs)
    if allow_changes_in_hierarchy:
        working_model.reset_after_changing_hierarchy() # REQUIRED
def split_up_map_and_model_by_chain(self,
model_id = 'model',
skip_waters = False,
skip_hetero = False,
box_cushion = 3,
mask_around_unselected_atoms = None,
mask_radius = 3,
masked_value = -10,
write_files = False,
apply_box_info = True,
):
'''
Split up the map, boxing around each chain in the | |
# Copyright (c) 2019-2022 ThatRedKite and contributors
import math
from math import log10, sqrt
import matplotlib.pyplot as plt
from io import BytesIO
import discord
import discord.commands as scmd
from discord.ext import commands
import si_prefix
from random import randint
from thatkitebot.backend import util
from thatkitebot.backend import pcb_mod
class InputDifferenceError(Exception):
    """Signals an invalid difference between input values (reserved; not raised in the code visible here)."""
    pass
class InputOutOfRangeError(Exception):
    """Signals an input outside the supported range (reserved; not raised in the code visible here)."""
    pass
class TooFewArgsError(Exception):
    """Raised when a calculator does not receive enough arguments to solve for the missing value."""
    pass
class ImpossibleValueError(Exception):
    """Raised when a supplied value is physically implausible or outside the formula's validity range."""
    pass
def parse_input(s):
    """Parse a 'key=value key=value ...' command string into a dict.

    Values are normalized for later SI parsing: the unit letters 'v'/'V'
    and the ohm sign are stripped, and 'u' is rewritten as the micro
    prefix 'µ'.  Keys are left untouched.
    """
    tokens = s.replace("=", " ").split(" ")
    parsed = {}
    for key, raw in zip(tokens[::2], tokens[1::2]):
        parsed[key] = raw.replace("v", "").replace("V", "").replace("u", "µ").replace("Ω", "")
    return parsed
def slash_preprocessor(a: str):
    """Normalize a slash-command argument for SI parsing.

    Strips the unit letters 'v'/'V'/'F', rewrites 'u' as the micro prefix
    'µ', and trims whitespace.  Returns None for empty or missing input.
    """
    if not a:
        return None
    drop_units = str.maketrans({"v": None, "V": None, "u": "µ", "F": None})
    return a.translate(drop_units).strip()
class conversion:
    """
    PCB unit conversions: mm <-> mil, and copper weight (oz/ft²) to
    copper height (mil and µm).  Exactly one of the inputs must be given.
    """
    def __init__(self, d: dict):
        # Missing keys stay None; si_parse handles SI-prefixed strings.
        raw_mm = d.get("mm")
        raw_mil = d.get("mil")
        raw_oz = d.get("oz")
        self.mm = si_prefix.si_parse(raw_mm) if raw_mm else None
        self.mil = si_prefix.si_parse(raw_mil) if raw_mil else None
        self.oz = si_prefix.si_parse(raw_oz) if raw_oz else None
        self.mode = "length"

    def calculate(self):
        """Fill in the derived quantity from whichever single input was given.

        Raises TooFewArgsError when zero or more than one input is present.
        """
        supplied = (self.mm is not None, self.mil is not None, self.oz is not None)
        if supplied == (True, False, False):
            self.mil = round(pcb_mod.mm2mil(self.mm), 3)
            self.mode = "mil"
        elif supplied == (False, True, False):
            self.mm = round(pcb_mod.mil2mm(self.mil), 3)
            self.mode = "mm"
        elif supplied == (False, False, True):
            self.mil = round(pcb_mod.weight2mil(self.oz), 3)
            # for copper weight the "mm" slot holds micrometres (see embed text)
            self.mm = round(pcb_mod.mil2mm(self.mil) * 1000, 1)
            self.mode = "oz"
        else:
            raise TooFewArgsError()

    def gen_embed(self):
        """Build the Discord embed: help text when no input, otherwise the result."""
        try:
            self.calculate()
        except TooFewArgsError:
            self.mode = None
        embed = discord.Embed(title="PCB Unit Conversions")
        if self.mode is None:
            embed.add_field(
                name="How to use this?",
                value="""With this command you can convert between millimeters and mils for PCB applications
And it can give you the copper height in both mils and micrometers
Example: `conv mm=20` to convert 20 mm to mils
Example: `conv mil=20` to convert 20 mils to mm
Example: `conv oz=2` to get the height of 2oz/ft² copper on a PCB
This accepts any SI prefixes, but does not support endings with "mm" or "mil"
""",
                inline=True)
            return embed
        if self.mode == "mm":
            embed.add_field(name="Result:",
                            value=f"{self.mil}mil(s) = __{self.mm}mm__")
        elif self.mode == "mil":
            embed.add_field(name="Result:",
                            value=f"{self.mm}mm = __{self.mil}mil(s)__")
        elif self.mode == "oz":
            embed.add_field(name="Result:",
                            value=f"{self.oz}oz/ft² = __{self.mil}mil(s)__ or __{self.mm}μm__")
        if embed:
            return embed
class PCB_calc:
"""
PCB calculations commands.
"""
def __init__(self, d: dict, internal = False, limit = False):
self.current = si_prefix.si_parse(d.get("i")) if d.get("i") else None
self.width = si_prefix.si_parse(d.get("w")) if d.get("w") else None
self.thicc = si_prefix.si_parse(d.get("t")) if d.get("t") else None
self.temp = si_prefix.si_parse(d.get("temp")) if d.get("temp") else None
self.internal = internal
self.limit = limit
self.mode = None
def calculate(self):
if self.limit:
self.mode = "limit"
if self.temp is not None and (self.temp < 10 or self.temp > 100):
raise ImpossibleValueError("Get real")
if self.thicc is not None and (self.thicc < 0.5 or self.thicc > 3):
raise ImpossibleValueError("Get real")
if self.current is not None and self.width is None:
if self.current <= 0 or self.current > 35:
raise ImpossibleValueError("Get real")
self.width = round(pcb_mod.width(self.current, int(0 if self.temp is None else self.temp), int(0 if self.thicc is None else self.thicc), self.internal), 3)
self.mode = "succ"
elif self.current is None and self.width is not None:
if self.width < 0 or self.width > 400:
raise ImpossibleValueError("Get real")
self.current = round(pcb_mod.current(int(0 if self.temp is None else self.temp), self.width, int(0 if self.thicc is None else self.thicc), self.internal), 3)
self.mode = "succ"
elif not self.limit:
raise ImpossibleValueError("Get real")
if self.thicc is None:
if self.internal:
self.thicc = 0.5
else:
self.thicc = 1
if self.temp is None:
self.temp = 10
def draw(self):
if self.mode != "succ":
return f"""
```
Width = {self.width}mils
<---->
┌──┐
───┴──┴───
Copper weight = {self.thicc}oz/ft²
Max Current = {self.current}A
ΔTemperature = {self.temp}°C
Internal layer? {self.internal}
```
"""
else:
return """
```
Width = {self.width}mils
<---->
┌──┐
───┴──┴───
```
"""
def randomize(self):
self.current = randint(1,10)
self.temp = randint(1, 100)
self.thicc = 1
self.temp = 10
def gen_embed(self):
try:
self.calculate()
except TooFewArgsError:
self.randomize()
self.calculate()
self.mode = None
embed = discord.Embed(title="PCB Trace Calculator")
if self.mode != "limit":
embed.add_field(name="Drawing", value=self.draw(), inline=False)
match self.mode:
case None:
embed.add_field(
name="How to use this?",
value=f"""With this command you can calculate either how wide your PCB traces have to be,
or how much current they can handle. This is done with the IPC-2221 formulas.
Example: `pcbtrace i=2 temp=10`
this will calculate how wide an outside trace has to be to carry 2A without heating more than 10°C
Example: `pcbtrace w=10 temp=10`
this will calculate how much current a 10 mils trace can carry without heating more than 10°C
You can also specify the copper weight in oz/ft² with the `t=2` variable.
however if you do not specify the copper weight the bot will use JLCPCBs standard values
To calculate for internal traces, add `internal` to the end of the command.
To check the command limits, type `pcbtrace limit`
""",
inline=True)
embed.set_footer(text="Note: the above values are randomly generated")
case "limit":
embed.add_field(
name="IPC2221 Formula limits",
value=f"""This command is using the IPC2221 standard as the source for PCB related formulas.
Because of that, it has been hardcoded to only accept inputs within the range the formula was made for.
These limits are:```
Width: 0 to 400mils
Copper weight: 0.5 to 3oz/ft²
Max Current: 0 to 35A
ΔTemperature: 10 to 100°C```
Note, exactly 0A is not an acceptable input.
Because who really needs a trace that cant carry any current? <:schmuck:900445607888551947>
""",
inline=True)
case "succ":
embed.add_field(
name="Values",
value=f"Width = {self.width}mils\nCopper weight = {self.thicc}oz/ft²\nMax Current = {self.current}A\nΔTemperature = {self.temp}°C\nInternal layer? {self.internal}\n")
if embed:
return embed
class VoltageDivider:
def __init__(self, d: dict):
self.vin = si_prefix.si_parse(d.get("vin")) if d.get("vin") else None
self.vout = si_prefix.si_parse(d.get("vout")) if d.get("vout") else None
self.r1 = si_prefix.si_parse(d.get("r1")) if d.get("r1") else None
self.r2 = si_prefix.si_parse(d.get("r2")) if d.get("r2") else None
self.mode = None
def calculate(self):
if self.r1 and self.r2 and self.vin and not self.vout:
self.vout = self.vin * self.r2 / (self.r1 + self.r2)
self.mode = "succ"
elif self.r2 and self.vin and self.vout and not self.r1:
self.r1 = self.r2 * (self.vin - self.vout) / self.vout
self.mode = "succ"
elif self.r1 and self.vin and self.vout and not self.r2:
self.r2 = self.vout * self.r1 / (self.vin - self.vout)
self.mode = "succ"
else:
raise TooFewArgsError()
self.format()
def draw(self):
if self.mode is None:
return f"""
```
Vin
▲
│
┌┴┐
│ │ R1
└┬┘
├───○ Vout
┌┴┐
│ │ R2
└┬┘
│
─┴─
GND
Vin = {self.vin}V
Vout = {self.vout}V
R1 = {self.r1}Ω
R2 = {self.r2}Ω
```
"""
else:
return """
```
Vin
▲
│
┌┴┐
│ │ R1
└┬┘
├───○ Vout
┌┴┐
│ │ R2
└┬┘
│
─┴─
GND
```
"""
def format(self):
self.r1 = si_prefix.si_format(self.r1)
self.r2 = si_prefix.si_format(self.r2)
self.vout = si_prefix.si_format(self.vout)
self.vin = si_prefix.si_format(self.vin)
def randomize(self):
self.r1 = randint(1,1000000)
self.r2 = randint(1, 1000000)
self.vin = randint(1, 100)
def gen_embed(self):
try:
self.calculate()
except TooFewArgsError:
self.randomize()
self.calculate()
self.mode = None
embed = discord.Embed(title="Unloaded voltage divider calculation")
embed.add_field(name="Schematic", value=self.draw(), inline=False)
match self.mode:
case None:
embed.add_field(
name="How to use this?",
value=f"""With this command you can calculate different values of an unloaded voltage divider.
Example: `divider vin=10v r2=3k vout=3v`to find the value of R1.
The bot will try to figure out what you are looking for based on the value you didn't enter.
You can do the same for every value except Vin.
This accepts any SI-prefix (e.g. k, m, M, µ, etc.).
Writing the "V" after the voltages is optional but don't try writing out the `Ω` in Ohms
as it just confuses the bot (don't use R either).
""",
inline=True)
embed.set_footer(text="Note: the above voltage divider is randomly generated")
return embed
case "succ":
embed.add_field(
name="Values",
value=f"R1 = {self.r1}Ω\nR2 = {self.r2}Ω\nVin = {self.vin}V\nVout = {self.vout}V")
embed.add_field(
name=f"Closest E{(12 if self.e is None or 0 else self.e)} resistor values",
value=f"R1 = {si_prefix.si_format(pcb_mod.e_resistor(self.r1, (12 if self.e is None or 0 else self.e)))}Ω\nR2 = {si_prefix.si_format(pcb_mod.e_resistor(self.r2, int(12 if self.e is None or 0 else self.e)))}Ω")
if embed:
return embed
class LM317:
def __init__(self, d: dict):
self.r1 = si_prefix.si_parse(d.get("r1")) if d.get("r1") else None
self.r2 = si_prefix.si_parse(d.get("r2")) if d.get("r2") else None
self.vout = si_prefix.si_parse(d.get("vout")) if d.get("vout") else None
self.vin = si_prefix.si_parse(d.get("vin")) if d.get("vin") | |
(t_points - osp['origin']) / osp['spacing']
# t_border_points_inside = ~np.any([(t_points_pix < 0) ^ ((t_points_pix - osp['size']) > 0)], axis=(0, 2))
# print(iview, 'heyho')
# import pdb;
# pdb.set_trace()
# if all borders inside it could be that the border is close to the edge,
# meaning it has to be considered
if np.all(t_border_points_inside):
ws.append(np.ones(stack_properties['size'], dtype=np.float32))
# print('all borders inside')
continue
#
if not np.any(t_border_points_inside):
# print(print(t_border_points_inside))
ws.append(np.zeros(stack_properties['size'], dtype=np.float32))
# print('all borders outside')
continue
# print('onborder')
# determine boundary using the psf? alternatively, um
##########
# optimize this
# e.g. calculate block only once: calc block containing distances to boundary,
# then apply different sigmoid based on spacing
##########
# a = 200
# x, y, z = np.mgrid[:a, :a, :a]
# d = np.min([x / 4., y, z, a - x / 4., a - y, a - z], 0)
# sigN = 200
sigN = 200
sigspacing = (np.array(orig_stack_propertiess[iview]['size'])-2)/(sigN-1)*orig_stack_propertiess[iview]['spacing']
# sigspacing = (np.array(orig_stack_propertiess[iview]['size'])-1)/(sigN-1)*orig_stack_propertiess[iview]['spacing']
b_in_um = 40.
b_in_pixels = int(b_in_um / sigspacing[0])
# sig = sigmoid(border_dist_template, b_in_pixels)
# print('blending weights: border width: %s um, %s pixels' %(b_in_um,b_in_pixels))
# r = 0.05 # relative border width
# sig = np.ones(reducedview.shape,dtype=np.float32)
# sigN = 200
sig = np.ones([sigN]*3,dtype=np.float32)
for d in range(3):
# borderwidth = int(r * sig.shape[d])
# blend bad part of stack more:
borderwidth = b_in_pixels
if d == 0: borderwidth = b_in_pixels*4
# print(borderwidth)
slices = [slice(0, sig.shape[i]) for i in range(3)]
for bx in range(borderwidth):
slices[d] = slice(bx, bx + 1)
sig[tuple(slices)] = np.min([sig[tuple(slices)] * 0 + sigmoid(bx, borderwidth), sig[tuple(slices)]], 0)
# don't blend best part of the image (assuming that is true for high zs)
# if d == 0: borderwidth = int(0.02 * sig.shape[d])
# if d == 0: borderwidth = int(0.05 * sig.shape[d])
borderwidth = b_in_pixels
for bx in range(borderwidth):
slices[d] = slice(sig.shape[d] - bx - 1, sig.shape[d] - bx)
sig[tuple(slices)] = np.min([sig[tuple(slices)] * 0 + sigmoid(bx, borderwidth), sig[tuple(slices)]], 0)
# sig = ImageArray(sig,spacing=views[iview].spacing,origin=views[iview].origin)
# sigspacing = (np.array(views[iview].shape)-1)/(sigN-1)*views[iview].spacing
# sigspacing = (np.array(orig_stack_propertiess[iview]['size'])-3)/(sigN-1)*orig_stack_propertiess[iview]['spacing']
sig = ImageArray(sig,spacing=sigspacing,origin=orig_stack_propertiess[iview]['origin']+1*orig_stack_propertiess[iview]['spacing'])
tmpvs = transform_stack_sitk(sig,params[iview],
out_origin=stack_properties['origin'],
out_shape=stack_properties['size'],
out_spacing=stack_properties['spacing'],
interp='linear',
)
ws.append(tmpvs)
wsum = np.sum(ws,0)
wsum[wsum==0] = 1
for iw,w in enumerate(ws):
ws[iw] = ws[iw] / wsum
return ws
# # @io_decorator
# def get_weights_simple(
# orig_stack_propertiess,
# params,
# stack_properties,
# ):
# """
# sigmoid on borders
# """
#
# # w_stack_properties = stack_properties.copy()
# # minspacing = 3.
# # changed_stack_properties = False
# # if w_stack_properties['spacing'][0] < minspacing:
# # changed_stack_properties = True
# # print('using downsampled images for calculating simple weights..')
# # w_stack_properties['spacing'] = np.array([minspacing]*3)
# # w_stack_properties['size'] = (stack_properties['spacing'][0]/w_stack_properties['spacing'][0])*stack_properties['size']
#
# ws = []
#
# border_points = []
# rel_coords = np.linspace(0,1,5)
# for point in [[i,j,k] for i in rel_coords for j in rel_coords for k in rel_coords]:
# phys_point = stack_properties['origin'] + np.array(point)*stack_properties['size']*stack_properties['spacing']
# border_points.append(phys_point)
#
#
# for iview in range(len(params)):
#
# # start = time.time()
#
# # quick check if stack_properties inside orig volume
# osp = orig_stack_propertiess[iview]
#
# # transform border points into orig view space (pixel coords)
# t_border_points_inside = []
# for point in border_points:
# t_point = np.dot(params[iview][:9].reshape((3,3)),point) + params[iview][9:]
# t_point_pix = (t_point - osp['origin']) / osp['spacing']
# inside = True
# for icoord,coord in enumerate(t_point_pix):
# if coord < 0 or coord >= osp['size'][icoord]:
# inside = False
# break
# t_border_points_inside.append(inside)
#
# # if all borders inside it could be that the border is close to the edge,
# # meaning it has to be considered
#
# # if np.all(t_border_points_inside):
# # ws.append(np.ones(stack_properties['size'],dtype=np.float32))
# # # print('all borders inside')
# # continue
# #
#
# if not np.any(t_border_points_inside):
# # print(print(t_border_points_inside))
# ws.append(np.zeros(stack_properties['size'], dtype=np.float32))
# # print('all borders outside')
# continue
#
#
# def sigmoid(x,borderwidth):
# x0 = float(borderwidth)/2.
# a = 12./borderwidth
# return 1/(1+np.exp(-a*(x-x0)))
#
# # determine boundary using the psf? alternatively, um
#
# ##########
# # optimize this
# # e.g. calculate block only once: calc block containing distances to boundary,
# # then apply different sigmoid based on spacing
# ##########
#
# # a = 200
# # x, y, z = np.mgrid[:a, :a, :a]
# # d = np.min([x / 4., y, z, a - x / 4., a - y, a - z], 0)
#
# # sigN = 200
# sigN = 200
# sigspacing = (np.array(orig_stack_propertiess[iview]['size'])-2)/(sigN-1)*orig_stack_propertiess[iview]['spacing']
# # sigspacing = (np.array(orig_stack_propertiess[iview]['size'])-1)/(sigN-1)*orig_stack_propertiess[iview]['spacing']
#
# b_in_um = 40.
# b_in_pixels = int(b_in_um / sigspacing[0])
# # print('blending weights: border width: %s um, %s pixels' %(b_in_um,b_in_pixels))
#
# # r = 0.05 # relative border width
# # sig = np.ones(reducedview.shape,dtype=np.float32)
# # sigN = 200
# sig = np.ones([sigN]*3,dtype=np.float32)
#
# for d in range(3):
# # borderwidth = int(r * sig.shape[d])
# # blend bad part of stack more:
# borderwidth = b_in_pixels
# if d == 0: borderwidth = b_in_pixels*4
# # print(borderwidth)
# slices = [slice(0, sig.shape[i]) for i in range(3)]
# for bx in range(borderwidth):
# slices[d] = slice(bx, bx + 1)
# sig[tuple(slices)] = np.min([sig[tuple(slices)] * 0 + sigmoid(bx, borderwidth), sig[tuple(slices)]], 0)
#
# # don't blend best part of the image (assuming that is true for high zs)
# # if d == 0: borderwidth = int(0.02 * sig.shape[d])
# # if d == 0: borderwidth = int(0.05 * sig.shape[d])
# borderwidth = b_in_pixels
# for bx in range(borderwidth):
# slices[d] = slice(sig.shape[d] - bx - 1, sig.shape[d] - bx)
# sig[tuple(slices)] = np.min([sig[tuple(slices)] * 0 + sigmoid(bx, borderwidth), sig[tuple(slices)]], 0)
#
# # sig = ImageArray(sig,spacing=views[iview].spacing,origin=views[iview].origin)
#
# # sigspacing = (np.array(views[iview].shape)-1)/(sigN-1)*views[iview].spacing
# # sigspacing = (np.array(orig_stack_propertiess[iview]['size'])-3)/(sigN-1)*orig_stack_propertiess[iview]['spacing']
# sig2 = ImageArray(sig,spacing=sigspacing,origin=orig_stack_propertiess[iview]['origin']+1*orig_stack_propertiess[iview]['spacing'])
#
# tmpvs = transform_stack_sitk(sig2,params[iview],
# out_origin=stack_properties['origin'],
# out_shape=stack_properties['size'],
# out_spacing=stack_properties['spacing'],
# interp='linear',
# )
#
# # mask = get_mask_in_target_space(orig_stack_propertiess[iview],
# # stack_properties,
# # params[iview]
# # )
# # times.append(time.time()-start)
# # mask = mask > 0
# # print('WARNING; 1 ITERATIONS FOR MASK DILATION (DCT WEIGHTS')
# # mask = ndimage.binary_dilation(mask == 0,iterations=1)
# # ws.append(tmpvs*mask)
# # ws.append(mask)
# ws.append(tmpvs)
#
# # print('times',times)
#
# wsum = np.sum(ws,0)
# wsum[wsum==0] = 1
# for iw,w in enumerate(ws):
# # ws[iw] /= wsum
# ws[iw] = ws[iw] / wsum
#
# return ws
from itertools import product
import dask.array as da
from dask.base import tokenize
from scipy.ndimage import affine_transform as ndimage_affine_transform
import warnings
def dask_affine_transform(
image,
matrix,
offset=None,
output_shape=None,
order=1,
output_chunks=None,
depth=None,
**kwargs
):
"""
MODIFIED VERSION TO PRODUCE OVERLAP
Apply an affine transform using Dask. For every
output chunk, only the slice containing the relevant part
of the image is processed. Chunkwise processing is performed
either using `ndimage.affine_transform` or
`cupyx.scipy.ndimage.affine_transform`, depending on the input type.
Notes
-----
Differences to `ndimage.affine_transformation`:
- currently, prefiltering is not supported
(affecting the output in case of interpolation `order > 1`)
- default order is 1
- modes 'reflect', 'mirror' and 'wrap' are not supported
Arguments equal to `ndimage.affine_transformation`,
except for `output_chunks`.
Parameters
----------
image : array_like (Numpy Array, Cupy Array, Dask Array...)
The image array.
matrix : array (ndim,), (ndim, ndim), (ndim, ndim+1) or (ndim+1, ndim+1)
Transformation matrix.
offset : array (ndim,)
Transformation offset.
output_shape : array (ndim,), optional
The size of the array to be returned.
order : int, optional
The order of the spline interpolation. Note that for order>1
scipy's affine_transform applies prefiltering, which is not
yet supported and skipped in this implementation.
output_chunks : array (ndim,), optional
The chunks of the output Dask Array.
Returns
-------
affine_transform : Dask Array
A dask array representing the transformed output
"""
if not type(image) == da.core.Array:
image = da.from_array(image)
if output_shape is None:
output_shape = image.shape
if output_chunks is None:
output_chunks = image.shape
if depth is None:
depth = {dim: 0 for dim in range(image.ndim)}
# Perform test run to ensure parameter validity.
ndimage_affine_transform(np.zeros([0] * image.ndim),
matrix,
offset)
# Make sure parameters contained in matrix and offset
# are not overlapping, i.e. that the offset is valid as
# it needs to be modified for each chunk.
# Further parameter checks are performed directly by
# `ndimage.affine_transform`.
matrix = np.asarray(matrix)
offset = np.asarray(offset).squeeze()
# these lines were copied and adapted from `ndimage.affine_transform`
if (matrix.ndim == 2 and matrix.shape[1] == image.ndim + 1 and
(matrix.shape[0] in [image.ndim, image.ndim + 1])):
# assume input is homogeneous coordinate transformation matrix
offset = matrix[:image.ndim, image.ndim]
matrix = matrix[:image.ndim, :image.ndim]
# process kwargs
# prefilter is not yet supported
if 'prefilter' in kwargs:
if kwargs['prefilter'] and order > 1:
warnings.warn('Currently, `dask_image.ndinterp.affine_transform` '
'doesn\'t support `prefilter=True`. Proceeding with'
' `prefilter=False`, which if order > 1 can lead '
'to the output containing more blur than with '
'prefiltering.', UserWarning)
del kwargs['prefilter']
if 'mode' in kwargs:
if kwargs['mode'] in | |
setWikiWordAsRoot(self, word):
if not self.requireReadAccess():
return
try:
if word is not None and \
self.getWikiDocument().isDefinedWikiLinkTerm(word):
self.tree.setRootByWord(word)
self.tree.expandRoot()
self.getConfig().set("main", "tree_last_root_wiki_word", word)
except (IOError, OSError, DbAccessError), e:
self.lostAccess(e)
raise
def closeWiki(self, saveState=True):
    """Close the currently open wiki (Python 2 code).

    saveState -- if True, persist config, dirty pages and DB state
            before closing.
    Raises LossyWikiCloseDeniedException if access was already lost and
    the user refuses to close anyway.
    """
    def errCloseAnywayMsg():
        # Ask the user whether to close despite lost write access.
        return wx.MessageBox(_(u"There is no (write-)access to underlying wiki\n"
                "Close anyway and loose possible changes?"),
                _(u'Close anyway'),
                wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION, self)

    wikiConfigPath = self.getWikiConfigPath()
    if wikiConfigPath:
        wd = self.getWikiDocument()
        # Do not require access here, otherwise the user will not be able to
        # close a disconnected wiki
        if not wd.getReadAccessFailed() and not wd.getWriteAccessFailed():
            try:
                self.fireMiscEventKeys(("closing current wiki",))
                self.hooks.closingWiki(self, wikiConfigPath)
                if self.getWikiData() and saveState:
                    self.saveCurrentWikiState()
            except (IOError, OSError, DbAccessError), e:
                self.lostAccess(e)
                if errCloseAnywayMsg() != wx.YES:
                    raise
                else:
                    traceback.print_exc()

            self.fireMiscEventKeys(("dropping current wiki",))
            self.hooks.droppingWiki(self, wikiConfigPath)

            if self.continuousExporter is not None:
                self.continuousExporter.stopContinuousExport()
                self.continuousExporter = None

            try:
                self.lastAccessedWiki(self.getWikiConfigPath())
                if self.getWikiData():
                    wd.release()
            except (IOError, OSError, DbAccessError), e:
                # TODO: Option to show such errors
                # traceback.print_exc()
                pass
            self.wikiData = None
            if self.wikiDataManager is not None:
                self.wikiDataManager.getUpdateExecutor().getMiscEvent()\
                        .removeListener(self)
            self.currentWikiDocumentProxyEvent.setWatchedEvent(None)
            self.wikiDataManager = None
        else:
            # We had already a problem, so ask what to do
            if errCloseAnywayMsg() != wx.YES:
                raise LossyWikiCloseDeniedException

            self.fireMiscEventKeys(("dropping current wiki",))
            self.hooks.droppingWiki(self, wikiConfigPath)

            self.wikiData = None
            if self.wikiDataManager is not None:
                self.wikiDataManager.getUpdateExecutor().getMiscEvent()\
                        .removeListener(self)
            self.currentWikiDocumentProxyEvent.setWatchedEvent(None)
            self.wikiDataManager = None

        # Common cleanup for both the clean and the lossy close path.
        self._refreshHotKeys()
        self.statusBarTimer.Stop()

        self.getConfig().setWikiConfig(None)
        if self.clipboardInterceptor is not None:
            self.clipboardInterceptor.catchOff()

        self.fireMiscEventKeys(("closed current wiki",))
        self.hooks.closedWiki(self, wikiConfigPath)
        self.resetGui()
def saveCurrentWikiState(self):
    """Persist config, dirty pages and pending DB changes of the open wiki.

    On a DB access error the lost-access handling runs and the exception
    is re-raised to the caller.
    """
    try:
        # write out the current config
        self.writeCurrentConfig()

        # save the current wiki page if it is dirty
        if self.isWikiLoaded():
            self.saveAllDocPages()

        # database commits
        if self.getWikiData():
            self.getWikiData().commit()
    except (IOError, OSError, DbAccessError), e:
        self.lostAccess(e)
        raise
def requireReadAccess(self):
    """
    Check flag in WikiDocument if database is readable. If not, take
    measures to re-establish it. If read access is probably possible,
    return True
    """
    wd = self.getWikiDocument()
    if wd is None:
        wx.MessageBox(_(u"This operation requires an open database"),
                _(u'No open database'), wx.OK, self)
        return False

    if not wd.getReadAccessFailed():
        # Fast path: access never failed, nothing to re-establish.
        return True

    # Reconnect loop: keep asking the user until reconnect succeeds or
    # the user gives up.
    while True:
        wd = self.getWikiDocument()
        if wd is None:
            return False

        self.SetFocus()
        answer = wx.MessageBox(_(u"No connection to database. "
                u"Try to reconnect?"), _(u'Reconnect database?'),
                wx.YES_NO | wx.YES_DEFAULT | wx.ICON_QUESTION, self)

        if answer != wx.YES:
            return False

        self.showStatusMessage(_(u"Trying to reconnect database..."), 0,
                "reconnect")
        try:
            try:
                wd.reconnect()
                wd.setNoAutoSaveFlag(False)
                wd.setReadAccessFailed(False)
                self.requireWriteAccess()  # Just to test it # TODO ?
                return True  # Success
            except DbReadAccessError, e:
                # Report and loop back to ask the user again.
                sys.stderr.write(_(u"Error while trying to reconnect:\n"))
                traceback.print_exc()
                self.SetFocus()
                self.displayErrorMessage(_(u'Error while reconnecting '
                        'database'), e)
        finally:
            self.dropStatusMessageByKey("reconnect")
def requireWriteAccess(self):
    """
    Check flag in WikiDocument if database is writable. If not, take
    measures to re-establish it. If write access is probably possible,
    return True
    """
    # Write access implies read access, so re-establish that first.
    if not self.requireReadAccess():
        return False

    if not self.getWikiDocument().getWriteAccessFailed():
        return True

    # Retry loop: keep asking the user until a test write succeeds or
    # the user gives up.
    while True:
        wd = self.getWikiDocument()
        if wd is None:
            return False

        self.SetFocus()
        answer = wx.MessageBox(
                _(u"This operation needs write access to database\n"
                u"Try to write?"), _(u'Try writing?'),
                wx.YES_NO | wx.YES_DEFAULT | wx.ICON_QUESTION, self)

        if answer != wx.YES:
            return False

        self.showStatusMessage(_(u"Trying to write to database..."), 0,
                "reconnect")
        try:
            try:
                # write out the current configuration
                self.writeCurrentConfig()
                self.getWikiData().testWrite()

                wd.setNoAutoSaveFlag(False)
                wd.setWriteAccessFailed(False)
                return True  # Success
            except (IOError, OSError, DbWriteAccessError), e:
                # Report and loop back to ask the user again.
                sys.stderr.write(_(u"Error while trying to write:\n"))
                traceback.print_exc()
                self.SetFocus()
                self.displayErrorMessage(_(u'Error while writing to '
                        'database'), e)
        finally:
            self.dropStatusMessageByKey("reconnect")
def lostAccess(self, exc):
    """Route a lost-database-access exception to the proper handler.

    Read-access errors and unknown exception types go to
    lostReadAccess(); write-access errors go to lostWriteAccess().
    """
    if isinstance(exc, DbReadAccessError):
        handler = self.lostReadAccess
    elif isinstance(exc, DbWriteAccessError):
        handler = self.lostWriteAccess
    else:
        # Unknown error type: treat conservatively as lost read access.
        handler = self.lostReadAccess
    handler(exc)
def lostReadAccess(self, exc):
    """
    Called if read access was lost during an operation
    """
    if self.getWikiDocument().getReadAccessFailed():
        # Was already handled before -> nothing more to do
        return

    self.SetFocus()
    message = _(u"Database connection error: %s.\n"
            u"Try to re-establish, then run \"Wiki\"->\"Reconnect\"") % unicode(exc)
    wx.MessageBox(message, _(u'Connection lost'), wx.OK, self)

    # wd.setWriteAccessFailed(True) ?
    self.getWikiDocument().setReadAccessFailed(True)
def lostWriteAccess(self, exc):
    """
    Called if write access was lost during an operation
    """
    if self.getWikiDocument().getWriteAccessFailed():
        # Was already handled before -> nothing more to do
        return

    self.SetFocus()
    message = _(u"No write access to database: %s.\n"
            u" Try to re-establish, then run \"Wiki\"->\"Reconnect\"") % unicode(exc)
    wx.MessageBox(message, _(u'Connection lost'), wx.OK, self)

    self.getWikiDocument().setWriteAccessFailed(True)
def tryAutoReconnect(self):  # TODO ???
    """
    Try reconnect after an error, if not already tried automatically
    """
    wikiDoc = self.getWikiDocument()
    if wikiDoc is None or wikiDoc.getAutoReconnectTriedFlag():
        # No document, or an automatic reconnect was already attempted
        return False

    self.showStatusMessage(_(u"Trying to reconnect ..."), 0,
            "reconnect")
    try:
        try:
            wikiDoc.setNoAutoSaveFlag(True)
            wikiDoc.reconnect()
            wikiDoc.setNoAutoSaveFlag(False)
            return True
        except:
            # Deliberately broad: any reconnect failure is reported but
            # must not propagate out of an automatic retry.
            sys.stderr.write(_(u"Error while trying to reconnect:") + u"\n")
            traceback.print_exc()
    finally:
        self.dropStatusMessageByKey("reconnect")

    return False
def openFuncPage(self, funcTag, **evtprops):
    """Open a functional page in the current presenter, creating a tab if none exists."""
    presenter = self.getCurrentDocPagePresenter()
    if presenter is None:
        presenter = self.createNewDocPagePresenterTab()
    presenter.openFuncPage(funcTag, **evtprops)
def openWikiPage(self, wikiWord, addToHistory=True,
        forceTreeSyncFromRoot=False, forceReopen=False, **evtprops):
    """Open the given wiki word in the current presenter tab.

    Creates a new tab if none is active and brings the presenter to the
    foreground. Returns None if access was lost while opening.
    """
    if not self.requireReadAccess():
        return
    try:
        ## _prof.start()
        dpp = self.getCurrentDocPagePresenter()
        if dpp is None:
            dpp = self.createNewDocPagePresenterTab()
        dpp.openWikiPage(wikiWord, addToHistory, forceTreeSyncFromRoot,
                forceReopen, **evtprops)
        self.getMainAreaPanel().showPresenter(dpp)
        ## _prof.stop()
    except (WikiFileNotFoundException, IOError, OSError, DbAccessError), e:
        self.lostAccess(e)
        return None
def saveCurrentDocPage(self, force=False):
    """Save the doc page shown in the active presenter, if there is one."""
    presenter = self.getCurrentDocPagePresenter()
    if presenter is not None:
        presenter.saveCurrentDocPage(force)
def activatePageByUnifiedName(self, unifName, tabMode=0, firstcharpos=-1,
        charlength=-1):
    """
    tabMode -- 0:Same tab; 2: new tab in foreground; 3: new tab in background; 6: New Window

    firstcharpos/charlength select a character range inside the page;
    -1 means "no selection". Returns the presenter used, or None if
    access was lost.
    """
    # open the wiki page
    if tabMode & 2:
        if tabMode == 6:
            # New Window
            #??
            #presenter = self.presenter.getMainControl().\
            #        createNewDocPagePresenterTabInNewFrame()
            presenter = self.createNewDocPagePresenterTabInNewFrame()
        else:
            # New tab
            presenter = self.createNewDocPagePresenterTab()
    else:
        # Same tab
        presenter = self.getCurrentDocPagePresenter()
        if presenter is None:
            presenter = self.createNewDocPagePresenterTab()

    try:
        if firstcharpos != -1:
            presenter.openDocPage(unifName, motionType="random",
                    firstcharpos=firstcharpos, charlength=charlength)
        else:
            presenter.openDocPage(unifName, motionType="random")
    except WikiFileNotFoundException, e:
        self.lostAccess(e)
        return None

    if not tabMode & 1:
        # Show in foreground (if presenter is in other window, this does nothing)
        self.getMainAreaPanel().showPresenter(presenter)

    return presenter
def saveAllDocPages(self, force = False, async=False):
    """Fire the "saving all pages" event so listeners persist their pages.

    NOTE(review): this is Python 2 code; `async` became a reserved word
    in Python 3.7 and the parameter appears unused here.
    """
    if not self.requireWriteAccess():
        return
    try:
        self.fireMiscEventProps({"saving all pages": None, "force": force})
        self.refreshPageStatus()
    except (IOError, OSError, DbAccessError), e:
        self.lostAccess(e)
        raise
def saveDocPage(self, page, text=None):
    """
    Save page unconditionally
    """
    if page is None:
        return False

    if page.isReadOnlyEffect():
        return True  # return False?

    if not self.requireWriteAccess():
        return

    self.showStatusMessage(_(u"Saving page"), 0, "saving")
    try:
        # Test if editor is active
        if page.getTxtEditor() is None:
            # No editor -> nothing to do
            return False

        # text = page.getLiveText()
        word = page.getWikiWord()
        if word is not None:
            # trigger hooks
            self.hooks.savingWikiWord(self, word)

        # Loop so a failed write can be retried after lost-access handling
        while True:
            try:
                if word is not None:
                    # only for real wiki pages
                    # TODO Enable support for AGAs again
                    # page.save(self.getActiveEditor().cleanAutoGenAreas(text))
                    # page.update(self.getActiveEditor().updateAutoGenAreas(text))   # ?
                    page.writeToDatabase()
                    self.attributeChecker.initiateCheckPage(page)
                    # trigger hooks
                    self.hooks.savedWikiWord(self, word)
                else:
                    page.writeToDatabase()

                self.getWikiData().commit()
                return True
            except (IOError, OSError, DbAccessError), e:
                self.lostAccess(e)
                raise
    finally:
        self.dropStatusMessageByKey("saving")
def deleteWikiWord(self, wikiWord):
    """Move the page for wikiWord to the trashcan, if it exists and
    write access can be (re-)established."""
    wikiDoc = self.getWikiDocument()
    if wikiWord and self.requireWriteAccess():
        try:
            if wikiDoc.isDefinedWikiLinkTerm(wikiWord):
                page = wikiDoc.getWikiPage(wikiWord)
                page.deletePageToTrashcan()
        except (IOError, OSError, DbAccessError), e:
            self.lostAccess(e)
            raise
def renameWikiWord(self, wikiWord, toWikiWord, modifyText, processSubpages):
    """
    Renames current wiki word to toWikiWord.
    Returns True if renaming was done successful.

    modifyText -- Should the text of links to the renamed page be
            modified? (This text replacement works unreliably)
    processSubpages -- Should subpages be renamed as well?
    """
    if wikiWord is None or not self.requireWriteAccess():
        return False

    wikiDoc = self.getWikiDocument()
    try:
        if processSubpages:
            renameSeq = wikiDoc.buildRenameSeqWithSubpages(wikiWord,
                    toWikiWord)
        else:
            renameSeq = [(wikiWord, toWikiWord)]

        # Pages must be on disk before the database-level rename runs.
        self.saveAllDocPages()

        # TODO Don't recycle variable names!
        for wikiWord, toWikiWord in renameSeq:
            if wikiWord == wikiDoc.getWikiName():
                # Renaming of root word = renaming of wiki config file
                wikiConfigFilename = wikiDoc.getWikiConfigPath()
                self.removeFromWikiHistory(wikiConfigFilename)
                # self.wikiHistory.remove(wikiConfigFilename)
                wikiDoc.renameWikiWord(wikiWord, toWikiWord,
                        modifyText)
                # Store some additional information
                self.lastAccessedWiki(wikiDoc.getWikiConfigPath())
            else:
                wikiDoc.renameWikiWord(wikiWord, toWikiWord,
                        modifyText)

        return True
    except (IOError, OSError, DbAccessError), e:
        self.lostAccess(e)
        raise
    except WikiDataException, e:
        # Rename-specific failure: report it and signal failure to caller.
        traceback.print_exc()
        self.displayErrorMessage(unicode(e))
        return False
def findCurrentWordInTree(self):
    """Expand and select the currently shown wiki word in the tree control.

    Tree synchronization is best-effort: any failure is logged and
    swallowed.
    """
    try:
        currentWord = self.getCurrentWikiWord()
        self.tree.buildTreeForWord(currentWord, selectNode=True)
    except Exception:
        traceback.print_exc()
def makeRelUrlAbsolute(self, relurl, addSafe=''):
    """
    Return the absolute file: URL for a rel: URL
    TODO: Remove
    """
    import warnings
    deprecationMsg = ("PersonalWikiFrame.makeRelUrlAbsolute() deprecated, use "
            "WikiDocument.makeRelUrlAbsolute()")
    warnings.warn(deprecationMsg, DeprecationWarning, stacklevel=2)

    return self.getWikiDocument().makeRelUrlAbsolute(relurl, addSafe=addSafe)
def makeAbsPathRelUrl(self, absPath, addSafe=''):
    """
    Return the rel: URL for an absolute file path or None if
    a relative URL can't be created.
    TODO: Remove
    """
    import warnings
    # stacklevel=2 makes the warning point at the caller, consistent with
    # the sibling makeRelUrlAbsolute() deprecation warning.
    warnings.warn("PersonalWikiFrame.makeAbsPathRelUrl() deprecated, use "
            "WikiDocument.makeAbsPathRelUrl()", DeprecationWarning,
            stacklevel=2)
    return self.getWikiDocument().makeAbsPathRelUrl(absPath, addSafe=addSafe)
def launchUrl(self, link):
if link.startswith(u"wikirel://"):
# Relative wiki link
link = self.getWikiDocument().makeRelUrlAbsolute(link)
elif link.startswith(u"rel://"):
# Relative link
link = self.getWikiDocument().makeRelUrlAbsolute(link)
if not link.startswith(u"wiki:"):
try:
OsAbstract.startFile(self, link)
return True
except Exception, e:
traceback.print_exc()
self.displayErrorMessage(_(u"Couldn't start file"), e)
return False
else:
# Open wiki
filePath, wikiWordToOpen, anchorToOpen = StringOps.wikiUrlToPathWordAndAnchor(
link)
if not os.path.exists(filePath):
self.showStatusMessage(
uniToGui(_(u"Couldn't open wiki: %s") % link), -2)
return | |
"""CFNgin config."""
import copy
import logging
import sys
import warnings
from io import StringIO
from string import Template
import yaml
from schematics import Model
from schematics.exceptions import BaseError as SchematicsError
from schematics.exceptions import UndefinedValueError, ValidationError
from schematics.types import (
BaseType,
BooleanType,
DictType,
ListType,
ModelType,
StringType,
)
from six import text_type
from runway.util import DOC_SITE
from .. import exceptions
from ..lookups import register_lookup_handler
from ..util import SourceProcessor, merge_map, yaml_to_ordered_dict
from .translators import * # noqa pylint: disable=wildcard-import
LOGGER = logging.getLogger(__name__)
def render_parse_load(raw_config, environment=None, validate=True):
    """Encapsulate the render -> parse -> validate -> load process.

    Args:
        raw_config (str): The raw CFNgin configuration string.
        environment (Optional[Dict[str, Any]]): Any environment values that
            should be passed to the config.
        validate (bool): If provided, the config is validated before being
            loaded.

    Returns:
        :class:`Config`: The parsed CFNgin config.

    """
    rendered = process_remote_sources(render(raw_config, environment),
                                      environment)
    config = parse(rendered)

    # Legacy fallback: a namespace supplied via the environment (instead of
    # the config itself) is still honored, with a deprecation warning.
    if config.namespace is None:
        legacy_namespace = environment.get("namespace")
        if legacy_namespace:
            LOGGER.warning(
                "specifying namespace in the environment is "
                "deprecated; to learn how to specify it correctly "
                "visit %s/page/cfngin/configuration.html#namespace",
                DOC_SITE,
            )
            config.namespace = legacy_namespace

    if validate:
        config.validate()

    return load(config)
def render(raw_config, environment=None):
    """Render a config, using it as a template with the environment.

    Args:
        raw_config (str): The raw CFNgin configuration string.
        environment (Optional[Dict[str, Any]]): Any environment values that
            should be passed to the config.

    Returns:
        str: The CFNgin configuration populated with any values passed from
        the environment.

    """
    env = environment if environment else {}
    template = Template(raw_config)

    try:
        substituted = template.substitute(**env)
    except KeyError as err:
        raise exceptions.MissingEnvironment(err.args[0])
    except ValueError:
        # Support "invalid" placeholders for lookup placeholders: fall back
        # to a best-effort substitution that leaves them untouched.
        substituted = template.safe_substitute(**env)

    if not isinstance(substituted, text_type):
        substituted = substituted.decode("utf-8")

    return substituted
def parse(raw_config):
    """Parse a raw yaml formatted CFNgin config.

    Args:
        raw_config (str): The raw CFNgin configuration string in yaml format.

    Returns:
        :class:`Config`: The parsed CFNgin config.

    """
    # Convert any applicable dictionaries back into lists. This is necessary
    # due to the move from lists for these top level config values to either
    # lists or OrderedDicts. Eventually we should probably just make them
    # OrderedDicts only.
    config_dict = yaml_to_ordered_dict(raw_config)

    if config_dict:
        for section in ("stacks", "pre_build", "post_build",
                        "pre_destroy", "post_destroy"):
            section_value = config_dict.get(section)
            if not isinstance(section_value, dict):
                continue
            converted = []
            for entry_name, entry in section_value.items():
                entry_copy = copy.deepcopy(entry)
                if section == "stacks":
                    # Stack entries keyed by name carry the name inside
                    # the stack definition once converted to a list.
                    entry_copy["name"] = entry_name
                converted.append(entry_copy)
            config_dict[section] = converted

    # Top-level excess keys are removed by Config._convert, so enabling
    # strict mode is fine here.
    try:
        return Config(config_dict, strict=True)
    except SchematicsError as err:
        raise exceptions.InvalidConfig(err.errors)
def load(config):
    """Load a CFNgin configuration by modifying syspath, loading lookups, etc.

    Args:
        config (:class:`Config`): The CFNgin config to load.

    Returns:
        :class:`Config`: The CFNgin config provided above.

    """
    if config.sys_path:
        LOGGER.debug("appending to sys.path: %s", config.sys_path)
        sys.path.append(config.sys_path)
        LOGGER.debug("sys.path: %s", sys.path)

    if config.lookups:
        for lookup_name, lookup_handler in config.lookups.items():
            register_lookup_handler(lookup_name, lookup_handler)

    return config
def dump(config):
    """Dump a CFNgin Config object as yaml.

    Args:
        config (:class:`Config`): The CFNgin Config object.

    Returns:
        str: The yaml formatted CFNgin Config.

    """
    dump_options = {
        "default_flow_style": False,
        "encoding": "utf-8",
        "allow_unicode": True,
    }
    return yaml.safe_dump(config.to_primitive(), **dump_options)
def process_remote_sources(raw_config, environment=None):
    """Stage remote package sources and merge in remote configs.

    Args:
        raw_config (str): The raw CFNgin configuration string.
        environment (Optional[Dict, Any]): Any environment values that should
            be passed to the config.

    Returns:
        str: The raw CFNgin configuration string.

    """
    config = yaml.safe_load(raw_config)
    if config and config.get("package_sources"):
        processor = SourceProcessor(
            sources=config["package_sources"],
            cfngin_cache_dir=config.get(
                "cfngin_cache_dir", config.get("stacker_cache_dir")
            ),
        )
        processor.get_package_sources()
        if processor.configs_to_merge:
            for i in processor.configs_to_merge:
                LOGGER.debug("merging in remote config: %s", i)
                # BUGFIX: use a context manager so the remote config file
                # handle is closed deterministically instead of leaking
                # (the original passed a bare open(i) to safe_load).
                with open(i) as remote_file:
                    remote_config = yaml.safe_load(remote_file)
                config = merge_map(remote_config, config)
            # Call the render again as the package_sources may have merged in
            # additional environment lookups
            if not environment:
                environment = {}
            return render(str(config), environment)
    return raw_config
class AnyType(BaseType):
    """Any type.

    A ``BaseType`` subclass with no added conversion or validation, used
    for fields whose values may be of any type (e.g. hook args, stack
    parameters/variables).
    """
class LocalPackageSource(Model):
    """Local package source model.

    Package source located on a local disk.

    Attributes:
        configs (ListType): List of CFNgin config paths to execute.
        paths (ListType): List of paths to append to ``sys.path``.
        source (StringType): Source path (required).

    """

    configs = ListType(StringType, serialize_when_none=False)
    paths = ListType(StringType, serialize_when_none=False)
    source = StringType(required=True)
class GitPackageSource(Model):
    """Git package source model.

    Package source located in a git repo.

    Attributes:
        branch (StringType): Branch name.
        commit (StringType): Commit hash.
        configs (ListType): List of CFNgin config paths to execute.
        paths (ListType): List of paths to append to ``sys.path``.
        tag (StringType): Git tag.
        uri (StringType): Remote git repo URI (required).

    """

    branch = StringType(serialize_when_none=False)
    commit = StringType(serialize_when_none=False)
    configs = ListType(StringType, serialize_when_none=False)
    paths = ListType(StringType, serialize_when_none=False)
    tag = StringType(serialize_when_none=False)
    uri = StringType(required=True)
class S3PackageSource(Model):
    """S3 package source model.

    Package source located in AWS S3.

    Attributes:
        bucket (StringType): AWS S3 bucket name (required).
        configs (ListType): List of CFNgin config paths to execute.
        key (StringType): Object key (required). The object should be a
            zip file.
        paths (ListType): List of paths to append to ``sys.path``.
        requester_pays (BooleanType): AWS S3 requester pays option.
        use_latest (BooleanType): Use the latest version of the object.

    """

    bucket = StringType(required=True)
    configs = ListType(StringType, serialize_when_none=False)
    key = StringType(required=True)
    paths = ListType(StringType, serialize_when_none=False)
    requester_pays = BooleanType(serialize_when_none=False)
    use_latest = BooleanType(serialize_when_none=False)
class PackageSources(Model):
    """Package sources model.

    Attributes:
        git (ListType): List of package sources located in git repos.
        local (ListType): List of package sources located on local disk.
        s3 (ListType): List of package sources located in AWS S3.

    """

    git = ListType(ModelType(GitPackageSource))
    local = ListType(ModelType(LocalPackageSource))
    s3 = ListType(ModelType(S3PackageSource))
class Hook(Model):
    """Hook module.

    Attributes:
        args (DictType): Mapping of arbitrary argument values for the hook.
        data_key (StringType): Optional key associated with the hook.
        enabled (BooleanType): Whether the hook is enabled.
            Default: ``True``.
        path (StringType): Path identifying the hook. Required.
        required (BooleanType): Whether the hook is required.
            Default: ``True``.

    """

    args = DictType(AnyType)
    data_key = StringType(serialize_when_none=False)
    enabled = BooleanType(default=True)
    path = StringType(required=True)
    required = BooleanType(default=True)
class Target(Model):
    """Target model.

    Attributes:
        name (StringType): Target name. Required.
        required_by (ListType): Names of items that require this target.
        requires (ListType): Names of items this target requires.

    """

    name = StringType(required=True)
    required_by = ListType(StringType, serialize_when_none=False)
    requires = ListType(StringType, serialize_when_none=False)
class Stack(Model):
    """Stack model.

    Attributes:
        class_path (StringType): Python path to the blueprint class.
            Mutually exclusive with ``template_path``.
        description (StringType): Stack description.
        enabled (BooleanType): Whether the stack is enabled.
            Default: ``True``.
        in_progress_behavior (StringType): Behavior when the stack is
            already in progress.
        locked (BooleanType): Locked (read-only) stack. Default: ``False``.
        name (StringType): Stack definition name. Required.
        parameters (DictType): DEPRECATED -- rejected by
            ``validate_parameters``; use ``variables`` instead.
        profile (StringType): AWS profile name.
        protected (BooleanType): Default: ``False``.
        region (StringType): AWS region.
        required_by (ListType): Names of stacks that require this one.
        requires (ListType): Names of stacks this one requires.
        stack_name (StringType): Explicit CloudFormation stack name.
        stack_policy_path (StringType): Path to a stack policy.
        tags (DictType): Tags applied to the stack.
        template_path (StringType): Path to a raw template. Mutually
            exclusive with ``class_path``.
        termination_protection (BooleanType): Default: ``False``.
        variables (DictType): Variables passed to the blueprint/template.

    """

    class_path = StringType(serialize_when_none=False)
    description = StringType(serialize_when_none=False)
    enabled = BooleanType(default=True)
    in_progress_behavior = StringType(serialize_when_none=False)
    locked = BooleanType(default=False)
    name = StringType(required=True)
    parameters = DictType(AnyType, serialize_when_none=False)
    profile = StringType(serialize_when_none=False)
    protected = BooleanType(default=False)
    region = StringType(serialize_when_none=False)
    required_by = ListType(StringType, serialize_when_none=False)
    requires = ListType(StringType, serialize_when_none=False)
    stack_name = StringType(serialize_when_none=False)
    stack_policy_path = StringType(serialize_when_none=False)
    tags = DictType(StringType, serialize_when_none=False)
    template_path = StringType(serialize_when_none=False)
    termination_protection = BooleanType(default=False)
    variables = DictType(AnyType, serialize_when_none=False)
    def validate_class_path(self, data, value):
        """Validate class path.

        ``class_path`` and ``template_path`` are mutually exclusive; a stack
        must define exactly one unless it is locked.
        """
        if value and data["template_path"]:
            raise ValidationError(
                "template_path cannot be present when class_path is provided."
            )
        self.validate_stack_source(data)
    def validate_template_path(self, data, value):
        """Validate template path.

        Mirror of :meth:`validate_class_path` for the other field.
        """
        if value and data["class_path"]:
            raise ValidationError(
                "class_path cannot be present when template_path is provided."
            )
        self.validate_stack_source(data)
    @staticmethod
    def validate_stack_source(data):
        """Validate that the stack has a template source (unless locked)."""
        # Locked stacks don't actually need a template, since they're
        # read-only.
        if data["locked"]:
            return
        if not (data["class_path"] or data["template_path"]):
            raise ValidationError("class_path or template_path is required.")
    def validate_parameters(self, data, value):  # pylint: disable=no-self-use
        """Reject the deprecated ``parameters`` key with a migration hint."""
        if value:
            stack_name = data["name"]
            raise ValidationError(
                "DEPRECATION: Stack definition %s contains "
                "deprecated 'parameters', rather than 'variables'. You are"
                " required to update your config. See "
                "https://docs.onica.com/projects/runway/en/release/cfngin/"
                "config.html#variables for additional information." % stack_name
            )
        return value
class Config(Model):
"""Python representation of a CFNgin config file.
This is used internally by CFNgin to parse and validate a yaml formatted
CFNgin configuration file, but can also be used in scripts to generate a
CFNgin config file before handing it off to CFNgin to build/destroy.
Example::
from runway.cfngin.config import dump, Config, Stack
vpc = Stack({
"name": "vpc",
"class_path": "blueprints.VPC"})
config = Config()
config.namespace = "prod"
config.stacks = [vpc]
print dump(config)
Attributes:
cfngin_bucket (StringType): Bucket to use for CFNgin resources (e.g.
CloudFormation templates). May be an empty string.
cfngin_bucket_region (StringType): Explicit region to use for
``cfngin_bucket``.
cfngin_cache_dir (StringType): Local directory to use for caching.
log_formats (DictType): Custom formatting for log messages.
lookups (DictType): Register custom lookups.
mappings (DictType): Mappings that will be added to all stacks.
namespace (StringType): Namespace to prepend to everything.
namespace_delimiter (StringType): Character used to separate
``namespace`` and anything it prepends.
package_sources (ModelType): Remote source locations.
persistent_graph_key (str): S3 object | |
# ooipy/tools/ooiplotlib.py
"""
This modules provides functions for plotting spectrograms and power
spectral density estimates. It extends the matplotlib.pyplot.plot
function.
"""
# Import all dependencies
import datetime
import matplotlib
import matplotlib.dates as mdates
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import Normalize
from obspy.core import UTCDateTime
from ooipy.ctd.basic import CtdProfile
from ooipy.hydrophone.basic import HydrophoneData, Psd, Spectrogram
def plot(*args, scalex=True, scaley=True, data=None, **kwargs):
    """
    An extension to the matplotlib.pyplot.plot function that allows for
    the nice plotting of :class:`ooipy.hydrophone.basic.Spectrogram`
    and :class:`ooipy.hydrophone.basic.Psd` objects. For a description
    of the input parameters, please refer to the matplotlib
    documentation.

    >>> from ooipy.request import hydrophone_request
    >>> from ooipy.tools import ooiplotlib as ooiplt
    >>> # pull OOI data and compute spectrogram and PSD
    >>> start_time = datetime.datetime(2017,3,10,0,0,0)
    >>> end_time = datetime.datetime(2017,3,10,0,5,0)
    >>> node = 'PC01A'
    >>> hydrophone_data = hydrophone_request.get_acoustic_data(
        start_time, end_time, node)
    >>> hydrophone_data.compute_spectrogram()
    >>> hydrophone_data.compute_psd_welch()
    >>> # plot spectrogram and change some default plot settings
    >>> ooiplt.plot(hydrophone_data.spectrogram)
    >>> plt.title('my spectrogram')
    >>> plt.ylim([0, 20000])
    >>> plt.show()
    >>> # plot PSD and change some default plot settings
    >>> ooiplt.plot(hydrophone_data.psd)
    >>> plt.title('my PSD')
    >>> plt.show()

    Parameters
    ----------
    args :
        object (either :class:`ooipy.hydrophone.basic.Spectrogram` or
        :class:`ooipy.hydrophone.basic.Psd`) or array to be plotted
    scalex :
        see matplotlib documentation
    scaley :
        see matplotlib documentation
    data :
        see matplotlib documentation
    kwargs :
        see matplotlib documentation,
        :func:`ooipy.tools.ooiplotlib.plot_spectrogram`, and
        :func:`ooipy.tools.ooiplotlib.plot_psd` for possible arguments
    """
    # Ordered dispatch table: first matching OOI type wins; anything else
    # falls through to the plain matplotlib plot call.
    dispatch = (
        (Spectrogram, plot_spectrogram),
        (Psd, plot_psd),
        (HydrophoneData, plot_timeseries),
        (CtdProfile, plot_ctd_profile),
    )
    for item in args:
        for ooi_type, handler in dispatch:
            if isinstance(item, ooi_type):
                handler(item, **kwargs)
                break
        else:
            # only forward 'data' when the caller actually supplied it
            extra = {"data": data} if data is not None else {}
            plt.gca().plot(item, scalex=scalex, scaley=scaley, **extra, **kwargs)
def plot_spectrogram(spec_obj, **kwargs):
    """
    Plot a :class:`ooipy.hydrophone.basic.Spectrogram` object using the
    matplotlib package.

    Parameters
    ----------
    spec_obj : :class:`ooipy.hydrophone.basic.Spectrogram`
        spectrogram object to be plotted
    kwargs :
        Any keyword argument not listed below is forwarded to
        ``Axes.contourf`` (see the matplotlib documentation). The
        arguments consumed by this function are

        * plot : bool
            If False, figure will be closed. Can save time if only
            saving but not plotting is desired. Default is True
        * save : bool
            If True, figure will be saved under **filename**. Default is
            False
        * filename : str
            filename of figure if saved. Default is "spectrogram.png"
        * title : str
            figure title. Default is "Spectrogram"
        * xlabel, ylabel : str
            axis labels. Defaults are "time" and "frequency"
        * xlabel_rot : int or float
            rotation angle (deg) of x-labels. Default is 70
        * xlabel_format : str
            format of the xlabel if the time array contains datetime
            objects
        * fmin : int or float
            minimum frequency. Default is 0
        * fmax : int or float
            maximum frequency. Default is 32000
        * vmin : int or float
            lower limit of level axis (colormap). Default is 20
        * vmax : int or float
            upper limit of level axis (colormap). Default is 80
        * vdelta : int or float
            resolution of level axis (colormap). Default is 1
        * vdelta_cbar : int or float
            label distance of colorbar. Default is 5
        * figsize : (int, int)
            width and height of figure, Default is (16, 9)
        * res_reduction_time : int
            reduction factor of time domain resolution. This can
            facilitate faster plotting of large spectrogram objects.
            Default is 1 (no reduction)
        * res_reduction_freq : int
            reduction factor of frequency domain resolution. This can
            facilitate faster plotting of large spectrogram objects.
            Default is 1 (no reduction)
        * dpi : int
            dots per inch, passed to matplotlib figure.savefig()
        * fontsize : int
            fontsize of saved plot, passed to matplotlib figure
        * extend_type : str
            {'neither', 'both', 'min', 'max'} If not 'neither', make pointed
            end(s) for out-of- range values. These are set for a given colormap
            using the colormap set_under and set_over methods.
        * logy : bool
            If True, the y (frequency) axis is plotted using log scale
    """
    # Defaults for every option consumed by this function.
    defaults = {
        "plot": True,
        "save": False,
        "filename": "spectrogram.png",
        "title": "Spectrogram",
        "xlabel": "time",
        "xlabel_rot": 70,
        "xlabel_format": "%y-%m-%d %H:%M",
        "ylabel": "frequency",
        "fmin": 0.0,
        "fmax": 32000.0,
        "vmin": 20.0,
        "vmax": 80.0,
        "vdelta": 1.0,
        "vdelta_cbar": 5.0,
        "figsize": (16, 9),
        "res_reduction_time": 1,
        "res_reduction_freq": 1,
        "dpi": 100,
        "fontsize": 22,
        "extend_type": "neither",
        "logy": False,
    }
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    # BUG FIX: previously the full kwargs dict (including the option keys
    # above, e.g. plot=True) was forwarded to ax.contourf(), which rejects
    # unknown keyword arguments. Strip the consumed options and forward
    # only genuine matplotlib kwargs.
    contourf_kwargs = {k: v for k, v in kwargs.items() if k not in defaults}
    # set backend for plotting/saving:
    if not kwargs["plot"]:
        matplotlib.use("Agg")
    font = {"size": kwargs["fontsize"]}
    matplotlib.rc("font", **font)
    # reduce resolution in time and frequency
    v = spec_obj.values[:: kwargs["res_reduction_time"], :: kwargs["res_reduction_freq"]]
    if len(spec_obj.time) != len(spec_obj.values):
        # time axis length inconsistent with data: fall back to sample index
        t = np.linspace(
            0,
            len(spec_obj.values) - 1,
            int(len(spec_obj.values) / kwargs["res_reduction_time"]),
        )
    else:
        t = spec_obj.time[:: kwargs["res_reduction_time"]]
    if len(spec_obj.freq) != len(spec_obj.values[0]):
        # frequency axis length inconsistent with data: fall back to bin index
        f = np.linspace(
            0,
            len(spec_obj.values[0]) - 1,
            int(len(spec_obj.values[0]) / kwargs["res_reduction_freq"]),
        )
    else:
        f = spec_obj.freq[:: kwargs["res_reduction_freq"]]
    # plot spectrogram object
    cbarticks = np.arange(kwargs["vmin"], kwargs["vmax"] + kwargs["vdelta"], kwargs["vdelta"])
    fig, ax = plt.subplots(figsize=kwargs["figsize"])
    ax.contourf(
        t,
        f,
        np.transpose(v),
        cbarticks,
        norm=Normalize(vmin=kwargs["vmin"], vmax=kwargs["vmax"]),
        cmap=plt.cm.jet,
        extend=kwargs["extend_type"],
        **contourf_kwargs
    )
    plt.ylabel(kwargs["ylabel"])
    plt.xlabel(kwargs["xlabel"])
    # Adjust y limits and yscale
    if kwargs["logy"]:
        plt.yscale("log")
        if kwargs["fmin"] == 0:
            # log scale cannot start at 0
            plt.ylim([1, kwargs["fmax"]])
        else:
            plt.ylim([kwargs["fmin"], kwargs["fmax"]])
    else:
        plt.ylim([kwargs["fmin"], kwargs["fmax"]])
    plt.xticks(rotation=kwargs["xlabel_rot"])
    plt.title(kwargs["title"])
    # Build Colorbar
    cmap = matplotlib.cm.jet
    norm = matplotlib.colors.BoundaryNorm(cbarticks, cmap.N, extend=kwargs["extend_type"])
    plt.colorbar(
        matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap),
        ax=ax,
        ticks=np.arange(kwargs["vmin"], kwargs["vmax"] + kwargs["vdelta"], kwargs["vdelta_cbar"]),
        label=r"spectral level (dB rel $1 \mathrm{\frac{μ Pa^2}{Hz}}$)",
        pad=0.03,
    )
    plt.tick_params(axis="y")
    # Make tight layout
    plt.tight_layout()
    if isinstance(t[0], datetime.datetime) or isinstance(t[0], UTCDateTime):
        ax.xaxis.set_major_formatter(mdates.DateFormatter(kwargs["xlabel_format"]))
    if kwargs["save"]:
        plt.savefig(kwargs["filename"], bbox_inches="tight", dpi=kwargs["dpi"])
    if not kwargs["plot"]:
        plt.close(fig)
def plot_psd(psd_obj, **kwargs):
"""
Plot a :class:`ooipy.hydrophone.basic.Psd` object using the
matplotlib package.
Parameters
----------
spec_obj : :class:`ooipy.hydrophone.basic.Psd`
Psd object to be plotted
kwargs :
See matplotlib doccumentation for list of arguments. Additional
arguments are
* plot : bool
If False, figure will be closed. Can save time if only
saving but not plotting is desired. Default is True
* save : bool
If True, figure will be saved under **filename**. Default is
False
* new_fig : bool
If True, matplotlib will create a new fugure. Default is
True
* filename : str
filename of figure if saved. Default is "spectrogram.png"
* xlabel_rot : int or float
rotation angle (deg) of x-labels. Default is 70
* fmin : int or float
minimum frequency. Default is 0
* fmax : int or float
maximum frequency. Default is 32000
* vmin : int or float
lower limit of level axis (colormap). Default is 20
* vmax : int or float
upper limit of level axis (colormap). Default is 80
* figsize : (int, int)
width and height of figure. Default is (16, 9)
* dpi : int
dots per inch, passed to matplotlib figure.savefig()
* fontsize : int
fontsize of saved plot, passed to matplotlib figure
"""
# check for keys
if "plot" not in kwargs:
kwargs["plot"] = True
if "save" not in kwargs:
kwargs["save"] = False
if "new_fig" not in kwargs:
kwargs["new_fig"] = True
if "filename" not in kwargs:
kwargs["filename"] = "psd.png"
if "title" not in kwargs:
kwargs["title"] = "PSD"
if "xlabel" not in kwargs:
kwargs["xlabel"] = "frequency"
if "xlabel_rot" not in kwargs:
kwargs["xlabel_rot"] = 0
if "ylabel" not in kwargs:
kwargs["ylabel"] = "spectral level"
if "fmin" not in kwargs:
kwargs["fmin"] = 0.0
if "fmax" not in kwargs:
kwargs["fmax"] = 32000.0
if "vmin" not in kwargs:
kwargs["vmin"] = 20.0
if "vmax" not in kwargs:
kwargs["vmax"] = 80.0
if "figsize" not in kwargs:
kwargs["figsize"] = (16, 9)
if "dpi" not in kwargs:
kwargs["dpi"] = 100
if "fontsize" not in kwargs:
kwargs["fontsize"] = 22
# set backend for plotting/saving:
if not kwargs["plot"]:
matplotlib.use("Agg")
font = {"size": kwargs["fontsize"]}
matplotlib.rc("font", **font)
if len(psd_obj.freq) != len(psd_obj.values):
f = np.linspace(0, len(psd_obj.values) - 1, len(psd_obj.values))
else:
f = psd_obj.freq
# plot PSD object
if kwargs["new_fig"]:
fig, ax | |
from solution import Solution
class Board:
    """A bingo board: marks draws and reports when a row/column is complete."""

    def __init__(self, rows_):
        self.rows_ = rows_
        self.draws = []
        # Pre-compute every winning line (rows + columns); diagonals don't count.
        self.all = self.rows + self.columns
        self.won = False

    @property
    def rows(self):
        return self.rows_

    @property
    def columns(self):
        return [list(col) for col in zip(*self.rows_)]

    def draw(self, draw):
        """Mark ``draw``; return the board score if this completes a line, else None."""
        self.draws.append(draw)
        if self.is_win():
            score = self.score(draw)
            print(f"#####\t{self} is a win, {draw}, {score}")
            return score
        return None

    @property
    def sum_all_unmarked_numbers(self):
        return sum(
            number
            for sublist in self.rows
            for number in sublist
            if number not in self.draws
        )

    def score(self, draw):
        """Puzzle score: sum of unmarked numbers times the winning draw."""
        return self.sum_all_unmarked_numbers * draw

    def is_win(self):
        """Return True if *any* row or column is fully marked.

        Fix: the original returned the win status of whichever line was
        checked last (and risked an unbound local when no line matched)
        instead of reporting whether any line had won.
        """
        won_now = any(all(elem in self.draws for elem in line) for line in self.all)
        if won_now:
            self.won = True
        return won_now

    def __repr__(self):
        return f"""
        {self.draws}
        {self.rows[0]}
        {self.rows[1]}
        {self.rows[2]}
        {self.rows[3]}
        {self.rows[4]}\n\n
        """
class Sol(Solution):
    """--- Day 4: Giant Squid ---
    You're already almost 1.5km (almost a mile) below the surface of the ocean,
    already so deep that you can't see any sunlight. What you can see, however,
    is a giant squid that has attached itself to the outside of your submarine.
    Maybe it wants to play bingo?
    Bingo is played on a set of boards each consisting of a 5x5 grid of numbers.
    Numbers are chosen at random, and the chosen number is marked on all boards
    on which it appears. (Numbers may not appear on all boards.) If all numbers
    in any row or any column of a board are marked, that board wins.
    (Diagonals don't count.)
    The submarine has a bingo subsystem to help passengers (currently, you and
    the giant squid) pass the time. It automatically generates a random order
    in which to draw numbers and a random set of boards (your puzzle input).
    For example:
    7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1
    22 13 17 11 0
    8 2 23 4 24
    21 9 14 16 7
    6 10 3 18 5
    1 12 20 15 19
    3 15 0 2 22
    9 18 13 17 5
    19 8 7 25 23
    20 11 10 24 4
    14 21 16 12 6
    14 21 17 24 4
    10 16 15 9 19
    18 8 23 26 20
    22 11 13 6 5
    2 0 12 3 7
    After the first five numbers are drawn (7, 4, 9, 5, and 11), there are no
    winners. After the next six numbers are drawn (17, 23, 2, 0, 14, and 21),
    there are still no winners. Finally, 24 is drawn.
    At this point, the third board wins because it has at least one
    complete row or column of marked numbers (in this case, the entire
    top row is marked: 14 21 17 24 4).
    The score of the winning board can now be calculated. Start by
    finding the sum of all unmarked numbers on that board; in this
    case, the sum is 188. Then, multiply that sum by the number that
    was just called when the board won, 24, to get the final score,
    188 * 24 = 4512.
    To guarantee victory against the giant squid, figure out which
    board will win first. What will your final score be if you
    choose that board?
    --- Part Two ---
    On the other hand, it might be wise to try a different strategy:
    let the giant squid win.
    You aren't sure how many bingo boards a giant squid could play at once,
    so rather than waste time counting its arms, the safe thing to do is to
    figure out which board will win last and choose that one. That way,
    no matter which boards it picks, it will win for sure.
    In the above example, the second board is the last to win, which happens
    after 13 is eventually called and its middle column is completely marked.
    If you were to keep playing until this point, the second board would have a
    sum of unmarked numbers equal to 148 for a final score of 148 * 13 = 1924.
    Figure out which board will win last. Once it wins, what would its
    final score be?
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.order = None   # draw order (list of ints), set by clean()
        self.boards = None  # list of Board objects, set by clean()
        self.clean()
        self.winners = {}

    def clean(self):
        """Parse ``self.input`` into the draw order and the list of boards."""
        order, *boards = [x for x in self.input.split("\n\n") if x]
        self.order = [int(x) for x in order.split(",") if x]
        dirty_boards = [x.split("\n") for x in boards if x]
        clean_boards = []
        for dirty_board in dirty_boards:
            clean_board = []
            for dirty_row in dirty_board:
                # split on spaces, drop empties from column alignment padding
                clean_row = [int(item) for item in dirty_row.split(" ") if item]
                if clean_row:
                    clean_board.append(clean_row)
            clean_boards.append(clean_board)
        self.boards = [Board(board) for board in clean_boards]

    def make_draw(self):
        """Play draws in order; return the score of the first board to win."""
        for draw in self.order:
            for board in self.boards:
                score = board.draw(draw)
                if score is not None:
                    return score
        return None

    def p1(self):
        # Intentionally disabled: make_draw() mutates the boards' draw state,
        # which would corrupt the part-two computation in p2().
        pass  # return self.make_draw()

    def p2(self):
        return self.last_to_win()

    def last_to_win(self):
        """Return ``(board, score, draw)`` for the board that wins last.

        Fixes: the result variable was misspelled (``lats_to_win``) and
        could be referenced before assignment when no board ever won; the
        per-draw debug dump of the full board list was removed.
        """
        boards = self.boards.copy()
        draws = self.order.copy()
        last_winner = None
        while draws:
            draw = draws.pop(0)
            print("+++++", draw, "+++++")
            for board in self.boards:
                if board.won:
                    # already finished; don't score it twice
                    continue
                score = board.draw(draw)
                if score is not None:
                    last_winner = (board, score, draw)
                    boards.remove(board)
                    if not boards:
                        return last_winner
        return last_winner

    @property
    def solution(self):
        return f"p1: {self.p1()}\np2: {self.p2()}\n"
# Worked example from the puzzle statement: a comma-separated draw order
# followed by three 5x5 bingo boards separated by blank lines.
test_ = """7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1
22 13 17 11 0
8 2 23 4 24
21 9 14 16 7
6 10 3 18 5
1 12 20 15 19
3 15 0 2 22
9 18 13 17 5
19 8 7 25 23
20 11 10 24 4
14 21 16 12 6
14 21 17 24 4
10 16 15 9 19
18 8 23 26 20
22 11 13 6 5
2 0 12 3 7"""
input_ = """13,47,64,52,60,69,80,85,57,1,2,6,30,81,86,40,27,26,97,77,70,92,43,94,8,78,3,88,93,17,55,49,32,59,51,28,33,41,83,67,11,91,53,36,96,7,34,79,98,72,39,56,31,75,82,62,99,66,29,58,9,50,54,12,45,68,4,46,38,21,24,18,44,48,16,61,19,0,90,35,65,37,73,20,22,89,42,23,15,87,74,10,71,25,14,76,84,5,63,95
88 67 20 19 15
22 76 86 44 73
7 42 6 69 25
12 68 92 21 75
97 45 13 52 70
75 98 24 18 77
17 93 46 49 13
92 56 97 57 66
44 0 65 54 74
23 6 53 42 20
92 94 9 27 41
73 28 62 90 40
78 3 12 37 32
8 86 91 16 30
84 38 68 11 19
51 5 12 76 97
72 31 15 61 71
38 32 55 87 10
91 4 85 84 53
59 79 28 69 23
35 48 10 81 60
25 86 24 43 15
44 55 12 54 62
94 89 95 2 23
64 63 45 50 66
80 87 49 88 39
33 81 95 68 55
83 46 36 41 54
90 74 3 52 7
71 40 35 8 77
34 21 24 8 97
99 23 94 70 9
14 98 2 11 10
16 38 92 13 35
82 25 76 42 39
52 | |
set_resolution(self, resolution):
"""
"""
#TODO verify resolution
self._resolution = resolution
resolution = property(get_resolution, set_resolution)
@classmethod
def from_MIDI_track(cls, meta_track):
"""
Extracts the meta information from a midi track
"""
info = cls()
for e in meta_track:
if isinstance(e, midi.TimeSignatureEvent):
info._time_signature = e
elif isinstance(e, midi.KeySignatureEvent):
info._key_signature = e
elif isinstance(e, midi.SetTempoEvent):
info._tempo = e
elif isinstance(e, midi.SmpteOffsetEvent):
info._smpte_offset = e
elif isinstance(e, midi.InstrumentNameEvent):
info._instrument = e
else:
info._extra.append(e)
return info
def to_dict(self):
"""
"""
ret = {
"tempo": (self._tempo.tick, self._tempo.data),
"time_signature": (self._time_signature.tick, self._time_signature.data),
"key_signature": (self._key_signature.tick, self._key_signature.data),
"smpte_offset": (self._smpte_offset.tick,self._smpte_offset.data),
"instrument": (self._instrument.tick, self._instrument.text, self._instrument.data),
#"extra" : [str(e) for e in self._extra] #extra is not completely supported, it is printed for possible future support, nothing else
}
return ret
@classmethod
def from_dict(cls, meta_dict):
"""
Create an instance from a dict containing the information
"""
ret = cls()
tmp = meta_dict['tempo']
ret._tempo.tick = tmp[0]; ret._tempo.data = tmp[1];
ts = meta_dict['time_signature']
ret._time_signature.tick = ts[0]; ret._time_signature.data = ts[1];
ks = meta_dict['key_signature']
ret._key_signature.tick = ks[0]; ret._key_signature.data = ks[1];
smpte = meta_dict['smpte_offset']
ret._smpte_offset.tick = smpte[0]; ret._smpte_offset.data = smpte[1];
instr = meta_dict['instrument']
ret._instrument.tick = instr[0]; ret._instrument.text = instr[1]; ret._instrument.data = instr[2];
#TODO implement this ... maybe some nice thing that reads text and recreates the events... but this is dangerous for distribution as people can "hack" into the game with modifications to a JSON file
#ret._extra = meta_dict['extra']
return ret
class SongInfo(object):
    """
    Represents a musical piece or song.

    Contains a list of tracks, each track contains a certain kind of
    information; song-wide meta information is kept in ``meta``.
    """

    def __init__(self):
        """Create an empty song with default meta information."""
        self._meta = SongMetaInfo()
        self._tracks = []

    def add_track(self, track, pos=None):
        """Add one track, appending by default or inserting at ``pos``.

        Fix: the original declared ``add_track`` twice (the second
        definition silently shadowed the first) and the first called
        ``list.append(pos, track)``, which raises TypeError; insertion at
        a position now uses ``list.insert``.
        """
        # TODO verify type
        if pos is None:
            self._tracks.append(track)
        else:
            self._tracks.insert(pos, track)

    def add_tracks(self, tracks):
        """Append several tracks at once."""
        self._tracks.extend(tracks)

    def get_tracks(self):
        """Return the (mutable) list of tracks."""
        return self._tracks

    def set_meta(self, meta):
        """Set the song meta information."""
        # TODO make some more things test and verify data, etc
        self._meta = meta

    def get_meta(self):
        """Return the song meta information."""
        return self._meta

    meta = property(get_meta, set_meta)

    def to_dict(self):
        """Serialize the song (meta + tracks) into a plain dict."""
        return {
            "meta": self._meta.to_dict(),
            "tracks": [t.to_dict() for t in self._tracks],
        }

    @classmethod
    def from_dict(cls, song_dict):
        """Create an instance from a dict containing the information."""
        ret = cls()
        tracks = [TrackInfo.from_dict(t) for t in song_dict["tracks"]]
        meta = SongMetaInfo.from_dict(song_dict["meta"])
        ret.add_tracks(tracks)
        ret.meta = meta
        return ret

    @classmethod
    def from_JSON_string(cls, json_string):
        """Load a song from a JSON string."""
        data_dict = json.loads(json_string)
        return cls.from_dict(data_dict)

    @classmethod
    def from_JSON_file(cls, fpath):
        """Load a song from a JSON file given its path.

        Fix: ``json.load`` requires a file object, not a path string; the
        original passed the path straight through (its own FIXME noted
        this would not work). The file is now opened and closed properly.
        """
        with open(fpath) as json_file:
            data_dict = json.load(json_file)
        return cls.from_dict(data_dict)

    def to_JSON(self):
        """Return a JSON string representing this object."""
        return json.dumps(self.to_dict())
class PartitionInfo(object):
    """
    Contains the information for a single part set of the song.

    A part set will be used to generate the following parts:
    - rithm part
    - auditive memory part
    - reading part
    """

    # def __init__(self, name, content, dependencies, measure_ticks=None, metronome_ticks=None, song_info=None)
    def __init__(self, name, content, dependencies=None):
        """Create a part set.

        Fix: ``dependencies`` used a mutable default argument (``[]``),
        which is shared across all calls; ``None`` is used as the sentinel
        instead.
        """
        # intrinsic values, the ones that give identity to this part set
        self._name = name
        self._content = content
        self._dependencies = [] if dependencies is None else dependencies
        self._deps_ids = [d._name for d in self._dependencies]
        # reference to tempo and song information
        # ticks list containing the ticks for each measure
        # self.measure_ticks = measure_ticks
        # ticks for the metronome
        # self.metronome_ticks = metronome_ticks
        # song information (all the notes and hints)
        # self._song_info = song_info

    @staticmethod
    def flatten_tree(part_tree):
        """
        Flattens a tree giving back an array containing all the elements
        in the dependencies tree. As the dependencies are linked AND also
        named, the link for the tree is not lost.
        """
        flat = [part_tree]
        for dep in part_tree._dependencies:
            flat = flat + PartitionInfo.flatten_tree(dep)
        return flat

    def to_dict(self):
        """
        Note: neither tempo nor song information is serialized.
        This serializes only this part and DOES NOT recurse into the
        dependency objects (only their names), because recursing would
        duplicate data that in reality accompanies a list of parts.
        """
        return {'name': self._name, 'content': self._content, 'deps': self._deps_ids}

    @classmethod
    def from_dict(cls, data_dict):
        """Create an instance from a dict (inverse of :meth:`to_dict`)."""
        # TODO validate that all the data is correct!!
        ret = cls(data_dict['name'], data_dict['content'])
        # dependency objects are not reconstructed, only their names
        ret._deps_ids = data_dict['deps']
        return ret

    def to_JSON(self):
        """Return a JSON string representing this object."""
        return json.dumps(self.to_dict())

    @classmethod
    def from_JSON(cls, json_str):
        """Create an instance from a JSON string."""
        return cls.from_dict(json.loads(json_str))
class DrillInfo(object):
"""
Contains all the information about one partition, this partition will be created
with the information about the song and PartitionInfo
The midi tick time positions are all absolute positioned to the whole song, therefore there is the
indication of tick begin and tick end for being able to handle this relative time positioning
"""
    def __init__(self):
        """
        Initialize an empty drill with default timing settings
        (resolution 220 ticks, 60 bpm, no events or buckets).
        """
        self.name = 'drill'
        self._metronome_ticks = [] #the metronome ticks that will take place in this drill, also relative to tick begin
        self._measure_ids = [] ##id, the order of the measures to play
        self._measure_ticks = [] ##ticks, the begin ticks of the measures to play
        #keeps the tracks of associated events
        self._tracks = []
        #this is a raw list per track of the events, useful for playing and some other game transformations
        self._events_per_track = []
        #buckets, useful to separate the track into buckets, each bucket represent a "given bucket resolution" ticks range.
        #buckets list does not keep empty bucket elements, only the ones that have content
        self.buckets = []
        self.midi_buckets = []
        self._bucket_resolution = 0
        self._tick_begin = 0
        self._tick_end = 0
        self._instrument = "piano"
        #information to be able to get the time in sec
        self.resolution = 220
        self.bpm = 60
        # dependency names (see PartitionInfo._deps_ids)
        self._deps_ids = []
        #dependencies (names)
        #self._name = name
        #self._content = content
        #self._dependencies = dependencies
def bucketize_events(self, bucket_resolution=0):
"""
Separates the events into buckets of resolution = tick_resolution,
if none given will default to self.resolution
Minimum resolution accepted is 1, any other number will be interpreted as non
bucket_resolution=0, bucket resolution (will default to the current resolution
"""
if bucket_resolution <=0:
bucket_resolution = self.resolution
self._bucket_resolution = bucket_resolution
#simple algorithm to bucketize a list of midi events
#step zero, create empty bucket list
tick_range = self._tick_end - self._tick_begin
n_buckets = (tick_range / bucket_resolution) + 1
t_buckets = [[] for i in range(n_buckets)]
#first pass, bucketize
for t in self._tracks:
for ae in t:
noe = ae.data[0]
b_id = (noe.tick - self._tick_begin) / bucket_resolution
t_buckets[b_id].append(ae)
self.buckets = t_buckets
def bucketize_midi_events(self, bucket_resolution=0, ignore_note_off=False):
"""
Separates the events into buckets of resolution = tick_resolution,
if none given will default to self.resolution
Minimum resolution accepted is 1, any other number will be interpreted as non
bucket_resolution=0, bucket resolution (will default to the current resolution
ignore_note_off=False, if note_off events should be ignored in the buckets (many exercises do not need to know about it)
"""
if bucket_resolution <=0:
bucket_resolution = self.resolution
self._bucket_resolution = bucket_resolution
#simple algorithm to bucketize a list of midi events
#step zero, create empty bucket list
tick_range = self._tick_end - self._tick_begin
n_buckets = (tick_range / bucket_resolution) + 1
t_buckets = [[] for i in range(n_buckets)]
#first pass, bucketize
for t in self._events_per_track:
for e in t:
b_id = (e.tick - self._tick_begin) / bucket_resolution
#TODO make boolean calculation to simplify the expression if-else
#if ignore_note_off and is_note_off(e):
# #ignore
# pass
#else:
# t_buckets[b_id].append(e)
if is_note_on(e) or not ignore_note_off:
t_buckets[b_id].append(e)
self.midi_buckets = t_buckets
#second pass: compress buckets, erase empty buckets and save result
#TODO before compaction, an ID of the tick (init or end) of each bucket to be able to trace it)
#self.buckets = [b for b in t_buckets if len(b)>0]
@classmethod
def create_drill(cls, partition, song, metronome_ticks, measure_ticks, ticks_per_measure):
"""
Takes information from the song with the partition information
"""
drill = cls()
#set some basic things:
drill.name = partition._name
drill._measure_ids | |
257.9, 271.5, 253.3, 270.8, 273.7, 270.7, 280.3],
[270.4, 262.5, 272.5, 268.6, 282.5, 254.3, 272.1, 280.2],
],
[
[274.4, 290.9, 276.8, 267.3, 274.7, 289.6, 267.5, 292.8],
[260.7, 281.5, 264.6, 272.4, 259.0, 274.3, 279.7, 272.6],
[273.2, 262.4, 269.2, 280.0, 275.7, 270.5, 286.6, 293.1],
[267.2, 277.4, 274.5, 274.4, 274.2, 298.3, 286.3, 265.0],
[285.1, 272.3, 263.6, 282.3, 248.3, 275.0, 254.5, 288.2],
],
[
[273.3, 268.5, 282.8, 259.8, 251.3, 270.6, 259.7, 269.6],
[269.6, 278.6, 281.7, 286.3, 283.3, 255.9, 267.4, 283.3],
[256.9, 272.4, 273.2, 263.5, 269.9, 277.0, 259.4, 289.0],
[249.0, 271.5, 279.6, 264.4, 264.4, 263.8, 269.5, 266.3],
[274.4, 278.7, 262.8, 286.0, 282.3, 282.2, 267.3, 273.7],
],
[
[266.9, 259.2, 262.7, 286.4, 257.2, 265.0, 261.5, 283.8],
[275.8, 285.6, 284.7, 258.0, 277.1, 281.5, 266.8, 256.5],
[279.2, 273.6, 267.9, 266.6, 278.4, 275.8, 257.4, 258.7],
[269.9, 247.3, 288.3, 265.3, 269.2, 268.7, 267.9, 280.9],
[252.7, 276.1, 267.5, 279.1, 276.5, 279.8, 257.0, 264.3],
],
[
[284.5, 284.9, 267.2, 274.0, 284.2, 285.8, 274.6, 279.3],
[274.4, 281.6, 260.7, 253.7, 264.9, 261.2, 279.6, 284.1],
[260.1, 276.5, 278.0, 264.0, 264.1, 271.5, 265.2, 290.4],
[274.2, 262.7, 284.7, 290.1, 279.3, 263.6, 279.3, 270.4],
[271.5, 273.7, 264.5, 271.8, 286.5, 263.7, 272.3, 273.6],
],
[
[274.6, 255.3, 290.2, 273.7, 268.6, 267.0, 284.3, 257.5],
[260.7, 248.0, 271.0, 279.5, 279.1, 278.6, 258.2, 290.7],
[264.4, 281.6, 271.5, 283.0, 276.7, 292.1, 274.3, 267.2],
[280.3, 266.8, 272.8, 267.6, 271.0, 272.1, 285.2, 262.9],
[267.5, 256.5, 283.1, 280.3, 263.4, 270.6, 274.9, 268.2],
],
[
[277.8, 270.0, 288.3, 276.6, 276.9, 267.6, 286.9, 275.1],
[282.3, 259.3, 273.5, 295.0, 282.6, 273.9, 275.9, 288.2],
[269.0, 270.2, 292.6, 254.1, 271.9, 271.7, 271.7, 279.6],
[279.5, 252.0, 285.7, 260.6, 252.7, 275.4, 290.4, 277.3],
[276.0, 281.8, 270.6, 272.5, 259.3, 274.4, 290.2, 278.3],
],
[
[256.6, 274.6, 274.0, 285.3, 263.0, 266.5, 264.6, 277.0],
[276.0, 279.5, 272.4, 259.3, 273.0, 276.7, 286.7, 284.0],
[279.8, 265.5, 272.2, 276.8, 283.9, 272.7, 287.7, 263.0],
[267.9, 266.4, 274.4, 251.5, 279.1, 274.1, 258.6, 284.7],
[279.9, 274.3, 268.2, 273.2, 284.7, 291.7, 257.4, 281.0],
],
[
[264.8, 274.6, 275.8, 269.6, 263.1, 254.5, 286.9, 260.2],
[279.8, 281.9, 277.0, 256.9, 268.3, 277.3, 258.9, 268.9],
[255.6, 269.4, 290.6, 276.8, 261.0, 261.6, 286.6, 279.8],
[295.8, 270.1, 259.1, 286.5, 282.2, 269.1, 274.1, 293.4],
[281.4, 264.5, 258.0, 283.1, 272.5, 263.5, 269.8, 266.1],
],
[
[281.0, 287.9, 289.3, 251.7, 284.6, 273.3, 269.2, 274.5],
[268.7, 266.3, 265.5, 271.6, 258.9, 270.1, 277.6, 279.0],
[250.8, 277.2, 260.2, 285.0, 263.0, 279.1, 278.1, 251.8],
[267.4, 272.5, 250.2, 283.1, 269.6, 275.8, 257.6, 275.4],
[273.7, 261.9, 258.7, 267.0, 277.0, 272.1, 280.8, 265.4],
],
[
[281.4, 274.2, 274.0, 278.7, 282.4, 285.4, 262.9, 266.7],
[294.3, 283.0, 282.1, 269.5, 266.3, 292.0, 258.3, 273.1],
[274.7, 257.9, 264.8, 261.9, 247.3, 284.5, 283.9, 274.9],
[287.1, 273.3, 286.9, 283.1, 288.3, 265.8, 272.0, 295.6],
[276.9, 267.3, 271.6, 275.2, 279.6, 277.0, 262.8, 265.3],
],
[
[277.1, 289.1, 284.3, 279.8, 276.7, 290.2, 265.7, 260.0],
[260.8, 251.6, 263.8, 267.6, 266.2, 259.9, 276.7, 267.7],
[273.2, 265.0, 281.7, 270.2, 281.7, 259.8, 264.0, 260.8],
[275.8, 274.9, 279.4, 283.3, 272.6, 273.0, 279.1, 288.0],
[266.9, 261.1, 270.0, 289.2, 250.5, 276.0, 289.7, 281.9],
],
[
[274.8, 273.8, 277.1, 288.6, 277.1, 257.3, 277.7, 279.4],
[278.3, 269.0, 278.2, 277.3, 275.1, 266.6, 280.7, 276.7],
[270.3, 257.8, 264.6, 271.6, 287.7, 274.0, 272.5, 265.8],
[259.1, 269.2, 290.7, 281.9, 270.4, 287.1, 290.8, 290.0],
[285.0, 273.4, 283.4, 278.1, 286.7, 259.6, 249.6, 273.6],
],
[
[269.2, 291.3, 282.4, 279.8, 269.8, 272.5, 271.7, 261.8],
[261.1, 273.4, 287.5, 282.1, 262.1, 275.4, 271.9, 269.1],
[266.1, 272.4, 270.5, 281.2, 271.9, 285.6, 259.6, 290.2],
[270.5, 275.9, 255.8, 275.6, 277.8, 272.3, 271.8, 278.8],
[268.7, 277.8, 267.6, 264.7, 279.0, 258.8, 288.6, 277.3],
],
[
[272.1, 267.0, 251.9, 277.8, 263.8, 275.8, 275.8, 293.0],
[264.4, 268.3, 261.4, 266.6, 283.4, 282.3, 255.5, 272.3],
[262.7, 285.2, 276.2, 283.8, 275.3, 274.8, 290.9, 280.4],
[275.2, 263.9, 275.8, 267.1, 267.2, 267.4, 269.2, 270.3],
[289.4, 259.3, 275.2, 274.0, 268.4, 280.8, 278.5, 266.6],
],
[
[261.1, 264.3, 275.0, 282.6, 286.0, 271.9, 276.3, 263.2],
[280.9, 268.7, 274.8, 280.0, 263.5, 284.9, 279.8, 256.3],
[269.5, 274.8, 271.5, 273.5, 265.1, 283.4, 271.6, 269.9],
[293.7, 278.1, 267.6, 265.9, 261.6, 269.8, 272.4, 277.1],
[267.8, 292.5, 271.5, 279.8, 256.7, 271.9, 273.7, 275.1],
],
[
[290.6, 269.7, 282.3, 277.8, 289.0, 284.4, 274.0, 275.3],
[261.0, 276.2, 282.4, 260.1, 274.1, 279.1, 280.7, 266.0],
[275.6, 265.3, 287.5, 272.9, 278.9, 258.9, 273.2, 264.0],
[287.0, 280.2, 268.2, 277.5, 277.2, 258.7, 263.0, 262.2],
[277.2, 293.3, 270.3, 265.9, 264.7, 262.0, 281.5, 275.9],
],
[
[277.8, 282.3, 278.4, 263.6, 270.7, 275.2, 277.3, 281.0],
[275.2, 263.9, 285.6, 269.5, 272.8, 279.1, 269.4, 268.2],
[261.5, 267.2, 260.3, 293.5, 281.6, 280.1, 282.5, 274.3],
[279.7, 264.8, 258.8, 279.2, 278.6, 261.3, 261.7, 268.0],
[281.1, 258.5, 290.3, 268.9, 275.5, 272.2, 267.3, 276.0],
],
[
[257.4, 261.4, 271.5, 273.8, 272.6, 254.5, 282.5, 262.8],
[260.2, 260.9, 280.4, 279.8, 268.2, 273.2, 275.6, 274.7],
[275.1, 277.5, 285.3, 280.8, 273.3, 263.0, 263.2, 297.7],
[274.7, 275.9, 265.6, 266.9, 273.4, 269.6, 279.8, 276.2],
[279.6, 268.9, 270.3, 269.1, 267.5, 282.4, 279.1, 252.3],
],
[
[277.5, 272.1, 261.2, 266.4, 291.3, 273.9, 270.5, 270.9],
[275.4, 270.1, 260.8, 270.1, 277.3, 271.5, 280.9, 268.7],
[277.8, 277.3, 274.3, 269.1, 280.6, 283.8, 268.4, 278.3],
[291.0, 267.1, 261.3, 269.0, 271.5, 280.5, 274.5, 290.5],
[280.9, 268.4, 283.4, 284.3, 269.5, 261.1, 279.7, 288.4],
],
[
[274.7, 258.8, 271.0, 272.1, 263.9, 268.4, 264.6, 288.0],
[280.5, 254.7, 272.6, 278.1, 250.7, 280.2, 285.2, 275.4],
[286.7, 281.5, 254.7, 276.5, 263.9, 281.3, 278.1, 273.0],
[259.4, 277.5, 271.9, 273.2, 264.0, 273.4, 286.5, 268.5],
[277.3, 270.3, 286.2, 273.9, 268.8, 282.7, 272.1, 274.1],
],
[
[273.2, 257.3, 287.3, 268.4, 278.7, 272.9, 249.7, 272.8],
[272.3, 266.7, 284.1, 280.8, 265.1, 275.5, 282.5, 249.9],
[270.0, 272.4, 276.6, 269.2, 262.0, 289.0, 278.1, 282.9],
[277.6, 246.5, 264.1, 285.4, 279.1, 279.4, 279.2, 288.6],
[276.9, 259.9, 276.0, 252.1, 272.8, 277.6, 277.4, 281.5],
],
[
[280.5, 277.4, 273.8, 284.9, 263.1, 262.4, 297.5, 274.8],
[265.6, 271.3, 294.9, 290.9, 275.9, 275.0, 279.2, 256.7],
[275.8, 266.7, 260.6, 273.5, 270.9, 266.2, 253.6, 270.1],
[282.8, 270.1, 269.4, 278.6, 276.0, 270.8, 279.3, 272.3],
[277.4, 282.5, 257.7, 267.2, 271.4, 267.1, 269.9, 253.6],
],
[
[275.5, 272.7, 268.2, 253.0, 272.0, 272.8, 282.0, 274.8],
[271.3, 295.3, 275.4, 276.9, 270.7, 268.7, 281.0, 294.1],
[285.9, 254.7, 274.8, 259.6, 262.1, 279.0, 280.7, 275.5],
[276.9, 276.7, 258.6, 274.9, 281.7, 279.6, 267.7, 269.1],
[289.2, 256.6, 274.6, 254.9, 257.0, 280.3, 270.2, 272.6],
],
[
[269.6, 270.3, 269.7, 279.4, 253.0, 257.1, 295.4, 287.3],
[264.8, 277.6, 271.6, 271.1, 268.8, 275.6, 262.6, 267.5],
[272.7, 256.1, 259.7, 273.3, 265.6, 287.5, 270.7, 283.3],
[262.8, 276.3, 263.5, 281.9, 264.6, 267.2, 260.5, 263.5],
[286.6, 267.0, 273.4, 277.2, 276.6, 287.9, 262.0, 261.8],
],
[
[277.3, 277.7, 269.9, 263.9, 282.3, 281.7, 280.3, 269.8],
[275.0, 285.4, 290.3, 280.3, 265.4, 262.6, 251.7, 273.8],
[275.0, 276.3, 262.7, 266.6, 273.2, 269.0, 269.6, 256.1],
[271.4, 291.2, 275.5, 265.8, 274.0, 280.6, 291.5, 260.2],
[258.3, 281.9, 264.1, 278.0, 279.1, 269.8, 278.9, 257.5],
],
[
[276.7, 280.6, 262.5, 260.8, 276.9, 284.7, 289.2, 245.8],
[266.1, 251.6, 267.1, 272.0, 275.1, 261.7, 272.0, 269.1],
[276.2, 267.4, 264.4, 279.1, 278.0, 261.8, 291.5, 268.1],
[281.6, 282.3, 267.2, 285.8, 267.3, 287.0, 262.5, 282.3],
[276.1, 276.5, 253.5, 274.9, 264.8, 262.6, 268.3, 276.9],
],
[
[281.0, 290.1, 285.8, 279.5, 275.5, 286.3, 277.5, 268.3],
[284.4, 279.7, 290.9, 273.3, 275.1, 276.8, 270.6, 271.8],
[270.2, 271.8, 266.6, 269.5, 287.0, 281.4, 261.6, 264.1],
[276.7, 269.5, 278.1, 275.2, 281.0, 264.9, 275.4, 272.4],
[267.5, 287.3, 262.6, 270.7, 273.2, 280.5, 264.8, 263.9],
],
[
[287.5, 290.3, 280.7, 271.0, 265.0, 274.3, 277.4, 265.4],
[271.6, 263.7, 294.4, 289.2, 273.0, 267.7, 276.1, 279.3],
[277.1, 273.6, 269.9, 258.6, 252.7, 276.9, 286.8, 266.5],
[277.3, 271.8, 269.1, 280.1, 291.2, 262.5, 263.6, 261.4],
[276.4, 260.8, 259.8, 265.0, 296.3, 269.9, 276.8, 278.7],
],
[
[261.1, 260.2, 270.9, 269.2, 279.8, 243.0, 270.1, 265.4],
[275.1, 271.3, 271.6, 297.3, 275.4, 264.1, 275.1, 268.9],
[274.9, 277.0, 274.7, 280.4, 283.5, 269.7, 253.7, 272.1],
[271.8, 262.5, 259.3, 266.2, 279.2, 275.5, 276.8, 272.5],
[269.8, 277.3, 275.8, 283.6, 261.1, 268.3, 248.6, 262.8],
],
[
[270.5, 274.3, 270.4, 269.5, 271.5, 273.9, 281.5, 276.6],
[277.3, 271.4, 294.4, 281.4, 269.7, 287.9, 260.6, 276.1],
[270.9, 271.2, 269.0, 269.8, 263.8, 263.1, 281.3, 288.1],
[263.8, 277.5, 270.3, 264.8, 270.0, 290.4, 265.1, 273.1],
[251.5, 282.6, 265.7, 262.8, 267.1, 269.4, 285.3, 269.2],
],
[
[270.1, 281.4, 284.2, | |
> cajasH[cc, :], cajasH[c, :], cajasH[cc, :])
nuevasL = np.where(cajasL[c, :] < cajasL[cc, :], cajasL[c, :], cajasL[cc, :])
# verifica que solo un tipo de patron este en la caja
res = True
for p in range(patrones.shape[0]):
if patrones[p, dim] != pertenece[c]:
den = True
if True in np.hstack(
(patrones[p, :dim] > nuevasH, patrones[p, :dim] < nuevasL)):
den = False
if den:
res = False
break
# desactivar una caja y redimensionar la otra
if res:
cambio = True
pertenece[cc] = -2
cajasH[c, :] = nuevasH.copy()
cajasL[c, :] = nuevasL.copy()
# crear la red DMNN resultante
self.numK = np.zeros(int(patrones[:, dim].max() + 1), dtype=int)
self.pesW = np.array([])
for m in range(self.numK.size):
k = 0
for c in range(pertenece.size):
if pertenece[c] == m:
self.pesW = np.concatenate((self.pesW, np.dstack((cajasH[c, :], cajasL[c, :])).ravel()))
k += 1
self.numK[m] = k
self.actK = np.ones(self.numK.sum()) > 0
def ImportarRed(self, entradas, clases, adecuaH, adecuaL, adecuaN):
    """Ask the user for a network file and load it; return the status code."""
    resultado = 0
    ruta, _ = QFileDialog.getOpenFileName(caption="Abrir Red",
                                          filter="XML File (*.xml);;Text File (*.txt)")
    if ruta:
        # Dispatch on the chosen extension; anything that is not XML is
        # treated as the plain-text format.
        cargador = self.LeerXML if ".xml" in ruta else self.LeerTXT
        resultado = cargador(entradas, clases, adecuaH, adecuaL, ruta, adecuaN)
    return resultado
def LeerXML(self, entradas, clases, adecuaH, adecuaL, archivo, adecuaN):
    """
    Load a DMNN network from the XML file `archivo`.

    Returns a status code:
      1  -> loaded; normalization matches (or the network was not normalized)
      2  -> loaded, but the normalization differs from the current data
     -1  -> the file is not a DMNN XML file
     -2  -> the network dimensions do not match (entradas, clases)
    """
    # Context manager guarantees the handle is closed even if parsing raises.
    with open(archivo, "r") as file:
        raiz = ET.fromstring(file.read())
    if raiz.tag == "DMNN":
        dim = np.zeros(2, dtype=int)
        dim[0] = int(raiz.find("Dimension").find("Entradas").text)
        dim[1] = int(raiz.find("Dimension").find("Clases").text)
        self.pesW = self.ExtraeTags(raiz, "Pesos", True)
        self.actK = self.ExtraeTags(raiz, "Activas", False) > 0
        self.numK = self.ExtraeTags(raiz, "DendritasPorClase", False)
        norH = self.ExtraeTags(raiz, "NormalizacionH", True)
        norL = self.ExtraeTags(raiz, "NormalizacionL", True)
        norN = float(raiz.find("NormalizacionN").text)
        if dim[0] == entradas and dim[1] == clases:
            # norN == 0.0 means the stored network was never normalized,
            # so any current normalization is acceptable.
            if norN == 0.0:
                nota = 1
            elif False in (norH == adecuaH) or False in (norL == adecuaL):
                nota = 2
            elif norN == adecuaN:
                nota = 1
            else:
                nota = 2
        else:
            nota = -2
    else:
        nota = -1
    return nota
def ExtraeTags(self, raiz, llave, esFloat):
    """
    Read the numbered child tags under `llave` (e.g. W0, W1, ...) and return
    their values as a numpy array (float or int depending on `esFloat`).
    """
    # Map each container tag to the per-item tag prefix used inside it.
    item = {"NormalizacionH": "H", "NormalizacionL": "L", "Pesos": "W",
            "DendritasPorClase": "C", "Activas": "T"}
    prefijo = item[llave]
    contenedor = raiz.find(llave)  # hoisted: the container does not change
    valores = []
    i = 0
    while True:
        n = contenedor.find(prefijo + str(i))
        if n is None:  # first missing index ends the sequence
            break
        valores.append(float(n.text) if esFloat else int(n.text))
        i += 1
    # Build the array in one shot instead of concatenating per element (O(n)
    # instead of O(n^2)); empty input yields the same empty arrays as before.
    if esFloat:
        return np.array(valores)
    return np.array(valores, dtype=int)
def LeerTXT(self, entradas, clases, adecuaH, adecuaL, archivo, adecuaN):
    """
    Load a DMNN network from the plain-text file `archivo` (the fixed 16-line
    layout written by CrearTXT).

    Returns the same status codes as LeerXML:
      1 ok, 2 normalization mismatch, -1 not a DMNN file, -2 wrong dimensions.
    """
    with open(archivo, "r") as file:  # handle always closed
        data = file.readlines()
    # Line 0 is the header; the layout has at least 16 lines.
    if len(data) >= 16 and "DMNN: " in data[0]:
        # Explicit split/convert instead of the deprecated text-mode
        # np.fromstring; numpy strips the trailing newline on conversion.
        dim = np.array(data[2].strip().split(","), dtype=int)
        self.pesW = np.array(data[4].strip().split(","), dtype=float)
        self.numK = np.array(data[6].strip().split(","), dtype=int)
        self.actK = np.array(data[8].strip().split(","), dtype=int) > 0
        norH = np.array(data[10].strip().split(","), dtype=float)
        norL = np.array(data[12].strip().split(","), dtype=float)
        norN = float(data[13].replace("NormalizacionN: ", ""))
        if dim[0] == entradas and dim[1] == clases:
            # norN == 0.0 -> the stored network was never normalized.
            if norN == 0.0:
                nota = 1
            elif False in (norH == adecuaH) or False in (norL == adecuaL):
                nota = 2
            elif norN == adecuaN:
                nota = 1
            else:
                nota = 2
        else:
            nota = -2
    else:
        nota = -1
    return nota
def ExportarRed(self, entradas, clases, adecuaH, adecuaL, titulo, apodos, nombresE, adecuaN):
    """Ask the user for a destination file and save the network there."""
    ruta, _ = QFileDialog.getSaveFileName(caption="Guardar Red",
                                          filter="XML File (*.xml);;Text File (*.txt)")
    if not ruta:
        return
    # Anything without an .xml extension is written in the text format.
    escritor = self.CrearXML if ".xml" in ruta else self.CrearTXT
    escritor(entradas, clases, ruta, adecuaH, adecuaL, titulo, apodos, nombresE, adecuaN)
def CrearXML(self, entradas, clases, archivo, adecuaH, adecuaL, titulo, apodos, nombresE, adecuaN):
    """
    Save the current DMNN network to `archivo` as XML: title, dimensions,
    output/input names, normalization vectors, weights, active-dendrite
    flags and dendrites-per-class counts.
    """
    def agrega_lista(padre, llave, prefijo, valores):
        # Helper: emit <llave><prefijo0>v0</prefijo0><prefijo1>v1</prefijo1>...</llave>
        nodo = ET.SubElement(padre, llave)
        for i, v in enumerate(valores):
            hijo = ET.SubElement(nodo, prefijo + str(i))
            hijo.text = v
    datos = ET.Element("DMNN")
    llave = ET.SubElement(datos, "Titulo")
    llave.text = titulo
    llave = ET.SubElement(datos, "Dimension")
    item = ET.SubElement(llave, "Entradas")
    item.text = str(entradas)
    item = ET.SubElement(llave, "Clases")
    item.text = str(clases)
    # Element order matches the original writer so LeerXML keeps working.
    agrega_lista(datos, "NombresSalidas", "A", apodos)
    agrega_lista(datos, "NombresEntradas", "N", nombresE)
    agrega_lista(datos, "NormalizacionH", "H", [str(v) for v in adecuaH])
    agrega_lista(datos, "NormalizacionL", "L", [str(v) for v in adecuaL])
    llave = ET.SubElement(datos, "NormalizacionN")
    llave.text = str(adecuaN)
    agrega_lista(datos, "Pesos", "W", [str(v) for v in self.pesW])
    agrega_lista(datos, "Activas", "T", ["1" if v else "0" for v in self.actK])
    agrega_lista(datos, "DendritasPorClase", "C", [str(v) for v in self.numK])
    # Context manager closes the file even if serialization fails.
    with open(archivo, "w") as file:
        file.write(ET.tostring(datos).decode())
def CrearTXT(self, entradas, clases, archivo, adecuaH, adecuaL, titulo, apodos, nombresE, adecuaN):
    """
    Save the current DMNN network to `archivo` in the fixed 16-line text
    layout that LeerTXT expects.
    """
    def compacta(arr):
        # Render a numpy array as comma-separated values with no brackets,
        # spaces or newlines (same output as the original array2string chain).
        txx = np.array2string(arr, separator=",").replace("\n", "")
        return txx.replace(" ", "").replace("[", "").replace("]", "")
    # Build every line first, then write once; joins replace the original
    # per-element write loops (and no longer fail on empty sequences).
    lineas = [
        "DMNN: " + titulo,
        "Dimension: Entradas, Clases",
        compacta(np.array([entradas, clases])),
        "Pesos",
        ",".join(str(w) for w in self.pesW),
        "DendritasPorClase",
        compacta(self.numK),
        "Activas",
        ",".join("1" if a else "0" for a in self.actK),
        "NormalizacionH",
        compacta(adecuaH),
        "NormalizacionL",
        compacta(adecuaL),
        "NormalizacionN: " + str(adecuaN),
        "NombresSalidas: " + ", ".join(apodos),
        "NombresEntradas: " + ", ".join(nombresE),
    ]
    with open(archivo, "w") as file:  # handle closed even if a write fails
        file.write("\n".join(lineas) + "\n")
def UnirDendritas(self, patrones, toler):
    """
    Greedily merge pairs of hyperboxes (dendrites) of the same class while
    the classification error stays within `toler`.

    Side effects: mutates self.pesW and self.actK in place; self.error is
    refreshed by each errorCM(patrones) call.
    """
    # Values per dendrite box: 2 (high/low) per input dimension.
    param = int(self.pesW.size / (1 if self.numK.sum() == 0 else self.numK.sum()))
    # Measure the current network error before attempting any merge.
    self.errorCM(patrones)
    if self.error <= toler:
        # Support masks: bas[0] selects the H (upper) slots of a box,
        # bas[1] the L (lower) slots.
        bas = []
        bas.append(np.dstack((np.ones(int(param / 2)), np.zeros(int(param / 2)))).ravel())
        bas.append(abs(bas[0] - 1.0))
        # Merge loop over the hyperboxes, processed class by class.
        mOperado = []
        for mmm in range(self.numK.size):
            # Pick the classes in random order to avoid order bias.
            mOperado.append(-1)
            m = -1
            while m in mOperado:
                m = np.random.randint(self.numK.size)
            mOperado[-1] = m
            n = self.numK[:m].sum()
            # Shuffle the dendrites of class m (again to avoid order bias):
            # the activity flags and weight slices are permuted together.
            binAct = np.where(self.actK, 1, 0)
            pedazoW = self.pesW[(n * param):((n + self.numK[m]) * param)].copy()
            revu = np.vstack((binAct[n:(n + self.numK[m])],
                              np.arange(self.numK[m], dtype=int)))
            np.random.shuffle(revu.T)
            binAct[n:(n + self.numK[m])] = revu[0, :]
            self.actK = binAct > 0
            pedazoW = pedazoW.reshape(-1, param)[revu[1, :], :].ravel()
            self.pesW[(n * param):((n + self.numK[m]) * param)] = pedazoW
            # Try to absorb each following box into the current one.
            for k in range(self.numK[m]):
                if self.actK[n]:
                    # Check whether box k can be merged with each later box.
                    nn = 0
                    for kk in range(k, self.numK[m]):
                        if self.actK[n + nn] and kk != k:
                            # Tentatively deactivate the secondary box kk...
                            self.actK[n + nn] = False
                            # ...and grow the current box to enclose both
                            # (element-wise max of the H slots, min of the L).
                            ant = self.pesW[(n * param):((n + 1) * param)].copy()
                            sec = self.pesW[((n + nn) * param):((n + nn + 1) * param)].copy()
                            H = np.maximum(ant, sec) * bas[0]
                            L = np.minimum(ant, sec) * bas[1]
                            self.pesW[(n * param):((n + 1) * param)] = H + L
                            # Compare the merged box's error with the bound;
                            # roll back the merge if it got worse than toler.
                            self.errorCM(patrones)
                            if self.error > toler:
                                self.actK[n + nn] = True
                                self.pesW[(n * param):((n + 1) * param)] = ant
                        nn += 1
                n += 1
def QuitarDendritas(self, patrones, toler):
    """
    Prune hyperboxes (dendrites), smallest volume first, while the
    classification error stays within `toler` and no class is left empty.

    Side effects: flips entries of self.actK; self.error is refreshed by
    each errorCM(patrones) call.
    """
    dim = patrones.shape[1] - 1  # last column holds the class label
    # Priority vector: area/volume/hypervolume of each box (prod of H-L
    # extents); smaller boxes are considered for removal first.
    priori = self.pesW.reshape(-1, 2).copy()
    priori = (priori[:, 0] - priori[:, 1]).reshape(-1, dim)
    priori = np.prod(priori, axis=1)
    # `limite` is a sentinel larger than any real volume; inactive boxes
    # and already-processed boxes are parked at this value.
    limite = priori.max() * 2.0
    priori = np.where(self.actK, priori, limite)
    # Measure the current network error before attempting any removal.
    self.errorCM(patrones)
    if self.error <= toler:
        # Try removing each box in increasing-volume order.
        for j in range(priori.size):
            menor = priori.argmin()
            if priori[menor] != limite:
                priori[menor] = limite
                # Tentatively deactivate the box; undo the removal if it
                # would leave a class with no boxes or push the error
                # past the tolerance.
                self.actK[menor] = False
                if self.ClaseVacia():
                    self.actK[menor] = True
                else:
                    self.errorCM(patrones)
                    if self.error > toler:
                        self.actK[menor] = True
def ClaseVacia(self):
res = False
unos = np.where(self.actK, 1, 0)
for m in range(self.numK.size):
if unos[self.numK[:m].sum():self.numK[:(m + 1)].sum()].sum() == 0:
res = True
break
return | |
content-version="1.0" format-version="1.0">
<icFilters>
<icFilter operation="delete">
"""
# NETCONF XML templates for the CloudEngine syslog (info-center) feature.
# HEADER constants contain %s placeholders that are filled with module
# parameters; the matching TAIL constant closes the request. The assembled
# strings are sent with get_nc_config / set_nc_config.

# Closing tags for the "delete channel filter" request.
CE_NC_DELETE_CHANNEL_FILTER_TAIL = """
</icFilter>
</icFilters>
</syslog>
</config>
"""
# Query an IP log server: ipType, serverIp, vrfName, isDefaultVpn.
CE_NC_GET_SERVER_IP_INFO_HEADER = """
<filter type="subtree">
<syslog xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<syslogServers>
<syslogServer>
<ipType>%s</ipType>
<serverIp>%s</serverIp>
<vrfName>%s</vrfName>
<isDefaultVpn>%s</isDefaultVpn>
"""
CE_NC_GET_SERVER_IP_INFO_TAIL = """
</syslogServer>
</syslogServers>
</syslog>
</filter>
"""
# Create/update an IP log server (NETCONF "merge" operation).
CE_NC_MERGE_SERVER_IP_INFO_HEADER = """
<config>
<syslog xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<syslogServers>
<syslogServer operation="merge">
<ipType>%s</ipType>
<serverIp>%s</serverIp>
<vrfName>%s</vrfName>
<isDefaultVpn>%s</isDefaultVpn>
"""
CE_NC_MERGE_SERVER_IP_INFO_TAIL = """
</syslogServer>
</syslogServers>
</syslog>
</config>
"""
# Remove an IP log server (NETCONF "delete" operation).
CE_NC_DELETE_SERVER_IP_INFO_HEADER = """
<config>
<syslog xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<syslogServers>
<syslogServer operation="delete">
<ipType>%s</ipType>
<serverIp>%s</serverIp>
<vrfName>%s</vrfName>
<isDefaultVpn>%s</isDefaultVpn>
"""
CE_NC_DELETE_SERVER_IP_INFO_TAIL = """
</syslogServer>
</syslogServers>
</syslog>
</config>
"""
# Query a DNS (domain-name) log server.
CE_NC_GET_SERVER_DNS_INFO_HEADER = """
<filter type="subtree">
<syslog xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<syslogDNSs>
<syslogDNS>
"""
CE_NC_GET_SERVER_DNS_INFO_TAIL = """
</syslogDNS>
</syslogDNSs>
</syslog>
</filter>
"""
# Create/update a DNS log server: serverDomain, vrfName, isDefaultVpn.
CE_NC_MERGE_SERVER_DNS_INFO_HEADER = """
<config>
<syslog xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<syslogDNSs>
<syslogDNS operation="merge">
<serverDomain>%s</serverDomain>
<vrfName>%s</vrfName>
<isDefaultVpn>%s</isDefaultVpn>
"""
CE_NC_MERGE_SERVER_DNS_INFO_TAIL = """
</syslogDNS>
</syslogDNSs>
</syslog>
</config>
"""
# Remove a DNS log server.
CE_NC_DELETE_SERVER_DNS_INFO_HEADER = """
<config>
<syslog xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<syslogDNSs>
<syslogDNS operation="delete">
<serverDomain>%s</serverDomain>
<vrfName>%s</vrfName>
<isDefaultVpn>%s</isDefaultVpn>
"""
CE_NC_DELETE_SERVER_DNS_INFO_TAIL = """
</syslogDNS>
</syslogDNSs>
</syslog>
</config>
"""
def get_out_direct_default(out_direct):
    """Return the default channel id for an output direction, or None if unknown."""
    defaults = {"console": "1", "monitor": "2", "trapbuffer": "3",
                "logbuffer": "4", "snmp": "5", "logfile": "6"}
    return defaults.get(out_direct)
def get_channel_name_default(channel_id):
    """Return the default channel name for a channel id string, or None if unknown."""
    # Fixed mapping of channel ids "0".."9" to their factory-default names.
    channel_dict = {"0": "console", "1": "monitor", "2": "loghost", "3": "trapbuffer", "4": "logbuffer",
                    "5": "snmpagent", "6": "channel6", "7": "channel7", "8": "channel8", "9": "channel9"}
    channel_name_default = channel_dict.get(channel_id)
    return channel_name_default
class InfoCenterGlobal(object):
    """
    Manages info center global configuration.

    Wraps the CloudEngine syslog (info-center) NETCONF operations used by
    this Ansible module: channels, output directions, filters and log
    servers, tracking changes in `updates_cmd` / `changed`.
    """
def __init__(self, argument_spec):
    """Initialize the Ansible module and cache every module parameter as an attribute."""
    self.spec = argument_spec
    self.module = None
    self.init_module()
    # Module input info: copy each parameter onto the instance, mapping
    # falsy values (empty string, 0, None, False) to None, exactly as the
    # per-attribute `... or None` assignments did.
    for name in ("info_center_enable", "packet_priority", "suppress_enable",
                 "logfile_max_num", "logfile_max_size", "channel_id",
                 "channel_cfg_name", "channel_out_direct",
                 "filter_feature_name", "filter_log_name", "ip_type",
                 "server_ip", "server_domain", "is_default_vpn", "vrf_name",
                 "level", "server_port", "facility", "channel_name",
                 "timestamp", "transport_mode", "ssl_policy_name",
                 "source_ip", "state"):
        setattr(self, name, self.module.params[name] or None)
    # State tracking for the Ansible result.
    self.changed = False
    self.updates_cmd = list()
    self.results = dict()
    self.existing = dict()
    self.proposed = dict()
    self.end_state = dict()
    # Cached syslog configuration, filled in by the get_* helpers.
    self.cur_global_info = None
    self.cur_logfile_info = None
    self.channel_info = None
    self.channel_direct_info = None
    self.filter_info = None
    self.server_ip_info = None
    self.server_domain_info = None
def init_module(self):
    """Create the AnsibleModule instance from the stored argument spec."""
    # Check mode is supported so the module can run under --check.
    self.module = AnsibleModule(
        argument_spec=self.spec, supports_check_mode=True)
def check_response(self, con_obj, xml_name):
    """Fail the module unless the NETCONF reply in `con_obj.xml` contains <ok/>."""
    reply = con_obj.xml
    if "<ok/>" in reply:
        return
    self.module.fail_json(msg='Error: %s failed.' % xml_name)
def get_channel_dict(self):
    """Fetch the configured syslog channels from the device.

    Returns {} when the device reports no data, otherwise
    {"channelInfos": [{"icChnlId": ..., "icChnlCfgName": ...}, ...]}.
    """
    channel_info = dict()
    # Query the device for the channel matching self.channel_id.
    conf_str = CE_NC_GET_CHANNEL_INFO % self.channel_id
    xml_str = get_nc_config(self.module, conf_str)
    if "<data/>" in xml_str:
        return channel_info
    # Strip newlines and namespace declarations so plain tag paths work
    # with ElementTree below.
    xml_str = xml_str.replace('\r', '').replace('\n', '').\
        replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
        replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
    root = ElementTree.fromstring(xml_str)
    channel_info["channelInfos"] = list()
    channels = root.findall("data/syslog/icChannels/icChannel")
    if channels:
        # Keep only the id and configured-name fields of each channel.
        for channel in channels:
            channel_dict = dict()
            for ele in channel:
                if ele.tag in ["icChnlId", "icChnlCfgName"]:
                    channel_dict[ele.tag] = ele.text
            channel_info["channelInfos"].append(channel_dict)
    return channel_info
def is_exist_channel_id_name(self, channel_id, channel_name):
    """Return True when the (channel id, name) pair is already on the device."""
    if not self.channel_info:
        return False
    return any(rec["icChnlId"] == channel_id and rec["icChnlCfgName"] == channel_name
               for rec in self.channel_info["channelInfos"])
def config_merge_syslog_channel(self, channel_id, channel_name):
    """Create/rename a syslog channel via NETCONF merge if not already present."""
    # Skip the NETCONF call when the device already has this id/name pair.
    if not self.is_exist_channel_id_name(channel_id, channel_name):
        conf_str = CE_NC_MERGE_CHANNEL_INFO_HEADER
        if channel_id:
            conf_str += "<icChnlId>%s</icChnlId>" % channel_id
        if channel_name:
            conf_str += "<icChnlCfgName>%s</icChnlCfgName>" % channel_name
        conf_str += CE_NC_MERGE_CHANNEL_INFO_TAIL
        recv_xml = set_nc_config(self.module, conf_str)
        if "<ok/>" not in recv_xml:
            self.module.fail_json(
                msg='Error: Merge syslog channel id failed.')
        # Record the equivalent CLI command and mark the module as changed.
        self.updates_cmd.append(
            "info-center channel %s name %s" % (channel_id, channel_name))
        self.changed = True
def delete_merge_syslog_channel(self, channel_id, channel_name):
    """'Delete' a channel name by merging the factory-default name back in.

    A change is only sent when the device's current name for channel_id
    differs from its default.
    """
    change_flag = False
    # Case 1: caller named the channel explicitly -- revert to the default
    # name only if the device currently has exactly that id/name pair.
    if channel_name:
        for id2name in self.channel_info["channelInfos"]:
            channel_default_name = get_channel_name_default(
                id2name["icChnlId"])
            if id2name["icChnlId"] == channel_id and id2name["icChnlCfgName"] == channel_name:
                channel_name = channel_default_name
                change_flag = True
    # Case 2: no name given -- revert whenever the device's current name
    # for this id is not already the default.
    if not channel_name:
        for id2name in self.channel_info["channelInfos"]:
            channel_default_name = get_channel_name_default(
                id2name["icChnlId"])
            if id2name["icChnlId"] == channel_id and id2name["icChnlCfgName"] != channel_default_name:
                channel_name = channel_default_name
                change_flag = True
    if change_flag:
        # Merge the default name back (there is no real NETCONF delete here).
        conf_str = CE_NC_MERGE_CHANNEL_INFO_HEADER
        if channel_id:
            conf_str += "<icChnlId>%s</icChnlId>" % channel_id
        if channel_name:
            conf_str += "<icChnlCfgName>%s</icChnlCfgName>" % channel_name
        conf_str += CE_NC_MERGE_CHANNEL_INFO_TAIL
        recv_xml = set_nc_config(self.module, conf_str)
        if "<ok/>" not in recv_xml:
            self.module.fail_json(
                msg='Error: Merge syslog channel id failed.')
        self.updates_cmd.append("undo info-center channel %s" % channel_id)
        self.changed = True
def get_channel_direct_dict(self):
    """Fetch the channel/output-direction bindings from the device.

    Returns {} when the device reports no data, otherwise
    {"channelDirectInfos": [{"icOutDirect": ..., "icCfgChnlId": ...}, ...]}.
    """
    channel_direct_info = dict()
    # Query the bindings for self.channel_out_direct.
    conf_str = CE_NC_GET_CHANNEL_DIRECT_INFO % self.channel_out_direct
    xml_str = get_nc_config(self.module, conf_str)
    if "<data/>" in xml_str:
        return channel_direct_info
    # Strip newlines and namespaces so plain tag paths work below.
    xml_str = xml_str.replace('\r', '').replace('\n', '').\
        replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
        replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
    root = ElementTree.fromstring(xml_str)
    channel_direct_info["channelDirectInfos"] = list()
    dir_channels = root.findall("data/syslog/icDirChannels/icDirChannel")
    if dir_channels:
        for ic_dir_channel in dir_channels:
            channel_direct_dict = dict()
            for ele in ic_dir_channel:
                if ele.tag in ["icOutDirect", "icCfgChnlId"]:
                    channel_direct_dict[ele.tag] = ele.text
            channel_direct_info["channelDirectInfos"].append(
                channel_direct_dict)
    return channel_direct_info
def is_exist_out_direct(self, out_direct, channel_id):
    """Return True when this output direction is already bound to the channel id."""
    if not self.channel_direct_info:
        return False
    return any(rec["icOutDirect"] == out_direct and rec["icCfgChnlId"] == channel_id
               for rec in self.channel_direct_info["channelDirectInfos"])
def config_merge_out_direct(self, out_direct, channel_id):
    """Bind an output direction to a channel via NETCONF merge if not already bound."""
    # Skip the NETCONF call when the binding already exists.
    if not self.is_exist_out_direct(out_direct, channel_id):
        conf_str = CE_NC_MERGE_CHANNEL_DIRECT_HEADER
        if out_direct:
            conf_str += "<icOutDirect>%s</icOutDirect>" % out_direct
        if channel_id:
            conf_str += "<icCfgChnlId>%s</icCfgChnlId>" % channel_id
        conf_str += CE_NC_MERGE_CHANNEL_DIRECT_TAIL
        recv_xml = set_nc_config(self.module, conf_str)
        if "<ok/>" not in recv_xml:
            self.module.fail_json(
                msg='Error: Merge syslog channel out direct failed.')
        # Record the equivalent CLI command and mark the module as changed.
        self.updates_cmd.append(
            "info-center %s channel %s" % (out_direct, channel_id))
        self.changed = True
def delete_merge_out_direct(self, out_direct, channel_id):
    """'Delete' an output-direction binding by merging its default channel back in.

    A change is only sent when the device's current channel for this
    direction differs from the direction's default channel id.
    """
    change_flag = False
    channel_id_default = get_out_direct_default(out_direct)
    # Case 1: caller gave a channel id -- revert to the default only if the
    # device currently has exactly that binding and it is not the default.
    if channel_id:
        for id2name in self.channel_direct_info["channelDirectInfos"]:
            if id2name["icOutDirect"] == out_direct and id2name["icCfgChnlId"] == channel_id:
                if channel_id != channel_id_default:
                    channel_id = channel_id_default
                    change_flag = True
    # Case 2: no channel id given -- revert whenever the current binding
    # is not already the default.
    if not channel_id:
        for id2name in self.channel_direct_info["channelDirectInfos"]:
            if id2name["icOutDirect"] == out_direct and id2name["icCfgChnlId"] != channel_id_default:
                channel_id = channel_id_default
                change_flag = True
    if change_flag:
        # Merge the default channel back (no real NETCONF delete here).
        conf_str = CE_NC_MERGE_CHANNEL_DIRECT_HEADER
        if out_direct:
            conf_str += "<icOutDirect>%s</icOutDirect>" % out_direct
        if channel_id:
            conf_str += "<icCfgChnlId>%s</icCfgChnlId>" % channel_id
        conf_str += CE_NC_MERGE_CHANNEL_DIRECT_TAIL
        recv_xml = set_nc_config(self.module, conf_str)
        if "<ok/>" not in recv_xml:
            self.module.fail_json(
                msg='Error: Merge syslog channel out direct failed.')
        # NOTE(review): the recorded CLI always says "logfile" even though
        # out_direct may be another direction -- confirm this is intended.
        self.updates_cmd.append("undo info-center logfile channel")
        self.changed = True
def get_filter_dict(self):
    """Fetch the configured syslog filters from the device.

    Returns {} when the device reports no data, otherwise
    {"filterInfos": [{"icFeatureName": ..., "icFilterLogName": ...}, ...]}.
    """
    filter_info = dict()
    # Query all configured filters.
    conf_str = CE_NC_GET_FILTER_INFO
    xml_str = get_nc_config(self.module, conf_str)
    if "<data/>" in xml_str:
        return filter_info
    # Strip newlines and namespaces so plain tag paths work below.
    xml_str = xml_str.replace('\r', '').replace('\n', '').\
        replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
        replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
    root = ElementTree.fromstring(xml_str)
    filter_info["filterInfos"] = list()
    ic_filters = root.findall("data/syslog/icFilters/icFilter")
    if ic_filters:
        for ic_filter in ic_filters:
            filter_dict = dict()
            for ele in ic_filter:
                if ele.tag in ["icFeatureName", "icFilterLogName"]:
                    filter_dict[ele.tag] = ele.text
            filter_info["filterInfos"].append(filter_dict)
    return filter_info
def is_exist_filter(self, filter_feature_name, filter_log_name):
    """Return True when the (feature name, log name) filter is already configured."""
    if not self.filter_info:
        return False
    return any(rec["icFeatureName"] == filter_feature_name
               and rec["icFilterLogName"] == filter_log_name
               for rec in self.filter_info["filterInfos"])
def config_merge_filter(self, filter_feature_name, filter_log_name):
    """Create a syslog filter via NETCONF if it is not already configured."""
    # Skip the NETCONF call when the filter already exists.
    if not self.is_exist_filter(filter_feature_name, filter_log_name):
        conf_str = CE_NC_CREATE_CHANNEL_FILTER_HEADER
        conf_str += "<icFilterFlag>true</icFilterFlag>"
        if filter_feature_name:
            conf_str += "<icFeatureName>%s</icFeatureName>" % filter_feature_name
        if filter_log_name:
            conf_str += "<icFilterLogName>%s</icFilterLogName>" % filter_log_name
        conf_str += CE_NC_CREATE_CHANNEL_FILTER_TAIL
        recv_xml = set_nc_config(self.module, conf_str)
        if "<ok/>" not in recv_xml:
            self.module.fail_json(msg='Error: Merge syslog filter failed.')
        # Record the equivalent CLI command and mark the module as changed.
        self.updates_cmd.append("info-center filter-id bymodule-alias %s %s"
                                % (filter_feature_name, filter_log_name))
        self.changed = True
def delete_merge_filter(self, filter_feature_name, filter_log_name):
"""delete filter"""
change_flag = False
if self.is_exist_filter(filter_feature_name, filter_log_name):
for id2name in self.filter_info["filterInfos"]:
if id2name["icFeatureName"] == filter_feature_name and id2name["icFilterLogName"] == filter_log_name:
change_flag = True
if change_flag:
conf_str = CE_NC_DELETE_CHANNEL_FILTER_HEADER
conf_str += "<icFilterFlag>true</icFilterFlag>"
if filter_feature_name:
conf_str += "<icFeatureName>%s</icFeatureName>" % filter_feature_name
if filter_log_name:
conf_str += "<icFilterLogName>%s</icFilterLogName>" % filter_log_name
conf_str += CE_NC_DELETE_CHANNEL_FILTER_TAIL
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
msg='Error: Merge syslog channel out direct failed.')
self.updates_cmd.append("undo info-center filter-id bymodule-alias %s %s"
% (filter_feature_name, filter_log_name))
self.changed = True
    def get_server_ip_dict(self):
        """Fetch the syslog server (loghost) configuration from the device.

        Returns an empty dict when the device reports no data; otherwise a
        dict with a "serverIpInfos" list of per-server attribute dicts.
        """
        server_ip_info = dict()
        # get server ip info
        is_default_vpn = "false"
        # Normalize an unset flag to an explicit False; the NETCONF request
        # below needs the lowercase string form ("true"/"false").
        # NOTE(review): this mutates self.is_default_vpn / self.vrf_name as a
        # side effect — presumably intentional so later calls see the
        # normalized values; confirm against the rest of the module.
        if not self.is_default_vpn:
            self.is_default_vpn = False
        if self.is_default_vpn is True:
            is_default_vpn = "true"
        # An empty VRF name means the public network instance.
        if not self.vrf_name:
            self.vrf_name = "_public_"
        conf_str = CE_NC_GET_SERVER_IP_INFO_HEADER % (
            self.ip_type, self.server_ip, self.vrf_name, is_default_vpn)
        conf_str += CE_NC_GET_SERVER_IP_INFO_TAIL
        xml_str = get_nc_config(self.module, conf_str)
        # "<data/>" is the empty NETCONF reply: nothing configured.
        if "<data/>" in xml_str:
            return server_ip_info
        # Strip newlines and namespace declarations so plain (un-namespaced)
        # XPath expressions work on the tree.
        xml_str = xml_str.replace('\r', '').replace('\n', '').\
            replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
        root = ElementTree.fromstring(xml_str)
        server_ip_info["serverIpInfos"] = list()
        syslog_servers = root.findall("data/syslog/syslogServers/syslogServer")
        if syslog_servers:
            for syslog_server in syslog_servers:
                server_dict = dict()
                # Copy only the attributes this module manages.
                for ele in syslog_server:
                    if ele.tag in ["ipType", "serverIp", "vrfName", "level", "serverPort", "facility", "chnlId",
                                   "chnlName", "timestamp", "transportMode", "sslPolicyName", "isDefaultVpn",
                                   "sourceIP", "isBriefFmt"]:
                        server_dict[ele.tag] = ele.text
                server_ip_info["serverIpInfos"].append(server_dict)
        return server_ip_info
def config_merge_loghost(self):
"""config loghost ip or dns"""
| |
from logbook import Logger
from mock import patch, create_autospec, MagicMock, Mock
import pandas as pd
from ccxt.base.errors import RequestTimeout
from catalyst.exchange.exchange_errors import ExchangeRequestError
from .base import BaseExchangeTestCase
from catalyst.exchange.ccxt.ccxt_exchange import CCXT
from catalyst.exchange.exchange_execution import ExchangeLimitOrder
from catalyst.exchange.utils.exchange_utils import get_exchange_auth
from catalyst.finance.order import Order
log = Logger('test_ccxt')
class TestCCXT(BaseExchangeTestCase):
    @classmethod
    def setup(self):
        """Create and initialize a live CCXT wrapper for bittrex.

        NOTE(review): declared as a classmethod but the first parameter is
        named ``self``, so ``self`` is actually the class object and the
        ``exchange`` attribute is shared by every test method.
        """
        exchange_name = 'bittrex'
        # Load API credentials from the local exchange auth store.
        auth = get_exchange_auth(exchange_name)
        self.exchange = CCXT(
            exchange_name=exchange_name,
            key=auth['key'],
            secret=auth['secret'],
            password='',
            quote_currency='usdt',
        )
        # Loads markets/assets from the exchange.
        self.exchange.init()
def create_orders_dict(self, asset, last_order):
"""
create an orders dict which mocks the .orders object
:param asset: TradingPair
:param last_order: bool, adds another order to the dict.
mocks the functionality of the fetchOrder methods
:return: dict(Order)
"""
orders = dict()
orders['208612980769'] = Order(
dt=pd.to_datetime('2018-05-01 17:34', utc=True),
asset=asset,
amount=2,
stop=None,
limit=0.0025,
id='208612980769'
)
orders['656797594'] = Order(
dt=pd.to_datetime('2018-05-01 18:34', utc=True),
asset=asset,
amount=1,
stop=None,
limit=0.0027,
id='656797594'
)
orders['656797494'] = Order(
dt=pd.to_datetime('2018-05-01 18:54', utc=True),
asset=asset,
amount=7,
stop=None,
limit=0.00246,
id='656797494'
)
if last_order:
orders['111'] = Order(
dt=pd.to_datetime('2018-05-01 19:54', utc=True),
asset=asset,
amount=2,
stop=None,
limit=0.00254,
id='111'
)
return orders
def create_trades_dict(self, symbol):
"""
:param symbol: only for the side effect
:return: list(dict)
"""
trades = list()
trades.append(
{'info': {'globalTradeID': 369156767, 'tradeID': '8415970',
'rate': '0.0025', 'amount': '0.78', 'total': '0.0019',
'fee': '0.00250000', 'orderNumber': '208612980769',
'type': 'buy', 'category': 'exchange'},
'datetime': pd.Timestamp.utcnow(),
'symbol': 'ETH/USDT',
'id': '8415970',
'order': '208612980769',
'type': 'limit',
'side': 'buy',
'price': 0.0025,
'amount': 0.78,
'cost': 0.0019,
'fee': {'type': None, 'rate': 0.0025,
'cost': 0.0019690912999999997, 'currency': 'ETH'}
}
)
trades.append(
{'info': {'globalTradeID': 369156780, 'tradeID': '8415971',
'rate': '0.0025', 'amount': '1.22', 'total': '0.0031',
'fee': '0.0025', 'orderNumber': '208612980769',
'type': 'buy', 'category': 'exchange'},
'datetime': pd.Timestamp.utcnow(),
'symbol': 'ETH/USDT',
'id': '8415971',
'order': '208612980769',
'type': 'limit',
'side': 'buy',
'price': 0.0025,
'amount': 1.22,
'cost': 0.0031,
'fee': {'type': None, 'rate': 0.0025,
'cost': 0.0031, 'currency': 'ETH'}
}
)
if self.last_trade:
trades.append(
{'info': {'globalTradeID': 369156784, 'tradeID': '8415972',
'rate': '0.0025', 'amount': '0.78',
'total': '0.0019', 'fee': '0.0025',
'orderNumber': '111', 'type': 'buy',
'category': 'exchange'},
'datetime': pd.Timestamp.utcnow(),
'symbol': 'ETH/USDT',
'id': '8415972',
'order': '111',
'type': 'limit',
'side': 'buy',
'price': 0.0025,
'amount': 2,
'cost': 0.005,
'fee': {'type': None, 'rate': 0.0025,
'cost': 0.005, 'currency': 'ETH'}
}
)
return trades
def mod_last_order(self):
"""
adds the last order into .orders
:return:
"""
self.last_order = True
asset = [pair for pair in self.exchange.assets if
pair.symbol == 'eth_usdt'][0]
orders_dict = self.create_orders_dict(asset, self.last_order)
self.exchange.api.orders = orders_dict
def compare_orders(self, observed, expected):
"""
compares orders arguments to make sure that they are equal
:param observed: Order
:param expected: Order
:return: bool
"""
return observed.id == expected.id and \
observed.amount == expected.amount and \
observed.asset == expected.asset and \
observed.limit == expected.limit
    def test_create_order_timeout_order(self):
        """
        create_order method
        tests the handling of a RequestTimeout exception and locating the
        order, if was created, using the fetchOrders method
        :return:
        """
        asset = [pair for pair in self.exchange.assets if
                 pair.symbol == 'eth_usdt'][0]
        amount = 2
        is_buy = True
        self.last_order = False
        price = 0.00254
        # Mock the ccxt API: create_order always times out, and the exchange
        # advertises only the fetchOrders capability.
        self.exchange.api = MagicMock(
            spec=[u'create_order', u'fetch_orders', u'orders', u'has',
                  u'amount_to_precision'])
        self.exchange.api.create_order.side_effect = RequestTimeout
        orders_dict = self.create_orders_dict(asset, self.last_order)
        self.exchange.api.orders = orders_dict
        self.exchange.api.has = {'fetchOrders': True}
        # fetch_orders injects the '111' order, simulating that the timed-out
        # create actually reached the exchange.
        self.exchange.api.fetch_orders.side_effect = self.mod_last_order
        mock_style = create_autospec(ExchangeLimitOrder, return_value=price)
        mock_style.get_limit_price.return_value = price
        style = mock_style
        self.exchange.api.amount_to_precision = Mock(
            return_value=float(amount))
        with patch('catalyst.exchange.ccxt.ccxt_exchange.CCXT.get_symbol') as \
                mock_symbol:
            mock_symbol.return_value = 'ETH/USDT'
            observed_fetchOrders_order = self.exchange.create_order(
                asset, amount, is_buy, style)
        # create_order should have recovered the '111' order via fetchOrders.
        expected_fetchOrders_order = Order(
            dt=pd.to_datetime('2018-05-01 19:54', utc=True),
            asset=asset,
            amount=amount,
            stop=None,
            limit=price,
            id='111'
        )
        assert self.compare_orders(observed_fetchOrders_order,
                                   expected_fetchOrders_order) is True
    def test_create_order_timeout_open(self):
        """
        create_order method
        tests the handling of a RequestTimeout exception and locating the
        order, if was created, using the fetchOpenOrders method
        :return:
        """
        asset = [pair for pair in self.exchange.assets if
                 pair.symbol == 'eth_usdt'][0]
        amount = 2
        is_buy = True
        self.last_order = False
        price = 0.00254
        # Mock the ccxt API: create_order always times out; fetchOrders is
        # only 'emulated', so the fallback should use fetchOpenOrders.
        self.exchange.api = MagicMock(
            spec=[u'create_order', u'fetch_open_orders',
                  u'fetch_orders', u'orders', u'has', u'amount_to_precision'
                  ]
        )
        self.exchange.api.create_order.side_effect = RequestTimeout
        orders_dict = self.create_orders_dict(asset, self.last_order)
        self.exchange.api.orders = orders_dict
        self.exchange.api.has = {'fetchOpenOrders': True,
                                 'fetchOrders': 'emulated',
                                 'fetchClosedOrders': True
                                 }
        # fetch_open_orders injects the '111' order, simulating that the
        # timed-out create actually reached the exchange.
        self.exchange.api.fetch_open_orders.side_effect = self.mod_last_order
        mock_style = create_autospec(ExchangeLimitOrder,
                                     return_value=price)
        mock_style.get_limit_price.return_value = price
        style = mock_style
        self.exchange.api.amount_to_precision = Mock(
            return_value=float(amount))
        with patch('catalyst.exchange.ccxt.ccxt_exchange.CCXT.get_symbol') as \
                mock_symbol:
            mock_symbol.return_value = 'ETH/USDT'
            observed_fetchOpen_order = self.exchange.create_order(
                asset, amount, is_buy, style)
        # create_order should have recovered the '111' order via
        # fetchOpenOrders.
        expected_fetchOpen_order = Order(
            dt=pd.to_datetime('2018-05-01 19:54', utc=True),
            asset=asset,
            amount=amount,
            stop=None,
            limit=price,
            id='111'
        )
        assert self.compare_orders(observed_fetchOpen_order,
                                   expected_fetchOpen_order) is True
    def test_create_order_timeout_closed(self):
        """
        create_order method
        tests the handling of a RequestTimeout exception and locating the
        order, if was created, using the fetchClosedOrders method
        :return:
        """
        asset = [pair for pair in self.exchange.assets if
                 pair.symbol == 'eth_usdt'][0]
        amount = 2
        is_buy = True
        self.last_order = False
        price = 0.00254
        # Mock the ccxt API: create_order always times out; neither
        # fetchOrders nor fetchOpenOrders is usable, so the fallback should
        # use fetchClosedOrders.
        self.exchange.api = MagicMock(
            spec=[u'create_order', u'fetch_closed_orders', u'orders', u'has',
                  u'amount_to_precision'])
        self.exchange.api.create_order.side_effect = RequestTimeout
        orders_dict = self.create_orders_dict(asset, self.last_order)
        self.exchange.api.orders = orders_dict
        self.exchange.api.has = {'fetchOpenOrders': False,
                                 'fetchClosedOrders': True
                                 }
        # fetch_closed_orders injects the '111' order, simulating that the
        # timed-out create actually reached the exchange.
        self.exchange.api.fetch_closed_orders.side_effect = self.mod_last_order
        mock_style = create_autospec(ExchangeLimitOrder,
                                     return_value=price)
        mock_style.get_limit_price.return_value = price
        style = mock_style
        self.exchange.api.amount_to_precision = Mock(
            return_value=float(amount))
        with patch('catalyst.exchange.ccxt.ccxt_exchange.CCXT.get_symbol') as \
                mock_symbol:
            mock_symbol.return_value = 'ETH/USDT'
            observed_fetchClosed_order = self.exchange.create_order(
                asset, amount, is_buy, style)
        # create_order should have recovered the '111' order via
        # fetchClosedOrders.
        expected_fetchClosed_order = Order(
            dt=pd.to_datetime('2018-05-01 19:54', utc=True),
            asset=asset,
            amount=amount,
            stop=None,
            limit=price,
            id='111'
        )
        assert self.compare_orders(observed_fetchClosed_order,
                                   expected_fetchClosed_order) is True
    def test_create_order_timeout_trade(self):
        """
        create_order method
        tests the handling of a RequestTimeout exception and locating the
        order, if was created, using the fetchTrades method.
        checks as well, the case that the order was not created at all,
        and makes sure an exception is raised in order to retry the
        creation of the order.
        :return:
        """
        asset = [pair for pair in self.exchange.assets if
                 pair.symbol == 'eth_usdt'][0]
        amount = 2
        is_buy = True
        self.last_order = False
        self.last_trade = False
        price = 0.00254
        stop_price = 0.00354
        # Mock the ccxt API: create_order always times out; no order-listing
        # endpoint is usable, so the fallback has to reconstruct the order
        # from fetchMyTrades.
        self.exchange.api = MagicMock(
            spec=[u'create_order', u'fetch_my_trades', u'has',
                  u'fetch_open_orders', u'orders', u'fetch_closed_orders',
                  u'amount_to_precision']
        )
        self.exchange.api.create_order.side_effect = RequestTimeout
        orders_dict = self.create_orders_dict(asset, self.last_order)
        self.exchange.api.orders = orders_dict
        self.exchange.api.has = {'fetchClosedOrders': 'emulated',
                                 'fetchOrders': False,
                                 'fetchMyTrades': True,
                                 }
        # Returns the canned trades list; whether it contains the trade for
        # order '111' is controlled by self.last_trade.
        self.exchange.api.fetch_my_trades.side_effect = self.create_trades_dict
        mock_style = create_autospec(ExchangeLimitOrder,
                                     return_value=price)
        mock_style.get_limit_price.return_value = price
        mock_style.get_stop_price.return_value = stop_price
        style = mock_style
        self.exchange.api.amount_to_precision = Mock(
            return_value=float(amount))
        # check the case there are no new trades and an exception is raised
        with patch('catalyst.exchange.ccxt.ccxt_exchange.CCXT.get_symbol') as \
                mock_symbol:
            mock_symbol.return_value = 'ETH/USDT'
            try:
                observed_fetchTrade_None = self.exchange.create_order(
                    asset, amount, is_buy, style)
                print(observed_fetchTrade_None)
            except ExchangeRequestError:
                # expected: no trades found, caller should retry the create
                pass
        # check the case there are trades which form a new order
        self.last_trade = True
        with patch('catalyst.exchange.ccxt.ccxt_exchange.CCXT.get_symbol') as \
                mock_symbol:
            mock_symbol.return_value = 'ETH/USDT'
            observed_fetchTrade_order = self.exchange.create_order(
                asset, amount, is_buy, style)
        expected_fetchTrade_order = Order(
            dt=pd.Timestamp.utcnow(),
            asset=asset,
            amount=amount,
            stop=stop_price,
            limit=price,
            id='111'
        )
        assert self.compare_orders(observed_fetchTrade_order,
                                   expected_fetchTrade_order) is True
        # check the case there are no new trades or orders and an exception is
        # raised
        self.last_trade = False
        self.exchange.api.has['fetchOpenOrders'] = True
        with patch('catalyst.exchange.ccxt.ccxt_exchange.CCXT.get_symbol') as \
                mock_symbol:
            mock_symbol.return_value = 'ETH/USDT'
            try:
                observed_fetchTradeOrder_None = self.exchange.create_order(
                    asset, amount, is_buy, style)
                print(observed_fetchTradeOrder_None)
            except ExchangeRequestError:
                # expected: nothing found anywhere, caller should retry
                pass
    def test_process_order_timeout(self):
        """
        in case of a requestTimeout make sure that the process_order method
        returns an exception so the retry method can request the trades again.
        :return:
        """
        asset = [pair for pair in self.exchange.assets if
                 pair.symbol == 'eth_usdt'][0]
        amount = 2
        price = 0.0025
        order = Order(
            dt=pd.to_datetime('2018-05-01 19:54', utc=True),
            asset=asset,
            amount=amount,
            stop=None,
            limit=price,
            id='111'
        )
        self.exchange.api = MagicMock(
            spec=[u'create_order', u'fetch_my_trades', u'has',
                  u'fetch_open_orders', u'orders', u'fetch_closed_orders']
        )
        self.exchange.api.has = {'fetchClosedOrders': 'emulated',
                                 'fetchOrders': False,
                                 'fetchMyTrades': True,
                                 }
        # get_trades times out; process_order must surface this as an
        # ExchangeRequestError rather than swallowing it.
        with patch('catalyst.exchange.ccxt.ccxt_exchange.CCXT.get_trades') as \
                mock_trades:
            mock_trades.side_effect = RequestTimeout
            try:
                observed_transactions = self.exchange.process_order(order)
                print(observed_transactions)
            except ExchangeRequestError:
                # expected: caller's retry logic re-requests the trades
                pass
# def test_order(self):
# log.info('creating order')
# asset = self.exchange.get_asset('eth_usdt')
# order_id = self.exchange.order(
# asset=asset,
# style=ExchangeLimitOrder(limit_price=1000),
# amount=1.01,
# )
# log.info('order created {}'.format(order_id))
# assert order_id is not None
# pass
#
# def test_open_orders(self):
# # log.info('retrieving open orders')
# # asset = self.exchange.get_asset('neo_eth')
# # orders = self.exchange.get_open_orders(asset)
# pass
#
# def test_get_order(self):
# log.info('retrieving order')
# order = self.exchange.get_order('2631386', 'neo_eth')
# # order = self.exchange.get_order('2631386')
# assert isinstance(order, Order)
# pass
#
# def test_cancel_order(self, ):
# log.info('cancel order')
# self.exchange.cancel_order('2631386', 'neo_eth')
# pass
#
# def test_get_candles(self):
# log.info('retrieving candles')
# candles = self.exchange.get_candles(
# freq='1T',
# assets=[self.exchange.get_asset('eth_btc')],
# bar_count=200,
# # start_dt=pd.to_datetime('2017-09-01', utc=True),
# )
#
# for asset in candles:
# df = pd.DataFrame(candles[asset])
# df.set_index('last_traded', drop=True, inplace=True)
#
# set_print_settings()
# print('got {} candles'.format(len(df)))
# print(df.head(10))
# print(df.tail(10))
# pass
#
# def test_tickers(self):
# log.info('retrieving tickers')
# assets = [
# self.exchange.get_asset('ada_eth'),
# self.exchange.get_asset('zrx_eth'),
# ]
# tickers = self.exchange.tickers(assets)
# assert len(tickers) == 2
# pass
#
# def test_my_trades(self):
# asset | |
<gh_stars>10-100
import datetime
from energym.envs.env_fmu_eplus import EnvEPlusFMU
# Action-space specification: two building-level availability switches plus
# one thermostat setpoint per conditioned zone.
INPUTS_SPECS = {
    "Bd_Cooling_onoff_sp": {
        "type": "discrete",
        "size": 2,
        "default": 1,
        "description": "Cooling availability on/off setpoint.",
    },
    "Bd_Heating_onoff_sp": {
        "type": "discrete",
        "size": 2,
        "default": 1,
        "description": "Heating availability on/off setpoint.",
    },
}

# Conditioned zones present in the model (zones 8-14 and 21-24 are absent).
for _zone in (1, 2, 3, 4, 5, 6, 7, 15, 16, 17, 18, 19, 20, 25):
    INPUTS_SPECS["Z{:02d}_T_Thermostat_sp".format(_zone)] = {
        "type": "scalar",
        "lower_bound": 16,
        "upper_bound": 26,
        "default": 20,
        "description": "Zone {} thermostat setpoint (°C).".format(_zone),
    }
del _zone
# Observation-space specification: building-level power/weather channels plus
# per-zone fan setpoint readback, temperature, and thermostat readback.
OUTPUTS_SPECS = {
    "Bd_Pw_All": {
        "type": "scalar",
        "lower_bound": 0,
        "upper_bound": 5000,
        "description": "Building power consumption (W).",
    },
    "Ext_Irr": {
        "type": "scalar",
        "lower_bound": 0,
        "upper_bound": 1000,
        "description": "Direct normal radiation (W/m2).",
    },
    "Ext_RH": {
        "type": "scalar",
        "lower_bound": 0,
        "upper_bound": 100,
        "description": "Outdoor relative humidity (%RH).",
    },
    "Ext_T": {
        "type": "scalar",
        "lower_bound": -10,
        "upper_bound": 40,
        "description": "Outdoor temperature (°C).",
    },
    "Fa_Pw_All": {
        "type": "scalar",
        "lower_bound": 0,
        "upper_bound": 1e4,
        "description": "Total power consumption (W).",
    },
    "Fa_Pw_HVAC": {
        "type": "scalar",
        "lower_bound": 0,
        "upper_bound": 1e4,
        "description": "HVAC power consumption (W).",
    },
    "Fa_Pw_PV": {
        "type": "scalar",
        "lower_bound": 0,
        "upper_bound": 2e3,
        "description": "PV power production (W).",
    },
}


def _zone_output_channels(zone, second_fan):
    """Build the output channels for one zone; some zones have a second fan."""
    tag = "Z{:02d}".format(zone)
    channels = {
        tag + "_Fl_Fan_sp_out": {
            "type": "scalar",
            "lower_bound": 0,
            "upper_bound": 1,
            "description": "Zone {} fan flow setpoint.".format(zone),
        },
    }
    if second_fan:
        channels[tag + "_Fl_Fan1_sp_out"] = {
            "type": "scalar",
            "lower_bound": 0,
            "upper_bound": 1,
            "description": "Zone {} fan 1 flow setpoint.".format(zone),
        }
    channels[tag + "_T"] = {
        "type": "scalar",
        "lower_bound": 10,
        "upper_bound": 40,
        "description": "Zone {} temperature (°C).".format(zone),
    }
    channels[tag + "_T_Thermostat_sp_out"] = {
        "type": "scalar",
        "lower_bound": 16,
        "upper_bound": 26,
        "description": "Zone {} thermostat setpoint (°C).".format(zone),
    }
    return channels


# Conditioned zones present in the model; zones 3, 17, 19, 20 and 25 expose
# a second fan flow setpoint readback.
for _z in (1, 2, 3, 4, 5, 6, 7, 15, 16, 17, 18, 19, 20, 25):
    OUTPUTS_SPECS.update(
        _zone_output_channels(_z, second_fan=_z in (3, 17, 19, 20, 25)))
del _z
default_kpi_options = {
"kpi1": {"name": "Fa_Pw_All", "type": "avg"},
"kpi2": {"name": "Z01_T", "type": "avg_dev", "target": [19, 24]},
"kpi3": {"name": "Z02_T", "type": "avg_dev", "target": [19, 24]},
"kpi4": {"name": "Z03_T", "type": "avg_dev", "target": [19, 24]},
"kpi5": {"name": "Z04_T", "type": "avg_dev", "target": [19, 24]},
"kpi6": {"name": "Z05_T", "type": "avg_dev", "target": [19, 24]},
"kpi7": {"name": "Z06_T", "type": "avg_dev", "target": [19, 24]},
"kpi8": {"name": "Z07_T", "type": "avg_dev", "target": [19, 24]},
"kpi16": {"name": "Z15_T", "type": "avg_dev", "target": [19, 24]},
"kpi17": {"name": "Z16_T", "type": "avg_dev", "target": [19, 24]},
"kpi18": {"name": "Z17_T", "type": "avg_dev", "target": [19, 24]},
"kpi19": {"name": "Z18_T", "type": "avg_dev", "target": [19, 24]},
"kpi20": {"name": "Z19_T", "type": "avg_dev", "target": [19, 24]},
"kpi21": {"name": "Z20_T", "type": "avg_dev", "target": [19, 24]},
"kpi26": {"name": "Z25_T", "type": "avg_dev", "target": [19, 24]},
"kpi27": {"name": "Z01_T", "type": "tot_viol", "target": [19, 24]},
"kpi28": {"name": "Z02_T", "type": "tot_viol", "target": [19, 24]},
"kpi29": {"name": "Z03_T", "type": "tot_viol", "target": [19, 24]},
"kpi30": {"name": "Z04_T", "type": "tot_viol", "target": [19, 24]},
"kpi31": {"name": "Z05_T", "type": "tot_viol", "target": [19, 24]},
"kpi32": {"name": "Z06_T", "type": "tot_viol", "target": [19, 24]},
"kpi33": {"name": "Z07_T", "type": "tot_viol", "target": [19, 24]},
"kpi41": {"name": "Z15_T", "type": "tot_viol", "target": [19, 24]},
"kpi42": {"name": "Z16_T", "type": "tot_viol", "target": [19, 24]},
"kpi43": {"name": "Z17_T", "type": "tot_viol", "target": [19, 24]},
"kpi44": {"name": "Z18_T", "type": "tot_viol", "target": [19, 24]},
"kpi45": {"name": "Z19_T", "type": "tot_viol", | |
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/CTRL/Transformer-XL/XLNet)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
from tqdm import trange
import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
import json
from transformers import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, XLMConfig, CTRLConfig
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from transformers import XLNetLMHeadModel, XLNetTokenizer
from transformers import TransfoXLLMHeadModel, TransfoXLTokenizer
from transformers import CTRLLMHeadModel, CTRLTokenizer
from transformers import XLMWithLMHeadModel, XLMTokenizer
from itertools import permutations
# Root logger configured once for the whole script; module loggers inherit
# this format and level.
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)
MAX_LENGTH = int(10000)  # Hardcoded max length to avoid infinite loop
# Every pretrained checkpoint shortcut known to the supported config classes
# (used for CLI choices/help).
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, XLMConfig, CTRLConfig)), ())
# Maps the --model_type CLI value to its (model class, tokenizer class) pair.
MODEL_CLASSES = {
    'gpt2': (GPT2LMHeadModel, GPT2Tokenizer),
    'ctrl': (CTRLLMHeadModel, CTRLTokenizer),
    'openai-gpt': (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
    'xlnet': (XLNetLMHeadModel, XLNetTokenizer),
    'transfo-xl': (TransfoXLLMHeadModel, TransfoXLTokenizer),
    'xlm': (XLMWithLMHeadModel, XLMTokenizer),
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by <NAME>
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich <NAME>, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
def set_seed(args):
    """Seed the numpy and torch RNGs from ``args.seed`` for reproducibility."""
    seed = args.seed
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Seed every CUDA device as well when GPUs are in use.
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
        Args:
            logits: logits distribution shape (vocabulary size)
            top_k > 0: keep only top k tokens with highest probability (top-k filtering).
            top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
                Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
        Note: modifies ``logits`` in place and returns it.
    """
    assert logits.dim() == 1  # batch size 1 for now - could be updated for more but the code would be less clear
    top_k = min(top_k, logits.size(-1))  # never ask for more than the vocabulary size
    if top_k > 0:
        # Mask every logit strictly below the k-th largest value.
        kth_value = torch.topk(logits, top_k)[0][..., -1, None]
        logits[logits < kth_value] = filter_value
    if top_p > 0.0:
        desc_logits, desc_indices = torch.sort(logits, descending=True)
        cum_probs = torch.cumsum(F.softmax(desc_logits, dim=-1), dim=-1)
        # Drop tokens once the cumulative probability exceeds the threshold.
        drop_sorted = cum_probs > top_p
        # Shift the mask right so the first token that crosses the threshold
        # is still kept.
        drop_sorted[..., 1:] = drop_sorted[..., :-1].clone()
        drop_sorted[..., 0] = 0
        logits[desc_indices[drop_sorted]] = filter_value
    return logits
def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0, repetition_penalty=1.0,
                    is_xlnet=False, is_xlm_mlm=False, xlm_mask_token=None, xlm_lang=None, device='cpu'):
    """Autoregressively sample `length` new tokens from `model`, seeded with `context`.

    `context` (a sequence of token ids) is tiled to `num_samples` rows; the grown
    tensor of shape (num_samples, len(context) + length) is returned.
    temperature == 0 switches to greedy argmax decoding; top_k/top_p are handed to
    top_k_top_p_filtering; repetition_penalty divides logits of already generated
    tokens (CTRL, https://arxiv.org/abs/1909.05858).

    NOTE(review): next_token_logits is read from outputs[0][0, -1, :], i.e. only
    batch row 0 -- for num_samples > 1 every sample is driven by row 0's logits.
    Confirm whether that is intended.
    """
    context = torch.tensor(context, dtype=torch.long, device=device)
    # tile the prompt: one row per requested sample
    context = context.unsqueeze(0).repeat(num_samples, 1)
    generated = context
    with torch.no_grad():
        for _ in trange(length):
            inputs = {'input_ids': generated}
            if is_xlnet:
                # XLNet is a direct (predict same token, not next token) and bi-directional model by default
                # => need one additional dummy token in the input (will be masked), attention mask and target mapping (see model docstring)
                input_ids = torch.cat((generated, torch.zeros((1, 1), dtype=torch.long, device=device)), dim=1)
                perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float, device=device)
                perm_mask[:, :, -1] = 1.0  # Previous tokens don't see last token
                target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float, device=device)
                target_mapping[0, 0, -1] = 1.0  # predict last token
                inputs = {'input_ids': input_ids, 'perm_mask': perm_mask, 'target_mapping': target_mapping}
            if is_xlm_mlm and xlm_mask_token:
                # XLM MLM models are direct models (predict same token, not next token)
                # => need one additional dummy token in the input (will be masked and guessed)
                input_ids = torch.cat((generated, torch.full((1, 1), xlm_mask_token, dtype=torch.long, device=device)), dim=1)
                inputs = {'input_ids': input_ids}
            if xlm_lang is not None:
                inputs["langs"] = torch.tensor([xlm_lang] * inputs["input_ids"].shape[1], device=device).view(1, -1)
            outputs = model(**inputs)  # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet/CTRL (cached hidden-states)
            next_token_logits = outputs[0][0, -1, :] / (temperature if temperature > 0 else 1.)
            # reptition penalty from CTRL (https://arxiv.org/abs/1909.05858)
            # NOTE(review): set(generated) iterates the 2-D tensor's rows, not
            # token ids. With num_samples == 1 the fancy index still penalizes
            # previously generated ids, but with num_samples > 1 rows from all
            # samples are mixed -- compare against the upstream fix
            # set(generated.view(-1).tolist()).
            for _ in set(generated):
                next_token_logits[_] /= repetition_penalty
            filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
            if temperature == 0: #greedy sampling:
                next_token = torch.argmax(filtered_logits).unsqueeze(0)
            else:
                next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
            generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
    return generated
def main(inc=True):
parser = argparse.ArgumentParser()
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--xlm_lang", type=str, default="", help="Optional language when used with the XLM model.")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--temperature", type=float, default=1.0,
help="temperature of 0 implies greedy sampling")
parser.add_argument("--repetition_penalty", type=float, default=1.0,
help="primarily useful for CTRL model; in that case, use 1.2")
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument("--top_p", type=float, default=0.9)
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--stop_token', type=str, default=None,
help="Token at which text generation is stopped")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
set_seed(args)
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
model = model_class.from_pretrained(args.model_name_or_path)
#model = torch.nn.DataParallel(model)
model.to(args.device)
model.eval()
device = args.device
criterion = nn.CrossEntropyLoss()
max_sen = 3
# if args.length < 0 and model.config.max_position_embeddings > 0:
# args.length = model.config.max_position_embeddings
# elif 0 < model.config.max_position_embeddings < args.length:
# args.length = model.config.max_position_embeddings # No generation bigger than model size
# elif args.length < 0:
# args.length = MAX_LENGTH # avoid infinite loop
# logger.info(args)
# if args.model_type in ["ctrl"]:
# if args.temperature > 0.7 :
# logger.info('CTRL typically works better with lower temperatures (and lower top_k).')
# EXH
if not inc:
with open('../../final_data_exp/data/test_remove_distractor.json') as f:
datas = json.load(f)
correct = 0.
total = 0
whole_correct = 0.
whole_total = 0
for idx, data in enumerate(datas):
print(idx)
#single_answer = [num_candidates]
eid = data['eid']
passage = data['passage']
candidates = data['candidates']
answers = data['answer_sequence']
num_blanks = data['number_of_blanks']
num_candidates = data['candidate_length']
#print(passage, answers, candidates, idx)
cur_prob = float('inf')
cur_rst = None
blank_indexes = [passage.index('<' + str(i + 1) + '>') for i in range(num_blanks)]
acc_scores = []
golden_ans = [ans[1] for ans in answers]
#dis = [ans for ans in list(range(num_candidates)) if ans not in golden_ans]
for idx, bidx in enumerate(blank_indexes):
acc_scores.append([])
left_context = ' '.join(passage[bidx-max_sen:bidx])
right_context = ' '.join(passage[bidx+1:bidx+1+max_sen])
for i in range(num_candidates):
cur_context = ' '.join((left_context, candidates[i], right_context))
cur_context= tokenizer.encode(cur_context)
cur_context = torch.tensor(cur_context, dtype=torch.long, device=device)
cur_context = cur_context.unsqueeze(0)
inp = cur_context[:,:-1]
out = cur_context[:, 1:]
pred = model(inp)[0]
loss = criterion(pred.view(-1, pred.shape[2]), out.view(-1)).item()
#print(len(acc_scores), idx)
acc_scores[idx].append(loss)
for perm in permutations(list(range(num_candidates)), len(answers)):
cur_loss = 0.
# flag = False
# for d in dis:
# if d in perm:
# flag = True
# break
# if flag:
# continue
for idx, p in enumerate(perm):
#print(idx, p)
cur_loss += acc_scores[idx][p]
# for idx, bidx in enumerate(blank_indexes):
# passage[bidx] = candidates[perm[idx]]
# cur_loss = 0.
# for idx, bidx in enumerate(blank_indexes):
# left_context = ' '.join(passage[bidx-max_sen:bidx])
# right_context = ' '.join(passage[bidx+1:bidx+1+max_sen])
# cur_context = ' '.join((left_context, passage[bidx], right_context))
# #print(cur_context)
# #exit()
# cur_context= tokenizer.encode(cur_context)
# cur_context = torch.tensor(cur_context, dtype=torch.long, device=device)
# cur_context = cur_context.unsqueeze(0)
# inp = | |
"""
storing/retrieving of (position-dependent) fields obtained from simulations.
a field is a collection of functions x -> f(x; params), and will be stored
simply as a number of lists with matching lengths, including the list of
positions x, alongside with a name and a uniquely identifying
parameter dict params.
essentially, this is a self-hacked implementation of a database
for a very specific purpose.
achievements of this module:
-) save fields in hidden database
-) add additional data points x, f(x) at any time
-) automatically identify field based on the given name and params
-) if parameters are superset of existing parameter set,
assume they match and update existing field
-) retrieve field values and handle non-existence
-) display list of available fields
-) reading AND saving fields can be done in parallel, only update() step not
TODO: if demanded, overwrite already calculated values
(non-trivial because it contradicts the asynchronous design)
"""
import os, json
import numpy as np
from nanopores.dirnames import DATADIR, HOME
DIR = os.path.join(DATADIR, "fields")
# changes to these constants break access to existing stored data:
HEADER = "header.txt"
SUFFIX = ".field.txt"
ARRAY_DIR = "arrays"
ARRAY_PREFIX = "_npa"
def array_dir():
    """Return the subdirectory of DIR where numpy arrays are stored."""
    path = os.path.join(DIR, ARRAY_DIR)
    return path
def set_dir(NEWDIR):
    """Switch the active data directory, creating it and its header if missing."""
    global DIR
    DIR = NEWDIR
    # make sure both the data directory and its array subfolder exist
    for folder in (DIR, array_dir()):
        if not os.path.exists(folder):
            os.makedirs(folder)
    # a fresh directory needs an (empty) header file
    if HEADER not in os.listdir(DIR):
        _save(dict(_flist=[]), HEADER)
def set_dir_default():
    # reset to the standard data location under DATADIR
    set_dir(os.path.join(DATADIR, "fields"))

# options for cloud storage (with a fixed known path)
DROPBOX = os.path.join(HOME, "Dropbox", "nanopores", "fields")
MEGA = os.path.join(HOME, "code", "nanopores", "fields")

def set_dir_dropbox():
    # use the Dropbox-synced folder as data location
    set_dir(DROPBOX)

def set_dir_mega():
    # use the MEGA-synced folder as data location
    set_dir(MEGA)
# user interface that wraps Header object
def update():
    # rescan DIR for new files and merge them into the header/existing files
    Header().update()

def get_params(name, index=None, **params):
    # return the full stored parameter set of the matching file
    _, params = Header().get_file_params(name, params, index)
    return params

def load_file(name, index=None, **params):
    # return the complete content dict of the matching file
    return Header().load_file(name, params, index)

def get_fields(name, index=None, **params):
    # return only the "fields" entry of the matching file
    return Header().get_fields(name, index, **params)
def _sorted(data, key):
I = sorted(range(len(key)), key=lambda k: key[k])
return {k: [data[k][i] for i in I] for k in data}, [key[i] for i in I]
def _subset(data, key, condition=None):
if condition is None:
condition = lambda x: x
I = [i for i in range(len(key)) if condition(key[i])]
return {k: [data[k][i] for i in I] for k in data}, [key[i] for i in I]
def get_field(name, field, **params):
    # return one named field from the matching file
    return Header().get_field(name, field, **params)
def save_fields(name, params=None, **fields):
    """Store fields under name in a fresh uniquely-named file.

    fields has to be a dictionary of lists, x a list; the new file is merged
    into existing compatible data by a later update().
    """
    if params is None:
        params = {}
    content = dict(name=name, params=params, fields=fields)
    _save(content, name + _unique_id() + SUFFIX)
def save(name, params=None, **entries):
    """Store arbitrary top-level entries (not wrapped in "fields") under name."""
    effective_params = {} if params is None else params
    payload = dict(name=name, params=effective_params, **entries)
    _save(payload, name + _unique_id() + SUFFIX)
def get(name, *args, **params):
    """main method to read from database.
    usage: either get the full file
    >>> data = get(name[, index][, **params])
    or get particular entries/fields
    >>> a, b, c = get(name[, index], "a", "b", "c"[, **params])

    Raises KeyError if a requested entry exists neither at the top level
    nor inside the file's "fields" dict.
    """
    # an optional leading int selects the index-th stored dataset
    if len(args) > 0 and isinstance(args[0], int):
        args = list(args)
        index = args.pop(0)
        data = load_file(name, index, **params)
    else:
        data = load_file(name, **params)
    if not args:
        return data
    values = []
    for entry in args:
        if entry in data:
            values.append(data[entry])
        elif "fields" in data and entry in data["fields"]:
            values.append(data["fields"][entry])
        else:
            # BUG FIX: the exception was constructed but never raised,
            # which silently produced too few values instead of failing.
            raise KeyError("No entry of this name.")
    if len(values) == 1:
        return values[0]
    return tuple(values)
def is_function_test(name, index=None, **params):
    # diagnostic helper: report whether the matching file is a saved function
    FILE, params = Header().get_file_params(name, params, index)
    data = _load(FILE)
    print "is function:", is_function(data)  # Python 2 print statement
def is_function(data):
    """True iff the stored dict describes a saved function (all marker keys present)."""
    required = ("functions", "ranks", "prefix", "empty")
    return all(key in data for key in required)
def remove(name, index=None, **params):
    """Delete one stored dataset; saved functions get special-purpose removal."""
    # test if file is saved function; if yes, call remove_functions
    header = Header()
    FILE, matched = header.get_file_params(name, params, index)
    if is_function(_load(FILE)):
        remove_functions(name, index, **matched)
    else:
        header.remove(name, matched, index)
def rename(name, index, newname):
    """Rename the index-th dataset stored under name to newname, in place."""
    # do NOT create new file, because this would break function data
    h = Header()
    FILE, params = h.get_file_params(name, {}, index)
    # modify file
    f = _load(FILE)
    f["name"] = newname
    _save(f, FILE)
    # modify header: move the paramset from the old name to the new one
    h._delete_entry(name, params)
    h._add_entry(newname, params)
    h._write()
def purge(name, **params):
    # repeatedly remove until no stored dataset matches name and params
    while exists(name, **params):
        remove(name, **params)
def get_entry(name, entry, **params):
    # return one top-level entry of the matching file (see Header.get_entry)
    return Header().get_entry(name, entry, **params)
def set_entries(name, params, **entries):
    """Overwrite/add top-level entries in the file matching name and params."""
    # TODO could make sense also to change name/params
    assert all(k not in entries for k in ("name", "params"))
    Header().set_entries(name, params, **entries)
def set_param(name, index, pname, pvalue):
    """Set parameter pname to pvalue both in the header and in the data file."""
    h = Header()
    FILE, params = h.get_file_params(name, {}, index)
    # update the in-memory header entry and persist it
    h.header[name][index-1][pname] = pvalue
    h._write()
    # keep the data file's own params dict in sync with the header
    f = _load(FILE)
    f["params"][pname] = pvalue
    _save(f, FILE)
def set_params(name, index, **params):
    """Set several parameters at once (see set_param)."""
    for pname, pvalue in params.items():
        set_param(name, index, pname, pvalue)
def diff(params0, name, index=None, **params):
    """Return conflicting keys as {key: (requested, stored)} to explain
    why stored data is not compatible with params0."""
    stored = get_params(name, index, **params)
    shared = set(stored) & set(params0)
    return {key: (params0[key], stored[key])
            for key in shared if params0[key] != stored[key]}
def diffs(params0, name):
    """Return the diff against params0 for every paramset stored under name."""
    result = []
    for stored in Header().get_all(name):
        shared = set(stored) & set(params0)
        conflict = {k: (params0[k], stored[k])
                    for k in shared if params0[k] != stored[k]}
        result.append(conflict)
    return result
def exists(name, **params):
    """Return True iff some stored dataset matches name and params."""
    try:
        Header().get_file_params(name, params)
        return True
    except KeyError:
        return False
# core class that communicates with txt files
class Header(object):
    """access/organize data files in directory
    usage:
        Header.get_XXX(name, **params) -- retrieve simulation data
        Header.update() -- incorporate new data in folder
    header file:
        dict name : list paramsets
        "_flist" : list of files in folder
        paramset also contains filename"""

    def __init__(self):
        # read the header file from the current DIR into a dict-like Params
        self.header = Params(_load(HEADER))

    # TODO: could add pretty print of header content to view existing data
    def list_files(self):
        # TODO: could include global file names and sizes in list
        return self.header["_flist"]

    def get_all(self, name):
        "return all headers for a given name"
        if name not in self.header:
            raise KeyError("Header: Name '%s' not found." %name)
        return self.header[name]

    def get_file_params(self, name, params, index=None):
        "take first file compatible with params"
        if name not in self.header:
            raise KeyError("Header: Name '%s' not found." %name)
        if index is not None:
            # index is 1-based and selects the paramset directly
            assert 0 <= index-1 < len(self.header[name])
            params0 = self.header[name][index-1]
            FILE = params0["FILE"]
        else:
            for params0 in self.header[name]:
                if _compatible(params0, params):
                    FILE = params0["FILE"]
                    break
            else:
                # for/else: loop finished without break, i.e. nothing matched
                raise KeyError("Header: No matching parameter set.")
        return FILE, params0

    def get_file(self, name, params, index=None):
        "take first file compatible with params"
        FILE, _ = self.get_file_params(name, params, index)
        return FILE

    def load_file(self, name, params, index=None):
        # load the complete content dict of the matching file
        FILE = self.get_file(name, params, index)
        return _load(FILE)

    def get_fields(self, name, index=None, **params):
        fdict = self.load_file(name, params, index)
        return fdict["fields"]

    def get_field(self, name, field, index=None, **params):
        fdict = self.load_file(name, params, index)
        return fdict["fields"][field]

    # TODO: this would be slightly more elegant and much more
    # efficient if it was possible to get handle on one file
    def get_entry(self, name, entry, **params):
        fdict = self.load_file(name, params)
        return fdict[entry]

    def set_entries(self, name, params, **entries):
        # rewrite the matching file with the given entries replaced/added
        FILE = self.get_file(name, params)
        f = _load(FILE)
        for entry, value in entries.items():
            f[entry] = value
        _save(f, FILE)

    def update(self):
        "update file list, merge all mutually compatible files"
        new = self._update_list()
        N = len(new)
        n = 0
        # inspect new files and create entries in header[name]
        for FILE in new:
            f = _load(FILE)
            name, params = f["name"], f["params"].copy()
            try:
                MATCH, params0 = self.get_file_params(name, params)
            except KeyError:
                MATCH = None
            if MATCH is None:
                # create new entry
                params["FILE"] = FILE
                self._add_entry(name, params)
            else:
                # merge file into existing file
                params0.update(f["params"])
                mergefile(f, MATCH)
                self._delete_file(FILE)
                n += 1
        self._write()
        if N>0:
            # NOTE: Python 2 print statement; %-formatting applies to the string
            print ("Found %d new files, merged %d of them into "
                "existing files.") % (N, n)
        else: print "Nothing to be updated."

    def reread(self):
        "completely clear existing information and read again"
        self.header = dict(_flist=[])
        self.update()

    def remove(self, name, params, index=None):
        # delete the data file, its stored arrays, and its header entry
        FILE, params = self.get_file_params(name, params, index)
        _delete_arrays(FILE)
        self._delete_file(FILE)
        self._delete_entry(name, params)
        self._write()

    def _update_list(self):
        "update filelist field in header and return list of new files"
        flist = [f for f in os.listdir(DIR) if f.endswith(SUFFIX)]
        old = self.header["_flist"]
        self.header["_flist"] = flist
        new = [f for f in flist if f not in old]
        return new

    def _delete_file(self, FILE):
        if FILE in self.header["_flist"]:
            self.header["_flist"].remove(FILE)
        path = os.path.join(DIR, FILE)
        print "Removing %s" %path
        os.remove(path)

    def _delete_entry(self, name, params):
        self.header[name].remove(params)
        # drop the name entirely once no paramsets remain
        if len(self.header[name]) == 0:
            self.header.pop(name)

    def _add_entry(self, name, params):
        if not name in self.header:
            self.header[name] = []
        self.header[name].append(params)

    def _write(self):
        # persist the in-memory header back to the header file
        _save(self.header, HEADER)
def mergefile(f, FILE):
"merge file content f into FILE, knowing they are compatible"
# TODO: make the .extend work with stored arrays!!
# read FILE
f0 | |
),
"DR1c_16" : ( 34235, 34236 ),
"DR1a_17" : ( 34236, 34240 ),
"DR1a1_17" : ( 34240, 34241 ),
"DR1a2_17" : ( 34241, 34242 ),
"DR1bAgeOns_17" : ( 34242, 34244 ),
"DR1bOns_17" : ( 34244, 34245 ),
"DR1bAgeRec_17" : ( 34245, 34247 ),
"DR1bRec_17" : ( 34247, 34248 ),
"DR1c_17" : ( 34248, 34249 ),
"DR1a_18" : ( 34249, 34253 ),
"DR1a1_18" : ( 34253, 34254 ),
"DR1a2_18" : ( 34254, 34255 ),
"DR1bAgeOns_18" : ( 34255, 34257 ),
"DR1bOns_18" : ( 34257, 34258 ),
"DR1bAgeRec_18" : ( 34258, 34260 ),
"DR1bRec_18" : ( 34260, 34261 ),
"DR1c_18" : ( 34261, 34262 ),
"DR1a_19" : ( 34262, 34266 ),
"DR1a1_19" : ( 34266, 34267 ),
"DR1a2_19" : ( 34267, 34268 ),
"DR1bAgeOns_19" : ( 34268, 34270 ),
"DR1bOns_19" : ( 34270, 34271 ),
"DR1bAgeRec_19" : ( 34271, 34273 ),
"DR1bRec_19" : ( 34273, 34274 ),
"DR1c_19" : ( 34274, 34275 ),
"DR22_2" : ( 34275, 34278 ),
"DR22A_2" : ( 34278, 34280 ),
"DR22FromMnth11" : ( 34280, 34282 ),
"DR22FromYr11" : ( 34282, 34286 ),
"DR22ToMnth11" : ( 34286, 34288 ),
"DR22ToYR11" : ( 34288, 34292 ),
"DR22FromMnth12" : ( 34292, 34294 ),
"DR22FromYr12" : ( 34294, 34298 ),
"DR22ToMnth12" : ( 34298, 34300 ),
"DR22ToYR12" : ( 34300, 34304 ),
"DR22FromMnth13" : ( 34304, 34306 ),
"DR22FromYr13" : ( 34306, 34310 ),
"DR22ToMnth13" : ( 34310, 34312 ),
"DR22ToYR13" : ( 34312, 34316 ),
"DR22FromMnth14" : ( 34316, 34318 ),
"DR22FromYr14" : ( 34318, 34322 ),
"DR22ToMnth14" : ( 34322, 34324 ),
"DR22ToYR14" : ( 34324, 34328 ),
"DR22FromMnth15" : ( 34328, 34330 ),
"DR22FromYr15" : ( 34330, 34334 ),
"DR22ToMnth15" : ( 34334, 34336 ),
"DR22ToYR15" : ( 34336, 34340 ),
"DR1a_20" : ( 34340, 34344 ),
"DR1a1_20" : ( 34344, 34345 ),
"DR1a2_20" : ( 34345, 34346 ),
"DR1bAgeOns_20" : ( 34346, 34348 ),
"DR1bOns_20" : ( 34348, 34349 ),
"DR1bAgeRec_20" : ( 34349, 34351 ),
"DR1bRec_20" : ( 34351, 34352 ),
"DR1c_20" : ( 34352, 34353 ),
"DR1a_21" : ( 34353, 34357 ),
"DR1a1_21" : ( 34357, 34358 ),
"DR1a2_21" : ( 34358, 34359 ),
"DR1bAgeOns_21" : ( 34359, 34361 ),
"DR1bOns_21" : ( 34361, 34362 ),
"DR1bAgeRec_21" : ( 34362, 34364 ),
"DR1bRec_21" : ( 34364, 34365 ),
"DR1c_21" : ( 34365, 34366 ),
"DR1a_22" : ( 34366, 34370 ),
"DR1a1_22" : ( 34370, 34371 ),
"DR1a2_22" : ( 34371, 34372 ),
"DR1bAgeOns_22" : ( 34372, 34374 ),
"DR1bOns_22" : ( 34374, 34375 ),
"DR1bAgeRec_22" : ( 34375, 34377 ),
"DR1bRec_22" : ( 34377, 34378 ),
"DR1c_22" : ( 34378, 34379 ),
"DR1a_23" : ( 34379, 34383 ),
"DR1a1_23" : ( 34383, 34384 ),
"DR1a2_23" : ( 34384, 34385 ),
"DR1bAgeOns_23" : ( 34385, 34387 ),
"DR1bOns_23" : ( 34387, 34388 ),
"DR1bAgeRec_23" : ( 34388, 34390 ),
"DR1bRec_23" : ( 34390, 34391 ),
"DR1c_23" : ( 34391, 34392 ),
"DR1a_24" : ( 34392, 34396 ),
"DR1a1_24" : ( 34396, 34397 ),
"DR1a2_24" : ( 34397, 34398 ),
"DR1bAgeOns_24" : ( 34398, 34400 ),
"DR1bOns_24" : ( 34400, 34401 ),
"DR1bAgeRec_24" : ( 34401, 34403 ),
"DR1bRec_24" : ( 34403, 34404 ),
"DR1c_24" : ( 34404, 34405 ),
"DR22_3" : ( 34405, 34408 ),
"DR22A_3" : ( 34408, 34410 ),
"DR22FromMnth16" : ( 34410, 34412 ),
"DR22FromYr16" : ( 34412, 34416 ),
"DR22ToMnth16" : ( 34416, 34418 ),
"DR22ToYR16" : ( 34418, 34422 ),
"DR22FromMnth17" : ( 34422, 34424 ),
"DR22FromYr17" : ( 34424, 34428 ),
"DR22ToMnth17" : ( 34428, 34430 ),
"DR22ToYR17" : ( 34430, 34434 ),
"DR22FromMnth18" : ( 34434, 34436 ),
"DR22FromYr18" : ( 34436, 34440 ),
"DR22ToMnth18" : ( 34440, 34442 ),
"DR22ToYR18" : ( 34442, 34446 ),
"DR22FromMnth19" : ( 34446, 34448 ),
"DR22FromYr19" : ( 34448, 34452 ),
"DR22ToMnth19" : ( 34452, 34454 ),
"DR22ToYR19" : ( 34454, 34458 ),
"DR22FromMnth20" : ( 34458, 34460 ),
"DR22FromYr20" : ( 34460, 34464 ),
"DR22ToMnth20" : ( 34464, 34466 ),
"DR22ToYR20" : ( 34466, 34470 ),
"DR1a_25" : ( 34470, 34474 ),
"DR1a1_25" : ( 34474, 34475 ),
"DR1a2_25" : ( 34475, 34476 ),
"DR1bAgeOns_25" : ( 34476, 34478 ),
"DR1bOns_25" : ( 34478, 34479 ),
"DR1bAgeRec_25" : ( 34479, 34481 ),
"DR1bRec_25" : ( 34481, 34482 ),
"DR1c_25" : ( 34482, 34483 ),
"DR1a_26" : ( 34483, 34487 ),
"DR1a1_26" : ( 34487, 34488 ),
"DR1a2_26" : ( 34488, 34489 ),
"DR1bAgeOns_26" : ( 34489, 34491 ),
"DR1bOns_26" : ( 34491, 34492 ),
"DR1bAgeRec_26" : ( 34492, 34494 ),
"DR1bRec_26" : ( 34494, 34495 ),
"DR1c_26" : ( 34495, 34496 ),
"DR1a_27" : ( 34496, 34500 ),
"DR1a1_27" : ( 34500, 34501 ),
"DR1a2_27" : ( 34501, 34502 ),
"DR1bAgeOns_27" : ( 34502, 34504 ),
"DR1bOns_27" : ( 34504, 34505 ),
"DR1bAgeRec_27" : ( 34505, 34507 ),
"DR1bRec_27" : ( 34507, 34508 ),
"DR1c_27" : ( 34508, 34509 ),
"DR1a_28" : ( 34509, 34513 ),
"DR1a1_28" : ( 34513, 34514 ),
"DR1a2_28" : ( 34514, 34515 ),
"DR1bAgeOns_28" : ( 34515, 34517 ),
"DR1bOns_28" : ( 34517, 34518 ),
"DR1bAgeRec_28" : ( 34518, 34520 ),
"DR1bRec_28" : ( 34520, 34521 ),
"DR1c_28" : ( 34521, 34522 ),
"DR1a_29" : ( 34522, 34526 ),
"DR1a1_29" : ( 34526, 34527 ),
"DR1a2_29" : ( 34527, 34528 ),
"DR1bAgeOns_29" : ( 34528, 34530 ),
"DR1bOns_29" : ( 34530, 34531 ),
"DR1bAgeRec_29" : ( 34531, 34533 ),
"DR1bRec_29" : ( 34533, 34534 ),
"DR1c_29" : ( 34534, 34535 ),
"DR22_4" : ( 34535, 34538 ),
"DR22A_4" : ( 34538, 34540 ),
"DR22FromMnth21" : ( 34540, 34542 ),
"DR22FromYr21" : ( 34542, 34546 ),
"DR22ToMnth21" : ( 34546, 34548 ),
"DR22ToYR21" : ( 34548, 34552 ),
"DR22FromMnth22" : ( 34552, 34554 ),
"DR22FromYr22" : ( 34554, 34558 ),
"DR22ToMnth22" : ( 34558, 34560 ),
"DR22ToYR22" : ( 34560, 34564 ),
"DR22FromMnth23" : ( 34564, 34566 ),
"DR22FromYr23" : ( 34566, 34570 ),
"DR22ToMnth23" : ( 34570, 34572 ),
"DR22ToYR23" : ( 34572, 34576 ),
"DR22FromMnth24" : ( 34576, 34578 ),
"DR22FromYr24" : ( 34578, 34582 ),
"DR22ToMnth24" : ( 34582, 34584 ),
"DR22ToYR24" : ( 34584, 34588 ),
"DR22FromMnth25" : ( 34588, 34590 ),
"DR22FromYr25" : ( 34590, 34594 ),
"DR22ToMnth25" : ( 34594, 34596 ),
"DR22ToYR25" : ( 34596, 34600 ),
"DR1a_30" : ( 34600, 34604 ),
"DR1a1_30" : ( 34604, 34605 ),
"DR1a2_30" : ( 34605, 34606 ),
"DR1bAgeOns_30" : ( 34606, 34608 ),
"DR1bOns_30" : ( 34608, 34609 ),
"DR1bAgeRec_30" : ( 34609, 34611 ),
"DR1bRec_30" : ( 34611, 34612 ),
"DR1c_30" : ( 34612, 34613 ),
"DR1a_31" : ( 34613, 34617 ),
"DR1a1_31" : ( 34617, 34618 ),
"DR1a2_31" : ( 34618, 34619 ),
"DR1bAgeOns_31" : ( 34619, 34621 ),
"DR1bOns_31" : ( 34621, 34622 ),
"DR1bAgeRec_31" : ( 34622, 34624 ),
"DR1bRec_31" : ( 34624, 34625 ),
"DR1c_31" : ( 34625, 34626 ),
"DR1a_32" : ( 34626, 34630 ),
"DR1a1_32" : ( 34630, 34631 ),
"DR1a2_32" : ( 34631, 34632 ),
"DR1bAgeOns_32" : ( 34632, 34634 ),
"DR1bOns_32" : ( 34634, 34635 ),
"DR1bAgeRec_32" : ( 34635, 34637 ),
"DR1bRec_32" : ( 34637, 34638 ),
"DR1c_32" : ( 34638, 34639 ),
"DR1a_33" : ( 34639, 34643 ),
"DR1a1_33" : ( 34643, 34644 ),
"DR1a2_33" : ( 34644, 34645 ),
"DR1bAgeOns_33" : ( 34645, 34647 ),
"DR1bOns_33" : ( 34647, 34648 ),
"DR1bAgeRec_33" : ( 34648, 34650 ),
"DR1bRec_33" : ( 34650, 34651 ),
"DR1c_33" : ( 34651, 34652 ),
"DR1a_34" : ( 34652, 34656 ),
"DR1a1_34" : ( 34656, 34657 ),
"DR1a2_34" : ( 34657, 34658 ),
"DR1bAgeOns_34" : ( 34658, 34660 ),
"DR1bOns_34" : ( 34660, 34661 ),
"DR1bAgeRec_34" : ( 34661, 34663 ),
"DR1bRec_34" : ( 34663, 34664 ),
"DR1c_34" : ( 34664, 34665 ),
"DR22_5" : ( 34665, 34668 ),
"DR22A_5" : ( 34668, 34670 ),
"DR23" | |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Root search functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
BrentResults = collections.namedtuple(
    "BrentResults",
    [
        # A tensor containing the best estimate. If the search was successful,
        # this estimate is a root of the objective function.
        "estimated_root",
        # A tensor containing the value of the objective function at the best
        # estimate. If the search was successful, then this is close to 0.
        "objective_at_estimated_root",
        # A tensor containing number of iterations performed for each pair of
        # starting points.
        "num_iterations",
        # Scalar boolean tensor indicating whether the best estimate is a root
        # within the tolerance specified for the search.
        "converged",
    ])

# Values which remain fixed across all root searches (except for tensor dtypes
# and shapes).
_BrentSearchConstants = collections.namedtuple("_BrentSearchConstants", [
    "false",       # all-False boolean tensor, used to mask finished searches
    "zero",        # zero tensor (presumably in the root estimates' dtype -- confirm; not used in the visible loop body)
    "zero_value",  # zero tensor compared against products of objective values
])

# Values which are updated during the root search.
_BrentSearchState = collections.namedtuple("_BrentSearchState", [
    "best_estimate",           # current best root estimate
    "value_at_best_estimate",  # objective value at best_estimate
    "last_estimate",           # estimate from the previous iteration
    "value_at_last_estimate",  # objective value at last_estimate
    "contrapoint",             # point whose objective value has sign opposite to best_estimate's
    "value_at_contrapoint",    # objective value at contrapoint
    "step_to_best_estimate",   # step-size bookkeeping for the most recent step
    "step_to_last_estimate",   # step-size bookkeeping for the step before
    "num_iterations",          # per-search iteration counter
    "finished",                # per-search flag: True once the search has stopped
])

# Values which remain fixed for a given root search.
_BrentSearchParams = collections.namedtuple("_BrentSearchParams", [
    "objective_fn",             # callable whose root is sought
    "max_iterations",           # iteration budget
    "absolute_root_tolerance",  # absolute tolerance on the root position
    "relative_root_tolerance",  # relative tolerance on the root position
    "function_tolerance",       # tolerance on the objective value at the root
    "stopping_policy_fn",       # callable controlling the algorithm termination
])
def _swap_where(condition, x, y):
    """Swaps the elements of `x` and `y` wherever `condition` is True.

    Args:
        condition: A `Tensor` of dtype bool.
        x: A `Tensor` with the same shape as `condition`.
        y: A `Tensor` with the same shape and dtype as `x`.

    Returns:
        Two `Tensors` with the same shape as `x` and `y`.
    """
    swapped_x = tf.where(condition, y, x)
    swapped_y = tf.where(condition, x, y)
    return swapped_x, swapped_y
def _secant_step(x1, x2, y1, y2):
"""Returns the step size at the current position if using the secant method.
This function is meant for exclusive use by the `_brent_loop_body` function:
- It does not guard against divisions by zero, and instead assumes that `y1`
is distinct from `y2`. The `_brent_loop_body` function guarantees this
property.
- It does not guard against overflows which may occur if the difference
between `y1` and `y2` is small while that between `x1` and `x2` is not.
In this case, the resulting step size will be larger than `bisection_step`
and thus ignored by the `_brent_loop_body` function.
Args:
x1: `Tensor` containing the current position.
x2: `Tensor` containing the previous position.
y1: `Tensor` containing the value of `objective_fn` at `x1`.
y2: `Tensor` containing the value of `objective_fn` at `x2`.
Returns:
A `Tensor` with the same shape and dtype as `current`.
"""
x_difference = x1 - x2
y_difference = y1 - y2
return -y1 * x_difference / y_difference
def _quadratic_interpolation_step(x1, x2, x3, y1, y2, y3):
"""Returns the step size to use when using quadratic interpolation.
This function is meant for exclusive use by the `_brent_loop_body` function.
It does not guard against divisions by zero, and instead assumes that `y1` is
distinct from `y2` and `y3`. The `_brent_loop_body` function guarantees this
property.
Args:
x1: `Tensor` of any shape and real dtype containing the first position used
for extrapolation.
x2: `Tensor` of the same shape and dtype as `x1` containing the second
position used for extrapolation.
x3: `Tensor` of the same shape and dtype as `x1` containing the third
position used for extrapolation.
y1: `Tensor` containing the value of the interpolated function at `x1`.
y2: `Tensor` containing the value of interpolated function at `x2`.
y3: `Tensor` containing the value of interpolated function at `x3`.
Returns:
A `Tensor` with the same shape and dtype as `x1`.
"""
r2 = (x2 - x1) / (y2 - y1)
r3 = (x3 - x1) / (y3 - y1)
return -x1 * (x3 * r3 - x2 * r2) / (r3 * r2 * (x3 - x2))
def default_relative_root_tolerance(dtype):
    """Returns the default relative root tolerance used for a TensorFlow dtype."""
    machine_eps = np.finfo(dtype.as_numpy_dtype()).eps
    return 4 * machine_eps
def _should_stop(state, stopping_policy_fn):
    """Evaluates the stopping policy on the per-search `finished` flags.

    Args:
        state: A Python `_BrentSearchState` namedtuple.
        stopping_policy_fn: Python `callable` controlling the algorithm
            termination.

    Returns:
        The policy's decision as a boolean tensor named "should_stop".
    """
    verdict = stopping_policy_fn(state.finished)
    return tf.convert_to_tensor(verdict, name="should_stop", dtype=tf.bool)
# This is a direct translation of the Brent root-finding method.
# Each operation is guarded by a call to `tf.where` to avoid performing
# unnecessary calculations.
def _brent_loop_body(state, params, constants):
"""Performs one iteration of the Brent root-finding algorithm.
Args:
state: A Python `_BrentSearchState` namedtuple.
params: A Python `_BrentSearchParams` namedtuple.
constants: A Python `_BrentSearchConstants` namedtuple.
Returns:
The `Tensor`s to use for the next iteration of the algorithm.
"""
best_estimate = state.best_estimate
last_estimate = state.last_estimate
contrapoint = state.contrapoint
value_at_best_estimate = state.value_at_best_estimate
value_at_last_estimate = state.value_at_last_estimate
value_at_contrapoint = state.value_at_contrapoint
step_to_best_estimate = state.step_to_best_estimate
step_to_last_estimate = state.step_to_last_estimate
num_iterations = state.num_iterations
finished = state.finished
# If the root is between the last two estimates, use the worst of the two
# as new contrapoint. Adjust step sizes accordingly.
replace_contrapoint = ~finished & (
value_at_last_estimate * value_at_best_estimate < constants.zero_value)
contrapoint = tf.where(replace_contrapoint, last_estimate, contrapoint)
value_at_contrapoint = tf.where(replace_contrapoint, value_at_last_estimate,
value_at_contrapoint)
step_to_last_estimate = tf.where(replace_contrapoint,
best_estimate - last_estimate,
step_to_last_estimate)
step_to_best_estimate = tf.where(replace_contrapoint, step_to_last_estimate,
step_to_best_estimate)
# If the contrapoint is a better guess than the current root estimate, swap
# them. Also, replace the worst of the two with the current contrapoint.
replace_best_estimate = tf.where(
finished, constants.false,
tf.math.abs(value_at_contrapoint) < tf.math.abs(value_at_best_estimate))
last_estimate = tf.where(replace_best_estimate, best_estimate, last_estimate)
best_estimate = tf.where(replace_best_estimate, contrapoint, best_estimate)
contrapoint = tf.where(replace_best_estimate, last_estimate, contrapoint)
value_at_last_estimate = tf.where(replace_best_estimate,
value_at_best_estimate,
value_at_last_estimate)
value_at_best_estimate = tf.where(replace_best_estimate, value_at_contrapoint,
value_at_best_estimate)
value_at_contrapoint = tf.where(replace_best_estimate, value_at_last_estimate,
value_at_contrapoint)
# Compute the tolerance used to control root search at the current position
# and the step size corresponding to the bisection method.
root_tolerance = 0.5 * (
params.absolute_root_tolerance +
params.relative_root_tolerance * tf.math.abs(best_estimate))
bisection_step = 0.5 * (contrapoint - best_estimate)
# Mark the search as finished if either:
# 1. the maximum number of iterations has been reached;
# 2. the desired tolerance has been reached (even if no root was found);
# 3. the current root estimate is good enough.
# Using zero as `function_tolerance` will check for exact roots and match
# both Brent's original algorithm and the SciPy implementation.
finished |= (num_iterations >= params.max_iterations) | (
tf.math.abs(bisection_step) <
root_tolerance) | (~tf.math.is_finite(value_at_best_estimate)) | (
tf.math.abs(value_at_best_estimate) <= params.function_tolerance)
# Determine whether interpolation or extrapolation are worth performing at
# the current position.
compute_short_step = tf.where(
finished, constants.false,
(root_tolerance < tf.math.abs(step_to_last_estimate)) &
(tf.math.abs(value_at_best_estimate) <
tf.math.abs(value_at_last_estimate)))
short_step = tf.where(
compute_short_step,
tf.where(
# The contrapoint cannot be equal to the current root estimate since
# they have opposite signs. However, it may be equal to the previous
# estimate.
tf.equal(last_estimate, contrapoint),
# If so, use the secant method to avoid a division by zero which
# would occur if using extrapolation.
_secant_step(best_estimate, last_estimate, value_at_best_estimate,
value_at_last_estimate),
# Pass values of the objective function as x values, and root
# estimates as y values in order to perform *inverse* extrapolation.
_quadratic_interpolation_step(value_at_best_estimate,
value_at_last_estimate,
value_at_contrapoint, best_estimate,
last_estimate, contrapoint)),
# Default to zero if using bisection.
constants.zero)
# Use the step calculated above if both:
# 1. step size < |previous step size|
# 2. step size < 3/4 * |contrapoint - current root estimate|
# Ensure that `short_step` was calculated by guarding the calculation with
# `compute_short_step`.
use_short_step = tf.where(
compute_short_step, 2 * tf.math.abs(short_step) < tf.minimum(
3 * tf.math.abs(bisection_step) - root_tolerance,
tf.math.abs(step_to_last_estimate)), constants.false)
# Revert to bisection when not using `short_step`.
step_to_last_estimate = tf.where(use_short_step, step_to_best_estimate,
bisection_step)
step_to_best_estimate = tf.where(
finished, constants.zero,
tf.where(use_short_step, short_step, bisection_step))
# Update the previous and current root estimates.
last_estimate = tf.where(finished, last_estimate, best_estimate)
best_estimate += tf.where(
finished, constants.zero,
tf.where(root_tolerance < tf.math.abs(step_to_best_estimate),
step_to_best_estimate,
tf.where(bisection_step > 0, root_tolerance, -root_tolerance)))
value_at_last_estimate = tf.where(finished, value_at_last_estimate,
value_at_best_estimate)
value_at_best_estimate = tf.where(finished, value_at_best_estimate,
params.objective_fn(best_estimate))
num_iterations | |
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# Created: 2020-07-09 1:20 PM
"""
=========================
Review and correct tracks
=========================
Usage:
::
python -m argos.review
Basic operation
---------------
At startup it will show a window with two empty panes separated in the
middle by three empty lists titled ``Previous tracks``, ``All tracks`` and
``Current tracks`` like :numref:`review_startup` below.
.. _review_startup:
.. figure:: ../doc/images/review_00.png
:width: 100%
:alt: Screenshot of review tool at startup
Screenshot of review tool at startup
To start reviewing tracked data, select ``File->Open tracked data``
from the menubar or press ``Ctrl+O`` on keyboard. This will prompt you
to pick a data file. Once you select the data file, it will then
prompt you to select the corresponding video file. Once done, you
should see the first frame of the video on the right pane with the
bounding boxes (referred to as *bbox* for short) and IDs of the tracked
objects (:numref:`review_loaded`).
.. _review_loaded:
.. figure:: ../doc/images/review_01.png
:width: 100%
:alt: Screenshot of review tool after loading data
Screenshot of review tool after loading data
Here you notice that trackid ``4`` is spurious. So you select it by
clicking on the entry in ``Right tracks`` list. As you select the
entry, its bbox and ID on the image change color (and line style)
(:numref:`review_select`). If the ``Show track position`` button is
checked, like in the screenshot, then you will also see some points
turning from dark purple to light yellow, indicating all the position
this object takes across the video.
.. _review_select:
.. figure:: ../doc/images/review_02.png
:width: 100%
:alt: Screenshot of review tool after selecting object
Screenshot of review tool after selecting object
Now delete object ``4`` by pressing ``x`` or ``Delete`` on keyboard,
or selecting ``Delete track`` from ``Action`` in menubar
(:numref:`review_delete`).
.. _review_delete:
.. figure:: ../doc/images/review_03.png
:width: 100%
:alt: Screenshot of review tool deleting object
Screenshot of review tool deleting object
Once you delete ``4``, selection will change to the next object
(``# 5``) and the path taken by it over time will be displayed in the
same purple-to-yellow color code (:numref:`review_post_delete`) [#]_.
.. [#] Changing the frame will clear the selection and the path
display. If you want the selection (and the path-display of the
selected ID) to be retained across frames, check the menu item
``View->Retain selection across frames``.
.. _review_post_delete:
.. figure:: ../doc/images/review_04.png
:width: 100%
:alt: Screenshot of review tool after deleting object
Screenshot of review tool after deleting object, as the next object
is selected.
Now to play the video, click the ``play`` button at bottom. The right
frame will be transferred to the left pane, and the next frame will
appear in the right pane.
You will notice the spinbox on bottom right updates the current frame
number as we go forward in the video. Instead of playing the video,
you can also move one frame at a time by clicking the up-arrow in the
spinbox, or by pressing ``PgDn`` on keyboard.
It is useful to pause and inspect the tracks whenever a new object is
detected. In order to pause the video when there is a new trackid, check
the ``Show popup message for new tracks`` item in the ``Diff
settings`` menu (:numref:`review_diff_popup_new`).
.. _review_diff_popup_new:
.. figure:: ../doc/images/review_05.png
:width: 100%
:alt: Screenshot Diff settings - popup on new tracks menu
Enable popup message when a new trackid appears
If you already played through the video, then all trackids are
old. In order to go back to a pristine state, click the ``Reset``
button at bottom right. If you play the video now, as soon as a new
track appears, the video will pause and a popup message will tell you
the new tracks that appeared between the last frame and the current
frame (:numref:`review_new_track_popup`).
.. _review_new_track_popup:
.. figure:: ../doc/images/review_06.png
:width: 100%
:alt: Popup message on new track(s)
Popup message when a new trackid appears
After you click ``OK`` to dispose of the popup window, the status
message will remind you of the last change
(:numref:`review_status_msg`).
.. _review_status_msg:
.. figure:: ../doc/images/review_07.png
:width: 100%
:alt: Status message on new track(s)
Status message after a new trackid appears
You can also choose ``Show popup message for left/right mismatch`` in
the ``Diff settings`` menu. In this case whenever the trackids on the
previous frame are different from those on the current frame, the video will
be paused with a popup message.
If you want to just watch the video without interruption, select ``No
popup message for tracks``.
The other option ``Overlay previous frame``, if selected, will overlay
the previous frame on the right pane in a different color. This may be
helpful for looking at differences between the two frames if the left
and right display is not good enough (:numref:`review_overlay`).
.. _review_overlay:
.. figure:: ../doc/images/review_08.png
:width: 100%
:alt: Overlaid previous and current frame.
Overlaid previous and current frame. The previous frame is in the
red channel and the current frame in the blue channel, thus
producing shades of magenta where they have similar values, and
more red or blue in pixels where they mismatch.
Selecting a region of interest (ROI)
------------------------------------
If you want to process only a certain part of the frames, you can draw an ROI
by clicking the left mouse-button to set the vertices of a polygon. Click on
the first vertex to close the polygon. If you want to cancel it half-way, click
the right mouse-button.
The track lists
---------------
The three lists between the left (previous) and right (current) video frame in
the GUI present the track Ids of the detected objects. These allow you to
display the tracks and carry out modifications of the tracks described
later).
- ``Previous tracks`` shows the tracks detected in the left (previous)
frame. If you select an entry here, its detected track across frames
will be overlayed on the previous frame in the left pane
(:numref:`review_track_hist`).
- ``All tracks`` in the middle shows all the tracks seen so far
(including those that have been lost in the previous or the current
frame). If you select an entry here, its detected track across
frames will be overlayed on the previous frame in the left pane. If
you select different entries in ``Left tracks`` and ``All tracks``,
the last selected track will be displayed.
- ``Current tracks`` shows the tracks detected in the current frame (on the
right). If you select an entry here, its detected track across frames will be
overlayed on the current frame in the right pane.
.. _review_track_hist:
.. figure:: ../doc/images/review_09.png
:width: 100%
:alt: Track of the selected object
The track of the selected object (track Id) in ``Previous tracks`` or
``All tracks`` is displayed on the left pane. That of the selected
object in the ``Current tracks`` is displayed on the right pane.
Moving around and break points
------------------------------
To speed up navigation of tracked data, Argos review tool provides
several shortcuts. The corresponding actions are also available in the
``Play`` menu. To play the video, or to stop a video that is already
playing, press the ``Space bar`` on keyboard. You can try to double
the play speed by pressing ``Ctrl + Up Arrow`` and halve the speed by
pressing ``Ctrl + Down Arrow``. The maximum speed is limited by the
time needed to read and display a frame.
Instead of going through the entire video, you can jump to the next
frame where a new trackid was introduced, press ``N`` key (``Jump to
next new track``).
You can jump forward 10 frames by pressing ``Ctrl + PgDn`` and
backward by pressing ``Ctrl + PgUp`` on the keyboard.
To jump to a specific frame number, press ``G`` (``Go to frame``)
and enter the frame number in the dialog box that pops up.
To remember the current location (frame number) in the video, you can
press ``Ctrl+B`` (``Set breakpoint at current frame``) to set a
breakpoint. You can go to other parts of the video and jump back to
this location by pressing ``J`` (``Jump to breakpoint frame``). To
clear the breakpoint, press ``Shift+J`` (``Clear frame breakpoint``).
You can set a breakpoint on the appearance of a particular trackid
using ``Set breakpoint on appearance`` (keyboard ``A``), and entering
the track id in the dialog box. When playing the video, it will pause
on the frame where this trackid appears next. Similarly you can set
breakpoint on disappearance of a trackid using ``Set breakpoint on
disappearance`` (keyboard ``D``). You can clear these breakpoints by
pressing ``Shift + | |
<reponame>Marlin-Na/canine<gh_stars>0
import abc
import os
import sys
import typing
import glob
import shlex
import tempfile
import subprocess
import traceback
import shutil
import warnings
import crayons
import re
from uuid import uuid4
from collections import namedtuple
from contextlib import ExitStack, contextmanager
from ..backends import AbstractSlurmBackend, AbstractTransport, LocalSlurmBackend
from ..utils import get_default_gcp_project, check_call, canine_logging
from hound.client import _getblob_bucket
from agutil import status_bar
import pandas as pd
# A single localization action for one input value.
# type is one of: stream, download, ro_disk, None --
# it indicates what kind of action needs to be taken during job startup;
# path is the source path the action applies to.
Localization = namedtuple("Localization", ['type', 'path'])
# Pairs a path on the local machine with its counterpart on the remote host.
PathType = namedtuple(
    'PathType',
    ['localpath', 'remotepath']
)
class OverrideValueError(ValueError):
    """Raised when a localization override cannot be applied to an input.

    Args:
        override: Name of the override that failed.
        arg: Name of the input the override was applied to.
        value: The offending input value.
    """
    def __init__(self, override, arg, value):
        # Bug fix: the format string has three placeholders but was only
        # given two arguments (``override`` was dropped), so constructing
        # this exception raised IndexError instead of reporting the error.
        super().__init__(
            "'{}' override is invalid for input {} with value {}".format(
                override, arg, value
            )
        )
class AbstractLocalizer(abc.ABC):
"""
Base class for localization.
"""
def __init__(
    self, backend: AbstractSlurmBackend, transfer_bucket: typing.Optional[str] = None,
    common: bool = True, staging_dir: typing.Optional[str] = None,
    project: typing.Optional[str] = None, temporary_disk_type: str = 'standard',
    local_download_dir: typing.Optional[str] = None, **kwargs
):
    """
    Initializes the Localizer using the given transport.
    Localizer assumes that the SLURMBackend is connected and functional during
    the localizer's entire life cycle.
    If staging_dir is not provided, a random directory is chosen.
    local_download_dir: Where `local` overrides should be saved. Default: /mnt/canine-local-downloads/(random id).
    temporary_disk_type: "standard" or "ssd". Default "standard".
    NOTE: If temporary_disk_type is explicitly "None", disks will not be created. Files will be downloaded
    to local_download_dir without mounting a disk there. The directory will not be created in that case
    """
    self.transfer_bucket = transfer_bucket
    # Normalize away a gs:// prefix so the bucket name can be used bare
    if transfer_bucket is not None and self.transfer_bucket.startswith('gs://'):
        self.transfer_bucket = self.transfer_bucket[5:]
    self.backend = backend
    self.common = common
    self.common_inputs = set()
    # Scratch directory on the local side; cleaned up when this object dies
    self._local_dir = tempfile.TemporaryDirectory()
    self.local_dir = self._local_dir.name
    # FIXME: This doesn't actually make sense. Unless we assume staging_dir == mount_path, then transport.normpath gives an inaccurate mount_path
    with self.backend.transport() as transport:
        # Random staging directory when none was requested
        self.staging_dir = transport.normpath(staging_dir if staging_dir is not None else str(uuid4()))
        # if transport.isdir(self.staging_dir) and not force:
        #     raise FileExistsError("{} already exists. Supply force=True to override".format(
        #         self.staging_dir
        #     ))
    self.inputs = {} # {jobId: {inputName: [(handle type, handle value), ...]}}
    self.input_array_flag = {} # {jobId: {inputName: <bool: is this an array?>}}
    self.clean_on_exit = True
    self.project = project if project is not None else get_default_gcp_project()
    self.local_download_size = {} # {jobId: size}
    # Random key so concurrent localizers get distinct download mounts
    self.disk_key = os.urandom(4).hex()
    self.local_download_dir = local_download_dir if local_download_dir is not None else '/mnt/canine-local-downloads/{}'.format(self.disk_key)
    self.temporary_disk_type = temporary_disk_type
    # Per-bucket cache for get_requester_pays lookups
    self.requester_pays = {}
def get_requester_pays(self, path: str) -> bool:
    """
    Returns True if the requested gs:// object or bucket resides in a
    requester pays bucket.

    Results are cached per-bucket in ``self.requester_pays``; only the
    first query for a bucket invokes gsutil.
    """
    if path.startswith('gs://'):
        path = path[5:]
    bucket = path.split('/')[0]
    if bucket not in self.requester_pays:
        command = 'gsutil requesterpays get gs://{}'.format(bucket)
        # We check on the remote host because scope differences may cause
        # a requester pays bucket owned by this account to require -u on the controller
        # better safe than sorry
        rc, sout, serr = self.backend.invoke(command)
        text = serr.read()
        if rc == 0 or b'BucketNotFoundException: 404' not in text:
            # Either the query succeeded or it failed for a reason other
            # than a missing bucket; infer the flag from the output text
            self.requester_pays[bucket] = (
                b'requester pays bucket but no user project provided' in text
                or 'gs://{}: Enabled'.format(bucket).encode() in sout.read()
            )
        else:
            # Try again ls-ing the object itself
            # sometimes permissions can disallow bucket inspection
            # but allow object inspection
            command = 'gsutil ls gs://{}'.format(path)
            rc, sout, serr = self.backend.invoke(command)
            text = serr.read()
            self.requester_pays[bucket] = b'requester pays bucket but no user project provided' in text
            if rc == 1 and b'BucketNotFoundException: 404' in text:
                canine_logging.error(text.decode())
                raise subprocess.CalledProcessError(rc, command)
    # The bucket may be absent from the cache if an earlier lookup raised
    return bucket in self.requester_pays and self.requester_pays[bucket]
def get_object_size(self, path: str) -> int:
    """
    Returns the total number of bytes of the given gsutil object.
    If a directory is given, this returns the total space used by all
    objects in the directory.
    """
    billing_flag = '-u {}'.format(self.project) if self.get_requester_pays(path) else ''
    cmd = 'gsutil {} du -s {}'.format(billing_flag, path)
    rc, sout, serr = self.backend.invoke(cmd)
    check_call(cmd, rc, sout, serr)
    # `du -s` prints "<bytes> <path>"; the first field is the byte count
    return int(sout.read().split()[0])
@contextmanager
def transport_context(self, transport: typing.Optional[AbstractTransport] = None) -> typing.ContextManager[AbstractTransport]:
    """
    Yields a file transport within the context.

    An existing transport, if given, is passed through untouched (and left
    open for the caller to manage). Otherwise a new transport is opened
    from the backend and closed when the context exits.
    """
    with ExitStack() as stack:
        active = transport if transport is not None else stack.enter_context(self.backend.transport())
        yield active
def environment(self, location: str) -> typing.Dict[str, str]:
    """
    Returns environment variables relative to the given location.

    Location must be one of {"local", "remote"}; "local" paths are rooted
    at the temporary local directory, "remote" paths at the staging dir.
    """
    if location not in {"local", "remote"}:
        raise ValueError('location must be one of {"local", "remote"}')
    root = self.local_dir if location == 'local' else self.staging_dir
    return {
        'CANINE_ROOT': root,
        'CANINE_COMMON': os.path.join(root, 'common'),
        # outputs/jobid/outputname/...files...
        'CANINE_OUTPUT': os.path.join(root, 'outputs'),
        'CANINE_JOBS': os.path.join(root, 'jobs'),
    }
def gs_dircp(self, src: str, dest: str, context: str, transport: typing.Optional[AbstractTransport] = None):
    """
    gs_copy for directories.

    context must be one of {'local', 'remote'}, which specifies
    where the command should be run.

    When uploading to gs://, the destination gs:// directory does not have to exist.
    When downloading from gs://, the destination directory does not have to exist,
    but its parent does.
    """
    assert context in {'local', 'remote'}
    # Whichever side is the gs:// URL drives the requester-pays check
    gs_obj = src if src.startswith('gs://') else dest
    if not dest.startswith('gs://'):
        # Download: make sure the destination directory exists on the
        # side that will run the copy
        if context == 'remote':
            with self.transport_context(transport) as transport:
                if not transport.exists(dest):
                    transport.makedirs(dest)
        else:
            if not os.path.exists(dest):
                os.makedirs(dest)
    if not src.startswith('gs://'):
        # Upload
        # Fix empty dirs by touching a file
        # Won't fix nested directory structure containing empty dirs,
        # but it seems inefficient to walk and touch directories
        if context == 'remote':
            self.backend.invoke('touch {}/.canine_dir_marker'.format(src))
        else:
            subprocess.run(['touch', '{}/.canine_dir_marker'.format(src)])
    command = "gsutil -m -o GSUtil:check_hashes=if_fast_else_skip -o GSUtil:parallel_composite_upload_threshold=150M {} cp -r {} {}".format(
        '-u {}'.format(self.project) if self.get_requester_pays(gs_obj) else '',
        src,
        dest
    )
    if context == 'remote':
        # Invoke interactively
        rc, sout, serr = self.backend.invoke(command, True)
        check_call(command, rc, sout, serr)
    else:
        subprocess.check_call(command, shell=True)
    if not dest.startswith('gs://'):
        # Clean up .dir file
        if context == 'remote':
            self.backend.invoke('rm -f {}/*/.canine_dir_marker'.format(dest))
        else:
            subprocess.run(['rm', '-f', '{}/*/.canine_dir_marker'.format(dest)])
def gs_copy(self, src: str, dest: str, context: str):
    """
    Copy a google storage (gs://) object.

    context must be one of {'local', 'remote'}, which specifies
    where the command should be run.

    When uploading to gs://, the destination gs:// directory does not have to exist.
    When downloading from gs://, the destination parent directory must exist.
    """
    assert context in {'local', 'remote'}
    gs_obj = src if src.startswith('gs://') else dest
    try:
        components = gs_obj[5:].split('/')
        bucket = _getblob_bucket(None, components[0], None)
        blobs = {
            blob.name
            for page in bucket.list_blobs(prefix='/'.join(components[1:]), fields='items/name,nextPageToken').pages
            for blob in page
        }
        if '/'.join(components[1:]) not in blobs:
            # No blob with the exact name: treat the object as a directory
            canine_logging.print("Copying directory:", gs_obj)
            return self.gs_dircp(src, os.path.dirname(dest), context)
    except Exception:
        # If the listing fails, proceed as a regular gs_copy.
        # Fix: narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are no longer swallowed here.
        traceback.print_exc()
    command = "gsutil -o GSUtil:check_hashes=if_fast_else_skip -o GSUtil:parallel_composite_upload_threshold=150M {} cp {} {}".format(
        '-u {}'.format(self.project) if self.get_requester_pays(gs_obj) else '',
        src,
        dest
    )
    if context == 'remote':
        rc, sout, serr = self.backend.invoke(command, True)
        check_call(command, rc, sout, serr)
    else:
        subprocess.check_call(command, shell=True)
def sendtree(self, src: str, dest: str, transport: typing.Optional[AbstractTransport] = None, exist_okay=False):
    """
    Transfers the given local folder to the given remote destination.
    Source must be a local folder, and destination must not exist
    (unless exist_okay is True).

    Transfer goes either through a gs:// staging bucket (when
    ``self.transfer_bucket`` is set) or directly over SFTP.
    """
    # Local backend: a plain filesystem copy suffices
    if isinstance(self.backend, LocalSlurmBackend):
        if exist_okay:
            canine_logging.warning("exist_okay not supported by LocalSlurmBackend")
        return shutil.copytree(src, dest)
    with self.transport_context(transport) as transport:
        if not os.path.isdir(src):
            raise ValueError("Not a directory: "+src)
        if transport.exists(dest) and not exist_okay:
            raise ValueError("Destination already exists: "+dest)
        dest = transport.normpath(dest)
        if self.transfer_bucket is not None:
            # Two-hop transfer: local -> bucket -> remote
            canine_logging.print("Transferring through bucket", self.transfer_bucket)
            # Random prefix avoids collisions between concurrent transfers
            path = os.path.join(str(uuid4()), os.path.basename(dest))
            self.gs_dircp(
                src,
                'gs://{}/{}'.format(self.transfer_bucket, path),
                'local',
                transport=transport
            )
            if not transport.isdir(os.path.dirname(dest)):
                transport.makedirs(os.path.dirname(dest))
            try:
                self.gs_dircp(
                    'gs://{}/{}'.format(self.transfer_bucket, path),
                    os.path.dirname(dest),
                    'remote',
                    transport=transport
                )
            except:
                # Point the user at the staged copy before re-raising
                canine_logging.print(
                    crayons.red("ERROR:", bold=True),
                    "Failed to download the data on the remote system."
                    "Your files are still saved in",
                    'gs://{}/{}'.format(self.transfer_bucket, path),
                    file=sys.stderr, type = "error"
                )
                raise
            # Clean up the staging area in the bucket
            cmd = "gsutil -m {} rm -r gs://{}/{}".format(
                '-u {}'.format(self.project) if self.get_requester_pays(self.transfer_bucket) else '',
                self.transfer_bucket,
                os.path.dirname(path),
            )
            canine_logging.info1(cmd)
            subprocess.check_call(
                cmd,
                shell=True
            )
        else:
            canine_logging.info1("Transferring directly over SFTP")
            transport.sendtree(src, dest)
def receivetree(self, src: str, dest: str, transport: typing.Optional[AbstractTransport] = None, exist_okay=False):
"""
Transfers the given remote folder to the given local destination.
Source must be a remote folder, and dest must not exist
"""
if isinstance(self.backend, LocalSlurmBackend):
if | |
"""Cloudhands DB server functions
This file contains functions which either make direct modifications to the
Cloudhands database, request information from the DB, or bundle a series of
functions which cause a number of DB changes to take effect.
"""
from collections import OrderedDict
#We need everything from the models
from eos_db.models import ( Artifact, Appliance, Registration,
Membership, GroupMembership,
Actor, Component, User, Ownership,
Touch, State, ArtifactState, Deboost,
Resource, Node, Password, Credit,
Specification, Base )
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import func
from sqlalchemy.exc import IntegrityError
from datetime import datetime, timedelta
engine = None # Assume no default database connection
# Load config.  DB stays None when no settings module is available.
DB = None
try:
    from eos_db.settings import DBDetails as DB
except:
    # This bare except statement is legit.
    # If no settings file is supplied, we connect to the database eos_db without
    # a username or password - ie. rely on PostgreSQL ident auth.
    pass
def with_session(f):
    """Decorator that automatically passes a Session to a function and then shuts
    the session down at the end, unless a session was already passed through.

    The decorator itself takes no arguments. The decorated function must accept
    a ``session`` keyword argument.  If the caller supplies ``session`` itself,
    the caller keeps ownership of it (no commit/close happens here).
    """
    import functools

    # Fix: functools.wraps preserves the decorated function's name/docstring,
    # which the original decorator discarded.
    @functools.wraps(f)
    def inner(*args, **kwargs):
        # If a session is passed in kwargs, the local ``session`` variable
        # stays None and is left for the caller to commit and close.
        session = None
        if not kwargs.get('session'):
            Session = sessionmaker(bind=engine, expire_on_commit=False)
            session = Session()
            kwargs['session'] = session
        try:
            res = f(*args, **kwargs)
        except Exception:
            if session:
                session.close()
            # Fix: bare raise preserves the original traceback
            # (``raise e`` re-raised from this frame).
            raise
        if session:
            session.commit()
            session.close()
        return res
    return inner
def choose_engine(enginestring, replace=True):
    """
    Create a connection to a database. If Postgres is selected, this will
    connect to the database specified in the settings.py file. If SQLite is
    selected, then the system will use an in-memory SQLite database.

    As stated in
    http://docs.sqlalchemy.org/en/latest/core/engines.html#configuring-logging
    one should only use echo=True for blanket debugging. Use the logger
    settings for sqlalchemy.engine instead.

    :param enginestring: "PostgreSQL" or "SQLite".
    :param replace: If False and an engine already exists, keep it.
    """
    global engine
    if engine and not replace:
        return
    if enginestring == "PostgreSQL":
        if DB and DB.username:
            # Password auth
            url = 'postgresql://%s:%s@%s/%s' % (DB.username,
                                                DB.password,
                                                DB.host,
                                                DB.database)
        elif DB:
            # Ident auth against the configured database
            url = 'postgresql:///%s' % (DB.database)
        else:
            # No settings at all: ident auth against the default database
            url = 'postgresql:///eos_db'
        engine = create_engine(url, echo=False)
    elif enginestring == "SQLite":
        engine = create_engine('sqlite://', echo=False)
    else:
        raise LookupError("Invalid server type.")
    # Always do this. This bootstraps the database for us, and ensures
    # any new states are added.
    setup_states()
def override_engine(engine_string, echo=True):
    """Sets the target database explicitly to a different location than that
    specified in the server module.

    Note that this does not deploy the tables - you need to call setup_states()
    or deploy_tables() explicitly afterwards if you want to do that.

    :param engine_string: A SQLAlchemy server string, eg. 'sqlite://'
    :param echo: Whether SQLAlchemy should echo SQL statements (debug aid).
    """
    global engine
    engine = create_engine(engine_string, echo=echo)
def deploy_tables():
    """Create tables in their current state in the currently connected
    database.

    Idempotent: SQLAlchemy's create_all only creates tables that are missing.
    """
    Base.metadata.create_all(engine)
def get_state_list():
    """The state list is a union of the internal states we need to function
    plus anything else in EXTRA_STATES (eos_db.settings.MachineStates).

    :returns: Tuple of state names; internal states first, then any extra
              states not already present, in their configured order.
    """
    state_list = (
        'Started',
        'Stopped',
        'Restarting',
        'Starting',
        'Starting_Boosted',
        'Stopping',
        'Preparing',
        'Prepared',
        'Pre_Deboosting',
        'Pre_Deboosted',
        'Deboosted',
        'Boosting',    # Transitional state
        'Deboosting',  # Transitional state
        'Error'
    )
    try:
        from eos_db.settings import MachineStates as EXTRA_STATES
        return state_list + tuple(s for s in EXTRA_STATES.state_list
                                  if s not in state_list)
    except (ImportError, AttributeError):
        # Fix: narrowed from a bare ``except``.  A missing settings module or
        # a settings module without ``state_list`` simply means there are no
        # extra states; any other error now propagates instead of being
        # silently swallowed.
        return state_list
def setup_states(ignore_dupes=True):
    """ Write the list of valid states to the database.

    The states are in server.py and may be supplemented in settings.py.

    With ignore_dupes=False this will throw an exception if you try to
    add the same state twice, otherwise it will just ignore the error - ie.
    it will just add new states and will be idempotent.

    :returns: Number of states newly added.
    """
    Base.metadata.create_all(engine)
    added = 0
    for state_name in get_state_list():
        try:
            create_artifact_state(state_name)
        except IntegrityError:
            # Duplicate state; either ignore or re-raise per the flag
            if not ignore_dupes:
                raise
        else:
            added += 1
    return added
@with_session
def list_user_ids(session):
    """Lists all active user IDs.

    :returns: List of user ids, one per distinct username.
    """
    # Note that, like for servers, if a new user is created with the same
    # name it overwrites the previous record, so each distinct username is
    # resolved to its current id.
    # Fix: this used to be a generator.  @with_session commits and closes the
    # session as soon as the generator object is returned, so the query body
    # only ran after the session had been closed.  Materialising the result
    # keeps all database work inside the session's lifetime.
    return [get_user_id_from_name(n[0], session=session)
            for n in session.query(User.username).distinct()]
def create_user(type, handle, name, username):
    """Create a new user record. Handle/uuid must be unique e-mail address.

    :param type: Group to add the new user to (skipped if falsy).
                 NOTE(review): this parameter shadows the builtin ``type``;
                 renaming it would break keyword callers, so it stays.
    :param handle: Unique handle, stored as both uuid and handle.
    :param name: Display name for the user.
    :param username: Login name for the user.
    :returns: The id of the new User row.
    """
    Base.metadata.create_all(engine)
    user_id = _create_thingy(User(name=name, username=username, uuid=handle, handle=handle))
    # Add this user to a group
    if type:
        create_group_membership(_create_touch(user_id, None, None), type)
    return user_id
def touch_to_add_user_group(username, group):
    """ Adds a touch to the database, then links it to a new user group
    record.

    :param username: Name of an existing user.
    :param group: Group name to record in the new membership.
    :returns: The id of the new Touch row.
    """
    # FIXME? Should this use the user_id, not username, for consistency? Not yet sure.
    user_id = get_user_id_from_name(username)
    touch_id = _create_touch(user_id, None, None)
    create_group_membership(touch_id, group)
    return touch_id
def create_group_membership(touch_id, group):
    """ Create a new group membership resource linked to the given touch.

    :param touch_id: Id of the Touch row recording this change.
    :param group: Group name for the membership.
    :returns: The id of the new GroupMembership row.
    """
    # FIXME2 - this is only ever used by the function above so fold the code in.
    Base.metadata.create_all(engine)
    #return _create_thingy(GroupMembership(group=group))
    # FIXME (Tim) - touch_id was unused, so clearly this was broken. Test as-is first.
    return _create_thingy(GroupMembership(group=group, touch_id=touch_id))
@with_session
def get_user_group(username, session):
    """ Get the most recent group associated with a given username.

    :param username: Name of the user, or None.
    :returns: The group name, or None when username is None or the user has
              no group membership on record.
    """
    # Guard clause replaces the original if/else nesting
    if username is None:
        return None
    actor_id = get_user_id_from_name(username, session=session)
    group = (session
             .query(GroupMembership.group)
             .filter(GroupMembership.touch_id == Touch.id)
             .filter(Touch.actor_id == actor_id)
             .order_by(Touch.touch_dt.desc())
             .first())
    #print("get_user_group: User %s is in group %s" % (username, group[0]))
    # Fix: .first() returns None when no membership exists; the original
    # unconditional group[0] raised TypeError for users without a group.
    return group[0] if group is not None else None
def create_appliance(name, uuid):
    """ Create a new VApp record and return its id. """
    # FIXME: We shoehorn VMs into the Vapp record.
    # VMs should go into the "Node" object.
    Base.metadata.create_all(engine)
    return _create_thingy(Appliance(uuid=uuid, name=name))
def create_artifact_state(state_name):
    """ Create a new artifact state row and return its id.

    ArtifactState subclasses State.  See the relevant docs in the model.
    """
    return _create_thingy(ArtifactState(name=state_name))
@with_session
def _create_thingy(sql_entity, session):
    """Internal call that holds the boilerplate for putting a new SQLAlchemy object
    into the database; returns the generated primary key.
    BC suggested this should be a decorator but I don't think
    that aids legibility. Maybe should rename this though.
    """
    session.add(sql_entity)
    #Note that this commit causes the .id to be populated.
    session.commit()
    return sql_entity.id
def change_node_state(node_id, state_id):
    """
    Unused stub; kept so the intended Node API shape stays visible.
    """
    pass # FIXME: See above for comments related to Vapps and VMs.
def create_node():
    """
    Unused stub; kept so the intended Node API shape stays visible.
    """
    pass # FIXME: See above for comments related to Vapps and VMs.
@with_session
def list_artifacts_for_user(user_id, session):
    """Returns a list of dictionaries listing pertinent information about
    user's artifacts.
    :param user_id: A valid user id for which we want to list details.
    :returns: List of dictionaries containing pertinent info.
    """
    # This bit was _list_artifacts_for_user(user_id)
    servers = (session
               .query(Artifact.id, Artifact.name, Artifact.uuid)
               .all())
    # Because of my logic that adding a new server with an existing name masks
    # the old server, we actually want to get all the servers here and then
    # post-filter them, or at least that is the simplest approach.
    #OrderedDict gives me the property of updating any server listed
    #twice while still maintaining database order.
    artifacts = OrderedDict()
    for server in servers:
        #Cleanup CHAR values - really should fix this in the DB
        server = list(server)
        server[1] = server[1].rstrip()
        server[2] = server[2].rstrip()
        #END workaround
        # Delete-then-reinsert moves a re-listed name to the end of the
        # OrderedDict, so the *latest* record for a name wins positionally.
        if server[1] in artifacts:
            del artifacts[server[1]]
        if check_ownership(server[0], user_id, session=session):
            artifacts[server[1]] = return_artifact_details(*server, session=session)
    return artifacts.values()
@with_session
def return_artifact_details(artifact_id, artifact_name=None, artifact_uuid=None, session=None):
    """ Return basic information about one server, for display.

    :param artifact_id: Primary key of the artifact (server).
    :param artifact_name: Optional cached name; looked up when omitted.
    :param artifact_uuid: Optional cached uuid; looked up when omitted.
    :returns: dict of display fields (timestamps trimmed to minute precision).
    """
    change_dt = _get_most_recent_change(artifact_id, session=session)
    create_dt = _get_artifact_creation_date(artifact_id, session=session)
    state = check_state(artifact_id, session=session)
    boosted = _get_server_boost_status(artifact_id)
    boostremaining = "N/A"
    deboost_time = 0
    deboost_credit = 0
    #Because get_time_until_deboost() might report a deboost time for an un-boosted
    #server if it was manually deboosted, check the status
    if boosted == "Boosted":
        time_for_deboost = get_time_until_deboost(artifact_id, session=session)
        boostremaining = time_for_deboost[2] or "Not set"
        # Get deboost time as UNIX seconds-since-epoch
        # Any browser will be able to render this as local time by using:
        #   var d = new Date(0) ; d.setUTCSeconds(deboost_time)
        # NOTE(review): strftime("%s") is a platform (glibc) extension -
        # confirm it is acceptable here, or switch to timestamp().
        deboost_time = time_for_deboost[0].strftime("%s") if time_for_deboost[0] else 0
        deboost_credit = time_for_deboost[3]
    try:
        cores, ram = get_latest_specification(artifact_id, session=session)
        ram = str(ram) + " GB"
    except Exception:
        # Best-effort: a server with no specification yet still renders.
        # (Was a bare "except:", which also swallowed KeyboardInterrupt.)
        cores, ram = "N/A", "N/A"
    if state is None:
        state = "Not yet initialised"
    if not artifact_uuid:
        artifact_uuid = get_server_uuid_from_id(artifact_id, session=session)
    if not artifact_name:
        artifact_name = get_server_name_from_id(artifact_id, session=session)
    return({"artifact_id": artifact_id,
            "artifact_uuid": artifact_uuid,
            "artifact_name": artifact_name,
            "change_dt": str(change_dt[0])[0:16],
            "create_dt": str(create_dt[0])[0:16],
            "state": state,
            "boosted": boosted,
            "cores": cores,
            "ram": ram,
            "boostremaining": boostremaining,
            "deboost_time": deboost_time,
            "deboost_credit": deboost_credit
            })
#FIXME - rationalise these to three functions:
# get_server_by_name
#   get_server_by_id
# get_server_by_uuid
# That all | |
#! /usr/bin/python3
## type hints are provided in 'types/systemctl3.pyi'
from __future__ import print_function
__copyright__ = "(C) 2016-2020 <NAME>, licensed under the EUPL"
__version__ = "1.5.4505"
import logging
logg = logging.getLogger("systemctl")
from types import GeneratorType
import re
import fnmatch
import shlex
import collections
import errno
import os
import sys
import signal
import time
import socket
import datetime
import string
import fcntl
import select
import hashlib
import pwd
import grp
import threading
# Python 2/3 compatibility shims.
# NOTE(review): sys.version[0] inspects only the first character of the
# version string; sys.version_info[0] would be the robust spelling.
if sys.version[0] == '3':
    basestring = str
    xrange = range
# Per-feature debug switches; each one gates extra logg.debug output.
DEBUG_AFTER = False
DEBUG_STATUS = False
DEBUG_BOOTTIME = False
DEBUG_INITLOOP = False
DEBUG_KILLALL = False
DEBUG_FLOCK = False
# Presumably test hooks for socket handling - TODO confirm usage below.
TestListen = False
TestAccept = False
def logg_debug_flock(format, *args):
    """Debug logging for file-lock handling, active only when DEBUG_FLOCK is set."""
    if not DEBUG_FLOCK:
        return
    logg.debug(format, *args) # pragma: no cover
def logg_debug_after(format, *args):
    """Debug logging for After= ordering, active only when DEBUG_AFTER is set."""
    if not DEBUG_AFTER:
        return
    logg.debug(format, *args) # pragma: no cover
# Result codes (names mirror the FOUND_* aliases in the comments).
NOT_A_PROBLEM = 0 # FOUND_OK
NOT_OK = 1 # FOUND_ERROR
NOT_ACTIVE = 2 # FOUND_INACTIVE
NOT_FOUND = 4 # FOUND_UNKNOWN
# defaults for options (overwritten by the command-line parser elsewhere)
_extra_vars = []
_force = False
_full = False
_log_lines = 0
_no_pager = False
_now = False
_no_reload = False
_no_legend = False
_no_ask_password = False
_preset_mode = "all"
_quiet = False
_root = ""
_unit_type = None
_unit_state = None
_unit_property = None
_what_kind = ""
_show_all = False
_user_mode = False
# common default paths (searched in numeric order; *X holds an override)
_system_folder1 = "/etc/systemd/system"
_system_folder2 = "/run/systemd/system"
_system_folder3 = "/var/run/systemd/system"
_system_folder4 = "/usr/local/lib/systemd/system"
_system_folder5 = "/usr/lib/systemd/system"
_system_folder6 = "/lib/systemd/system"
_system_folderX = None
# {XDG_*} placeholders are expanded later via expand_path()
_user_folder1 = "{XDG_CONFIG_HOME}/systemd/user"
_user_folder2 = "/etc/systemd/user"
_user_folder3 = "{XDG_RUNTIME_DIR}/systemd/user"
_user_folder4 = "/run/systemd/user"
_user_folder5 = "/var/run/systemd/user"
_user_folder6 = "{XDG_DATA_HOME}/systemd/user"
_user_folder7 = "/usr/local/lib/systemd/user"
_user_folder8 = "/usr/lib/systemd/user"
_user_folder9 = "/lib/systemd/user"
_user_folderX = None
_init_folder1 = "/etc/init.d"
_init_folder2 = "/run/init.d"
_init_folder3 = "/var/run/init.d"
_init_folderX = None
_preset_folder1 = "/etc/systemd/system-preset"
_preset_folder2 = "/run/systemd/system-preset"
_preset_folder3 = "/var/run/systemd/system-preset"
_preset_folder4 = "/usr/local/lib/systemd/system-preset"
_preset_folder5 = "/usr/lib/systemd/system-preset"
_preset_folder6 = "/lib/systemd/system-preset"
_preset_folderX = None
# standard paths ({pid} placeholders are filled in with .format later)
_dev_null = "/dev/null"
_dev_zero = "/dev/zero"
_etc_hosts = "/etc/hosts"
_rc3_boot_folder = "/etc/rc3.d"
_rc3_init_folder = "/etc/init.d/rc3.d"
_rc5_boot_folder = "/etc/rc5.d"
_rc5_init_folder = "/etc/init.d/rc5.d"
_proc_pid_stat = "/proc/{pid}/stat"
_proc_pid_status = "/proc/{pid}/status"
_proc_pid_cmdline= "/proc/{pid}/cmdline"
_proc_pid_dir = "/proc"
_proc_sys_uptime = "/proc/uptime"
_proc_sys_stat = "/proc/stat"
# default values (timeouts in seconds unless noted otherwise)
SystemCompatibilityVersion = 219
SysInitTarget = "sysinit.target"
SysInitWait = 5 # max for target
MinimumYield = 0.5
MinimumTimeoutStartSec = 4
MinimumTimeoutStopSec = 4
DefaultTimeoutStartSec = 90 # official value
DefaultTimeoutStopSec = 90 # official value
DefaultTimeoutAbortSec = 3600 # officially it is none (usually larger than StopSec)
DefaultMaximumTimeout = 200 # overrides all other
DefaultRestartSec = 0.1 # official value of 100ms
DefaultStartLimitIntervalSec = 10 # official value
DefaultStartLimitBurst = 5 # official value
InitLoopSleep = 5
MaxLockWait = 0 # equals DefaultMaximumTimeout
DefaultPath = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
ResetLocale = ["LANG", "LANGUAGE", "LC_CTYPE", "LC_NUMERIC", "LC_TIME", "LC_COLLATE", "LC_MONETARY",
               "LC_MESSAGES", "LC_PAPER", "LC_NAME", "LC_ADDRESS", "LC_TELEPHONE", "LC_MEASUREMENT",
               "LC_IDENTIFICATION", "LC_ALL"]
LocaleConf="/etc/locale.conf"
DefaultListenBacklog=2
ExitWhenNoMoreServices = False
ExitWhenNoMoreProcs = False
DefaultUnit = os.environ.get("SYSTEMD_DEFAULT_UNIT", "default.target") # systemd.exe --unit=default.target
DefaultTarget = os.environ.get("SYSTEMD_DEFAULT_TARGET", "multi-user.target") # DefaultUnit fallback
# LogLevel = os.environ.get("SYSTEMD_LOG_LEVEL", "info") # systemd.exe --log-level
# LogTarget = os.environ.get("SYSTEMD_LOG_TARGET", "journal-or-kmsg") # systemd.exe --log-target
# LogLocation = os.environ.get("SYSTEMD_LOG_LOCATION", "no") # systemd.exe --log-location
# ShowStatus = os.environ.get("SYSTEMD_SHOW_STATUS", "auto") # systemd.exe --show-status
DefaultStandardInput=os.environ.get("SYSTEMD_STANDARD_INPUT", "null")
DefaultStandardOutput=os.environ.get("SYSTEMD_STANDARD_OUTPUT", "journal") # systemd.exe --default-standard-output
DefaultStandardError=os.environ.get("SYSTEMD_STANDARD_ERROR", "inherit") # systemd.exe --default-standard-error
EXEC_SPAWN = False
EXEC_DUP2 = True
REMOVE_LOCK_FILE = False
BOOT_PID_MIN = 0
BOOT_PID_MAX = -9
PROC_MAX_DEPTH = 100
EXPAND_VARS_MAXDEPTH = 20
EXPAND_KEEP_VARS = True
RESTART_FAILED_UNITS = True
ACTIVE_IF_ENABLED=False
TAIL_CMD = "/usr/bin/tail"
LESS_CMD = "/usr/bin/less"
CAT_CMD = "/usr/bin/cat"
# The systemd default was NOTIFY_SOCKET="/var/run/systemd/notify"
_notify_socket_folder = "{RUN}/systemd" # alias /run/systemd
_journal_log_folder = "{LOG}/journal"
SYSTEMCTL_DEBUG_LOG = "{LOG}/systemctl.debug.log"
SYSTEMCTL_EXTRA_LOG = "{LOG}/systemctl.log"
_default_targets = [ "poweroff.target", "rescue.target", "sysinit.target", "basic.target", "multi-user.target", "graphical.target", "reboot.target" ]
_feature_targets = [ "network.target", "remote-fs.target", "local-fs.target", "timers.target", "nfs-client.target" ]
_all_common_targets = [ "default.target" ] + _default_targets + _feature_targets
# inside a docker we pretend the following
_all_common_enabled = [ "default.target", "multi-user.target", "remote-fs.target" ]
# FIX: "resue.target" was a typo for "rescue.target" (it could never match a real unit)
_all_common_disabled = [ "graphical.target", "rescue.target", "nfs-client.target" ]
target_requires = { "graphical.target": "multi-user.target", "multi-user.target": "basic.target", "basic.target": "sockets.target" }
_runlevel_mappings = {} # the official list
_runlevel_mappings["0"] = "poweroff.target"
_runlevel_mappings["1"] = "rescue.target"
_runlevel_mappings["2"] = "multi-user.target"
_runlevel_mappings["3"] = "multi-user.target"
_runlevel_mappings["4"] = "multi-user.target"
_runlevel_mappings["5"] = "graphical.target"
_runlevel_mappings["6"] = "reboot.target"
_sysv_mappings = {} # by rule of thumb
_sysv_mappings["$local_fs"] = "local-fs.target"
_sysv_mappings["$network"] = "network.target"
_sysv_mappings["$remote_fs"] = "remote-fs.target"
_sysv_mappings["$timer"] = "timers.target"
def strINET(value):
    """Map a socket type constant to a short protocol tag ("<?>" if unknown)."""
    names = {
        socket.SOCK_DGRAM: "UDP",
        socket.SOCK_STREAM: "TCP",
        socket.SOCK_RAW: "RAW",       # pragma: no cover
        socket.SOCK_RDM: "RDM",       # pragma: no cover
        socket.SOCK_SEQPACKET: "SEQ", # pragma: no cover
    }
    return names.get(value, "<?>") # pragma: no cover (fallback)
def strYes(value):
    """Render a flag: True -> "yes", any falsy value -> "no", else str(value)."""
    if value is True:
        return "yes"
    return str(value) if value else "no"
def strE(part):
    """str() that maps falsy values (None, "", 0, ...) to the empty string."""
    return str(part) if part else ""
def strQ(part):
    """Quote a command argument for display: None -> "", ints unquoted,
    everything else wrapped in single quotes."""
    if part is None:
        return ""
    return str(part) if isinstance(part, int) else "'%s'" % part
def shell_cmd(cmd):
    """Render an argv list as a display string, quoting each part via strQ()."""
    return " ".join(strQ(arg) for arg in cmd)
def to_intN(value, default = None):
    """Parse *value* as an int.

    Returns *default* (None unless given) when value is empty/falsy or not
    a valid integer.  The previous bare ``except`` also swallowed
    KeyboardInterrupt/SystemExit; only conversion errors are caught now.
    """
    if not value:
        return default
    try:
        return int(value)
    except (TypeError, ValueError):
        return default
def to_int(value, default = 0):
    """Parse *value* as an int, returning *default* on conversion failure.

    Narrowed from a bare ``except`` to the actual int() failure modes.
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        return default
def to_list(value):
    """Coerce *value* into a list: falsy -> [], lists returned as-is
    (same object), tuples copied, anything else str()'d and split on ","."""
    if not value:
        return []
    if isinstance(value, list):
        return value
    if isinstance(value, tuple):
        return list(value)
    return str(value).split(",")
def int_mode(value):
    """Parse an octal file-mode string ("0644" -> 0o644); None when invalid.

    Narrowed from a bare ``except`` to the actual int() failure modes.
    """
    try:
        return int(value, 8)
    except (TypeError, ValueError):
        return None # pragma: no cover
def unit_of(module):
    """Default a bare unit name to a .service unit; names with a suffix pass through."""
    return module if "." in module else module + ".service"
def o22(part):
    """Ellipsize a string to at most 22 characters (5-char head ... 14-char tail)."""
    if not isinstance(part, basestring):
        return part # pragma: no cover (is always str)
    if len(part) <= 22:
        return part
    return part[:5] + "..." + part[-14:]
def o44(part):
    """Ellipsize a string to at most 44 characters (10-char head ... 31-char tail)."""
    if not isinstance(part, basestring):
        return part # pragma: no cover (is always str)
    if len(part) <= 44:
        return part
    return part[:10] + "..." + part[-31:]
def o77(part):
    """Ellipsize a string to at most 77 characters (20-char head ... 54-char tail)."""
    if not isinstance(part, basestring):
        return part # pragma: no cover (is always str)
    if len(part) <= 77:
        return part
    return part[:20] + "..." + part[-54:]
def unit_name_escape(text):
    """Escape a string for use in a systemd unit name.

    Mirrors systemd-escape(1): characters outside the allowed set (ASCII
    letters, digits, ":", "_", ".", "-", "/") become "\\xXX" hex escapes,
    then "/" separators are turned into "-".
    https://www.freedesktop.org/software/systemd/man/systemd.unit.html#id-1.6
    FIX: the previous class "[^a-z-AZ.-/]" listed only the letters 'A' and
    'Z', so digits and uppercase B-Y were wrongly escaped.
    """
    esc = re.sub("([^a-zA-Z0-9:_./-])", lambda m: "\\x%02x" % ord(m.group(1)[0]), text)
    return esc.replace("/", "-")
def unit_name_unescape(text):
    """Reverse unit_name_escape(): "-" back to "/" and \\xXX back to characters."""
    slashed = text.replace("-", "/")
    return re.sub(r"\\x(..)", lambda m: chr(int(m.group(1), 16)), slashed)
def is_good_root(root):
    """A usable --root prefix is empty/None or nested at least two levels deep."""
    if not root:
        return True
    inner = root.strip(os.path.sep)
    return inner.count(os.path.sep) > 1
def os_path(root, path):
    """Re-root *path* under *root*, unless there is nothing to do (no root,
    no path, or path already lives under a good root)."""
    if not root or not path:
        return path
    if is_good_root(root) and path.startswith(root):
        return path
    relative = path.lstrip(os.path.sep)
    return os.path.join(root, relative)
def path_replace_extension(path, old, new):
    """Swap a trailing *old* extension for *new*; if *old* is absent, *new*
    is simply appended."""
    stem = path[:-len(old)] if path.endswith(old) else path
    return stem + new
def get_PAGER():
    """Resolve the pager command as an argv list.

    $SYSTEMD_PAGER wins (with "{PAGER}" expanding to $PAGER, default
    "less"); an empty pager falls back to "cat"; less-style pagers get the
    $SYSTEMD_LESS options (default "FRSXMK", see 'man timedatectl').
    """
    PAGER = os.environ.get("PAGER", "less")
    pager = os.environ.get("SYSTEMD_PAGER", "{PAGER}").format(PAGER=PAGER)
    options = os.environ.get("SYSTEMD_LESS", "FRSXMK")
    if not pager:
        pager = "cat"
    if "less" in pager and options:
        return [pager, "-" + options]
    return [pager]
def os_getlogin():
    """Effective user name (deliberately NOT os.getlogin(), which reads the tty)."""
    entry = pwd.getpwuid(os.geteuid())
    return entry.pw_name
def get_runtime_dir():
    """$XDG_RUNTIME_DIR when set, otherwise a per-user /tmp fallback."""
    configured = os.environ.get("XDG_RUNTIME_DIR", "")
    if configured:
        return configured
    return "/tmp/run-" + os_getlogin()
def get_RUN(root = False):
    """Return the runtime state directory (/run, or the per-user equivalent).

    Tries the conventional candidates in order and returns the first that
    exists and is writable; only when none qualifies is the final
    (tmp-based) candidate created.  A --root override replaces the tmp base.
    FIX: previously the makedirs/return pair sat *inside* the loop, so the
    fallback candidates were dead code and the first candidate was created
    blindly even when not writable.
    """
    tmp_var = get_TMP(root)
    if _root:
        tmp_var = _root
    if root:
        for p in ("/run", "/var/run", "{tmp_var}/run"):
            path = p.format(**locals())
            if os.path.isdir(path) and os.access(path, os.W_OK):
                return path
        os.makedirs(path) # "/tmp/run"
        return path
    else:
        uid = get_USER_ID(root)
        for p in ("/run/user/{uid}", "/var/run/user/{uid}", "{tmp_var}/run-{uid}"):
            path = p.format(**locals())
            if os.path.isdir(path) and os.access(path, os.W_OK):
                return path
        os.makedirs(path, 0o700) # "/tmp/run-{uid}"
        return path
def get_PID_DIR(root = False):
    """Directory for PID files; user mode appends "run" for compatibility
    with older systemctl.py layouts."""
    base = get_RUN(root)
    return base if root else os.path.join(base, "run")
def get_home():
    """Home directory of the current user.

    Delegates to os.path.expanduser("~"): an initial "~" is replaced by
    $HOME when set, otherwise the current user's home directory is looked
    up in the password database (see the os.path.expanduser docs).
    FIX: removed an unreachable hand-rolled lookup that was guarded by
    "if False:" and (had it ever run) returned pw_name instead of a path.
    """
    return os.path.expanduser("~")
def get_HOME(root = False):
    """Home directory for the target context ("/root" when acting as root)."""
    return "/root" if root else get_home()
def get_USER_ID(root = False):
    """Numeric uid for the target context (0 when acting as root)."""
    return 0 if root else os.geteuid()
def get_USER(root = False):
    """User name for the target context ("root" when acting as root)."""
    if root:
        return "root"
    return pwd.getpwuid(os.geteuid()).pw_name
def get_GROUP_ID(root = False):
    """Numeric gid for the target context (0 when acting as root)."""
    return 0 if root else os.getegid()
def get_GROUP(root = False):
    """Group name for the target context ("root" when acting as root)."""
    if root:
        return "root"
    return grp.getgrgid(os.getegid()).gr_name
def get_TMP(root = False):
    """Temp directory: fixed /tmp for root, else the first of
    $TMPDIR / $TEMP / $TMP that is set."""
    if root:
        return "/tmp"
    for var in ("TMPDIR", "TEMP", "TMP"):
        if var in os.environ:
            return os.environ[var]
    return "/tmp"
def get_VARTMP(root = False):
    """Persistent temp directory: fixed /var/tmp for root, else the first
    of $TMPDIR / $TEMP / $TMP that is set."""
    if root:
        return "/var/tmp"
    for var in ("TMPDIR", "TEMP", "TMP"):
        if var in os.environ:
            return os.environ[var]
    return "/var/tmp"
def get_SHELL(root = False):
    """Login shell: fixed /bin/sh for root, else $SHELL (default /bin/sh)."""
    if root:
        return "/bin/sh"
    return os.environ.get("SHELL", "/bin/sh")
def get_RUNTIME_DIR(root = False):
    """XDG runtime dir: fixed /run for root, else $XDG_RUNTIME_DIR with a
    per-user fallback.  NOTE: the fallback get_runtime_dir() is evaluated
    eagerly (it is the .get default argument), same as the original."""
    if root:
        return "/run"
    return os.environ.get("XDG_RUNTIME_DIR", get_runtime_dir())
def get_CONFIG_HOME(root = False):
    """XDG config home: fixed /etc for root, else $XDG_CONFIG_HOME with a
    ~/.config fallback."""
    if root:
        return "/etc"
    home = get_HOME(root)
    return os.environ.get("XDG_CONFIG_HOME", home + "/.config")
def get_CACHE_HOME(root = False):
    """XDG cache home: fixed /var/cache for root, else $XDG_CACHE_HOME with
    a ~/.cache fallback."""
    if root:
        return "/var/cache"
    home = get_HOME(root)
    return os.environ.get("XDG_CACHE_HOME", home + "/.cache")
def get_DATA_HOME(root = False):
    """XDG data home: fixed /usr/share for root, else $XDG_DATA_HOME with a
    ~/.local/share fallback."""
    if root:
        return "/usr/share"
    home = get_HOME(root)
    return os.environ.get("XDG_DATA_HOME", home + "/.local/share")
def get_LOG_DIR(root = False):
    """Log directory: /var/log for root, else <config-home>/log."""
    if root:
        return "/var/log"
    return os.path.join(get_CONFIG_HOME(root), "log")
def get_VARLIB_HOME(root = False):
    """State directory: /var/lib for root, else the user's config home."""
    if root:
        return "/var/lib"
    return get_CONFIG_HOME(root)
def expand_path(path, root = False):
    """Expand {HOME}/{RUN}/{LOG}/{XDG_*} placeholders and a leading "~" in *path*."""
    HOME = get_HOME(root)
    RUN = get_RUN(root)
    LOG = get_LOG_DIR(root)
    XDG_DATA_HOME=get_DATA_HOME(root)
    XDG_CONFIG_HOME=get_CONFIG_HOME(root)
    XDG_RUNTIME_DIR=get_RUNTIME_DIR(root)
    # "${VAR}" shell style is normalized to "{VAR}" so .format can fill it.
    filled = path.replace("${", "{").format(**locals())
    return os.path.expanduser(filled)
def shutil_chown(path, user, group):
    """chown *path* by name; a user implies that user's primary group
    unless *group* overrides it.  A no-op when both are falsy."""
    if not user and not group:
        return
    uid, gid = -1, -1
    if user:
        entry = pwd.getpwnam(user)
        uid = entry.pw_uid
        gid = entry.pw_gid
    if group:
        gid = grp.getgrnam(group).gr_gid
    os.chown(path, uid, gid)
def shutil_fchown(fileno, user, group):
if | |
from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
from future.utils import raise_
import unittest
import copy
import os
import numpy as num
from anuga.coordinate_transforms.geo_reference import Geo_reference
from anuga.geometry.polygon import is_inside_polygon
from anuga.abstract_2d_finite_volumes.util import file_function
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a
from anuga.config import g
from anuga.shallow_water.boundaries import Reflective_boundary, \
Field_boundary, Transmissive_momentum_set_stage_boundary, \
Transmissive_stage_zero_momentum_boundary
from anuga.abstract_2d_finite_volumes.generic_boundary_conditions\
import Transmissive_boundary, Dirichlet_boundary, \
Time_boundary, File_boundary, AWI_boundary
from anuga.file.sww import get_mesh_and_quantities_from_file
from anuga.shallow_water.shallow_water_domain import Domain
from anuga.abstract_2d_finite_volumes.mesh_factory \
import rectangular_cross, rectangular
from anuga.shallow_water.sww_interrogate import get_maximum_inundation_elevation, \
get_maximum_inundation_location, get_maximum_inundation_data, \
get_flow_through_cross_section, get_energy_through_cross_section
class Test_sww_Interrogate(unittest.TestCase):
    def setUp(self):
        # No shared fixtures; each test builds its own domain and sww file.
        pass
def tearDown(self):
for file in ['flowtest.sww', 'flowtest_uniquely.sww', 'runup_test_2.sww']:
try:
os.remove(file)
except:
pass
    def test_get_maximum_inundation_de0(self):
        """Test that sww information can be converted correctly to maximum
        runup elevation and location (without and with georeferencing)
        This test creates a slope and a runup which is maximal (~11m) at around 10s
        and levels out to the boundary condition (1m) at about 30s.
        NOTE(review): the prose above appears to predate the current
        expected values - the asserted maximal runup below is ~3.33.
        """
        import time, os
        from anuga.file.netcdf import NetCDFFile
        verbose = False
        #Setup
        #from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular
        # Create basic mesh (100m x 50m, 20x5 cells)
        points, vertices, boundary = rectangular(20, 5, 100, 50)
        # Create shallow water domain
        domain = Domain(points, vertices, boundary)
        domain.set_flow_algorithm('DE0')
        domain.set_low_froude(0)
        domain.set_minimum_storable_height(0.01)
        filename = 'runup_test_3'
        domain.set_name(filename)
        swwfile = domain.get_name() + '.sww'
        domain.set_datadir('.')
        domain.format = 'sww'
        domain.smooth = True
        # FIXME (Ole): Backwards compatibility
        # Look at sww file and see what happens when
        # domain.tight_slope_limiters = 1
        domain.tight_slope_limiters = 0
        domain.use_centroid_velocities = 0 # Backwards compatibility (7/5/8)
        Br = Reflective_boundary(domain)
        Bd = Dirichlet_boundary([1.0,0,0])
        #---------- First run without geo referencing
        domain.set_quantity('elevation', lambda x,y: -0.2*x + 14) # Slope
        domain.set_quantity('stage', -6)
        domain.set_boundary( {'left': Br, 'right': Bd, 'top': Br, 'bottom': Br})
        for t in domain.evolve(yieldstep=1, finaltime = 50):
            pass
        # Check maximal runup
        runup, location, max_time = get_maximum_inundation_data(swwfile, return_time=True)
        if verbose:
            print('Runup, location', runup, location, max_time)
        assert num.allclose(runup, 3.33333325386)
        assert num.allclose(location, [53.333332, 43.333332])
        assert num.allclose(max_time, 10.0)
        # Check runup in restricted time interval
        runup, location, max_time = get_maximum_inundation_data(swwfile, time_interval=[0,9], return_time=True)
        if verbose:
            print('Runup, location:',runup, location, max_time)
        assert num.allclose(runup, 2.66666674614)
        assert num.allclose(location, [56.666668, 16.666666])
        assert num.allclose(max_time, 9.0)
        # Check final runup
        runup, location = get_maximum_inundation_data(swwfile, time_interval=[45,50])
        if verbose:
            print('Runup, location:',runup, location, max_time)
        assert num.allclose(runup, 3.33333325386)
        assert num.allclose(location, [53.333332, 33.333332])
        #assert num.allclose(max_time, 45.0)
        # Check runup restricted to a polygon
        p = [[50,1], [99,1], [99,40], [50,40]]
        runup, location = get_maximum_inundation_data(swwfile, polygon=p)
        #runup = get_maximum_inundation_elevation(swwfile, polygon=p)
        #location = get_maximum_inundation_location(swwfile, polygon=p)
        #print runup, location, max_time
        assert num.allclose(runup, 3.33333325386)
        assert num.allclose(location, [53.333332, 33.333332])
        #assert num.allclose(max_time, 11.0)
        # Check that minimum_storable_height works
        fid = NetCDFFile(swwfile, netcdf_mode_r) # Open existing file
        stage = fid.variables['stage_c'][:]
        z = fid.variables['elevation_c'][:]
        xmomentum = fid.variables['xmomentum_c'][:]
        ymomentum = fid.variables['ymomentum_c'][:]
        for i in range(stage.shape[0]):
            h = stage[i]-z # depth vector at time step i
            # Check every node location
            for j in range(stage.shape[1]):
                # Depth being either exactly zero implies
                # momentum being zero.
                # Or else depth must be greater than or equal to
                # the minimal storable height
                if h[j] == 0.0:
                    assert xmomentum[i,j] == 0.0
                    assert ymomentum[i,j] == 0.0
                else:
                    assert h[j] >= 0.0
        fid.close()
        # Cleanup
        os.remove(swwfile)
        #------------- Now the same with georeferencing
        domain.time=0.0
        E = 308500
        N = 6189000
        #E = N = 0
        domain.geo_reference = Geo_reference(56, E, N)
        domain.set_quantity('elevation', lambda x,y: -0.2*x + 14) # Slope
        domain.set_quantity('stage', -6)
        domain.set_boundary( {'left': Br, 'right': Bd, 'top': Br, 'bottom': Br})
        for t in domain.evolve(yieldstep=1, finaltime = 50):
            pass
        # Check maximal runup (locations are now offset by the georeference)
        runup, location = get_maximum_inundation_data(swwfile)
        #print 'Runup, location', runup, location, max_time
        assert num.allclose(runup, 3.33333325386)
        assert num.allclose(location, [53.333332+E, 43.333332+N])
        #assert num.allclose(max_time, 10.0)
        # Check runup in restricted time interval
        runup, location = get_maximum_inundation_data(swwfile, time_interval=[0,9])
        #print 'Runup, location:',runup, location, max_time
        assert num.allclose(runup, 2.66666674614)
        assert num.allclose(location, [56.666668+E, 16.666666+N])
        #assert num.allclose(max_time, 9.0)
        # Check final runup
        runup, location = get_maximum_inundation_data(swwfile, time_interval=[45,50])
        #print 'Runup, location:',runup, location, max_time
        assert num.allclose(runup, 3.33333325386)
        assert num.allclose(location, [53.333332+E, 33.333332+N])
        #assert num.allclose(max_time, 45.0)
        # Check runup restricted to a polygon
        p = num.array([[50,1], [99,1], [99,40], [50,40]], num.int) + num.array([E, N], num.int)
        runup, location = get_maximum_inundation_data(swwfile, polygon=p)
        #print runup, location, max_time
        assert num.allclose(runup, 3.33333325386)
        assert num.allclose(location, [53.333332+E, 33.333332+N])
        #assert num.allclose(max_time, 11.0)
        # Cleanup
        os.remove(swwfile)
    def test_get_maximum_inundation_1_5(self):
        """Test that sww information can be converted correctly to maximum
        runup elevation and location (without and with georeferencing)
        This test creates a slope and a runup which is maximal (~11m) at around 10s
        and levels out to the boundary condition (1m) at about 30s.
        NOTE(review): the expected values appear to be regression figures
        recorded for the '1_5' flow algorithm (with alternates for older
        limiters) - confirm against the anuga history before changing.
        """
        import time, os
        from anuga.file.netcdf import NetCDFFile
        #Setup
        #from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular
        # Create basic mesh (100m x 50m, 20x5 cells)
        points, vertices, boundary = rectangular(20, 5, 100, 50)
        # Create shallow water domain
        domain = Domain(points, vertices, boundary)
        domain.set_flow_algorithm('1_5')
        domain.default_order = 2
        domain.set_minimum_storable_height(0.01)
        filename = 'runup_test_3'
        domain.set_name(filename)
        swwfile = domain.get_name() + '.sww'
        domain.set_datadir('.')
        domain.format = 'sww'
        domain.smooth = True
        # FIXME (Ole): Backwards compatibility
        # Look at sww file and see what happens when
        # domain.tight_slope_limiters = 1
        domain.tight_slope_limiters = 0
        domain.use_centroid_velocities = 0 # Backwards compatibility (7/5/8)
        Br = Reflective_boundary(domain)
        Bd = Dirichlet_boundary([1.0,0,0])
        #---------- First run without geo referencing
        domain.set_quantity('elevation', lambda x,y: -0.2*x + 14) # Slope
        domain.set_quantity('stage', -6)
        domain.set_boundary( {'left': Br, 'right': Bd, 'top': Br, 'bottom': Br})
        for t in domain.evolve(yieldstep=1, finaltime = 50):
            pass
        # Check maximal runup
        runup = get_maximum_inundation_elevation(swwfile)
        location = get_maximum_inundation_location(swwfile)
        #print 'Runup, location', runup, location
        assert num.allclose(runup, 6.33333333) or \
               num.allclose(runup, 6) or \
               num.allclose(runup, 12) # old limiters
        assert num.allclose(location[0], 38.33333333) or \
               num.allclose(location[0], 40.0) or \
               num.allclose(location[0], 10)
        # Check final runup
        runup = get_maximum_inundation_elevation(swwfile, time_interval=[45,50])
        location = get_maximum_inundation_location(swwfile, time_interval=[45,50])
        #print 'Runup, location:',runup, location
        assert num.allclose(runup, 1.666666666)
        assert num.allclose(location[0], 61.666666)
        # Check runup restricted to a polygon
        p = [[50,1], [99,1], [99,49], [50,49]]
        runup = get_maximum_inundation_elevation(swwfile, polygon=p)
        location = get_maximum_inundation_location(swwfile, polygon=p)
        #print runup, location
        assert num.allclose(runup, 3.6666666)
        assert num.allclose(location[0], 51.6666666)
        # Check that minimum_storable_height works
        fid = NetCDFFile(swwfile, netcdf_mode_r) # Open existing file
        stage = fid.variables['stage'][:]
        z = fid.variables['elevation'][:]
        xmomentum = fid.variables['xmomentum'][:]
        ymomentum = fid.variables['ymomentum'][:]
        for i in range(stage.shape[0]):
            h = stage[i]-z # depth vector at time step i
            # Check every node location
            for j in range(stage.shape[1]):
                # Depth being either exactly zero implies
                # momentum being zero.
                # Or else depth must be greater than or equal to
                # the minimal storable height
                if h[j] == 0.0:
                    assert xmomentum[i,j] == 0.0
                    assert ymomentum[i,j] == 0.0
                else:
                    assert h[j] >= domain.minimum_storable_height
        fid.close()
        # Cleanup
        os.remove(swwfile)
        #------------- Now the same with georeferencing
        domain.time=0.0
        E = 308500
        N = 6189000
        #E = N = 0
        domain.geo_reference = Geo_reference(56, E, N)
        domain.set_quantity('elevation', lambda x,y: -0.2*x + 14) # Slope
        domain.set_quantity('stage', -6)
        domain.set_boundary( {'left': Br, 'right': Bd, 'top': Br, 'bottom': Br})
        for t in domain.evolve(yieldstep=1, finaltime = 50):
            pass
        # Check maximal runup (locations are now offset by the georeference)
        runup = get_maximum_inundation_elevation(swwfile)
        location = get_maximum_inundation_location(swwfile)
        #print runup, location
        assert num.allclose(runup,6.33333333) or \
               num.allclose(runup, 6) or \
               num.allclose(runup, 12) # old limiters
        assert num.allclose(location[0], 38.34+E) or \
               num.allclose(location[0], 40+E) or \
               num.allclose(location[0], 10+E)
        # Check final runup
        runup = get_maximum_inundation_elevation(swwfile, time_interval=[45,50])
        location = get_maximum_inundation_location(swwfile, time_interval=[45,50])
        #print runup, location
        #1.66666666667 [308561.66, 6189006.5]
        assert num.allclose(runup, 1.666666666)
        assert num.allclose(location[0], 61.66+E)
        # Check runup restricted to a polygon
        p = num.array([[50,1], [99,1], [99,49], [50,49]], num.int) + num.array([E, N], num.int) #array default#
        runup = get_maximum_inundation_elevation(swwfile, polygon=p)
        location = get_maximum_inundation_location(swwfile, polygon=p)
        #print runup, location
        assert num.allclose(runup, 3.66666666)
        assert num.allclose(location[0], 51.66+E)
        # Cleanup
        os.remove(swwfile)
def test_get_flow_through_cross_section(self):
"""test_get_flow_through_cross_section(self):
Test that the total flow through a cross section can be
correctly obtained from an sww file.
This test creates a flat bed with a known flow through it and tests
that the function correctly returns the expected flow.
The specifics are
u = 2 m/s
h = 1 m
w = 3 m (width of channel)
q = u*h*w = 6 m^3/s
#---------- First run | |
# @title Zurich Instruments HDAWG instrument driver
# @author <NAME>
# @contrib <NAME>, <NAME>, <NAME>
# @date 2020-09-14
# @version v0.835.1
# @other The author of this driver takes no responsibility for
# any and all bugs and frustration caused by Labber and/or
# affiliated Zurich Instruments hardware and software.
# Correspondence should be with the author.
#
#######################################################
""" Labber driver for the Zurich Instruments HDAWG. """
#######################################################
# Python rudimentaries
from __future__ import print_function
from BaseDriver import LabberDriver, Error, IdError
from datetime import datetime
import glob
import inspect
import numpy as np
import os
import psutil
import re
import shutil
import textwrap
import time
# Zurich Instruments functionality
import zhinst.ziPython as ziPython
import zhinst.utils as ziUtils
# Main Labber driver class
class Driver(LabberDriver):
'''This class implements a Labber driver.
In order to establish a connection to the HDAWG instrument, please select
the Zurich Instruments HDAWG driver in the 'Add instruments' dialogue.
Select USB or TCPIP in the interface list, followed by providing the
    device serial. The serial is printed on the device in the form 'DEV$$$$'.
    Should such a serial not be provided, the driver allows for auto-connecting
to an instrument should the phrase <autodetect> or <autoconnect> be
provided. This is not recommended in cases where there are multiple
Zurich Instruments devices connected to the Instrument server PC.
'''
    def performOpen(self, options={}):
        '''Perform the action of opening the instrument.

        Establishes the connection/API session, seeds the local waveform
        cache, and optionally flashes the instrument LEDs.
        NOTE(review): the mutable default ``options={}`` follows the Labber
        LabberDriver signature and is not mutated here.
        '''
        # Instantiate the instrument connection, the ZI API, AWG module,
        # and more.
        self.instantiateInstrumentConnection()
        # Create an initial configuration of stored waveforms.
        # These will constitute a local set used to track what waveforms are
        # used / changed etc.
        self.defaultWaveformConfiguration()
        # If configured to, signal LEDs after completing startup
        if self.getValue('Signal LEDs on startup'):
            self.daq.setInt('/' + self.dev + '/system/identify', 1)
def performClose(self, bError=False, options={}):
'''Perform the close instrument connection operation.
'''
# It has been chosen not to include a true power-off at this stage
# (/system/shutdown, 1) as enabling and disabling the instrument in
# such a recurring fashion would cause a lot of delay.
# A try-exception is done since the API session might not have
# been instantiated.
try:
self.daq.setInt('/'+str(self.dev)+'/awgs/0/enable', 0)
# If configured to, turn off all outputs when closing the device
if self.getValue('Disable outputs on close'):
for i in range(0, self.n_ch):
self.daq.setInt('/'+str(self.dev)+'/sigouts/'+str(i)+'/direct',0)
self.setValue('Channel '+str(i+1)+' - Bypass DAC to port', False)
self.daq.setInt('/'+str(self.dev)+'/sigouts/'+str(i)+'/on',0)
self.setValue('Channel '+str(i+1)+' - Output', False)
self.daq.setInt('/'+str(self.dev)+'/sigouts/'+str(i)+'/filter', 0)
self.setValue('Channel '+str(i+1)+' - Filter', False)
# If configured to, signal LEDs when disconnecting
if self.getValue('Signal LEDs on close'):
self.daq.setInt('/' + self.dev + '/system/identify', 1)
except:
# TODO So ZIAPINotFoundException is generated. How will we define a suitable exception to be thrown at this instance?
# TODO Likely using some ziUtils.ZIAPINotFoundException or similar. Ask ZI.
self.log( \
"Could not close the device; " + \
"there is likely no connection to the ZI API.",level=30)
def performSetValue(self, quant, value, sweepRate=0.0, options={}):
'''Perform the Set Value instrument operation.
Variables are subject to change between one experiment and another.
To my knowledge, there is no way of registering whether a
variable changed in the measurement editor. Thus, all waveforms must
be seen as subject to change. The solution is to keep a record
of all waveforms locally, fetch each and every waveform at the
start of a new measurement, and then compare them for differences.
This in turn is somewhat wasteful, but there is no other algorithmic
way of guaranteeing that every waveform will be according to the
user specification in the Measurement editor.
This function should return the actual value set by the instrument.
'''
# isFirstCall is true each and every time the 'program pointer' of
# the measurement setup is pointing at the top.
if self.isFirstCall(options):
pass
# Is performSetValue attempting to execute a standard ZI API call?
# (or a command based on the string / other datatype?)
if '/%s/' in quant.set_cmd:
if 'double /' in quant.set_cmd:
self.daq.setDouble( \
quant.set_cmd.replace('double ','') % self.dev, \
value if not (quant.datatype > 1) \
else float(quant.getCmdStringFromValue(value)) \
)
elif 'int /' in quant.set_cmd:
self.daq.setInt( \
quant.set_cmd.replace('int ','') % self.dev, \
value if not (quant.datatype > 1) \
else int(quant.getCmdStringFromValue(value)) \
)
elif 'boolean /' in quant.set_cmd:
if quant.datatype == 1:
self.daq.setInt( \
quant.set_cmd.replace('boolean ','') % self.dev, \
(1 if value else 0) \
)
elif quant.datatype == 2:
# Throw suboptimal warning
self.log( \
"Note: setting booleans using combinational " + \
"lists is very suboptimal due to ambiguity in " + \
"the APIs.\nConsider changing the instruction " + \
"set_cmd type to integer, using the cmd_defs " + \
"1 and 0 for \'True\' and \'False\' " + \
"respectively ("+quant.name+")." , level=30)
fetch_bool = quant.getCmdStringFromValue(value).lower()
if (fetch_bool == 'false') or (fetch_bool == '0'):
# Do False-case
self.daq.setInt( \
quant.set_cmd.replace(\
'boolean ','') % self.dev \
, 0 \
)
elif (fetch_bool == 'true') or (fetch_bool == '1'):
# Do True-case
self.daq.setInt( \
quant.set_cmd.replace(\
'boolean ','') % self.dev \
, 1 \
)
else:
raise ValueError( \
"Unrecognised boolean value for quantity " + \
"name \'"+quant.name+"\' (received \'" + \
str(value)+"\').")
else:
raise ValueError( \
"Bad datatype for quantity \'" + quant.name + \
"\,' expected boolean or combo (of booleans).")
elif 'other /' in quant.set_cmd:
# Due to the nature of the 'other' datatype, this driver
# constructs a 'Python switch-case' where every entry spells
# out a prepared version of the quant.name string.
def Minimise_inter_device_asynchronous_jitter(self, value):
    '''Handle the 'Minimise inter-device asynchronous jitter' quantity.

    Flags the sequencer program for regeneration and adjusts the AWG
    channel grouping according to oscillator usage.
    '''
    # Any change here invalidates the current sequencer program.
    self.sequencer_demands_updating = True
    # Stage 3 of the sequencer generation
    # ('SYNCHRONISE_TO_BEATING_FREQUENCY') must be redone.
    self.update_local_awg_program[3] = True
    # The sequencer generator reads the
    # 'Minimise inter-device asynchronous jitter' flag directly, but
    # `value` is only committed *after* isFinalCall has run; force-store
    # it now so the generator sees the fresh value.
    self.setValue(
        'Minimise inter-device asynchronous jitter',
        value
    )
    # The internal trigger period may need re-validation.
    self.perform_repetition_check = True
    # Enabling this feature involves oscillators, which constrains the
    # channel grouping mode.
    uses_oscillators = value or \
        self.getValue('Use oscillator-based repetition delay')
    if uses_oscillators:
        # A channel grouping of 4x2 is required.
        self.daq.setInt(
            '/'+self.dev+'/system/awg/channelgrouping', 0)
    else:
        # A channel grouping of 1x8 is sufficient; only switch when the
        # device is not already configured that way.
        if self.daq.getInt(
                '/'+self.dev+'/system/awg/channelgrouping') != 2:
            self.daq.setInt(
                '/'+self.dev+'/system/awg/channelgrouping', 2)
def Beat_frequency(self, value):
    '''Handle the 'Beat frequency' quantity.

    Tries to program oscillator 1 to the sequencer beat frequency
    divided by an even power of two (2, 4, ..., 32), accepting the
    first divisor whose result the device reports back unchanged.
    Restores the previous frequency and raises ArithmeticError when
    no divisor works.
    '''
    beat_frequency = abs(value)
    # Remember the current oscillator frequency so it can be restored
    # on failure.
    previous_osc_freq = \
        self.daq.getDouble('/'+str(self.dev)+'/oscs/0/freq')
    divisor = 2
    accepted = False
    while divisor <= 32:
        candidate = beat_frequency / divisor
        # Only frequencies below the hardware limit are attempted.
        if candidate < 299000000:
            self.daq.setDouble(
                '/'+str(self.dev)+'/oscs/0/freq',
                candidate)
            self.daq.sync()
            # Read back: the device may silently reject/clip the value.
            if self.daq.getDouble(
                    '/'+str(self.dev)+'/oscs/0/freq') == candidate:
                # All is fine. Update the quantity and stop searching.
                self.setValue('Beat frequency', candidate)
                accepted = True
                break
        divisor *= 2
    if not accepted:
        # No divisor was accepted: restore the old frequency and bail.
        self.daq.setDouble(
            '/'+str(self.dev)+'/oscs/0/freq',
            previous_osc_freq )
        raise ArithmeticError( \
            "Cannot set oscillator 1 to an even dividend " + \
            "of "+str(beat_frequency)+" Sa/s)" )
    # TODO This may be solvable by moving commands around in the 'other' datatype category right here.
    self.log('WARNING: Changing the beat frequency was fine and all but we must now also change the internal repetition rate if that was set to match the beat frequency.',level=30)
def Internal_trigger_period(self, value):
'''TODO missing text
'''
# Is the user changing the internal trigger period,
# while the system is set to use an oscillator as the
# internal repetition delay source?
# Is the system set to use | |
self.assertRaises(AssertionError, lambda: MenuUtils.generic_menu(columns=2)) # none rows
# Assert append more widgets than number of rows*columns
column_menu = MenuUtils.generic_menu(columns=2, rows=4, enabled=False)
for _ in range(8):
column_menu.add.button('test', pygame_menu.events.BACK)
self.assertRaises(RuntimeError, lambda: column_menu.mainloop(surface, bgfun=dummy_function, disable_loop=True))
column_menu._move_selected_left_right(-1)
column_menu._move_selected_left_right(1)
column_menu.disable()
self.assertRaises(RuntimeError, lambda: column_menu.draw(surface))
column_menu.enable()
column_menu.draw(surface)
column_menu.disable()
self.assertEqual(len(column_menu._widgets), 8)
self.assertRaises(RuntimeError, lambda: column_menu.draw(surface))
self.assertRaises(pygame_menu.menu._MenuWidgetOverflow,
lambda: column_menu.add.button('test', pygame_menu.events.BACK))
column_menu._update_widget_position()
self.assertEqual(len(column_menu._widgets), 8) # Widget not added
# Test max width
self.assertRaises(AssertionError,
lambda: MenuUtils.generic_menu(columns=3, rows=4, column_max_width=[500, 500, 500, 500]))
column_menu = MenuUtils.generic_menu(columns=3, rows=4, column_max_width=0) # max menu width
self.assertRaises(AssertionError, lambda: MenuUtils.generic_menu(columns=3, rows=4, column_max_width=-1))
column_menu = MenuUtils.generic_menu(columns=3, rows=4, column_max_width=500) # max menu width
self.assertEqual(len(column_menu._column_max_width), 3)
for i in range(3):
self.assertEqual(column_menu._column_max_width[i], 500)
# Test min width
self.assertRaises(AssertionError,
lambda: MenuUtils.generic_menu(columns=3, rows=4, column_min_width=[500, 500, 500, 500]))
column_menu = MenuUtils.generic_menu(columns=3, rows=4, column_min_width=100) # max menu width
self.assertRaises(AssertionError, lambda: MenuUtils.generic_menu(columns=3, rows=4, column_min_width=-100))
column_menu = MenuUtils.generic_menu(columns=3, rows=4, column_min_width=500) # max menu width
self.assertEqual(len(column_menu._column_min_width), 3)
for i in range(3):
self.assertEqual(column_menu._column_min_width[i], 500)
self.assertRaises(AssertionError,
lambda: MenuUtils.generic_menu(columns=3, rows=4, column_min_width=None))
# Test max width should be greater than min width
self.assertRaises(AssertionError,
lambda: MenuUtils.generic_menu(columns=2, rows=4, column_min_width=[500, 500],
column_max_width=[100, 500]))
self.assertRaises(AssertionError,
lambda: MenuUtils.generic_menu(columns=2, rows=4, column_min_width=[500, 500],
column_max_width=[500, 100]))
self.assertRaises(AssertionError,
lambda: MenuUtils.generic_menu(rows=4, column_min_width=10, column_max_width=1))
self.assertRaises(AssertionError, lambda: MenuUtils.generic_menu(columns=-1, rows=4, column_max_width=500))
self.assertRaises(AssertionError, lambda: MenuUtils.generic_menu(rows=0, column_max_width=500))
MenuUtils.generic_menu(column_max_width=[500])
# Test different rows
self.assertRaises(AssertionError, lambda: MenuUtils.generic_menu(columns=2, rows=[3, 3, 3]))
self.assertRaises(AssertionError, lambda: MenuUtils.generic_menu(columns=2, rows=[3, -3]))
self.assertRaises(AssertionError, lambda: MenuUtils.generic_menu(columns=2, rows=[3]))
# Create widget positioning
width = 600
menu = MenuUtils.generic_menu(columns=3, rows=2, width=width)
btn1 = menu.add.button('btn')
btn2 = menu.add.button('btn')
btn3 = menu.add.button('btn')
btn4 = menu.add.button('btn')
btn5 = menu.add.button('btn')
btn6 = menu.add.button('btn')
self.assertEqual(btn1.get_col_row_index(), (0, 0, 0))
self.assertEqual(btn2.get_col_row_index(), (0, 1, 1))
self.assertEqual(btn3.get_col_row_index(), (1, 0, 2))
self.assertEqual(btn4.get_col_row_index(), (1, 1, 3))
self.assertEqual(btn5.get_col_row_index(), (2, 0, 4))
self.assertEqual(btn6.get_col_row_index(), (2, 1, 5))
# Check size
self.assertEqual(len(menu._column_widths), 3)
for col_w in menu._column_widths:
self.assertEqual(col_w, width / 3)
# If removing widget, all column row should change
menu.remove_widget(btn1)
self.assertEqual(btn1.get_col_row_index(), (-1, -1, -1))
self.assertEqual(btn2.get_col_row_index(), (0, 0, 0))
self.assertEqual(btn3.get_col_row_index(), (0, 1, 1))
self.assertEqual(btn4.get_col_row_index(), (1, 0, 2))
self.assertEqual(btn5.get_col_row_index(), (1, 1, 3))
self.assertEqual(btn6.get_col_row_index(), (2, 0, 4))
# Hide widget, the column layout should change
btn2.hide()
menu.render()
self.assertEqual(btn2.get_col_row_index(), (-1, -1, 0))
self.assertEqual(btn3.get_col_row_index(), (0, 0, 1))
self.assertEqual(btn4.get_col_row_index(), (0, 1, 2))
self.assertEqual(btn5.get_col_row_index(), (1, 0, 3))
self.assertEqual(btn6.get_col_row_index(), (1, 1, 4))
# Show again
btn2.show()
menu.render()
self.assertEqual(btn1.get_col_row_index(), (-1, -1, -1))
self.assertEqual(btn2.get_col_row_index(), (0, 0, 0))
self.assertEqual(btn3.get_col_row_index(), (0, 1, 1))
self.assertEqual(btn4.get_col_row_index(), (1, 0, 2))
self.assertEqual(btn5.get_col_row_index(), (1, 1, 3))
self.assertEqual(btn6.get_col_row_index(), (2, 0, 4))
# Remove button
menu.remove_widget(btn2)
self.assertEqual(btn3.get_col_row_index(), (0, 0, 0))
self.assertEqual(btn4.get_col_row_index(), (0, 1, 1))
self.assertEqual(btn5.get_col_row_index(), (1, 0, 2))
self.assertEqual(btn6.get_col_row_index(), (1, 1, 3))
self.assertEqual(len(menu._column_widths), 2)
for col_w in menu._column_widths:
self.assertEqual(col_w, width / 2) # 600/2
# Add a new button
btn7 = menu.add.button('btn')
# Layout:
# btn3 | btn5 | btn7
# btn4 | btn6 |
# Select second button
self.assertRaises(ValueError, lambda: menu.select_widget(btn2))
menu.select_widget(btn4)
self.assertTrue(btn4.is_selected())
# Move to right, btn6 should be selected
menu._move_selected_left_right(1)
self.assertFalse(btn4.is_selected())
self.assertTrue(btn6.is_selected())
self.assertFalse(btn7.is_selected())
# Move right, as third column only has 1 widget, that should be selected
menu._move_selected_left_right(1)
self.assertFalse(btn6.is_selected())
self.assertTrue(btn7.is_selected())
# Move right, moves from 3 to 1 column, then button 3 should be selected
menu._move_selected_left_right(1)
self.assertFalse(btn7.is_selected())
self.assertTrue(btn3.is_selected())
# Set btn4 as floating, then the layout should be
# btn3,4 | btn6
# btn5 | btn7
btn4.set_float()
menu.render()
self.assertEqual(btn3.get_col_row_index(), (0, 0, 0))
self.assertEqual(btn4.get_col_row_index(), (0, 0, 1))
self.assertEqual(btn5.get_col_row_index(), (0, 1, 2))
self.assertEqual(btn6.get_col_row_index(), (1, 0, 3))
self.assertEqual(btn7.get_col_row_index(), (1, 1, 4))
# Test sizing
# btn3 | btn6
# btn4,5 | btn7
btn4.set_float(False)
btn5.set_float()
menu.render()
self.assertEqual(btn3.get_width(apply_selection=True), 63)
for col_w in menu._column_widths:
self.assertEqual(col_w, width / 2)
# Scale 4, this should not change menu column widths
btn4.scale(5, 5)
menu.render()
for col_w in menu._column_widths:
self.assertEqual(col_w, width / 2)
# Scale 3, this should change menu column widths
btn3.scale(5, 1)
btn3_sz = btn3.get_width(apply_selection=True)
btn6_sz = btn6.get_width(apply_selection=True)
menu.render()
col_width1 = (width * btn3_sz) / (btn3_sz + btn6_sz)
col_width2 = width - col_width1
self.assertAlmostEqual(menu._column_widths[0], math.ceil(col_width1))
self.assertAlmostEqual(menu._column_widths[1], math.ceil(col_width2))
# Test different rows per column
menu = MenuUtils.generic_menu(columns=3, rows=[2, 1, 2], width=width, column_max_width=[300, None, 100])
btn1 = menu.add.button('btn')
btn2 = menu.add.button('btn')
btn3 = menu.add.button('btn')
btn4 = menu.add.button('btn')
btn5 = menu.add.button('btn')
self.assertEqual(btn1.get_col_row_index(), (0, 0, 0))
self.assertEqual(btn2.get_col_row_index(), (0, 1, 1))
self.assertEqual(btn3.get_col_row_index(), (1, 0, 2))
self.assertEqual(btn4.get_col_row_index(), (2, 0, 3))
self.assertEqual(btn5.get_col_row_index(), (2, 1, 4))
btn1.scale(10, 1)
self.assertRaises(pygame_menu.menu._MenuSizingException, lambda: menu.render())
btn1.resize(300, 10)
menu.render()
self.assertEqual(menu._column_widths, [300, 200, 100])
self.assertEqual(menu._column_pos_x, [150, 400, 550])
# btn1 | btn3 | btn4
# btn2 | | btn5
# Change menu max column width, this should
# fulfill third column to its maximum possible less than 300
# col2 should keep its current width
menu._column_max_width = [300, None, 300]
menu.render()
self.assertEqual(menu._column_widths, [300, 63, 238])
self.assertEqual(menu._column_pos_x, [150, 331, 482])
# Chance maximum width of third column and enlarge button 4, then
# middle column 3 will take 600-300-100 = 200
menu._column_max_width = [300, None, 100]
btn5.resize(100, 10)
menu.render()
self.assertEqual(menu._column_widths, [300, 200, 100])
# Test minimum width
menu = MenuUtils.generic_menu(columns=3, rows=[2, 1, 2], width=width,
column_max_width=[200, None, 150], column_min_width=[150, 150, 150])
# btn1 | btn3 | btn4
# btn2 | | btn5
btn1 = menu.add.button('btn')
menu.add.button('btn')
menu.add.button('btn')
menu.add.button('btn')
menu.add.button('btn')
btn1.resize(200, 10)
menu.render() # This should scale 2 column
self.assertEqual(menu._column_widths, [200, 250, 150])
menu = MenuUtils.generic_menu(columns=3, rows=[2, 1, 2], width=width,
column_max_width=[200, 150, 150], column_min_width=[150, 150, 150])
btn1 = menu.add.button('btn')
btn2 = menu.add.button('btn')
btn3 = menu.add.button('btn')
menu.add.button('btn')
menu.add.button('btn')
btn1.resize(200, 10)
btn2.resize(150, 1)
btn3.resize(150, 1)
menu.render()
self.assertEqual(menu._column_widths, [200, 150, 150])
def test_screen_dimension(self) -> None:
    """
    Test screen dim.
    """
    # A non-tuple, negative components, and a screen smaller than the
    # 600x400 menu must all be rejected with AssertionError.
    for invalid_dimension in (1, (-1, 1), (1, -1), (1, 1)):
        self.assertRaises(
            AssertionError,
            lambda d=invalid_dimension: MenuUtils.generic_menu(title='mainmenu', screen_dimension=d))
    # A sufficiently large screen is accepted and reported back verbatim
    menu = MenuUtils.generic_menu(title='mainmenu', screen_dimension=(888, 999))
    self.assertEqual(menu.get_window_size(), (888, 999))
def test_touchscreen(self) -> None:
    """
    Test menu touchscreen behaviour.
    """
    # Motion selection cannot be enabled without the touchscreen itself
    self.assertRaises(AssertionError, lambda: MenuUtils.generic_menu(title='mainmenu', touchscreen=False,
                                                                    touchscreen_motion_selection=True))
    menu = MenuUtils.generic_menu(title='mainmenu', touchscreen=True, enabled=False)
    # A disabled menu refuses to run its mainloop
    self.assertRaises(RuntimeError, lambda: menu.mainloop(surface, bgfun=dummy_function))

    # Flag flipped by the button callback below
    callback_fired = [False]

    def _on_press() -> str:
        callback_fired[0] = True
        return 'the value'

    # Add a widget to receive the touch
    button = menu.add.button('button', _on_press)

    # Touch events are not exercised on OSX
    if SYS_PLATFORM_OSX:
        return
    if hasattr(pygame, 'FINGERUP'):
        press_at = button.get_rect(to_real_position=True).center
        menu.enable()
        # A touch event that is not normalized must be ignored
        menu.update(
            PygameEventUtils.touch_click(press_at[0], press_at[1], normalize=False))
        self.assertFalse(callback_fired[0])
        # A normalized touch over the button triggers its callback
        menu.update(PygameEventUtils.touch_click(press_at[0], press_at[1], menu=menu))
        self.assertTrue(callback_fired[0])
        callback_fired[0] = False
        self.assertEqual(menu.get_selected_widget().get_id(), button.get_id())
        selected = menu.get_selected_widget()
        self.assertTrue(selected.get_selected_time() >= 0)
def test_remove_widget(self) -> None:
    """
    Test widget remove.
    """
    menu = MenuUtils.generic_menu()
    frame = menu.add.frame_h(100, 200)
    menu._update_frames.append(frame)
    button = menu.add.button('epic')
    menu._update_widgets.append(button)
    # Removing a frame must also drop it from the update-frames list
    menu.remove_widget(frame)
    self.assertNotIn(frame, menu._update_frames)
    # Removing a widget must also drop it from the update-widgets list
    menu.remove_widget(button)
    self.assertNotIn(button, menu._update_widgets)
# noinspection SpellCheckingInspection
def test_reset_value(self) -> None:
    """
    Test menu reset value.
    """
    main_menu = MenuUtils.generic_menu(title='mainmenu')
    sub_menu = MenuUtils.generic_menu(title='other')
    color_widget = main_menu.add.color_input('title', default='ff0000', color_type='hex')
    text_widget = main_menu.add.text_input('title', default='epic')
    selector_widget = main_menu.add.selector('title', items=[('a', 1), ('b', 2)], default=1)
    sub_text_widget = sub_menu.add.text_input('titlesub', default='not epic')
    main_menu.add.label('mylabel')
    main_menu.add.button('submenu', sub_menu)
    # Move every widget away from its default value
    color_widget.set_value('aaaaaa')
    text_widget.set_value('changed')
    sub_text_widget.set_value('changed2')
    selector_widget.set_value(0)
    # A single widget can be reset on its own
    color_widget.reset_value()
    self.assertEqual(color_widget.get_value(as_string=True), '#ff0000')
    color_widget.set_value('aaaaaa')
    # Confirm the modified values are in effect
    self.assertEqual(color_widget.get_value(as_string=True), '#aaaaaa')
    self.assertEqual(text_widget.get_value(), 'changed')
    self.assertEqual(selector_widget.get_index(), 0)
    # A recursive menu reset restores defaults in submenus as well
    main_menu.reset_value(recursive=True)
    self.assertEqual(color_widget.get_value(as_string=True), '#ff0000')
    self.assertEqual(text_widget.get_value(), 'epic')
    self.assertEqual(sub_text_widget.get_value(), 'not epic')
    self.assertEqual(selector_widget.get_index(), 1)
def test_mainloop_kwargs(self) -> None:
    """
    Test menu mainloop kwargs.
    """
    # test[0]/test[1] record which of the two callbacks fired
    test = [False, False]

    def test_accept_menu(m: 'pygame_menu.Menu') -> None:
        """
        This method accept menu as argument.
        """
        assert isinstance(m, pygame_menu.Menu)
        test[0] = True

    def test_not_accept_menu() -> None:
        """
        This method does not accept menu as argument.
        """
        test[1] = True

    menu = MenuUtils.generic_menu()
    self.assertFalse(test[0])
    # mainloop passes the menu to bgfun when the callback accepts it...
    menu.mainloop(surface, test_accept_menu, disable_loop=True)
    self.assertTrue(test[0])
    self.assertFalse(test[1])
    # ...and calls it without arguments otherwise
    menu.mainloop(surface, test_not_accept_menu, disable_loop=True)
    self.assertTrue(test[0])
    self.assertTrue(test[1])

    # test wait for events
    test = [False, 0]

    def bgfun() -> None:
        """
        Function executed on each mainloop iteration for testing
        waiting for events.
        """
        test[0] = not test[0]
        test[1] += 1
        # Post an event so the wait_for_event loop wakes up each frame
        pygame.event.post(PygameEventUtils.joy_center(inlist=False))

    menu = MenuUtils.generic_menu()
    # Disabling on update ends the mainloop after one processed event
    menu.set_onupdate(menu.disable)
    menu.enable()
    menu.mainloop(surface, bgfun, wait_for_event=True)

    # Test mainloop for a number of frames
    test = [0]
    menu = MenuUtils.generic_menu()

    def bgfun() -> None:
        """
        Checks the number of frames.
        """
        test[0] += 1
        if test[0] == 20:
            # The menu's own loop counter must match our frame count
            self.assertEqual(test[0], menu._stats.loop)
            menu.disable()

    menu.mainloop(surface, bgfun)
# noinspection PyArgumentList
def test_invalid_args(self) -> None:
    """
    Test menu invalid args.
    """
    # An unknown keyword argument must be rejected by the constructor
    def _create_with_fake_kwarg() -> 'pygame_menu.Menu':
        return pygame_menu.Menu(height=100, width=100, title='nice', fake_option=True)

    self.assertRaises(TypeError, _create_with_fake_kwarg)
def test_set_title(self) -> None:
"""
Test menu title.
"""
menu = MenuUtils.generic_menu(title='menu')
theme = menu.get_theme()
menubar = menu.get_menubar()
self.assertEqual(menu.get_title(), 'menu')
self.assertEqual(menubar.get_title_offset()[0], theme.title_offset[0])
self.assertEqual(menubar.get_title_offset()[1], theme.title_offset[1])
menu.set_title('nice')
self.assertEqual(menu.get_title(), 'nice')
self.assertEqual(menubar.get_title_offset()[0], theme.title_offset[0])
self.assertEqual(menubar.get_title_offset()[1], theme.title_offset[1])
menu.set_title('nice', offset=(9, 10))
self.assertEqual(menu.get_title(), 'nice')
| |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import os
import numpy as np
from pytorch_transformers.modeling_bert import (
BertPreTrainedModel,
BertConfig,
BertModel,
)
from pytorch_transformers.tokenization_bert import BertTokenizer
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from tqdm import tqdm
from pytorch_transformers.optimization import AdamW, WarmupLinearSchedule
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
class BertForReranking(BertPreTrainedModel):
    r"""
    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            The second dimension of the input (`num_choices`) indicates the number of choices to score.
            To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:
            (a) For sequence pairs:
                ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
                ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
            (b) For single sequences:
                ``tokens: [CLS] the dog is hairy . [SEP]``
                ``token_type_ids: 0 0 0 0 0 0 0``
            Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
            See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
            :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Segment token indices to indicate first and second portions of the inputs.
            The second dimension of the input (`num_choices`) indicates the number of choices to score.
            Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
            corresponds to a `sentence B` token
            (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            The second dimension of the input (`num_choices`) indicates the number of choices to score.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above)
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss.
        **classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above).
            Classification scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForMultipleChoice.from_pretrained('bert-base-uncased')
        choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
        input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0)  # Batch size 1, 2 choices
        labels = torch.tensor(1).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, classification_scores = outputs[:2]
    """

    def __init__(self, config):
        super(BertForReranking, self).__init__(config)
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One score per candidate; candidates compete via the softmax
        # over `num_choices` applied in forward().
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.init_weights()

    def forward(
        self,
        input_ids,
        token_type_ids=None,
        attention_mask=None,
        labels=None,
        position_ids=None,
        head_mask=None,
        entity_mask=None,
    ):
        num_choices = input_ids.shape[1]
        # from batch_size x cands x tokens -> (batch_size x cands) x tokens
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_token_type_ids = (
            token_type_ids.view(-1, token_type_ids.size(-1))
            if token_type_ids is not None
            else None
        )
        flat_attention_mask = (
            attention_mask.view(-1, attention_mask.size(-1))
            if attention_mask is not None
            else None
        )
        flat_position_ids = (
            position_ids.view(-1, position_ids.size(-1))
            if position_ids is not None
            else None
        )
        outputs = self.bert(
            flat_input_ids,
            position_ids=flat_position_ids,
            token_type_ids=flat_token_type_ids,
            attention_mask=flat_attention_mask,
            head_mask=head_mask,
        )
        # outputs[1] is the pooled [CLS] representation
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)
        # FIX: entity_mask defaults to None but was previously required
        # (the subtraction below raised TypeError when it was omitted).
        # An all-ones mask leaves the logits untouched.
        if entity_mask is None:
            entity_mask = torch.ones_like(reshaped_logits)
        # Push masked-out (padded) candidates to a large negative logit
        # so they get ~zero probability under softmax.
        entity_mask = (1.0 - entity_mask) * -1000.0
        reshaped_logits = reshaped_logits + entity_mask
        outputs = (reshaped_logits,)

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
            outputs = (loss,) + outputs
        return outputs
class BertReranker:
def __init__(self, parameters):
    """Load the fine-tuned reranking model and its tokenizer onto the
    selected device (CUDA when available and not disabled)."""
    # Fall back to the base BERT model when no fine-tuned path is given.
    if "path_to_model" not in parameters:
        parameters["path_to_model"] = parameters["bert_model"]
    self.parameters = parameters
    use_cuda = torch.cuda.is_available() and not parameters["no_cuda"]
    self.device = torch.device("cuda" if use_cuda else "cpu")
    self.n_gpu = torch.cuda.device_count()
    # Load the fine-tuned model and the tokenizer used by it
    self.model = BertReranker.get_model(parameters)
    self.model.to(self.device)
    self.tokenizer = BertReranker.get_tokenizer(parameters)
    print("The reranking model is loaded")
def rerank(self, mentions, sentences):
    """Score each mention's candidate entities with the fine-tuned model
    and annotate every mention dict in-place.

    Adds 'predicted_candidate_idx' (argmax over candidate logits) and
    'prob_assigned_to_candidate' (its softmax probability) to each
    mention, and returns the (mutated) mentions list.
    """
    model = self.model
    tokenizer = self.tokenizer
    p = self.parameters
    device = self.device

    # Build the BERT input tensors for every mention/candidate pair.
    data, tensor_data = BertReranker._process_mentions_for_model(
        p["context_key"],
        mentions,
        tokenizer,
        p["max_seq_length"],
        p["top_k"],
        p["silent"],
        sentences=sentences,
    )
    # Sequential order so mention_ids stay aligned with `mentions`.
    sampler = SequentialSampler(tensor_data)
    dataloader = DataLoader(
        tensor_data, sampler=sampler, batch_size=p["evaluation_batch_size"]
    )

    softmax = torch.nn.Softmax(dim=1)
    for input_ids, input_mask, segment_ids, mention_ids, entity_mask in tqdm(
        dataloader, desc="Inferring"
    ):
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        # mention_ids index back into the original `mentions` list;
        # kept on CPU as a numpy array.
        mention_ids = mention_ids.numpy()
        entity_mask = entity_mask.to(device)

        # Inference only: no gradients needed.
        with torch.no_grad():
            logits = self.model(
                input_ids, segment_ids, input_mask, entity_mask=entity_mask
            )[0]
            probs = softmax(logits)

        logits = logits.detach().cpu().numpy()
        probs = probs.detach().cpu().numpy()

        # Highest-scoring candidate per mention in this batch.
        predictions = np.argmax(logits, axis=1)

        for idx, mention_idx in enumerate(mention_ids):
            pred = predictions[idx].item()
            mentions[mention_idx]["predicted_candidate_idx"] = pred
            mentions[mention_idx]["prob_assigned_to_candidate"] = probs[idx][
                pred
            ].item()

    return mentions
def get_scheduler_and_optimizer(self, parameters, train_tensor_data, logger):
    """Create the AdamW optimizer and linear-warmup schedule for fine-tuning.

    Weight decay is applied to all parameters except biases and LayerNorm
    parameters, following standard BERT fine-tuning practice.

    Returns:
        (optimizer, scheduler) tuple.
    """
    model = self.model

    # Total optimizer steps over the whole run, accounting for
    # gradient accumulation.
    num_train_optimization_steps = (
        int(
            len(train_tensor_data)
            / parameters["train_batch_size"]
            / parameters["gradient_accumulation_steps"]
        )
        * parameters["num_train_epochs"]
    )
    num_warmup_steps = int(
        num_train_optimization_steps * parameters["warmup_proportion"]
    )

    # NOTE: a redundant no-op copy (`[n for n in param_optimizer]`)
    # was removed here; list(...) already yields a fresh list.
    param_optimizer = list(model.named_parameters())

    # Parameters excluded from weight decay.
    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [
                p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.01,
        },
        {
            "params": [
                p for n, p in param_optimizer if any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.0,
        },
    ]
    optimizer = AdamW(
        optimizer_grouped_parameters,
        lr=parameters["learning_rate"],
        correct_bias=False,
    )
    scheduler = WarmupLinearSchedule(
        optimizer,
        warmup_steps=num_warmup_steps,
        t_total=num_train_optimization_steps,
    )
    logger.info(" Num optimization steps = %d", num_train_optimization_steps)
    logger.info(" Num warmup steps = %d", num_warmup_steps)
    return optimizer, scheduler
@staticmethod
def get_model(parameters):
    """Load the fine-tuned reranking model, optionally wrapping its BERT
    encoder in DataParallel for multi-GPU inference."""
    reranking_model = BertForReranking.from_pretrained(
        parameters["path_to_model"],
        num_labels=parameters["top_k"],
        cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), "local"),
    )

    if parameters["dataparallel_bert"]:
        reranking_model.bert = torch.nn.DataParallel(reranking_model.bert)
        print("Data parallel Bert")

    return reranking_model
@staticmethod
def get_tokenizer(parameters):
    """Instantiate the tokenizer matching the fine-tuned model."""
    return BertTokenizer.from_pretrained(
        parameters["path_to_model"], do_lower_case=parameters["lowercase_flag"]
    )
@staticmethod
def _get_candidate_representation(
context_tokens, candidate_desc, tokenizer, max_seq_length, max_sub_seq_length
):
"""Tokenizes and truncates description; combines it with the tokenized context and generates one input sample for bert"""
candidate_desc_tokens = tokenizer.tokenize(candidate_desc)
candidate_desc_tokens = candidate_desc_tokens[:max_sub_seq_length]
tokens = (
["[CLS]"] + context_tokens + ["[SEP]"] + candidate_desc_tokens + ["[SEP]"]
)
segment_ids = [0] * (len(context_tokens) + 2) + [1] * (
len(candidate_desc_tokens) + 1
)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return {
"tokens": tokens,
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids,
}
@staticmethod
def _get_mention_context_end2end(mention, sentences):
"""Given a mention and a list of sentences that follow the blink conventions, it returns a left and right context for the mention"""
sent_idx = mention["sent_idx"]
prev_sent = sentences[sent_idx - 1] if sent_idx > 0 else ""
next_sent = sentences[sent_idx + 1] if sent_idx + 1 < len(sentences) else ""
prev_sent = sentences[sent_idx - 1] if False else ""
next_sent = sentences[sent_idx + 1] if False else ""
sent = sentences[sent_idx]
curr_sent_prev = sent[: mention["start_pos"]].strip()
curr_sent_next = sent[mention["end_pos"] :].strip()
left_context = "{} {}".format(prev_sent, curr_sent_prev).strip()
right_context = "{} {}".format(curr_sent_next, next_sent).strip()
return (left_context, right_context)
@staticmethod
def _select_field(samples, field):
"""Helper function that returns a list of lists, each of which contains the information for all candidates for each sample"""
return [
[cand[field] for cand in sample["candidate_features"]] for sample in samples
]
@staticmethod
def _get_context_token_representation(
context_key,
sample,
tokenizer,
max_sub_seq_length,
start_token,
end_token,
mention_text_key="text",
tagged=True,
):
"""Tags the mention, trims the context and concatenates everything to form the context representation"""
mention_tokens = (
[start_token] + tokenizer.tokenize(sample[mention_text_key]) + | |
the game.
Note that I do store usernames and command usage records in the database for use in feature improvement.
Your username will NEVER be shared with anyone for any reason.
Use !brdybotleave in your channel to remove yourself from my channel list."""
operants = getOperants(username)
commandDict = getCommands()
threading.Thread(target=ircListen, args=(conn, token, botName, username, server, operants,commandDict)).start()
elif channeloperantid[0][0] == 1:
message = username+" - I should be operating in your channel. If I'm not, message brdy on Discord to correct the error."
return message
def removeClient(channel):
    """Mark a channel for removal from the bot's channel list and return
    a confirmation message.

    NOTE(review): `channel` is interpolated directly into the SQL string,
    so this is injection-prone; also `VALUES (SELECT ...)` typically needs
    the subquery parenthesized (or an INSERT ... SELECT form) in
    PostgreSQL - confirm against the performSQL backend.
    """
    sql = "INSERT INTO bot.channeldeletion (channelid) values (SELECT ch.channelid FROM bot.channel ch WHERE ch.channelname = '"+channel+"') RETURNING channelid"
    channelid = performSQL(sql)
    message = channel+" - Successfully removed you from the channel list."
    return message
def getMoveID(moveName):
    """Fuzzy-resolve a move name (or nickname) to its move id string.

    Matches against pokemon.move and pokemon.movenickname using
    Levenshtein distance < 5 and returns the closest hit.

    NOTE(review): unlike getMonID, this does not check for an empty
    result - `moveID[0][0]` raises IndexError when nothing matches.
    The name is interpolated into the SQL string (injection-prone).
    """
    moveID = performSQL(""" WITH ldist as (SELECT mv.moveid,LEAST(pokemon.levenshtein(mv.movename, '"""+moveName+"""'),
        pokemon.levenshtein(mn.movenickname, '"""+moveName+"""')) AS distance FROM pokemon.move mv
        LEFT JOIN pokemon.movenickname mn ON mv.moveid = mn.moveid)
        SELECT moveid,distance FROM ldist WHERE distance < 5 ORDER BY distance LIMIT 1""")
    moveID = str(moveID[0][0])
    return moveID
def combineParameters(parameters):
    """Join a list of command-parameter strings into a single
    title-cased name.

    Example: ["fire", "punch"] -> "Fire Punch". Returns "" for an
    empty list. Replaces the previous manual concatenate-then-trim
    loop with the idiomatic str.join form (same output, no quadratic
    string building, no slice-based trailing-space trim).
    """
    return " ".join(parameters).title()
def getMonID(monName,channel):
    """Fuzzy-resolve a Pokemon name/nickname to (pokemonid, canonical name).

    Returns (None, error_message) when no name is within Levenshtein
    distance 5. `channel` is currently unused. Single quotes in the
    name are escaped, but the query is still built by string
    concatenation - injection-prone.
    """
    # escape single quotes (e.g. "Farfetch'd") for the SQL literal
    monName = monName.replace("'","''")
    monID = performSQL("""WITH ldist as (SELECT DISTINCT mon.pokemonid,LEAST(pokemon.levenshtein(mon.pokemonname,'"""+monName+"""'),
        pokemon.levenshtein(pn.pokemonnickname,'"""+monName+"""')) AS distance FROM pokemon.pokemon mon
        LEFT JOIN pokemon.pokemonnickname pn ON mon.pokemonid = pn.pokemonid)
        SELECT pokemonid,distance FROM ldist WHERE distance < 5 ORDER BY distance LIMIT 1""")
    if monID == []:
        errorString = "Could not find Pokemon "+monName+"."
        return None,errorString
    monID = str(monID[0][0])
    # fetch the canonical name for the resolved id (input may have been
    # a nickname or a близко-enough misspelling)
    monName = performSQL("""SELECT DISTINCT mon.pokemonname FROM pokemon.pokemon mon
        WHERE mon.pokemonid = """+monID)
    monName = str(monName[0][0])
    return monID,monName
def getMonInfo(parameters,channel):
    """Build the one-line info string for the !mon command.

    Combines dex number, types, catch rate, BST, level-5 XP yield,
    leveling rate, evolutions and level-up moves for the channel's
    configured game. Returns a usage hint when no name was given and
    an error/unavailable message when the Pokemon cannot be resolved.
    """
    if len(parameters) < 1:
        monInfo = "The !mon command requires the name of a pokemon as a parameter. (ex: '!mon charizard')"
        return monInfo
    monName = combineParameters(parameters)
    monID,monName = getMonID(monName,channel)
    game = getGame(channel)
    if monID == None:
        # getMonID returned (None, error_message); relay the error text
        return monName
    availability = performSQL("""SELECT DISTINCT pa.pokemonavailabilitytypeid
                FROM pokemon.pokemongameavailability pa
                LEFT JOIN pokemon.game ga ON pa.gameid = ga.gameid
                LEFT JOIN pokemon.gamegroup gg ON gg.gamegroupid = ga.gamegroupid
                WHERE pa.pokemonid = """+monID+" AND gg.gamegroupabbreviation = '"+game+"'")
    # availability type 18 => not obtainable in this game
    # NOTE(review): availability[0][0] raises IndexError when the query
    # returns no rows - confirm every game/pokemon pair has a record
    if availability[0][0] == 18:
        message = monName + " is not available in " + game + "."
        return message
    #this section gets all the info to be compiled in a string at the end of this function
    monName,monDex,monGrowth,monCaptureRate = performSQL("""SELECT DISTINCT mon.pokemonname,mon.pokemonpokedexnumber,
                lr.levelingratename,mon.pokemoncapturerate
                FROM pokemon.pokemon mon
                LEFT JOIN pokemon.levelingrate lr ON mon.levelingrateid = lr.levelingrateid
                WHERE pokemonid = """+monID)[0]
    monDex = str(monDex)
    monCaptureRate = getCaptureRate(monCaptureRate, channel)
    monTypes = getMonTypes(monID, channel)
    monBST = getMonBST(monID, channel)
    # XP yield quoted for a level-5 enemy vs a level-5 mon
    monXPYield = getXPYield(monID, channel,5,5)
    monEvos = getMonEvos(monID, channel)
    monMoves = getMonMoves(monID, channel)
    #compiling all of the bits of info into one long string for return
    monInfo = "#" + monDex +" " + monName + " ("+game+") " + monTypes + " | Catch: "+monCaptureRate+"% | BST: " + monBST + " | L5 XP: " + monXPYield + " | " + monGrowth + " | " + monEvos + " | " + monMoves
    return monInfo
def getMonGrowth(monID,channel):
    """Return the leveling-rate name (e.g. "Medium Fast") for the given
    pokemon id, looked up from pokemon.levelingrate. `channel` is
    accepted for signature consistency with the other helpers."""
    query = ("SELECT lr.levelingratename FROM pokemon.levelingrate lr "
             "LEFT JOIN pokemon.pokemon mon ON lr.levelingrateid = mon.levelingrateid "
             "WHERE mon.pokemonid = "+monID)
    rows = performSQL(query)
    return str(rows[0][0])
def getGeneration(channel):
    """Return, as a string, the generation id of the game configured
    for the given channel (channel -> game -> game group -> generation).

    NOTE(review): raises IndexError if the channel has no row / no
    configured game - confirm callers guarantee registration first.
    """
    generation = performSQL("""SELECT gen.generationid FROM bot.channel ch
        LEFT JOIN pokemon.game gm ON ch.gameid = gm.gameid
        LEFT JOIN pokemon.gamegroup gg ON gm.gamegroupid = gg.gamegroupid
        LEFT JOIN pokemon.generation gen ON gg.generationid = gen.generationid
        WHERE ch.channelname = '"""+channel+"'")[0][0]
    generation = str(generation)
    return generation
def getMonDex(monID, channel):
    """Return the Pokedex number of the given pokemon id as a string.
    `channel` is accepted for signature consistency with the other
    lookup helpers."""
    query = "SELECT DISTINCT mon.pokemonpokedexnumber FROM pokemon.pokemon mon"
    query += " WHERE mon.pokemonid = "+monID
    rows = performSQL(query)
    return str(rows[0][0])
def getMonTypes(monID, channel):
    """Return the Pokemon's type(s) for the channel's generation,
    formatted as "(Type1/Type2)" or "(Type1)" for mono-typed Pokemon."""
    gen = getGeneration(channel)
    # crosstab pivots the per-generation type rows into two id columns
    monTypes = """WITH monTypes as (SELECT pokemonid,type1id,type2id
        FROM pokemon.crosstab('select pokemonid, typeid as type1id, typeid as type2id
        FROM pokemon.pokemontype pt WHERE pt.generationid = """+gen+"""
        AND pt.pokemonid = """+monID+"""
        GROUP BY pokemonid,type1id,type2id ORDER BY pokemonid,type1id,type2id')
        AS ct( pokemonid int, type1id int, type2id int)) \r\n"""
    mainSelect = """SELECT type1.typename,type2.typename FROM monTypes
        LEFT JOIN pokemon.type type1 ON monTypes.type1id = type1.typeid
        LEFT JOIN pokemon.type type2 ON monTypes.type2id = type2.typeid"""
    typeArray = performSQL(monTypes+mainSelect)
    #if there are two types, store as (Type1/Type2)
    #print(str(typeArray))
    types = "("+str(typeArray[0][0])
    if typeArray[0][1] != None:
        types += "/"+str(typeArray[0][1])+")"
    #otherwise, store as (Type)
    else:
        types += ")"
    return types
def getMonBST(monID, channel):
    """Return the base stat total as a string, using the most recent
    stat set at or before the channel's generation (stats can change
    across generations, hence the ORDER BY gen DESC LIMIT 1)."""
    gen = getGeneration(channel)
    sql = """SELECT SUM(ps.pokemonstatvalue) bst, ps.generationid gen
        FROM pokemon.pokemonstat ps """
    sql += "LEFT JOIN pokemon.pokemon mon ON ps.pokemonid = mon.pokemonid WHERE mon.pokemonid ="+monID
    sql += " AND ps.generationid <= "+gen+" GROUP BY gen ORDER BY gen DESC LIMIT 1"
    bstArray = performSQL(sql)
    monBST = str(bstArray[0][0])
    return monBST
def getCaptureRate(captureRate,channel):
    """Approximate the catch percentage for a raw capture-rate value.

    Quintic polynomial fit, accurate to within about 0.1%, and valid
    for future catch rates not currently in use. The result is
    returned as a string rounded to one decimal place. `channel` is
    accepted for signature consistency with the other helpers.
    """
    percentage = 0.0000000000566758779982193 * math.pow(captureRate,5) - 0.0000000427601042779669*math.pow(captureRate,4) + 0.0000125235963016363*math.pow(captureRate,3) - 0.00191121035271638*math.pow(captureRate,2) + 0.311407303213974*captureRate + 0.846589688792571
    return str(round(percentage, 1))
def getXPYield(monID, channel,enemylevel,monlevel):
    """Return the experience awarded for defeating this Pokemon at the
    given enemy level, as a string ("unknown" when no yield row exists).

    Looks up the most recent base yield at or before the channel's
    generation and applies the base*level/7 formula.
    """
    gen = getGeneration(channel)
    query = "SELECT DISTINCT xp.experienceyieldvalue,xp.generationid gen FROM pokemon.pokemonexperienceyield xp "
    query += "WHERE xp.pokemonid = "+monID+" "
    query += "AND xp.generationid <= "+gen+" ORDER BY gen DESC LIMIT 1"
    rows = performSQL(query)
    if rows == []:
        return "unknown"
    base_yield = rows[0][0]
    return str(int(round(base_yield*enemylevel/7, 0)))
def getMonEvos(monID, channel):
    """Describe how the Pokemon evolves, as a human-readable sentence.

    Pulls the evolution rows for the most recent game group at or
    before the channel's generation and formats trigger, level, item,
    location, move and free-text conditions. Returns "Does not evolve"
    when no rows exist.

    NOTE(review): only the first evolution row (evoArray[0]) is
    reported, so branching evolutions beyond the first are ignored -
    confirm whether that is intended.
    """
    gen = getGeneration(channel)
    sql = "SELECT DISTINCT mon.pokemonname"
    sql += """, pel.pokemonevolutionlevel,
        i.itemname, l.locationname, pet.evolutiontypeid, pes.pokemonevolutionuniquestring, m.movename, gg.generationid
        FROM pokemon.pokemonevolution pe """
    sql += """LEFT JOIN pokemon.pokemon mon ON pe.targetpokemonid = mon.pokemonid """
    sql +="""LEFT JOIN pokemon.pokemonevolutionlevel pel ON pe.pokemonevolutionid = pel.pokemonevolutionid
        LEFT JOIN pokemon.pokemonevolutionmove pem ON pe.pokemonevolutionid = pem.pokemonevolutionid
        LEFT JOIN pokemon.move m ON pem.moveid = m.moveid
        LEFT JOIN pokemon.pokemonevolutionitem pei ON pe.pokemonevolutionid = pei.pokemonevolutionid
        LEFT JOIN pokemon.item i ON pei.itemid = i.itemid
        LEFT JOIN pokemon.pokemonevolutionlocation ploc ON pe.pokemonevolutionid = ploc.pokemonevolutionid
        LEFT JOIN pokemon.location l ON ploc.locationid = l.locationid
        LEFT JOIN pokemon.pokemonevolutiontype pet ON pe.pokemonevolutionid = pet.pokemonevolutionid
        LEFT JOIN pokemon.gamegroup gg ON pe.gamegroupid = gg.gamegroupid
        LEFT JOIN pokemon.pokemonevolutionstring pes ON pe.pokemonevolutionid = pes.pokemonevolutionid"""
    sql += " WHERE pe.basepokemonid = "+monID+" "
    # restrict to the newest game group that is still within this
    # channel's generation (evolution data can change per game group)
    sql += """ AND gg.generationid = (SELECT MAX(gg.generationid) FROM pokemon.pokemonevolution pe
        LEFT JOIN pokemon.gamegroup gg ON pe.gamegroupid = gg.gamegroupid
        WHERE gg.generationid <="""+gen+""" AND pe.basepokemonid = """+monID+""")
        ORDER BY generationid DESC"""
    evoArray = performSQL(sql)
    if evoArray == []:
        evoInfo = "Does not evolve"
    else:
        # unpack the first (and only reported) evolution row; NULL
        # columns come back as the string 'None' after str()
        evoMon = str(evoArray[0][0])
        evoLevel = str(evoArray[0][1])
        evoItem = str(evoArray[0][2])
        evoLocation = str(evoArray[0][3])
        evoType = evoArray[0][4]
        evoUnique = str(evoArray[0][5])
        evoMove = str(evoArray[0][6])
        evoInfo = "Evolves into " + evoMon
        # evolutiontypeid encodes the trigger; only the ids handled
        # below add extra wording (others fall through silently)
        if evoType == 2 or evoType == 11:
            evoInfo += " via trade"
        elif evoType == 3:
            evoInfo += " via high friendship"
        elif evoType == 12:
            evoInfo += " as a female"
        elif evoType == 13:
            evoInfo += " as a male"
        elif evoType == 16:
            evoInfo += " during the day"
        elif evoType == 17:
            evoInfo += " at night"
        elif evoType == 20:
            evoInfo += " in the rain"
        elif evoType == 21:
            evoInfo += " via high beauty"
        if not evoLevel == 'None':
            evoInfo += " at level "+evoLevel
        if not evoItem == 'None':
            # type 4 is a use-item evolution (e.g. stones); otherwise
            # the item is a held item
            if evoType == 4:
                evoInfo += " after being exposed to " + evoItem
            else:
                evoInfo += " while holding " + evoItem
        if not evoLocation == 'None':
            evoInfo += " at " + evoLocation
        if not evoMove == 'None':
            evoInfo += " while knowing " + evoMove
        if not evoUnique == 'None':
            evoInfo += " " + evoUnique
    return evoInfo
def getMonMoves(monID, channel):
    """Return a summary of the levels at which the Pokemon learns
    level-up moves in the channel's game, e.g. "Learns moves at 7, 13,
    20", or "Does not learn moves" when there are none.

    Moves learned at level 1 (and level 0 / evolution moves) are
    filtered out by the `pokemonmovelevel > 1` predicate.
    """
    game = getGame(channel)
    sql = """SELECT DISTINCT mv.movename,pm.pokemonmovelevel FROM pokemon.pokemonmove pm
        LEFT JOIN pokemon.move mv ON pm.moveid = mv.moveid
        LEFT JOIN pokemon.generationmove gm ON mv.moveid = gm.moveid
        LEFT JOIN pokemon.gamegroup gg ON pm.gamegroupid = gg.gamegroupid """
    sql += "WHERE pm.pokemonid ="+monID
    sql+=" AND pokemonmovelevel > 1 AND gg.gamegroupabbreviation ='"+game+"' ORDER BY pm.pokemonmovelevel ASC"
    movesArray = performSQL(sql)
    if movesArray == []:
        moveList = "Does not learn moves"
    else:
        moveList = "Learns moves at "
        for move in movesArray:
            moveList += str(move[1])+", "
        #remove the extra comma and space after
        moveList = moveList[0:len(moveList)-2]
    return moveList
def getMoveInfo(parameters, channel):
| |
save(
self.doc, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=self.name, relative_uris=relative_uris
)
if self.outputBinding is not None:
r["outputBinding"] = save(
self.outputBinding,
top=False,
base_url=self.name,
relative_uris=relative_uris,
)
# top refers to the directory level
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["name", "doc", "type", "outputBinding"])
class OutputRecordSchema(RecordSchema, OutputSchema):
    """Generated binding for a CWL output record schema.

    NOTE(review): this class follows the schema-salad code-generation
    pattern used throughout the file - prefer regenerating over
    hand-editing, and keep it in sync with the sibling
    Output*Schema classes.
    """

    def __init__(
        self,
        type: Any,
        fields: Optional[Any] = None,
        label: Optional[Any] = None,
        extension_fields: Optional[Dict[str, Any]] = None,
        loadingOptions: Optional[LoadingOptions] = None,
    ) -> None:
        if extension_fields:
            self.extension_fields = extension_fields
        else:
            self.extension_fields = CommentedMap()
        if loadingOptions:
            self.loadingOptions = loadingOptions
        else:
            self.loadingOptions = LoadingOptions()
        self.fields = fields
        self.type = type
        self.label = label

    @classmethod
    def fromDoc(
        cls,
        doc: Any,
        baseuri: str,
        loadingOptions: LoadingOptions,
        docRoot: Optional[str] = None,
    ) -> "OutputRecordSchema":
        """Construct an instance from a parsed document node, collecting
        per-field errors and raising one aggregate ValidationException."""
        _doc = copy.copy(doc)
        if hasattr(doc, "lc"):
            # preserve ruamel line/column info for error reporting
            _doc.lc.data = doc.lc.data
            _doc.lc.filename = doc.lc.filename
        _errors__ = []
        if "fields" in _doc:
            try:
                fields = load_field(
                    _doc.get("fields"),
                    idmap_fields_union_of_None_type_or_array_of_OutputRecordFieldLoader,
                    baseuri,
                    loadingOptions,
                )
            except ValidationException as e:
                _errors__.append(
                    ValidationException(
                        "the `fields` field is not valid because:",
                        SourceLine(_doc, "fields", str),
                        [e],
                    )
                )
        else:
            fields = None
        try:
            type = load_field(
                _doc.get("type"),
                typedsl_Record_symbolLoader_2,
                baseuri,
                loadingOptions,
            )
        except ValidationException as e:
            _errors__.append(
                ValidationException(
                    "the `type` field is not valid because:",
                    SourceLine(_doc, "type", str),
                    [e],
                )
            )
        if "label" in _doc:
            try:
                label = load_field(
                    _doc.get("label"),
                    union_of_None_type_or_strtype,
                    baseuri,
                    loadingOptions,
                )
            except ValidationException as e:
                _errors__.append(
                    ValidationException(
                        "the `label` field is not valid because:",
                        SourceLine(_doc, "label", str),
                        [e],
                    )
                )
        else:
            label = None
        extension_fields: Dict[str, Any] = {}
        for k in _doc.keys():
            if k not in cls.attrs:
                if ":" in k:
                    # namespaced keys are kept as extension fields
                    ex = expand_url(
                        k, "", loadingOptions, scoped_id=False, vocab_term=False
                    )
                    extension_fields[ex] = _doc[k]
                else:
                    _errors__.append(
                        ValidationException(
                            "invalid field `{}`, expected one of: `fields`, `type`, `label`".format(
                                k
                            ),
                            SourceLine(_doc, k, str),
                        )
                    )
                    break
        if _errors__:
            raise ValidationException("Trying 'OutputRecordSchema'", None, _errors__)
        return cls(
            fields=fields,
            type=type,
            label=label,
            extension_fields=extension_fields,
            loadingOptions=loadingOptions,
        )

    def save(
        self, top: bool = False, base_url: str = "", relative_uris: bool = True
    ) -> Dict[str, Any]:
        """Serialize back to a plain dict, including extension fields
        and (at top level) the $namespaces/$schemas blocks."""
        r: Dict[str, Any] = {}
        for ef in self.extension_fields:
            r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
        if self.fields is not None:
            r["fields"] = save(
                self.fields, top=False, base_url=base_url, relative_uris=relative_uris
            )
        if self.type is not None:
            r["type"] = save(
                self.type, top=False, base_url=base_url, relative_uris=relative_uris
            )
        if self.label is not None:
            r["label"] = save(
                self.label, top=False, base_url=base_url, relative_uris=relative_uris
            )
        # top refers to the directory level
        if top:
            if self.loadingOptions.namespaces:
                r["$namespaces"] = self.loadingOptions.namespaces
            if self.loadingOptions.schemas:
                r["$schemas"] = self.loadingOptions.schemas
        return r

    attrs = frozenset(["fields", "type", "label"])
class OutputEnumSchema(EnumSchema, OutputSchema):
    """Generated binding for a CWL output enum schema.

    NOTE(review): schema-salad generated code - prefer regenerating
    over hand-editing; keep in sync with the sibling Output*Schema
    classes.
    """

    def __init__(
        self,
        symbols: Any,
        type: Any,
        label: Optional[Any] = None,
        outputBinding: Optional[Any] = None,
        extension_fields: Optional[Dict[str, Any]] = None,
        loadingOptions: Optional[LoadingOptions] = None,
    ) -> None:
        if extension_fields:
            self.extension_fields = extension_fields
        else:
            self.extension_fields = CommentedMap()
        if loadingOptions:
            self.loadingOptions = loadingOptions
        else:
            self.loadingOptions = LoadingOptions()
        self.symbols = symbols
        self.type = type
        self.label = label
        self.outputBinding = outputBinding

    @classmethod
    def fromDoc(
        cls,
        doc: Any,
        baseuri: str,
        loadingOptions: LoadingOptions,
        docRoot: Optional[str] = None,
    ) -> "OutputEnumSchema":
        """Construct an instance from a parsed document node, collecting
        per-field errors and raising one aggregate ValidationException."""
        _doc = copy.copy(doc)
        if hasattr(doc, "lc"):
            # preserve ruamel line/column info for error reporting
            _doc.lc.data = doc.lc.data
            _doc.lc.filename = doc.lc.filename
        _errors__ = []
        try:
            symbols = load_field(
                _doc.get("symbols"),
                uri_array_of_strtype_True_False_None,
                baseuri,
                loadingOptions,
            )
        except ValidationException as e:
            _errors__.append(
                ValidationException(
                    "the `symbols` field is not valid because:",
                    SourceLine(_doc, "symbols", str),
                    [e],
                )
            )
        try:
            type = load_field(
                _doc.get("type"),
                typedsl_Enum_symbolLoader_2,
                baseuri,
                loadingOptions,
            )
        except ValidationException as e:
            _errors__.append(
                ValidationException(
                    "the `type` field is not valid because:",
                    SourceLine(_doc, "type", str),
                    [e],
                )
            )
        if "label" in _doc:
            try:
                label = load_field(
                    _doc.get("label"),
                    union_of_None_type_or_strtype,
                    baseuri,
                    loadingOptions,
                )
            except ValidationException as e:
                _errors__.append(
                    ValidationException(
                        "the `label` field is not valid because:",
                        SourceLine(_doc, "label", str),
                        [e],
                    )
                )
        else:
            label = None
        if "outputBinding" in _doc:
            try:
                outputBinding = load_field(
                    _doc.get("outputBinding"),
                    union_of_None_type_or_CommandOutputBindingLoader,
                    baseuri,
                    loadingOptions,
                )
            except ValidationException as e:
                _errors__.append(
                    ValidationException(
                        "the `outputBinding` field is not valid because:",
                        SourceLine(_doc, "outputBinding", str),
                        [e],
                    )
                )
        else:
            outputBinding = None
        extension_fields: Dict[str, Any] = {}
        for k in _doc.keys():
            if k not in cls.attrs:
                if ":" in k:
                    # namespaced keys are kept as extension fields
                    ex = expand_url(
                        k, "", loadingOptions, scoped_id=False, vocab_term=False
                    )
                    extension_fields[ex] = _doc[k]
                else:
                    _errors__.append(
                        ValidationException(
                            "invalid field `{}`, expected one of: `symbols`, `type`, `label`, `outputBinding`".format(
                                k
                            ),
                            SourceLine(_doc, k, str),
                        )
                    )
                    break
        if _errors__:
            raise ValidationException("Trying 'OutputEnumSchema'", None, _errors__)
        return cls(
            symbols=symbols,
            type=type,
            label=label,
            outputBinding=outputBinding,
            extension_fields=extension_fields,
            loadingOptions=loadingOptions,
        )

    def save(
        self, top: bool = False, base_url: str = "", relative_uris: bool = True
    ) -> Dict[str, Any]:
        """Serialize back to a plain dict, including extension fields
        and (at top level) the $namespaces/$schemas blocks."""
        r: Dict[str, Any] = {}
        for ef in self.extension_fields:
            r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
        if self.symbols is not None:
            u = save_relative_uri(self.symbols, base_url, True, None, relative_uris)
            if u:
                r["symbols"] = u
        if self.type is not None:
            r["type"] = save(
                self.type, top=False, base_url=base_url, relative_uris=relative_uris
            )
        if self.label is not None:
            r["label"] = save(
                self.label, top=False, base_url=base_url, relative_uris=relative_uris
            )
        if self.outputBinding is not None:
            r["outputBinding"] = save(
                self.outputBinding,
                top=False,
                base_url=base_url,
                relative_uris=relative_uris,
            )
        # top refers to the directory level
        if top:
            if self.loadingOptions.namespaces:
                r["$namespaces"] = self.loadingOptions.namespaces
            if self.loadingOptions.schemas:
                r["$schemas"] = self.loadingOptions.schemas
        return r

    attrs = frozenset(["symbols", "type", "label", "outputBinding"])
class OutputArraySchema(ArraySchema, OutputSchema):
    """Generated binding for a CWL output array schema.

    NOTE(review): schema-salad generated code - prefer regenerating
    over hand-editing; keep in sync with the sibling Output*Schema
    classes.
    """

    def __init__(
        self,
        items: Any,
        type: Any,
        label: Optional[Any] = None,
        outputBinding: Optional[Any] = None,
        extension_fields: Optional[Dict[str, Any]] = None,
        loadingOptions: Optional[LoadingOptions] = None,
    ) -> None:
        if extension_fields:
            self.extension_fields = extension_fields
        else:
            self.extension_fields = CommentedMap()
        if loadingOptions:
            self.loadingOptions = loadingOptions
        else:
            self.loadingOptions = LoadingOptions()
        self.items = items
        self.type = type
        self.label = label
        self.outputBinding = outputBinding

    @classmethod
    def fromDoc(
        cls,
        doc: Any,
        baseuri: str,
        loadingOptions: LoadingOptions,
        docRoot: Optional[str] = None,
    ) -> "OutputArraySchema":
        """Construct an instance from a parsed document node, collecting
        per-field errors and raising one aggregate ValidationException."""
        _doc = copy.copy(doc)
        if hasattr(doc, "lc"):
            # preserve ruamel line/column info for error reporting
            _doc.lc.data = doc.lc.data
            _doc.lc.filename = doc.lc.filename
        _errors__ = []
        try:
            items = load_field(
                _doc.get("items"),
                uri_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_False_True_2,
                baseuri,
                loadingOptions,
            )
        except ValidationException as e:
            _errors__.append(
                ValidationException(
                    "the `items` field is not valid because:",
                    SourceLine(_doc, "items", str),
                    [e],
                )
            )
        try:
            type = load_field(
                _doc.get("type"),
                typedsl_Array_symbolLoader_2,
                baseuri,
                loadingOptions,
            )
        except ValidationException as e:
            _errors__.append(
                ValidationException(
                    "the `type` field is not valid because:",
                    SourceLine(_doc, "type", str),
                    [e],
                )
            )
        if "label" in _doc:
            try:
                label = load_field(
                    _doc.get("label"),
                    union_of_None_type_or_strtype,
                    baseuri,
                    loadingOptions,
                )
            except ValidationException as e:
                _errors__.append(
                    ValidationException(
                        "the `label` field is not valid because:",
                        SourceLine(_doc, "label", str),
                        [e],
                    )
                )
        else:
            label = None
        if "outputBinding" in _doc:
            try:
                outputBinding = load_field(
                    _doc.get("outputBinding"),
                    union_of_None_type_or_CommandOutputBindingLoader,
                    baseuri,
                    loadingOptions,
                )
            except ValidationException as e:
                _errors__.append(
                    ValidationException(
                        "the `outputBinding` field is not valid because:",
                        SourceLine(_doc, "outputBinding", str),
                        [e],
                    )
                )
        else:
            outputBinding = None
        extension_fields: Dict[str, Any] = {}
        for k in _doc.keys():
            if k not in cls.attrs:
                if ":" in k:
                    # namespaced keys are kept as extension fields
                    ex = expand_url(
                        k, "", loadingOptions, scoped_id=False, vocab_term=False
                    )
                    extension_fields[ex] = _doc[k]
                else:
                    _errors__.append(
                        ValidationException(
                            "invalid field `{}`, expected one of: `items`, `type`, `label`, `outputBinding`".format(
                                k
                            ),
                            SourceLine(_doc, k, str),
                        )
                    )
                    break
        if _errors__:
            raise ValidationException("Trying 'OutputArraySchema'", None, _errors__)
        return cls(
            items=items,
            type=type,
            label=label,
            outputBinding=outputBinding,
            extension_fields=extension_fields,
            loadingOptions=loadingOptions,
        )

    def save(
        self, top: bool = False, base_url: str = "", relative_uris: bool = True
    ) -> Dict[str, Any]:
        """Serialize back to a plain dict, including extension fields
        and (at top level) the $namespaces/$schemas blocks."""
        r: Dict[str, Any] = {}
        for ef in self.extension_fields:
            r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
        if self.items is not None:
            u = save_relative_uri(self.items, base_url, False, 2, relative_uris)
            if u:
                r["items"] = u
        if self.type is not None:
            r["type"] = save(
                self.type, top=False, base_url=base_url, relative_uris=relative_uris
            )
        if self.label is not None:
            r["label"] = save(
                self.label, top=False, base_url=base_url, relative_uris=relative_uris
            )
        if self.outputBinding is not None:
            r["outputBinding"] = save(
                self.outputBinding,
                top=False,
                base_url=base_url,
                relative_uris=relative_uris,
            )
        # top refers to the directory level
        if top:
            if self.loadingOptions.namespaces:
                r["$namespaces"] = self.loadingOptions.namespaces
            if self.loadingOptions.schemas:
                r["$schemas"] = self.loadingOptions.schemas
        return r

    attrs = frozenset(["items", "type", "label", "outputBinding"])
class InputParameter(Parameter):
def __init__(
self,
id: Any,
label: Optional[Any] = None,
secondaryFiles: Optional[Any] = None,
streamable: Optional[Any] = None,
doc: Optional[Any] = None,
format: Optional[Any] = None,
inputBinding: Optional[Any] = None,
default: Optional[Any] = None,
type: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.label = label
| |
520,
560,
618,
640,
697,
728,
654,
744,
702,
683,
633,
525,
568,
477,
390,
420,
410,
350,
382,
387,
397,
502,
566,
695,
677,
266,
203,
207,
223,
203,
217,
171,
217,
210,
192,
195,
755,
522,
335,
253,
255,
214,
257,
216,
230,
229,
243,
543,
1025,
1447,
2843,
3592,
3671,
2647,
2032,
1726,
1775,
1833,
1883,
1885,
1809,
1568,
1579,
1866,
1688,
1763,
1681,
1607,
1567,
1479,
1439,
1366,
1275,
1412,
1352,
1211,
1086,
1075,
1118,
1064,
1029,
994,
1090,
1153,
1102,
1057,
1024,
928,
1012,
1027,
1054,
992,
842,
859,
850,
877,
1009,
1042,
1142,
1114,
1228,
1369,
1319,
1208,
1326,
1114,
865,
790,
928,
1053,
780,
739,
700,
737,
664,
534,
525,
552,
603,
694,
701,
786,
842,
789,
843,
863,
743,
732,
686,
650,
620,
507,
408,
448,
368,
337,
339,
393,
373,
415,
427,
500,
658,
417,
218,
201,
208,
206,
220,
220,
223,
209,
216,
166,
222,
283,
240,
210,
204,
214,
190,
235,
200,
190,
240,
324,
1101,
1232,
1299,
1729,
2845,
3699,
3183,
2209,
1794,
1733,
1725,
1786,
1795,
1691,
1717,
1736,
1801,
1734,
1724,
1665,
1605,
1508,
1339,
1398,
1273,
1177,
1205,
1412,
1295,
1205,
1123,
1065,
1138,
1011,
960,
998,
982,
1096,
1019,
963,
1007,
1001,
1110,
1152,
1078,
929,
810,
837,
790,
915,
1059,
1157,
1119,
1253,
1320,
1435,
1315,
1307,
1148,
901,
834,
900,
961,
730,
706,
732,
787,
630,
598,
523,
620,
700,
863,
868,
903,
912,
893,
953,
878,
896,
728,
693,
613,
670,
461,
407,
385,
370,
324,
342,
326,
367,
349,
432,
460,
630,
341,
202,
209,
214,
180,
212,
213,
215,
183,
194,
199,
196,
191,
207,
200,
198,
202,
198,
222,
222,
204,
429,
1154,
1403,
1130,
980,
1056,
2106,
2993,
3633,
2475,
1976,
1680,
1656,
1788,
1801,
1825,
1742,
1851,
1814,
1716,
1822,
1685,
1431,
1298,
1202,
1287,
1311,
1246,
1195,
1259,
1149,
1176,
1139,
1258,
1148,
1101,
1054,
1024,
978,
1068,
1005,
1144,
924,
961,
1027,
1064,
1020,
973,
888,
857,
891,
1046,
1027,
1140,
1196,
1277,
1243,
1355,
1370,
1254,
1066,
952,
842,
895,
949,
822,
696,
788,
680,
656,
548,
655,
855,
952,
955,
1014,
1074,
1048,
1047,
998,
920,
822,
770,
669,
621,
645,
534,
415,
342,
338,
367,
341,
358,
354,
360,
364,
453,
522,
239,
213,
198,
210,
209,
216,
187,
219,
215,
216,
214,
241,
210,
195,
205,
198,
194,
188,
200,
208,
235,
411,
889,
1726,
1049,
853,
691,
993,
2327,
3338,
3507,
2441,
1958,
1752,
1706,
1707,
1739,
1787,
1779,
1780,
1714,
1650,
1519,
1359,
1230,
1217,
1245,
1202,
1184,
1221,
1186,
1179,
1077,
1084,
1165,
1132,
1012,
970,
919,
929,
1000,
1036,
1049,
964,
882,
944,
1003,
1100,
914,
963,
973,
881,
889,
1049,
1045,
1234,
1348,
1237,
1318,
1453,
1310,
1209,
881,
909,
850,
908,
697,
721,
718,
679,
630,
601,
682,
968,
1332,
1159,
1112,
1145,
1123,
1165,
1075,
983,
966,
877,
697,
600,
547,
471,
385,
328,
411,
351,
381,
349,
348,
355,
342,
453,
497,
220,
212,
196,
208,
178,
211,
206,
218,
206,
207,
208,
194,
186,
199,
203,
197,
208,
202,
187,
210,
193,
265,
496,
1497,
1289,
843,
716,
680,
1388,
2673,
3608,
3251,
2318,
2000,
1814,
1772,
1845,
1812,
1799,
1948,
1666,
1522,
1508,
1401,
1317,
1299,
1285,
1282,
1274,
1170,
1301,
1146,
1156,
1110,
1107,
1076,
1007,
927,
899,
926,
952,
977,
981,
934,
995,
955,
970,
887,
832,
933,
987,
923,
895,
895,
1078,
1257,
1349,
1229,
1315,
1256,
1301,
1293,
964,
768,
835,
923,
795,
700,
685,
692,
688,
544,
811,
1042,
1204,
1263,
1243,
1276,
1277,
1176,
1099,
1005,
935,
825,
716,
586,
515,
472,
344,
333,
333,
354,
356,
332,
354,
354,
344,
478,
332,
229,
219,
185,
201,
146,
182,
216,
200,
206,
177,
187,
217,
180,
184,
226,
221,
208,
230,
199,
212,
226,
215,
287,
442,
938,
1218,
864,
735,
682,
1656,
2949,
3614,
3101,
2248,
1887,
1842,
1724,
1728,
1815,
1877,
1897,
1615,
1402,
1428,
1337,
1341,
1334,
1266,
1182,
1271,
1225,
1203,
1166,
1113,
1164,
1049,
1031,
885,
780,
942,
969,
929,
941,
1022,
1034,
998,
948,
943,
971,
952,
972,
925,
804,
926,
1023,
1220,
1285,
1293,
1277,
1276,
1210,
1338,
1018,
874,
864,
924,
804,
697,
734,
640,
541,
634,
862,
1072,
1254,
1231,
1247,
1289,
1224,
1105,
1091,
1001,
914,
835,
616,
575,
510,
432,
375,
332,
313,
328,
351,
317,
338,
377,
395,
473,
331,
210,
205,
186,
181,
224,
229,
209,
194,
187,
232,
195,
205,
199,
194,
223,
216,
225,
200,
206,
213,
216,
211,
216,
262,
375,
696,
1236,
1160,
827,
1119,
2456,
3284,
3507,
2760,
2231,
1997,
1672,
1623,
1652,
1726,
1718,
1704,
1440,
1361,
1311,
1344,
1308,
1238,
1173,
1223,
1248,
1196,
1212,
1209,
1117,
1019,
774,
664,
778,
934,
967,
964,
957,
1001,
943,
883,
876,
923,
1089,
1045,
983,
924,
910,
956,
1071,
1167,
1271,
1221,
1217,
1192,
1229,
1251,
894,
826,
861,
832,
792,
684,
759,
662,
670,
867,
1142,
1345,
1284,
1255,
1227,
1090,
1061,
1069,
1094,
956,
894,
755,
656,
482,
481,
347,
361,
321,
324,
334,
302,
310,
370,
391,
404,
464,
232,
201,
192,
206,
200,
186,
202,
197,
195,
215,
217,
207,
222,
221,
208,
210,
193,
214,
211,
200,
223,
192,
202,
216,
214,
240,
332,
378,
372,
300,
471,
1336,
2393,
3487,
3510,
2721,
2216,
1910,
1780,
1680,
1718,
1619,
1600,
1480,
1337,
1209,
1379,
1354,
1353,
1253,
1233,
1300,
1269,
1167,
1106,
1115,
1052,
936,
766,
744,
785,
894,
942,
972,
958,
917,
1011,
894,
854,
1021,
985,
963,
956,
959,
1028,
1000,
1150,
1276,
1236,
1146,
1140,
1314,
1332,
1014,
898,
803,
906,
770,
715,
747,
723,
683,
910,
1133,
1232,
1169,
1219,
1219,
1114,
1165,
1076,
993,
849,
756,
625,
589,
495,
435,
347,
303,
347,
322,
325,
298,
349,
342,
382,
425,
433,
215,
206,
199,
227,
206,
167,
187,
202,
186,
236,
192,
187,
213,
218,
195,
191,
204,
212,
192,
197,
191,
233,
223,
211,
199,
202,
230,
238,
210,
207,
314,
521,
1750,
2824,
3841,
3125,
2366,
1991,
1702,
1614,
1485,
1474,
1371,
1479,
1423,
1380,
1383,
1437,
1377,
1408,
1411,
1360,
1264,
1263,
1285,
1214,
1088,
1041,
979,
950,
859,
893,
1003,
978,
909,
906,
902,
825,
841,
988,
956,
1000,
1033,
1000,
920,
971,
1081,
1101,
1222,
1136,
1184,
1184,
1181,
1051,
825,
872,
937,
781,
717,
685,
646,
618,
1025,
1191,
1284,
1170,
1084,
1129,
1141,
1084,
1087,
893,
843,
653,
701,
512,
404,
333,
315,
316,
348,
340,
350,
332,
365,
347,
394,
507,
315,
201,
209,
217,
225,
205,
214,
219,
192,
200,
220,
197,
204,
191,
213,
239,
193,
190,
204,
216,
212,
192,
199,
214,
197,
208,
182,
207,
212,
210,
220,
200,
271,
663,
1826,
2979,
3830,
2923,
2258,
1817,
1700,
1548,
1285,
1276,
1335,
1394,
1501,
1396,
1426,
1438,
1452,
1499,
1605,
1550,
1389,
1403,
1113,
1208,
1177,
1185,
1122,
1035,
1115,
1089,
985,
908,
867,
820,
870,
933,
947,
1126,
1108,
1021,
948,
970,
1009,
998,
972,
1189,
1163,
1223,
1202,
1168,
1021,
840,
843,
902,
817,
721,
722,
675,
745,
941,
1139,
1227,
1290,
1178,
1061,
1026,
954,
985,
900,
685,
575,
509,
409,
392,
383,
319,
313,
294,
331,
329,
349,
353,
376,
424,
565,
294,
222,
210,
208,
219,
215,
221,
208,
221,
186,
188,
174,
204,
208,
202,
201,
237,
213,
200,
201,
210,
218,
208,
220,
198,
195,
210,
195,
195,
213,
209,
229,
379,
485,
1075,
2262,
3294,
3772,
2727,
2185,
1766,
1610,
1268,
1228,
1273,
1206,
1284,
1492,
1423,
1457,
1365,
1469,
1728,
1681,
1427,
1398,
1455,
1371,
1187,
1143,
1190,
1118,
1088,
1067,
1014,
957,
903,
920,
| |
# iterates over all the end matches
for end_match in end_matches:
# retrieves the match start and end values and uses them to
# construct the match value that is going to be used for the
# construction of the match orderer structure to be added
match_start = end_match.start()
match_end = end_match.end()
match_value = file_contents[match_start:match_end]
# creates the match orderer for the current (end) match
# signaling it as a end value (for later reference)
match_orderer = MatchOrderer(end_match, END_VALUE, match_value)
match_orderer_l.append(match_orderer)
# retrieves the single matches iterator
single_matches = SINGLE_TAG_REGEX.finditer(file_contents)
# iterates over all the single matches
for single_match in single_matches:
# retrieves the match start and end values and uses them to
# construct the match value that is going to be used for the
# construction of the match orderer structure to be added
match_start = single_match.start()
match_end = single_match.end()
match_value = file_contents[match_start:match_end]
# creates the match orderer for the current (single) match
# signaling it as a single value (for later reference)
match_orderer = MatchOrderer(single_match, SINGLE_VALUE, match_value)
match_orderer_l.append(match_orderer)
# orders the match orderer list so that the items are ordered from
# the beginning to the latest as their are meant to be sorted
match_orderer_l.sort(reverse = True)
# creates the temporary literal match orderer list
literal_orderer_l = []
# creates the initial previous end
previous_end = 0
# iterates over all the matches in the match orderer list
# to be able to create the complete set of literal parts
# of the template with pure contents
for match_orderer in match_orderer_l:
# retrieves the match orderer match start position
# as the "original" match start value
match_start_o = match_orderer.match.start()
# in case the current match orderer value start is not the same
# as the previous end plus one, this means that there's a literal
# value in between both matches and so that literal value must be
# added to the current match orderer container
if not match_start_o == previous_end:
# calculates the both the start and the end of the literal value
# in between and then retrieves the same value from the current
# file buffer/contents so that a orderer value may be created
match_start = previous_end
match_end = match_start_o
match_value = file_contents[match_start:match_end]
# creates the literal match object with the match start and
# and end values and then uses it to create the orderer
literal_match = LiteralMatch(match_start, match_end)
match_orderer_lit = MatchOrderer(literal_match, LITERAL_VALUE, match_value)
# appends the match orderer object to the list of literal match
# orderer list, this list will later be fused with the "normal"
# match orderer list (as expected)
literal_orderer_l.append(match_orderer_lit)
# updates the previous end value with the end of the current
# literal value, this is considered to be the iteration housekeeping
previous_end = match_orderer.match.end()
# in case there is still a final literal to be processed, it
# must be processed as a special case with special requirements
if not previous_end == len(file_contents):
# calculates the literal match start as the previous end
# value and the end as the final index of the file contents
# data and then retrieves the value as that chunk
match_start = previous_end
match_end = len(file_contents)
match_value = file_contents[match_start:match_end]
# creates the literal match object with the match start and
# and end values and then uses it to create the orderer
literal_match = LiteralMatch(match_start, match_end)
match_orderer = MatchOrderer(literal_match, LITERAL_VALUE, match_value)
# appends the match orderer object to the list of literal match
# orderer list, this list will later be fused with the "normal"
# match orderer list (as expected)
literal_orderer_l.append(match_orderer)
# adds the elements of the literal math orderer list
# to the match orderer list and then re-sorts the
# match ordered list one more time in the reverse order
match_orderer_l += literal_orderer_l
match_orderer_l.sort(reverse = True)
# creates the root node and starts the stack of tree nodes
# with the root node inserted in it, the stack will be used
# for the proper handling of start and end values
root_node = ast.RootNode()
stack = [root_node]
# iterates over all the matches in the match orderer list
# to create the complete abstract syntax tree representing
# the template that has just been parsed, this same tree
# may be latter percolated for the generation process
for match_orderer in match_orderer_l:
# retrieves the match orderer type for the
# current iteration, this value will condition
# the way the nodes are going to be created
mtype = match_orderer.get_type()
if mtype == OUTPUT_VALUE:
value = match_orderer.get_value()
node = ast.OutputNode(value, xml_escape = xml_escape)
parent_node = stack[-1]
parent_node.add_child(node)
elif mtype == EVAL_VALUE:
value = match_orderer.get_value()
node = ast.EvalNode(value)
parent_node = stack[-1]
is_end = node.is_end()
is_open = node.is_open()
if is_end:
node.assert_end(parent_node.type)
stack.pop()
else:
parent_node.add_child(node)
if is_open: stack.append(node)
elif mtype == START_VALUE:
node = ast.CompositeNode(
[match_orderer],
regex = ATTRIBUTE_REGEX,
literal_regex = ATTRIBUTE_LITERAL_REGEX
)
parent_node = stack[-1]
parent_node.add_child(node)
stack.append(node)
elif mtype == END_VALUE:
node = stack.pop()
node.value.append(match_orderer)
elif mtype == SINGLE_VALUE:
node = ast.SingleNode(
match_orderer,
regex = ATTRIBUTE_REGEX,
literal_regex = ATTRIBUTE_LITERAL_REGEX
)
parent_node = stack[-1]
parent_node.add_child(node)
elif mtype == LITERAL_VALUE:
node = ast.LiteralNode(match_orderer)
parent_node = stack[-1]
parent_node.add_child(node)
# creates the template file structure that is going to be
# used to represent the template in a abstract way this is
# going to be the interface structure with the end user
template_file = TemplateFile(
manager = self,
base_path = base_path,
file_path = file_path,
encoding = encoding,
root_node = root_node
)
# attaches the currently given process methods and locale
# bundles to the template file so that they may be used
# latter for the processing of the file
template_file.attach_process_methods(process_methods_list)
template_file.attach_locale_bundles(locale_bundles)
# loads the system variable in the template file, this
# will allow access to the global system status from inside
# the template file (for diagnosis and debugging)
template_file.load_system_variable()
# loads the complete set of based functions that should be
# made accessible to the template for be able to perform
# common operations like conversion and localization
template_file.load_functions()
# returns the final template file template file to the caller
# method so that it may be used for rendering
return template_file
def _extension(self, file_path):
_head, tail = os.path.split(file_path)
tail_s = tail.split(".", 1)
if len(tail_s) > 1: return "." + tail_s[1]
return None
def _extension_in(self, extension, sequence):
for item in sequence:
valid = extension.endswith(item)
if not valid: continue
return True
return False
class MatchOrderer(object):
    """
    Orders the various matches produced by the template engine so
    that they may be processed in their proper sequence.

    Instances compare in *inverted* start-position order on purpose:
    reverse-sorting a list of match orderers yields ascending
    document (start position) order.
    """

    match = None
    """ The match object to be ordered, this value
    should be the internal regex library value for
    the match operation """

    type = None
    """ The type of the match object to be ordered,
    this value should reflect the kind of match that
    has been accomplished for the value """

    value = None
    """ The value of the match object to be ordered
    this should be a literal string value of it """

    def __init__(self, match, type, value):
        self.match = match
        self.type = type
        self.value = value

    def __cmp__(self, other):
        # legacy (python 2) comparison, ignored by python 3;
        # orders by descending match start position
        self_start = self.match.start()
        other_start = other.match.start()
        return other_start - self_start

    def __lt__(self, other):
        # inverted on purpose, see class docstring
        return other.match.start() < self.match.start()

    def get_type(self):
        return self.type

    def set_type(self, type):
        self.type = type

    def get_value(self):
        return self.value

    def set_value(self, value):
        self.value = value
class LiteralMatch(object):
    """
    Lightweight stand-in for a regex match object, exposing the
    ``start()``/``end()`` interface over a plain pair of offsets
    into the current document's string data.
    """

    start_index = None
    """ The start index value, this should be an offset
    position inside the current document's string data value """

    end_index = None
    """ The end index value, that should close the
    current literal value starting in the start index """

    def __init__(self, start_index = None, end_index = None):
        self.start_index = start_index
        self.end_index = end_index

    def start(self):
        # mirrors the regex match object protocol
        return self.start_index

    def end(self):
        # mirrors the regex match object protocol
        return self.end_index
class TemplateFile(object):
"""
The template file class, this is the most abstract
representation of the template | |
described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* If binning is active, value is in binned pixels.
* Defaults to :py:attr:`CameraYSize` with :py:attr:`StartY` = 0
(full frame) on initial camera startup.
Attention:
* No error check is performed for incompatibilty with :py:attr:`BinY`,
and :py:attr:`StartY`, If these values are incompatible, you will
receive an **InvalidValueException** from a subsequent call to
:py:meth:`StartExposure()`.
"""
return self._get("numy")
@NumY.setter
def NumY(self, NumY: int):
    # Writes the sub-frame height via the Alpaca 'numy' device property;
    # see the NumY getter above for the full contract and raised errors.
    self._put("numy", NumY=NumY)
@property
def Offset(self) -> int:
    """(Read/Write) Gets or sets the current offset value or index (**see Notes**)

    Raises:
        InvalidValueException: If the supplied value is not valid
        NotImplementedException: If neither **offsets index** mode nor **offsets value**
            mode are supported.
        NotConnectedException: If the device is not connected
        DriverException: An error occurred that is not described by
            one of the more specific ASCOM exceptions.
            The device did not *successfully* complete the request.

    Notes:
        The Offset property is used to adjust the offset setting of the camera and has
        two modes of operation:

        * **Offsets-Index:** The Offset property is the selected offset's index within
          the :py:attr:`Offsets` array of textual offset descriptions.

            * In this mode the Offsets method returns a *0-based* array of strings, which
              describe available offset settings.
            * :py:attr:`OffsetMin` and :py:attr:`OffsetMax` will throw a **NotImplementedException**.

        * **Offsets-Value:** The Offset property is a direct numeric representation
          of the camera's offset.

            * In this mode the :py:attr:`OffsetMin` and :py:attr:`OffsetMax` properties must
              return integers specifying the valid range for Offset.
            * The :py:attr:`Offsets` array property will throw a **NotImplementedException**.

        A driver can support none, one or both offset modes depending on the camera's capabilities.
        However, only one mode can be active at any one moment because both modes share the
        Offset property to return the offset value. Your application can determine
        which mode is operational by reading the :py:attr:`OffsetMin` and :py:attr:`OffsetMax`
        properties and this Offset property. If a property can be read then its associated
        mode is active, if it throws a **NotImplementedException** then the mode is not active.

    Important:
        The :py:attr:`ReadoutMode` may in some cases affect the offset of the camera; if so,
        the driver must ensure that the two properties do not conflict if both are used.

    """
    return self._get("offset")
@Offset.setter
def Offset(self, Offset: int):
    # Writes the offset (index or direct value, depending on the active
    # mode) via the Alpaca 'offset' device property; see the Offset
    # getter above for the two modes of operation.
    self._put("offset", Offset=Offset)
@property
def OffsetMax(self) -> int:
    """Maximum offset value that this camera supports (see notes and :py:attr:`Offset`)

    Raises:
        NotImplementedException: If the :py:attr:`Offset` property is not
            implemented or is operating in **offsets-index** mode.
        NotConnectedException: If the device is not connected
        DriverException: An error occurred that is not described by
            one of the more specific ASCOM exceptions.
            The device did not *successfully* complete the request.

    Notes:
        When :py:attr:`Offset` is operating in **offsets-value** mode:

        * OffsetMax must return the camera's highest valid :py:attr:`Offset` setting
        * The :py:attr:`Offsets` property will throw **NotImplementedException**

        OffsetMax and :py:attr:`OffsetMin` act together: either both will
        return values, or both will throw **NotImplementedException**.

        * It is recommended that this property be retrieved only after a connection is
          established with the camera hardware, to ensure that the driver is
          aware of the capabilities of the specific camera model.

    """
    return self._get("offsetmax")
@property
def OffsetMin(self) -> int:
    """Minimum offset value that this camera supports (see notes and :py:attr:`Offset`)

    Raises:
        NotImplementedException: If the :py:attr:`Offset` property is not
            implemented or is operating in **offsets-index** mode.
        NotConnectedException: If the device is not connected
        DriverException: An error occurred that is not described by
            one of the more specific ASCOM exceptions.
            The device did not *successfully* complete the request.

    Notes:
        When :py:attr:`Offset` is operating in **offsets-value** mode:

        * OffsetMin must return the camera's lowest valid :py:attr:`Offset` setting
        * The :py:attr:`Offsets` property will throw **NotImplementedException**

        OffsetMin and :py:attr:`OffsetMax` act together: either both will
        return values, or both will throw **NotImplementedException**.

        * It is recommended that this property be retrieved only after a connection is
          established with the camera hardware, to ensure that the driver is
          aware of the capabilities of the specific camera model.

    """
    return self._get("offsetmin")
@property
def Offsets(self) -> List[str]:
    """List of Offset *names* supported by the camera (see notes and :py:attr:`Offset`)

    Raises:
        NotImplementedException: If the :py:attr:`Offset` property is not
            implemented or is operating in **offsets-value** mode.
        NotConnectedException: If the device is not connected
        DriverException: An error occurred that is not described by
            one of the more specific ASCOM exceptions.
            The device did not *successfully* complete the request.

    Notes:
        When :py:attr:`Offset` is operating in the **offsets-index** mode:

        * The Offsets property returns a list of available offset setting *names*.
        * The :py:attr:`OffsetMax` and :py:attr:`OffsetMin` properties will throw
          **NotImplementedException**.

        The returned offset names could, for example, be a list of ISO settings
        for a DSLR camera or a list of offset names for a CMOS camera. Typically
        the application software will display the returned offset names in a
        drop list, from which the astronomer can select the required value.
        The application can then configure the required offset by setting the
        camera's Offset property to the *array index* of the selected description.

        * It is recommended that this property be retrieved only after a connection is
          established with the camera hardware, to ensure that the driver is
          aware of the capabilities of the specific camera model.

    """
    # Alpaca 'offsets' device property (offsets-index mode only)
    return self._get("offsets")
@property
def PercentCompleted(self) -> int:
    """The percentage completeness of this operation

    Raises:
        InvalidOperationException: When it is inappropriate to ask for a
            completion percentage.
        NotImplementedException: If this optional property is not implemented.
        NotConnectedException: If the device is not connected.
        DriverException: An error occurred that is not described by
            one of the more specific ASCOM exceptions. See Attention
            below. The device did not *successfully* complete the request.

    Notes:
        * If valid, returns an integer between 0 and 100, where 0 indicates 0% progress
          (function just started) and 100 indicates 100% progress (i.e. completion).
        * At the discretion of the device, PercentCompleted may optionally be valid
          when :py:attr:`CameraState` is in any or all of the following states:

            * cameraExposing
            * cameraWaiting
            * cameraReading
            * cameraDownload

          In all other states an **InvalidOperationException** will be raised.

    Attention:
        * If the camera encounters a problem which prevents or prevented it from
          *successfully* completing the operation, the driver will raise an
          exception when you attempt to read PercentCompleted.

    """
    return self._get("percentcompleted")
@property
def PixelSizeX(self) -> float:
    """The width (microns) of the camera sensor elements.

    Raises:
        NotConnectedException: If the device is not connected
        DriverException: An error occurred that is not described by
            one of the more specific ASCOM exceptions.
            The device did not *successfully* complete the request.

    Notes:
        * It is recommended that this property be retrieved only after a connection is
          established with the camera hardware, to ensure that the driver is
          aware of the capabilities of the specific camera model.

    """
    # Alpaca 'pixelsizex' device property
    return self._get("pixelsizex")
@property
def PixelSizeY(self) -> float:
    """The height (microns) of the camera sensor elements.

    Raises:
        NotConnectedException: If the device is not connected
        DriverException: An error occurred that is not described by
            one of the more specific ASCOM exceptions.
            The device did not *successfully* complete the request.

    Notes:
        * It is recommended that this property be retrieved only after a connection is
          established with the camera hardware, to ensure that the driver is
          aware of the capabilities of the specific camera model.

    """
    # Alpaca 'pixelsizey' device property
    return self._get("pixelsizey")
@property
def ReadoutMode(self) -> int:
"""(Read/Write) Gets or sets the current camera readout mode (**see Notes**)
Raises:
InvalidValueException: If the supplied value is not valid (index out of range)
NotImplementedException: If :py:attr:`CanFastReadout` is True.
NotConnectedException: If the device is not | |
from link import Wrapper
from link.utils import list_to_dataframe
from contextlib import closing
import defaults
MYSQL_CONNECTION_ERRORS = (2006, 2013)
class DBCursorWrapper(Wrapper):
    """
    Wraps a select and makes it easier to transform the data.

    Rows and column names are fetched lazily and cached; the underlying
    cursor is closed as soon as the rows have been pulled.
    """
    def __init__(self, cursor, query = None, wrap_name = None, args=None):
        # lazily-populated caches for the fetched rows and the
        # lower-cased column names of the executed query
        self.cursor = cursor
        self._data = None
        self._columns = None
        self.query = query
        self.args = args or ()
        super(DBCursorWrapper, self).__init__(wrap_name, cursor)

    @property
    def columns(self):
        # lower-cased column names read from the DB-API cursor
        # description (only populated once a query has been executed)
        if not self._columns:
            self._columns = [x[0].lower() for x in self.cursor.description]
        return self._columns

    @property
    def data(self):
        # fetches all rows exactly once, closing the cursor afterwards;
        # subsequent accesses return the cached rows.
        # NOTE(review): an empty result set is falsy, so a second access
        # would re-enter this branch on the already-closed cursor —
        # confirm callers never hit that case
        if not self._data:
            with closing(self.cursor) as cursor:
                self._data = cursor.fetchall()
            # since we want to close cursor after we pull the data...
            # NOTE(review): description is read after close; presumably the
            # supported drivers keep it available — verify per driver
            self._columns = [x[0].lower() for x in self.cursor.description]
        return self._data

    def as_dataframe(self):
        # converts the cached rows into a pandas DataFrame, raising a
        # descriptive error when pandas is not installed
        try:
            from pandas import DataFrame
        except:
            raise Exception("pandas required to select dataframe. Please install" +
                            "sudo easy_install pandas")
        columns = self.columns
        #check to see if they have duplicate column names
        if len(columns)>len(set(columns)):
            raise Exception("Cannot have duplicate column names "
                            "in your query %s, please rename" % columns)
        return list_to_dataframe(self.data, columns)

    def _create_dict(self, row):
        # zips a single result row with the (lower-cased) column names
        return dict(zip(self.columns, row))

    def as_dict(self):
        # one dict per result row, keyed by column name
        return map(self._create_dict,self.data)

    def __iter__(self):
        # iterates directly over the cached result rows
        return self.data.__iter__()

    def __call__(self, query = None, args=()):
        """
        Creates a cursor and executes the query for you
        """
        # explicit arguments take precedence over the ones given at
        # construction time
        args = args or self.args
        query = query or self.query
        #sqlite db does not take in args...so i have to do this
        #TODO: Create custom dbcursor wrappers for different database types
        if args:
            self.cursor.execute(query, args=args)
        else:
            self.cursor.execute(query)
        return self
class DBConnectionWrapper(Wrapper):
    """
    Wraps a database connection and extends the functionality
    to do tasks like put queries into dataframes.
    """
    CURSOR_WRAPPER = DBCursorWrapper

    def __init__(self, wrap_name = None, chunked=False, **kwargs):
        """
        :param wrap_name: name the wrapper is registered under
        :param chunked: True when this connection is split into chunks
        :param kwargs: driver specific connection settings, stored as
            instance attributes for use by create_connection()
        """
        if kwargs:
            self.__dict__.update(kwargs)
        #get the connection and pass it to wrapper os the wrapped object
        self.chunked = chunked
        self._chunks = None
        connection = self.create_connection()
        super(DBConnectionWrapper, self).__init__(wrap_name, connection)

    @property
    def chunks(self):
        # mapping of chunk name to chunk connection, None when the
        # connection is not chunked
        return self._chunks

    def chunk(self, chunk_name):
        """
        This is the default lookup of one of the database chunks.
        """
        # idiom fix: identity comparison with None instead of ==
        if self.chunks is None:
            raise Exception('This is not a chunked connection ')
        return self.chunks.get(chunk_name)

    def execute(self, query, args = ()):
        """
        Creates a cursor and executes the query for you
        """
        cursor = self._wrapped.cursor()
        return self.CURSOR_WRAPPER(cursor, query, args=args)()

    #go native with the dataframe selection from sql
    #todo: add support for args [ use cursor.mogrify for psql world ]
    #for mysql,sqlite3 what to do...
    def select_dataframe_native(self, query):
        """
        Select everything into a dataframe using pandas' own sql
        support.
        """
        try:
            import pandas.io.sql as psql
        except ImportError:
            # narrowed except + fixed message (was "installsudo")
            raise Exception("pandas required to select dataframe. Please install: " +
                            "sudo easy_install pandas")
        df = psql.frame_query(query, self._wrapped)
        return df

    #TODO: Add in the ability to pass in params and also index
    def select_dataframe(self, query, args=()):
        """
        Select everything into a dataframe with the column names
        being the names of the columns in the dataframe
        """
        try:
            import pandas
        except ImportError:
            raise Exception("pandas required to select dataframe. Please install: " +
                            "sudo easy_install pandas")
        cursor = self.execute(query, args = args)
        return cursor.as_dataframe()

    def select(self, query=None, chunk_name = None, args=()):
        """
        Run a select and just return everything. If you have pandas installed it
        is better to use select_dataframe if you want to do data manipulation
        on the results
        """
        if chunk_name:
            #look up the db chunk that you want to read from
            cursor = self.chunk(chunk_name).cursor()
        else:
            cursor = self._wrapped.cursor()
        if not cursor:
            raise Exception("no cursor found")
        return self.CURSOR_WRAPPER(cursor, query, args=args)()

    def create_connection(self):
        """
        Override this function to create a connection depending on the
        type of database

        :returns: connection to the database you want to use
        """
        pass

    def use(self, database):
        """
        Switch to using a specific database
        """
        pass

    def databases(self):
        """
        Returns the databases that are available
        """
        pass

    def tables(self):
        """
        Returns the tables that are available
        """
        pass

    def now(self, offset=None):
        """
        Returns the time now according to the database. You can also pass in an
        offset so that you can add or subtract hours from the current
        """
        try:
            return self.select('select now()').data[0][0]
        except Exception:
            # narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are no longer swallowed
            raise Exception("the default select now() does not work on this database"
                            + " override this function if you would like this "
                            + "feature for your database ")

    @property
    def command(self):
        """
        Here is the command for doing the mysql command
        """
        raise NotImplementedError('no shell command for using this database')
class SqliteDB(DBConnectionWrapper):
    """
    A connection wrapper for a sqlite database
    """
    def __init__(self, wrap_name=None, path=None, chunked = False,
                 create_db = True):
        """
        A connection for a SqliteDB. Requires that sqlite3 is
        installed into python

        :param path: Path to the sqlite db.
        :param create_db: if True Create if it does not exist in the
            file system. Otherwise throw an error
        :param chunked: True if this is a path to a chunked sqlitedb
        """
        self.create_db = create_db
        if not path:
            raise Exception("Path Required to create a sqllite connection")
        # BUGFIX: the super() call referenced the SqliteDBConnectionWrapper
        # alias which is only bound after this class body executes; use
        # the class' own name instead
        super(SqliteDB, self).__init__(wrap_name=wrap_name,
                                       path=path, chunked = chunked)

    def create_connection(self):
        """
        Override the create_connection from the DbConnectionWrapper
        class which gets called in its initializer
        """
        # if we are chunking and this is not a db then don't try to make a
        # connection
        if self.chunked and not self.path.endswith('.db'):
            return None
        return self._connection_from_path(self.path)

    def _connection_from_path(self, path):
        # one sqlite connection per database file
        import sqlite3
        db = sqlite3.connect(path)
        return db

    @property
    def chunks(self):
        """
        For sqlite we are chunking by making many files that are of smaller size
        This makes it easy to distribute out certain parts of it. Directory
        structure looks like this::

            test_db.db --> sqlitedb
            test_db/
                my_chunk.db --> another small chunk
        """
        if self._chunks:
            return self._chunks
        if self.chunked:
            self._chunks = self._get_chunks()
            return self._chunks
        raise Exception("This database is not chunked")

    def chunk(self, chunk_name):
        """
        Get a chunk and if its not connected yet then connect it
        """
        chunk = self.chunks.get(chunk_name)
        if chunk:
            #if its a string then create the connection and put it in _chunks
            if isinstance(chunk, (str, unicode)):
                chunk = self._connection_from_path(chunk)
                self._chunks[chunk_name] = chunk
            return chunk
        raise Exception("there is no chunk")

    def _get_chunks(self):
        """
        Creates path entries for each chunk in the chunk directory;
        connections are established lazily by chunk().
        """
        import os
        dir = self.path
        #rstrip will remove too much if your path is /path/test_db.db
        if dir.endswith('.db'):
            dir = dir[:-3]
        dir = dir.rstrip('/')
        dbs = os.listdir(dir)
        return dict([
            (name, '%s/%s' % (dir, name))
            for name in dbs
        ])

    def __call__(self):
        """
        Runs the command line sqlite application
        """
        self.run_command('sqlite3 %s' % self.path)

    def execute(self, query):
        """
        Creates a cursor and executes the query for you
        """
        # consistency fix: honor the CURSOR_WRAPPER class attribute like
        # the base class does, instead of hard-coding DBCursorWrapper
        cursor = self._wrapped.cursor()
        return self.CURSOR_WRAPPER(cursor, query)()

SqliteDBConnectionWrapper = SqliteDB
class NetezzaDB(DBConnectionWrapper):
    """
    A connection wrapper for a Netezza database (via pyodbc).
    """
    def __init__(self, wrap_name=None, user=None, password=None,
                 host=None, database=None):
        """
        :param user: your user name for that database
        :param password: your password for the database
        :param host: host name or ip of the database server
        :param database: name of the database on that server
        """
        # BUGFIX: the password default was a corrupted placeholder
        # (<PASSWORD>) which is a syntax error; restored to None
        self.user = user
        self.password = password
        self.host = host
        self.database = database
        super(NetezzaDB, self).__init__(wrap_name=wrap_name)

    def create_connection(self):
        """
        Override the create_connection from the DBConnectionWrapper
        class which gets called in its initializer
        """
        import pyodbc
        connection_str="DRIVER={%s};SERVER=%s;DATABASE=%s;UID=%s;PWD=%s" % (
            "NetezzaSQL",self.host, self.database, self.user, self.password)
        #connect to a netezza database, you need ansi=True or it's all garbled
        return pyodbc.connect(connection_str, ansi=True)
class VerticaDB(DBConnectionWrapper):
    """
    A connection wrapper for a Vertica database (via pyodbc).
    """
    def __init__(self, wrap_name=None, user=None, password=None,
                 host=None, database=None):
        """
        :param user: your user name for that database
        :param password: your password for the database
        :param host: host name or ip of the database server
        :param database: name of the database on that server
        """
        # BUGFIX: the password default was a corrupted placeholder
        # (<PASSWORD>) which is a syntax error; restored to None
        self.user = user
        self.password = password
        self.host = host
        self.database = database
        super(VerticaDB, self).__init__(wrap_name=wrap_name)

    def create_connection(self):
        """
        Override the create_connection from the DBConnectionWrapper
        class which gets called in its initializer
        """
        import pyodbc
        connection_str=(
            "DRIVER={%s};SERVER=%s;DATABASE=%s;UID=%s;PWD=%s"
            %
            ("VerticaSQL",self.host, self.database, self.user, self.password)
        )
        #connect to a vertica database, you need ansi=True or it's all garbled
        return pyodbc.connect(connection_str, ansi=True)
class MysqlDB(DBConnectionWrapper):
def __init__(self, wrap_name=None, user=None, password=<PASSWORD>,
host=None, database=None, port=defaults.MYSQL_DEFAULT_PORT,
autocommit=True):
"""
A connection for a Mysql Database. Requires that
MySQLdb is installed
:param user: your user name for that database
:param password: <PASSWORD> the database
:param host: host name or ip of the database server
:param database: name of the database on that server
"""
self.user = user
self.password = password
self.host = host
self.database = database
self.port=port
self.autocommit = autocommit
super(MysqlDB, self).__init__(wrap_name=wrap_name)
def execute(self, query, args = ()):
"""
Creates a cursor and executes the query for you
"""
import MySQLdb
try:
cursor = self._wrapped.cursor()
return self.CURSOR_WRAPPER(cursor, query, args=args)()
except MySQLdb.OperationalError, e:
if e[0] in MYSQL_CONNECTION_ERRORS:
self._wrapped.close()
self._wrapped | |
if MQTT_ENABLED == 'true' else 'disabled'}.")
elif len(args) > 1 and args[0] in ['publish', 'unpublish']:
self._mqtt_prop_cmds(args[0], args[1])
else:
print(f"ERROR: Unsupported argument: {args[0]}")
def _mqtt_prop_cmds(self, cmd, prop_name):
    """Handle the 'publish'/'unpublish' sub-commands of the mqtt command."""
    global MQTT_PROPERTIES
    global mqtt_client
    global wp
    global wpdef
    if prop_name not in wpdef["properties"]:
        print(f"ERROR: Undefined property '{prop_name}'!")
        return
    already_published = prop_name in MQTT_PROPERTIES
    if cmd == "publish" and not already_published:
        MQTT_PROPERTIES.append(prop_name)
    elif cmd == "unpublish" and already_published:
        MQTT_PROPERTIES.remove(prop_name)
def complete_mqtt(self, text, line, begidx, endidx):
    """Tab completion for the mqtt command and its sub-commands."""
    token = line.split(' ')
    if len(token) == 2:
        subcommands = ['properties', 'publish', 'start', 'status', 'stop', 'unpublish']
        return self._complete_list(subcommands, text)
    if len(token) == 3:
        if token[1] == 'publish':
            candidates = [p for p in self._complete_propname(text, available_only=True)
                          if p not in MQTT_PROPERTIES]
            return self._complete_list(candidates, text)
        if token[1] == 'unpublish':
            return self._complete_list(MQTT_PROPERTIES, text)
    return []
def do_properties(self, arg: str) -> bool | None:
    """List property definitions and values
    Usage: properties [propRegex]"""
    global wpdef
    if not self._ensure_connected():
        return
    matching = self._get_props_matching_regex(arg, available_only=False)
    if not matching:
        print(f"No matching properties found!")
        return
    print(f"Properties:")
    for name, value in sorted(matching.items()):
        self._print_prop_info(wpdef["properties"][name], value)
    print()
def complete_properties(self, text, line, begidx, endidx):
    # Tab completion: every property name (read-only and unavailable
    # included) plus a placeholder hinting that a regex is accepted.
    return self._complete_propname(text, rw=False, available_only=False) + ['<propRegex>']
def do_rawvalues(self, arg: str) -> bool | None:
    """List raw values of properties (without value mapping)
    Usage: rawvalues [propRegex] [valueRegex]"""
    global wp
    if not self._ensure_connected():
        return
    print(f"List raw values of properties (without value mapping):")
    matching = self._get_props_matching_regex(arg)
    for prop_name, value in sorted(matching.items()):
        print(f"- {prop_name}: {utils_value2json(value)}")
    print()
def complete_rawvalues(self, text, line, begidx, endidx):
    # Tab completion: delegates to the shared property/value completer.
    return self._complete_values(text, line)
def do_server(self, arg: str) -> bool | None:
    """Start in server mode (infinite wait loop)
    Usage: server"""
    if not self._ensure_connected():
        return
    _LOGGER.info("Server started.")
    try:
        # block forever on a never-set event; Ctrl-C breaks the wait
        Event().wait()
    except KeyboardInterrupt:
        _LOGGER.info("Server shutting down.")
    # returning True terminates the command loop
    return True
def do_set(self, arg: str) -> bool | None:
    """Set a property value
    Usage: set <propName> <value>"""
    global wp
    global wpdef
    args = arg.split(' ')
    if not self._ensure_connected():
        return
    if len(args) < 2 or arg == '':
        print(f"ERROR: Wrong number of arguments!")
    elif args[0] not in wp.allProps:
        print(f"ERROR: Unknown property: {args[0]}")
    else:
        raw = args[1]
        if raw.lower() in ["false", "true"]:
            v = json.loads(raw.lower())
        else:
            # BUGFIX: the original used str.isnumeric()/str.isdecimal(),
            # but isdecimal() is a subset of isnumeric() (dead float
            # branch), so values like "3.5" or "-2" were always sent as
            # strings and digit-like chars such as "³" crashed int().
            # Try int, then float, then fall back to string.
            try:
                v = int(raw)
            except ValueError:
                try:
                    v = float(raw)
                except ValueError:
                    v = str(raw)
        wp.send_update(args[0], mqtt_get_decoded_property(
            wpdef["properties"][args[0]], v))
def complete_set(self, text, line, begidx, endidx):
    """Tab completion for the set command (property names, then values)."""
    global wpdef
    token = line.split(' ')
    if len(token) == 2:
        return self._complete_propname(text, rw=True, available_only=True)
    if len(token) == 3 and token[1] in wpdef["properties"]:
        pd = wpdef["properties"][token[1]]
        if pd.get("jsonType") == 'boolean':
            return [v for v in ['false', 'true'] if v.startswith(text)]
        if "valueMap" in pd:
            return [v for v in pd["valueMap"].values() if v.startswith(text)]
        if "jsonType" in pd:
            return [f"<{pd['jsonType']}>"]
    return []
def do_unwatch(self, arg: str) -> bool | None:
    """Unwatch a message or property
    Usage: unwatch <message|property> <msgType|propName>"""
    global wp
    args = arg.split(' ')
    if not self._ensure_connected():
        return
    if len(args) < 2 or arg == '':
        print(f"ERROR: Wrong number of arguments!")
        return
    watch_type, target = args[0], args[1]
    if watch_type == 'message':
        if target not in self.watching_messages:
            print(f"ERROR: Message of type '{target}' is not watched")
            return
        self.watching_messages.remove(target)
        # drop the shared callback once nothing is watched anymore
        if not self.watching_messages:
            wp.unregister_message_callback()
    elif watch_type == 'property':
        if target not in self.watching_properties:
            print(f"ERROR: Property with name '{target}' is not watched")
            return
        self.watching_properties.remove(target)
        if not self.watching_properties:
            wp.unregister_property_callback()
    else:
        print(f"ERROR: Unknown watch type: {watch_type}")
def complete_unwatch(self, text, line, begidx, endidx):
    """Tab completion for the unwatch command."""
    token = line.split(' ')
    if len(token) == 2:
        return self._complete_list(['message', 'property'], text)
    if len(token) == 3:
        if token[1] == 'message':
            return self._complete_list(self.watching_messages, text)
        if token[1] == 'property':
            return self._complete_list(self.watching_properties, text)
    return []
def do_values(self, arg: str) -> bool | None:
    """List values of properties (with value mapping enabled)
    Usage: values [propRegex] [valueRegex]"""
    global wp
    global wpdef
    if not self._ensure_connected():
        return
    print(f"List values of properties (with value mapping):")
    matching = self._get_props_matching_regex(arg)
    for prop_name, value in sorted(matching.items()):
        encoded = mqtt_get_encoded_property(wpdef['properties'][prop_name], value)
        print(f"- {prop_name}: {encoded}")
    print()
def complete_values(self, text, line, begidx, endidx):
    # Tab completion: delegates to the shared property/value completer.
    return self._complete_values(text, line)
def do_watch(self, arg: str) -> bool | None:
    """Watch message or a property
    Usage: watch <message|property> <msgType|propName>"""
    global wp
    global wpdef
    args = arg.split(' ')
    if not self._ensure_connected():
        return
    if len(args) < 2 or arg == '':
        print(f"ERROR: Wrong number of arguments!")
        return
    watch_type, target = args[0], args[1]
    if watch_type == 'message':
        if target not in wpdef['messages']:
            print(f"ERROR: Unknown message type: {target}")
            return
        # install the shared callback when the first message is watched
        if len(self.watching_messages) == 0:
            wp.register_message_callback(self._watched_message_received)
        if target not in self.watching_messages:
            self.watching_messages.append(target)
    elif watch_type == 'property':
        if target not in wp.allProps:
            print(f"ERROR: Unknown property: {target}")
            return
        if len(self.watching_properties) == 0:
            wp.register_property_callback(self._watched_property_changed)
        if target not in self.watching_properties:
            self.watching_properties.append(target)
    else:
        print(f"ERROR: Unknown watch type: {watch_type}")
def complete_watch(self, text, line, begidx, endidx):
    """Tab completion for the watch command."""
    global wpdef
    token = line.split(' ')
    if len(token) == 2:
        return self._complete_list(['message', 'property'], text)
    if len(token) == 3:
        if token[1] == 'message':
            return self._complete_message(text, 'server')
        if token[1] == 'property':
            return self._complete_propname(text, rw=False, available_only=True) + ['<propRegex>']
    return []
def _print_prop_info(self, pd, value):
    """Pretty-print one property definition together with its value."""
    global wp
    _LOGGER.debug(f"Property definition: {pd}")
    alias = f", alias:{pd['alias']}" if 'alias' in pd else ""
    rw = f", rw:{pd['rw']}" if 'rw' in pd else ""
    title = pd.get('title', "")
    desc = pd.get('description', "")
    print(f"- {pd['key']} ({pd['jsonType']}{alias}{rw}): {title}")
    if desc:
        print(f" Description: {desc}")
    if pd['key'] in wp.allProps:
        print(
            f" Value: {mqtt_get_encoded_property(pd,value)}{' (raw:' + utils_value2json(value) + ')' if 'valueMap' in pd else ''}")
    else:
        print(
            f" NOTE: This property is currently not provided by the connected device!")
def _watched_property_changed(self, name, value):
    # Property-change callback registered with the wattpilot API;
    # logs the value-mapped new value for watched properties only.
    global wpdef
    if name in self.watching_properties:
        pd = wpdef["properties"][name]
        _LOGGER.info(
            f"Property {name} changed to {mqtt_get_encoded_property(pd,value)}")
def _watched_message_received(self, wp, wsapp, msg, msg_json):
    # Message callback registered with the wattpilot API;
    # logs incoming messages whose type is being watched.
    if msg.type in self.watching_messages:
        _LOGGER.info(f"Message of type {msg.type} received: {msg}")
def _ensure_connected(self):
    """Return True when a wattpilot connection exists, else print an error."""
    global wp
    if wp:
        return True
    print('Not connected to wattpilot!')
    return False
def _get_props_matching_regex(self, arg, available_only=True):
    """Filter properties by optional name and value regexes taken from arg.

    Both patterns are anchored and matched case-insensitively; an empty
    or missing pattern matches everything.
    """
    global wp
    global wpdef
    args = arg.split(' ')
    prop_regex = args[0] if len(args) > 0 and args[0] != '' else '.*'
    value_regex = args[1] if len(args) > 1 else '.*'
    prop_pattern = re.compile(r'^' + prop_regex + r'$', flags=re.IGNORECASE)
    value_pattern = re.compile(r'^' + value_regex + r'$', flags=re.IGNORECASE)
    props = {}
    for name, value in wp_get_all_props(available_only).items():
        if not prop_pattern.match(name):
            continue
        encoded = str(mqtt_get_encoded_property(wpdef["properties"][name], value))
        if value_pattern.match(encoded):
            props[name] = value
    return props
#### MQTT Functions ####
def mqtt_get_mapped_value(pd, value):
    """Map a raw property value to its human-readable representation.

    Uses the optional "valueMap" of the property definition, which is
    keyed by the string form of the raw value. None passes through as
    None; unmappable values are returned unchanged with a warning.
    """
    # idiom fix: identity comparison with None; membership test now goes
    # straight against the dict instead of materializing list(keys())
    if value is None:
        return None
    if "valueMap" in pd:
        value_map = pd["valueMap"]
        if str(value) in value_map:
            return value_map[str(value)]
        _LOGGER.warning(
            f"Unable to map value '{value}' of property '{pd['key']} - using unmapped value!")
    return value
def mqtt_get_mapped_property(pd, value):
    """Value-map a property, element-wise when it is a (non-empty) array."""
    if value and pd.get("jsonType") == "array":
        return [mqtt_get_mapped_value(pd, element) for element in value]
    return mqtt_get_mapped_value(pd, value)
def mqtt_get_remapped_value(pd, mapped_value):
    """Reverse-map a human-readable value back to its raw wattpilot value."""
    if "valueMap" not in pd:
        return mapped_value
    # find the first raw key whose mapped value matches (same semantics
    # as list(...).index on the values)
    for raw_key, mapped in pd["valueMap"].items():
        if mapped == mapped_value:
            return json.loads(str(raw_key))
    _LOGGER.warning(
        f"Unable to remap value '{mapped_value}' of property '{pd['key']} - using mapped value!")
    return mapped_value
def mqtt_get_remapped_property(pd, mapped_value):
    """Reverse-map a property value, element-wise for array typed properties."""
    if pd.get("jsonType") == "array":
        return [mqtt_get_remapped_value(pd, element) for element in mapped_value]
    return mqtt_get_remapped_value(pd, mapped_value)
def mqtt_get_encoded_property(pd, value):
    """Encode a (value-mapped) property value for MQTT publishing.

    Arrays, objects, booleans and None are serialized to JSON; every
    other value is returned as its mapped value directly.
    """
    mapped_value = mqtt_get_mapped_property(pd, value)
    # parenthesized form of the original precedence: None always JSON-encodes
    needs_json = value == None or (
        "jsonType" in pd and pd["jsonType"] in ("array", "object", "boolean"))
    if needs_json:
        return json.dumps(mapped_value, cls=JSONNamespaceEncoder)
    return mapped_value
def mqtt_get_decoded_property(pd, value):
    """Decode a value received via MQTT and reverse-map it to raw form."""
    if pd.get("jsonType") in ("array", "object"):
        value = json.loads(value)
    return mqtt_get_remapped_property(pd, value)
def mqtt_publish_property(wp, mqtt_client, pd, value, force_publish=False):
    """Publish a single Wattpilot property value to its MQTT state topic.

    Properties not in the MQTT_PROPERTIES allow-list are skipped unless
    force_publish is set.  When property splitting is enabled, each child
    property is recursively published as well (always forced).

    Args:
        wp: Wattpilot connection object (provides .serial).
        mqtt_client: Connected MQTT client used for publishing.
        pd: Property definition dict (must contain "key").
        value: Raw property value to encode and publish.
        force_publish: Publish even if the property is not in the
            configured MQTT_PROPERTIES allow-list.
    """
    prop_name = pd["key"]
    # MQTT_PROPERTIES == [''] means "publish everything".
    if not (force_publish or MQTT_PROPERTIES == [''] or prop_name in MQTT_PROPERTIES):
        _LOGGER.debug(f"Skipping publishing of property '{prop_name}' ...")
        return
    # Build the per-property state topic from the configured template.
    property_topic = mqtt_subst_topic(MQTT_TOPIC_PROPERTY_STATE, {
        "baseTopic": MQTT_TOPIC_BASE,
        "serialNumber": wp.serial,
        "propName": prop_name,
    })
    encoded_value = mqtt_get_encoded_property(pd, value)
    _LOGGER.debug(
        f"Publishing property '{prop_name}' with value '{encoded_value}' to MQTT ...")
    # Retain so that late subscribers immediately get the last known state.
    mqtt_client.publish(property_topic, encoded_value, retain=True)
    if WATTPILOT_SPLIT_PROPERTIES and "childProps" in pd:
        _LOGGER.debug(
            f"Splitting child props of property {prop_name} as {pd['jsonType']} for value {value} ...")
        for cpd in pd["childProps"]:
            _LOGGER.debug(f"Extracting child property {cpd['key']}, ...")
            # NOTE(review): only the child key is passed here — presumably
            # wp_get_child_prop_value derives the child's value from shared
            # state rather than from `value`; confirm against its definition.
            split_value = wp_get_child_prop_value(cpd['key'])
            _LOGGER.debug(
                f"Publishing sub-property {cpd['key']} with value {split_value} to MQTT ...")
            mqtt_publish_property(wp, mqtt_client, cpd, split_value, True)
def mqtt_publish_message(wp, wsapp, msg, msg_json):
global mqtt_client
global MQTT_PUBLISH_MESSAGES
global MQTT_TOPIC_BASE
global MQTT_PUBLISH_PROPERTIES
global MQTT_TOPIC_MESSAGES
global wpdef
if mqtt_client == None:
_LOGGER.debug(f"Skipping MQTT message publishing.")
return
msg_dict = json.loads(msg_json)
if MQTT_PUBLISH_MESSAGES | |
{'type': 'object',
'properties': {'input': {'type': 'string'},
'output': {'type': 'string'}},
'additionalProperties': False}],
'default': False},
'client_of': {'type': 'array', 'items': {'type': 'string'},
'default': []},
'timesync': {
'anyOf': [
{'type': 'boolean'}, {'type': 'string'},
{'type': 'object',
'required': ['name'],
'properties': {
'name': {'type': 'string', 'default': 'timesync'},
'inputs': {'anyOf': [
{'type': 'string'},
{'type': 'array',
'items': {'type': 'string'}}]},
'outputs': {'anyOf': [
{'type': 'string'},
{'type': 'array',
'items': {'type': 'string'}}]}}},
{'type': 'array',
'items': {
'anyOf': [
{'type': 'string'},
{'type': 'object',
'required': ['name'],
'properties': {
'name': {'type': 'string',
'default': 'timesync'},
'inputs': {'anyOf': [
{'type': 'string'},
{'type': 'array',
'items': {'type': 'string'}}]},
'outputs': {'anyOf': [
{'type': 'string'},
{'type': 'array',
'items': {'type': 'string'}}]}}}]}}],
'default': False},
'with_strace': {'type': 'boolean', 'default': False},
'strace_flags': {'type': 'array',
'default': ['-e', 'trace=memory'],
'items': {'type': 'string'}},
'with_valgrind': {'type': 'boolean', 'default': False},
'valgrind_flags': {'type': 'array',
'default': ['--leak-check=full',
'--show-leak-kinds=all'], # '-v'
'items': {'type': 'string'}},
'outputs_in_inputs': {'type': 'boolean'},
'logging_level': {'type': 'string', 'default': ''},
'allow_threading': {'type': 'boolean'},
'copies': {'type': 'integer', 'default': 1, 'minimum': 1},
'repository_url': {'type': 'string'},
'repository_commit': {'type': 'string'},
'description': {'type': 'string'},
'contact_email': {'type': 'string'},
'validation_command': {'type': 'string'},
'dependencies': {
'type': 'array',
'items': {'oneOf': [
{'type': 'string'},
{'type': 'object',
'required': ['package'],
'properties': {
'package': {'type': 'string'},
'package_manager': {'type': 'string'},
'arguments': {'type': 'string'}},
'additionalProperties': False}]}},
'additional_dependencies': {
'type': 'object',
'additionalProperties': {
'type': 'array',
'items': {'oneOf': [
{'type': 'string'},
{'type': 'object',
'required': ['package'],
'properties': {
'package': {'type': 'string'},
'package_manager': {'type': 'string'},
'arguments': {'type': 'string'}},
'additionalProperties': False}]}}}}
_schema_excluded_from_class = ['name', 'language', 'args', 'working_dir']
_schema_excluded_from_class_validation = ['inputs', 'outputs']
language = None
language_ext = None
language_aliases = []
base_languages = []
executable_type = None
interface_library = None
interface_directories = []
interface_dependencies = []
supported_comms = []
supported_comm_options = {}
external_libraries = {}
internal_libraries = {}
type_map = None
inverse_type_map = None
function_param = None
version_flags = ['--version']
full_language = True
outputs_in_inputs = False
include_arg_count = False
include_channel_obj = False
is_typed = False
types_in_funcdef = True
interface_inside_exec = False
dont_declare_channel = False
is_dsl = False
brackets = None
zero_based = True
max_line_width = None
no_executable = False
comms_implicit = False
python_interface = {'table_input': 'YggAsciiTableInput',
'table_output': 'YggAsciiTableOutput',
'array_input': 'YggArrayInput',
'array_output': 'YggArrayOutput',
'pandas_input': 'YggPandasInput',
'pandas_output': 'YggPandasOutput'}
_library_cache = {}
_config_keys = []
_config_attr_map = []
_executable_search_dirs = None
_disconnect_attr = (Driver._disconnect_attr
+ ['queue', 'queue_thread',
'event_process_kill_called',
'event_process_kill_complete',
'model_process'])
_mpi_tags = {'ENV': 1,
'START': 2,
'STOP_RANK0': 3, # Stopped by partner
'STOP_RANKX': 4, # Stopped by root
'BUILDFILE': 5,
'LOCK_BUILDFILE': 6,
'UNLOCK_BUILDFILE': 7}
def __init__(self, name, args, model_index=0, copy_index=-1, clients=[],
preparsed_function=None, outputs_in_inputs=None,
mpi_rank=0, mpi_tag_start=None, **kwargs):
self._inv_mpi_tags = {v: k for k, v in self._mpi_tags.items()}
self.model_outputs_in_inputs = outputs_in_inputs
self.preparsed_function = preparsed_function
super(ModelDriver, self).__init__(name, **kwargs)
if self.overwrite is None:
self.overwrite = (not self.preserve_cache)
# Setup process things
self.model_process = None
self.queue = multitasking.Queue()
self.queue_thread = None
self.event_process_kill_called = multitasking.Event()
self.event_process_kill_complete = multitasking.Event()
# Strace/valgrind
if self.with_strace and self.with_valgrind:
raise RuntimeError("Trying to run with strace and valgrind.")
if (((self.with_strace or self.with_valgrind)
and platform._is_win)): # pragma: windows
raise RuntimeError("strace/valgrind options invalid on windows.")
self.model_index = model_index
self.copy_index = copy_index
self.clients = clients
self.env_copy = ['LANG', 'PATH', 'USER']
self._exit_line = b'EXIT'
for k in self.env_copy:
if k in os.environ:
self.env[k] = os.environ[k]
if not self.is_installed():
raise RuntimeError("%s is not installed" % self.language)
self.raw_model_file = None
self.model_function_file = None
self.model_function_info = None
self.model_function_inputs = None
self.model_function_outputs = None
self.model_file = None
self.model_args = []
self.model_dir = None
self.model_src = None
self.args = args
self.modified_files = []
self.wrapper_products = []
self._mpi_comm = False
self._mpi_rank = 0
self._mpi_size = 1
self._mpi_requests = {}
self._mpi_tag = (len(self._mpi_tags) * self.model_index)
if mpi_tag_start is not None:
self._mpi_tag += mpi_tag_start
if multitasking._on_mpi:
self._mpi_comm = multitasking.MPI.COMM_WORLD
self._mpi_rank = self._mpi_comm.Get_rank()
self._mpi_size = self._mpi_comm.Get_size()
self._mpi_partner_rank = mpi_rank
# Update for function
if self.function:
args = [self.init_from_function(args)]
# Parse arguments
self.debug(str(args))
self.parse_arguments(args)
assert(self.model_file is not None)
# Remove products
if self.overwrite:
self.remove_products()
# Write wrapper
if self.function:
self.wrapper_products.append(args[0])
self.wrapper_products += self.write_wrappers()
# Install dependencies
if self.dependencies:
self.install_model_dependencies(self.dependencies)
if self.additional_dependencies:
for language, v in self.additional_dependencies.items():
drv = import_component('model', language)
drv.install_model_dependencies(v)
    @staticmethod
    def before_registration(cls):
        r"""Operations that should be performed to modify class attributes prior
        to registration including things like platform dependent properties and
        checking environment variables for default settings.

        Args:
            cls: Driver subclass being registered.

        """
        Driver.before_registration(cls)
        # Reset the cached inverse map; it is rebuilt lazily from type_map
        # by get_inverse_type_map.
        cls.inverse_type_map = None
        # Preserve the original language info before any later overrides.
        cls._language = cls.language
        cls._language_aliases = cls.language_aliases
        # Normalize language_ext to a list for uniform handling downstream.
        if (((cls.language_ext is not None)
             and (not isinstance(cls.language_ext, (list, tuple))))):
            cls.language_ext = [cls.language_ext]
    @staticmethod
    def after_registration(cls, cfg=None, second_pass=False):
        r"""Operations that should be performed to modify class attributes after
        registration. For compiled languages this includes selecting the
        default compiler. The order of precedence is the config file 'compiler'
        option for the language, followed by the environment variable set by
        _compiler_env, followed by the existing class attribute.

        Args:
            cfg (YggConfigParser, optional): Config class that should
                be used to set options for the driver. Defaults to
                None and yggdrasil.config.ygg_cfg is used.
            second_pass (bool, optional): If True, the class as already
                been registered. Defaults to False.

        """
        if cfg is None:
            # Imported lazily here rather than at module level, presumably
            # to avoid a circular import — confirm.
            from yggdrasil.config import ygg_cfg
            cfg = ygg_cfg
            cfg.reload()
        Driver.after_registration(cls)
        cls.cfg = cfg
        # Override mapped class attributes from the config file, keeping
        # the existing attribute value as the fallback default.
        for x in cls._config_attr_map:
            ka = x['attr']
            k0 = x.get('key', ka)
            setattr(cls, ka, cls.cfg.get(cls.language, k0,
                                         getattr(cls, ka)))
@staticmethod
def finalize_registration(cls):
r"""Operations that should be performed after a class has been fully
initialized and registered."""
global _map_language_ext
for x in cls.get_language_ext():
if x not in _map_language_ext:
_map_language_ext[x] = []
_map_language_ext[x].append(cls.language)
    @classmethod
    def mpi_partner_init(cls, self):
        r"""Actions initializing an MPIPartnerModel.

        No-op hook; subclasses override to perform language-specific setup.
        """
        pass
    @classmethod
    def mpi_partner_cleanup(cls, self):
        r"""Actions cleaning up an MPIPartnerModel.

        No-op hook; subclasses override to perform language-specific cleanup.
        """
        pass
@classmethod
def get_inverse_type_map(cls):
r"""Get the inverse type map.
Returns:
dict: Mapping from native type to JSON type.
"""
if cls.inverse_type_map is None:
cls.inverse_type_map = {}
for k, v in cls.type_map.items():
if k != 'flag':
cls.inverse_type_map[v] = k
return cls.inverse_type_map
    @classmethod
    def get_language_for_source(cls, fname, languages=None, early_exit=False,
                                **kwargs):
        r"""Determine the language that can be used with the provided source
        file(s). If more than one language applies to a set of multiple files,
        the language that applies to the most files is returned.

        Args:
            fname (str, list): The full path to one or more files. If more than
                one file is provided, the language matching the most files is
                selected.
            languages (list, optional): The list of languages that are acceptable.
                Defaults to None and any language will be acceptable.
            early_exit (bool, optional): If True, the first language identified
                will be returned if fname is a list of files. Defaults to False.
            **kwargs: Additional keyword arguments are passed to recursive calls.

        Returns:
            str: The language that can operate on the specified file.

        Raises:
            ValueError: If no language can be determined for the file(s).

        """
        if isinstance(fname, list):
            # Tally how many of the files each language can handle.
            lang_dict = {}
            for f in fname:
                try:
                    ilang = cls.get_language_for_source(f, languages=languages,
                                                        **kwargs)
                    if early_exit:
                        return ilang
                except ValueError:
                    # Skip files whose language cannot be determined.
                    continue
                lang_dict.setdefault(ilang, 0)
                lang_dict[ilang] += 1
            if lang_dict:
                # The language that matched the most files wins.
                return max(lang_dict, key=lang_dict.get)
        else:
            # Single file: match by extension against registered languages.
            ext = os.path.splitext(fname)[-1]
            for ilang in cls.get_map_language_ext().get(ext, []):
                if (languages is None) or (ilang in languages):
                    return ilang
        raise ValueError("Cannot determine language for file(s): '%s'" % fname)
    @classmethod
    def get_map_language_ext(cls):
        r"""Return the mapping of all language extensions.

        Returns:
            dict: Mapping from file extension to list of language names.

        """
        return _map_language_ext
@classmethod
def get_all_language_ext(cls):
r"""Return the list of all language extensions."""
return list(_map_language_ext.keys())
    @classmethod
    def get_language_dir(cls):
        r"""Return the language directory (as resolved by the languages
        module for this driver's language)."""
        return languages.get_language_dir(cls.language)
@classmethod
def get_language_ext(cls):
r"""Return the language extension, including from the base classes."""
out = cls.language_ext
if out is None:
out = []
for x in cls.base_languages:
out += import_component('model', x).get_language_ext()
return out
def parse_arguments(self, args, default_model_dir=None):
r"""Sort model arguments to determine which one is the executable
and which ones are arguments.
Args:
args (list): List of arguments provided.
default_model_dir (str, optional): Path to directory that should be
used to normalize the model file path if it is not absolute.
Defaults to None and is set to the working_dir.
"""
if isinstance(args, (str, bytes)):
args = args.split()
for i in range(len(args)):
args[i] = str(args[i])
assert(isinstance(args, list))
if default_model_dir is None:
default_model_dir = self.working_dir
self.raw_model_file = args[0]
self.model_file = self.raw_model_file
self.model_args = args[1:]
if (self.language != 'executable') and (not os.path.isabs(self.model_file)):
model_file = os.path.normpath(os.path.join(default_model_dir,
self.model_file))
self.model_file = model_file
self.model_dir = os.path.dirname(self.model_file)
self.debug("model_file = '%s', model_dir = '%s', model_args = '%s'",
self.model_file, self.model_dir, self.model_args)
def init_from_function(self, args):
r"""Initialize model parameters based on the wrapped function."""
if not self.preparsed_function:
yml_mock = dict(self.yml,
name=self.name,
| |
#!/usr/bin/python
#Sorts based on top 50 CMetric, all callPaths - CMetric
#, all call paths - call path count and all samples
from __future__ import print_function
from bcc import BPF, PerfType, PerfSWConfig
from bcc import BPF
import sys
import ctypes as ct # For mapping the 'C' structure to Python
import argparse #For parsing command line arguments
import datetime
import os
import operator
import subprocess
import re
# arg validation
def positive_int(val):
    """argparse type check: parse *val* as a non-negative integer."""
    try:
        parsed = int(val)
    except ValueError:
        raise argparse.ArgumentTypeError("must be an integer")
    if parsed >= 0:
        return parsed
    raise argparse.ArgumentTypeError("must be positive")
def positive_nonzero_int(val):
    """argparse type check: parse *val* as a strictly positive integer."""
    parsed = positive_int(val)
    if parsed == 0:
        raise argparse.ArgumentTypeError("must be nonzero")
    return parsed
# Command-line interface for the profiler.
parser = argparse.ArgumentParser(description="Generates stack traces for critical code sections")
# Target executable is the only required argument; everything else defaults.
parser.add_argument("-x", metavar="<Path to executable>", dest = "targetPath", required = True, help = "Full path to the executable file to be profiled - Required")
parser.add_argument("-t", metavar="<Threshold>", dest = "threshold", type = positive_int, required = False, help = "Number active threads to trigger stack trace. Default = total no. of threads/2" )
parser.add_argument("-f", metavar="<Sampling Frequency>", dest = "sample_freq", type = positive_int, required = False, help = "Sampling frequency in Hz. Default = 333Hz (equivalent to 3 ms)" )
parser.add_argument("-d", metavar="<Stack Depth>", dest = "stack_depth", type = positive_int, required = False, help = "Maximum Stack depth for stack unwinding. Default = 10" )
parser.add_argument("-b", metavar="<Ring buffer Size>", dest = "buffer", type = positive_int, required = False, help = "Number of pages to be allocated for the ring buffer, Default = 64" )
# Boolean switches controlling what gets traced.
parser.add_argument("--threads_only", help = "Trace threads alone", action = "store_true")
parser.add_argument("--process_only", help = "Trace processes alone", action = "store_true")
parser.add_argument("--trace_lib", help = "Include library paths in tracing", action = "store_true")
parser.add_argument("--kernel_stack", help = "Get kernel stack traces", action = "store_true")
args = parser.parse_args()
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <uapi/linux/bpf_perf_event.h>
#include <linux/sched.h>
#include <linux/types.h>
//Structure to pass information from the kernel probe to the user probe
struct key_t {
u32 tid; //Thread ID
u32 tgid; // Parent thread ID
u64 cm; //CMetric
int source; // 0 - sampling, 1 - critical time slice, 2 - non-critical time slice
int user_stackid;
int kernel_stackid;
u64 inst_ptr;
int store_stackTop;
};
BPF_HASH(threadList, u32, u32); //Stores threadIds of participating threads - Global
BPF_HASH(threadCount, u32, u32, 1); //Stores number of active threads - Global
BPF_HASH(tsp, u32, u64, 1); //Stores timestamp of previous event
BPF_ARRAY(count, u32, 1); //Stores the total thread count (parent not included)
BPF_HASH(global_CM, u32, u64, 1); //Keeps track of cumulative sum of CMetric - Global
BPF_PERCPU_ARRAY(local_CM, u64, 1); // To store the snapshot of global_CM when a thread is switched in
BPF_HASH(CM_hash, u32, u64); // Criticality Metric hash map for each thread
BPF_HASH(GLOBAL_WT_TC, u32, u64,1); //Stores the cumulative sum of weighted thread Count - Global
BPF_PERCPU_ARRAY(LOCAL_WT_TC, u64,1); //Stores the snapshot of GLOBAL_WT_TC - CPU Local
BPF_PERCPU_ARRAY(inTS, u64, 1); //Store the time at which a thread was switched in - CPU Local
BPF_PERF_OUTPUT(events); //Buffer to write event details
BPF_STACK_TRACE(user_stacktraces, 4086);
BPF_STACK_TRACE(kernel_stacktraces, 4086);
/*sched_switch_args {
// from /sys/kernel/debug/tracing/events/sched/sched_switch/format
u64 __unused__;
char prev_comm[16];
pid_t prev_pid;
int prev_prio;
long prev_state;
char next_comm[16];
pid_t next_pid;
int next_prio;
};
*/
TRACEPOINT_PROBE(task, task_rename){
u32 threadId, totalCount;
char comm[16];
u32 zero32 = 0, one = 1;
int len = bpf_probe_read_str(&comm, sizeof(args->newcomm), args->newcomm);
if(!len)
return 0;
//Compare the command argument with traced command
if(PGM_FILTER){
bpf_probe_read(&threadId, sizeof(threadId), &args->pid);
threadList.insert(&threadId, &zero32); //Store the thread ID in the hash startTracing.lookup_or_init(&threadId, &zero32);
u32 *countVal = count.lookup_or_init(&zero32, &zero32);
lock_xadd(countVal,1);
}
return 0;
}
TASK_NEWTASK
int do_perf_event(struct bpf_perf_event_data *ctx){
u32 zero32 = 0;
u32 threadId = bpf_get_current_pid_tgid();
u32 *val = threadList.lookup(&threadId);
if(!val)
return 0;
u32 *activeCount = threadCount.lookup(&zero32);
if(!activeCount)
{return 0;}
u32 tempCount;
bpf_probe_read(&tempCount, sizeof(tempCount), activeCount);
u32 *totalThreadCount = count.lookup(&zero32);
if(!totalThreadCount)
return 0;
u32 totalCount;
bpf_probe_read(&totalCount, sizeof(totalCount), totalThreadCount);
if( (tempCount <= STACK_FILTER) || tempCount ==1 ){
struct key_t key = {};
key.tid = bpf_get_current_pid_tgid();
key.tgid = bpf_get_current_pid_tgid()>>32;
key.cm = 0;
key.source = 0;
if(TRACE_THREADS_ONLY){
key.inst_ptr = PT_REGS_IP(&ctx->regs); //Get the instruction pointer
events.perf_submit(ctx, &key, sizeof(key)); //Write details to the ring buffer
}
}
return 0;
}
TRACEPOINT_PROBE(sched, sched_process_exit){
u32 zero32 = 0;
//Get the current tid
u32 threadId;
bpf_probe_read(&threadId, sizeof(threadId), &args->pid);
//Check if the thread ID belongs to the application
u32 *val = threadList.lookup(&threadId);
if(!val)
return 0;
//Decrement the number of threads
u32 *countVal = count.lookup(&zero32);
if(!countVal)
return 0;
//lock_xadd(countVal, -1);
countVal -= 1;
return 0;
}
TRACEPOINT_PROBE(sched, sched_wakeup){
u32 targetID, zero32 = 0, status, one32 = 1;
//Check if thread being woken up belongs to the application
bpf_probe_read(&targetID, sizeof(targetID), &args->pid);
u32 *list = threadList.lookup(&targetID);
if (!list)
return 0;
/////////////////////////////////////////////////////////////////////
if(args->success){ //If waking was successful
u32 *activeCount = threadCount.lookup(&zero32);
if(!activeCount)
{return 0;}
u32 prev_tCount; //Local variable to store thread count
bpf_probe_read(&prev_tCount, sizeof(prev_tCount), activeCount);
//Increment thread count if thread was inactive
bpf_probe_read(&status, sizeof(status), list);
if(status == 0)
lock_xadd(activeCount,1);
//Set thread as active
threadList.update(&targetID,&one32);
}
return 0;
}
//Tracepoint probe for the Sched_Switch tracepoint
TRACEPOINT_PROBE(sched, sched_switch){
u32 one32=1, arrayKey=0, zero32=0;
u32 *listVal, *listVal1; //Pointers to entries in threadList map
u32 next_pid, prev_pid;
u64 zero64 = 0;
//Copy data to BPF stack
bpf_probe_read(&next_pid, sizeof(next_pid), &args->next_pid);
bpf_probe_read(&prev_pid, sizeof(prev_pid), &args->prev_pid);
//Look up thread ids in the list created by sys_clone()
listVal1 = threadList.lookup(&next_pid);
listVal = threadList.lookup(&prev_pid);
u32 prev=0, next=0;
if(listVal){
bpf_probe_read(&prev, sizeof(prev),listVal);
prev = 1;
}
if(listVal1){
bpf_probe_read(&next, sizeof(next),listVal1);
next = 1;
}
//Return if the switching threads do not belong to the application
if( !prev && !next)
return 0;
//////////////////////////////////////////////////////////////////////
//Calculate values common for all switching events
u64 interval, intervalCM;
u64 *oldTS = tsp.lookup_or_init(&arrayKey, &zero64);
if(!oldTS)
{return 0;}
u64 tempTS;
bpf_probe_read(&tempTS, sizeof(tempTS), oldTS); //Copy Old time from bpf map to local variable
u64 newTS = bpf_ktime_get_ns();
tsp.update(&arrayKey, &newTS); //Update time stamp
//The thread count is initialized to one as the first switch in event is always missed.
u32 *ptr_threadCount = threadCount.lookup_or_init(&arrayKey, &one32);
if(!ptr_threadCount)
{return 0;}
int prev_tc; //Temporary variable to store thread count for the previous switching interval
bpf_probe_read(&prev_tc, sizeof(prev_tc),ptr_threadCount);
if(newTS < tempTS)//Very rarely, event probes are triggered out of order, which are ignored
return 0;
if(tempTS==0 || prev_tc==0){ //If first event or no active threads in during the previous interval, prev interval = 0
interval = 0;
}
else
interval = (newTS - tempTS); //Switching interval
u64 *ptr_globalCM = global_CM.lookup_or_init(&arrayKey, &zero64);
if(!ptr_globalCM)
return 0;
//Calculate the CMetric for previous interval and add it to global_CM
if (interval != 0){
intervalCM = interval/prev_tc;
lock_xadd(ptr_globalCM, intervalCM);
}
//Calculate weighted thread count for previous interval
u64 wt_threadCount = (interval) * prev_tc;
u64 *g_wt_threadCount = GLOBAL_WT_TC.lookup_or_init(&arrayKey, &zero64);
if(!g_wt_threadCount)
return 0;
lock_xadd(g_wt_threadCount, wt_threadCount); //Add to global weighted thread count
//////////////////////////////////////////////////////////////////////
//If previous thread was a peer thread
if(prev){
//Decrement active thread count only if thread switched out is not in RUNNING (0) state
if(args->prev_state != TASK_RUNNING){
if(prev_tc > 0 ){
lock_xadd(ptr_threadCount, -1);
}
//Mark the thread as inactive in the threadList hash map
threadList.update(&prev_pid,&zero32);
}
else
//Mark the thread as active as thread is switched out to TASK_RUNNING state
threadList.update(&prev_pid,&one32);
u64 temp;
//Get updated CM
bpf_probe_read(&temp, sizeof(temp),ptr_globalCM);
//Get snapshot of global_CM which was stored in local_CM when prev_pid was switched in
u64 *cpuCM = local_CM.lookup_or_init(&arrayKey, &zero64);
if(!cpuCM)
{return 0;}
//Update the CM of the thread by adding the CM for the time slice
u64 updateCM = temp - (*cpuCM);
u64 *tCM = CM_hash.lookup_or_init(&prev_pid, &zero64);
if(!tCM)
{return 0;}
*tCM = *tCM + updateCM;
//Get LOCAL_WT_TC, the thread's weighted threadCount at the time it was switched in.
u64 *t_wt_threadCount;
t_wt_threadCount = LOCAL_WT_TC.lookup_or_init(&arrayKey, &zero64);
if(!t_wt_threadCount)
{return 0;}
u64 temp_g_wt_threadCount, temp_t_wt_threadCount;
bpf_probe_read(&temp_g_wt_threadCount, sizeof(temp_g_wt_threadCount), g_wt_threadCount);
bpf_probe_read(&temp_t_wt_threadCount, sizeof(temp_t_wt_threadCount), t_wt_threadCount);
//Reset the per-CPU CMetric counter
local_CM.update(&arrayKey, &zero64);
//Reset local weighted ThreadCount counter
LOCAL_WT_TC.update(&arrayKey, &zero64);
//Get time when this thread was switched in
oldTS = inTS.lookup_or_init(&arrayKey, &zero64);
if(!oldTS)
return 0;
u64 switch_in_time, timeSlice;
bpf_probe_read(&switch_in_time, sizeof(switch_in_time), oldTS);
timeSlice = (newTS - switch_in_time);
//Reset switch in time
inTS.update(&arrayKey, &zero64);
u32 *totalThreadCount = count.lookup(&zero32);
if(!totalThreadCount)
return 0;
u32 totalCount;
bpf_probe_read(&totalCount, sizeof(totalCount), totalThreadCount);
//Calculate the average number of threads
u32 ratio = (temp_g_wt_threadCount - temp_t_wt_threadCount) / timeSlice;
struct key_t key = {};
key.tid = prev_pid;
key.tgid = bpf_get_current_pid_tgid()>>32;
key.cm = updateCM;
if( (ratio <= STACK_FILTER || ratio == 1) && TRACE_THREADS_ONLY){ //If thread_avg < threshold and not parent thread
key.user_stackid = user_stacktraces.get_stackid(args, BPF_F_USER_STACK);
if (GET_KERNEL_STACK && args->prev_state != TASK_RUNNING)
key.kernel_stackid= kernel_stacktraces.get_stackid(args, 0);
else
key.kernel_stackid = -1;
key.source = 1;
}
else{
key.user_stackid = 0;
| |
# Source repository: hohe12ly/inundation-mapping
#!/usr/bin/env python3
import time
import pandas as pd
import geopandas as gpd
from pathlib import Path
from tools_shared_functions import get_metadata, get_datum, ngvd_to_navd_ft, get_rating_curve, aggregate_wbd_hucs, get_thresholds, flow_data
from dotenv import load_dotenv
import os
import argparse
import sys
sys.path.append('/foss_fim/src')
from utils.shared_variables import PREP_PROJECTION
'''
This script calls the NOAA Tidal API for datum conversions. Experience shows that
running script outside of business hours seems to be most consistent way
to avoid API errors. Currently configured to get rating curve data within
CONUS. Tidal API call may need to be modified to get datum conversions for
AK, HI, PR/VI.
'''
#import variables from .env file
load_dotenv()
# WRDS API root and input-data locations are configured via environment.
API_BASE_URL = os.getenv("API_BASE_URL")
WBD_LAYER = os.getenv("WBD_LAYER")
EVALUATED_SITES_CSV = os.getenv("EVALUATED_SITES_CSV")
NWM_FLOWS_MS = os.getenv("NWM_FLOWS_MS")
def get_all_active_usgs_sites():
    '''
    Compile a list of all active usgs gage sites that meet certain criteria.
    Return a GeoDataFrame of all sites.

    Returns
    -------
    gdf : GeoDataFrame
        Geospatial layer of all acceptable sites.
    list_of_sites : list
        USGS site codes of all acceptable sites.
    acceptable_sites_metadata : list
        WRDS metadata dictionaries for all acceptable sites.
    '''
    #Get metadata for all usgs_site_codes that are active in the U.S.
    metadata_url = f'{API_BASE_URL}/metadata'
    #Define arguments to retrieve metadata and then get metadata from WRDS
    select_by = 'usgs_site_code'
    selector = ['all']
    must_include = 'usgs_data.active'
    metadata_list, metadata_df = get_metadata(metadata_url, select_by, selector, must_include = must_include, upstream_trace_distance = None, downstream_trace_distance = None )
    #Filter out sites based quality of site. These acceptable codes were initially
    #decided upon and may need fine tuning. A link where more information
    #regarding the USGS attributes is provided.
    #https://help.waterdata.usgs.gov/code/coord_acy_cd_query?fmt=html
    acceptable_coord_acc_code = ['H','1','5','S','R','B','C','D','E']
    #https://help.waterdata.usgs.gov/code/coord_meth_cd_query?fmt=html
    acceptable_coord_method_code = ['C','D','W','X','Y','Z','N','M','L','G','R','F','S']
    #https://help.waterdata.usgs.gov/codes-and-parameters/codes#SI
    acceptable_alt_acc_thresh = 1
    #https://help.waterdata.usgs.gov/code/alt_meth_cd_query?fmt=html
    acceptable_alt_meth_code = ['A','D','F','I','J','L','N','R','W','X','Y','Z']
    #https://help.waterdata.usgs.gov/code/site_tp_query?fmt=html
    acceptable_site_type = ['ST']
    #Cycle through each site and filter out if site doesn't meet criteria.
    acceptable_sites_metadata = []
    for metadata in metadata_list:
        #Get the usgs info from each site
        usgs_data = metadata['usgs_data']
        #Get site quality attributes
        coord_accuracy_code = usgs_data.get('coord_accuracy_code')
        coord_method_code = usgs_data.get('coord_method_code')
        alt_accuracy_code = usgs_data.get('alt_accuracy_code')
        alt_method_code = usgs_data.get('alt_method_code')
        site_type = usgs_data.get('site_type')
        #Check to make sure that none of the codes were null, if null values are found, skip to next.
        if not all([coord_accuracy_code, coord_method_code, alt_accuracy_code, alt_method_code, site_type]):
            continue
        #Test if site meets criteria.
        if (coord_accuracy_code in acceptable_coord_acc_code and
            coord_method_code in acceptable_coord_method_code and
            alt_accuracy_code <= acceptable_alt_acc_thresh and
            alt_method_code in acceptable_alt_meth_code and
            site_type in acceptable_site_type):
            #If nws_lid is not populated then add a dummy ID so that 'aggregate_wbd_hucs' works correctly.
            if not metadata.get('identifiers').get('nws_lid'):
                metadata['identifiers']['nws_lid'] = 'Bogus_ID'
            #Append metadata of acceptable site to acceptable_sites list.
            acceptable_sites_metadata.append(metadata)
    #Get a geospatial layer (gdf) for all acceptable sites
    dictionary, gdf = aggregate_wbd_hucs(acceptable_sites_metadata, Path(WBD_LAYER), retain_attributes = False)
    #Get a list of all sites in gdf
    list_of_sites = gdf['identifiers_usgs_site_code'].to_list()
    #Rename gdf fields
    gdf.columns = gdf.columns.str.replace('identifiers_','')
    return gdf, list_of_sites, acceptable_sites_metadata
##############################################################################
#Generate categorical flows for each category across all sites.
##############################################################################
def write_categorical_flow_files(metadata, workspace):
    '''
    Writes flow files of each category for every feature_id in the input metadata.
    Written to supply input flow files of all gage sites for each flood category.

    Parameters
    ----------
    metadata : DICT
        Dictionary of metadata from WRDS (e.g. output from get_all_active_usgs_sites).
    workspace : STR
        Path to workspace where flow files will be saved.

    Returns
    -------
    all_data : Pandas DataFrame
        Flow data for every valid site/category combination.
    '''
    threshold_url = f'{API_BASE_URL}/nws_threshold'
    workspace = Path(workspace)
    workspace.mkdir(parents = True, exist_ok = True)
    # Collect per-site frames and concatenate once at the end:
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0,
    # and appending inside the loop is quadratic anyway.
    all_frames = []
    for site in metadata:
        #Get the feature_id and usgs_site_code
        feature_id = site.get('identifiers').get('nwm_feature_id')
        usgs_code = site.get('identifiers').get('usgs_site_code')
        nws_lid = site.get('identifiers').get('nws_lid')
        #thresholds only provided for valid nws_lid.
        if nws_lid == 'Bogus_ID':
            continue
        #if invalid feature_id skip to next site
        if feature_id is None:
            continue
        #Get the stages and flows
        stages, flows = get_thresholds(threshold_url, select_by = 'nws_lid', selector = nws_lid, threshold = 'all')
        #For each flood category
        for category in ['action','minor','moderate','major']:
            #Get flow; if not valid, skip to the next category.
            flow = flows.get(category, None)
            if flow is None:
                continue
            #Write 'guts' of a flow file and stash it for later concatenation.
            data = flow_data([feature_id], flow, convert_to_cms = True)
            data['recurr_interval'] = category
            data['nws_lid'] = nws_lid
            data['location_id'] = usgs_code
            data = data.rename(columns = {'discharge':'discharge_cms'})
            all_frames.append(data)
    if all_frames:
        all_data = pd.concat(all_frames, ignore_index = True)
    else:
        # No valid sites: produce an empty frame with the expected columns
        # so the column selection below does not raise.
        all_data = pd.DataFrame(columns = ['feature_id', 'discharge_cms',
                                           'recurr_interval', 'nws_lid',
                                           'location_id'])
    #Write CatFIM flows to file
    final_data = all_data[['feature_id','discharge_cms', 'recurr_interval']]
    final_data.to_csv(workspace / 'catfim_flows_cms.csv', index = False)
    return all_data
###############################################################################
def usgs_rating_to_elev(list_of_gage_sites, workspace=False, sleep_time = 1.0):
'''
Returns rating curves, for a set of sites, adjusted to elevation NAVD.
Currently configured to get rating curve data within CONUS. Tidal API
call may need to be modified to get datum conversions for AK, HI, PR/VI.
Workflow as follows:
1a. If 'all' option passed, get metadata for all acceptable USGS sites in CONUS.
1b. If a list of sites passed, get metadata for all sites supplied by user.
2. Extract datum information for each site.
3. If site is not in contiguous US skip (due to issue with datum conversions)
4. Convert datum if NGVD
5. Get rating curve for each site individually
6. Convert rating curve to absolute elevation (NAVD) and store in DataFrame
7. Append all rating curves to a master DataFrame.
Outputs, if a workspace is specified, are:
usgs_rating_curves.csv -- A csv containing USGS rating curve as well
as datum adjustment and rating curve expressed as an elevation (NAVD88).
ONLY SITES IN CONUS ARE CURRENTLY LISTED IN THIS CSV. To get
additional sites, the Tidal API will need to be reconfigured and tested.
log.csv -- A csv containing runtime messages.
(if all option passed) usgs_gages.gpkg -- a point layer containing ALL USGS gage sites that meet
certain criteria. In the attribute table is a 'curve' column that will indicate if a rating
curve is provided in "usgs_rating_curves.csv"
Parameters
----------
list_of_gage_sites : LIST
List of all gage site IDs. If all acceptable sites in CONUS are desired
list_of_gage_sites can be passed 'all' and it will use the get_all_active_usgs_sites
function to filter out sites that meet certain requirements across CONUS.
workspace : STR
Directory, if specified, where output csv is saved. OPTIONAL, Default is False.
sleep_time: FLOAT
Amount of time to rest between API calls. The Tidal API appears to
error out more during business hours. Increasing sleep_time may help.
Returns
-------
all_rating_curves : Pandas DataFrame
DataFrame containing USGS rating curves adjusted to elevation for
all input sites. Additional metadata also contained in DataFrame
'''
#Define URLs for metadata and rating curve
metadata_url = f'{API_BASE_URL}/metadata'
rating_curve_url = f'{API_BASE_URL}/rating_curve'
#If 'all' option passed to list of gages sites, it retrieves all acceptable sites within CONUS.
print('getting metadata for all sites')
if list_of_gage_sites == ['all']:
acceptable_sites_gdf, acceptable_sites_list, metadata_list = get_all_active_usgs_sites()
#Otherwise, if a list of sites is passed, retrieve sites from WRDS.
else:
#Define arguments to retrieve metadata and then get metadata from WRDS
select_by = 'usgs_site_code'
selector = list_of_gage_sites
#Since there is a limit to number characters in url, split up selector if too many sites.
max_sites = 150
if len(selector)>max_sites:
chunks = [selector[i:i+max_sites] for i in range(0,len(selector),max_sites)]
#Get metadata for each chunk
metadata_list = []
metadata_df = pd.DataFrame()
for chunk in chunks:
chunk_list, chunk_df = get_metadata(metadata_url, select_by, chunk, must_include = None, upstream_trace_distance = None, downstream_trace_distance = None )
#Append chunk data to metadata_list/df
metadata_list.extend(chunk_list)
metadata_df = metadata_df.append(chunk_df)
else:
#If selector has less than max sites, then get metadata.
metadata_list, metadata_df = get_metadata(metadata_url, select_by, selector, must_include = None, upstream_trace_distance = None, downstream_trace_distance = None )
#Create DataFrame to store all appended rating curves
print('processing metadata')
all_rating_curves = pd.DataFrame()
regular_messages = []
api_failure_messages=[]
#For each site in metadata_list
for metadata in metadata_list:
#Get datum information for site (only need usgs_data)
nws, usgs = get_datum(metadata)
#Filter out sites that are not in contiguous US. If this section is removed be sure to test with datum adjustment section (region will need changed)
if usgs['state'] in ['Alaska', 'Puerto Rico', 'Virgin Islands', 'Hawaii']:
continue
#Get rating curve for site
location_ids = usgs['usgs_site_code']
curve = get_rating_curve(rating_curve_url, location_ids = [location_ids])
#If no rating curve was returned, skip site.
if curve.empty:
message = f'{location_ids}: has no | |
want other people to decide what you are going to do. You have free will, you have the ability to choose whatever path you want to take.")
raw_input("There is no more text message coming from BOB. You think you are safe now.")
raw_input("You feel tired, after all of the things that have happened today. You sit on the couch, turn on the television, and fall asleep.")
raw_input("'Pause the operation!'")
raw_input("'BOB fails to interact with the subject, guys, we are stuck.")
raw_input("'I think we have encounter a BUG of this system, ha, those developers said they had removed all the BUGs. This is why I do not agree to cooperate with those people, we can do a much better job alone.'")
raw_input("'Restart BOB, quick! We don't have much time left, let's just hope next time, we will find the right path and the answer.'")
raw_input("...")
raw_input("You are sitting in your living room, just checking your phone. There is nothing big happening around your area, it is very safe and peaceful here. Actually, this is why you chose live here, a small and quiet neighborhood, a lot better than the one you had.")
raw_input("But today is too quiet.......")
if choice6.lower() == "a" or choice6.lower() == "accept":
raw_input("You think the safest way right now is to what BOB says. You head toward your bed room.")
raw_input("You are about to go pass your kitchen, there are knives in the kitchen...")
choice7= raw_input("a. Get a knife it may be useful later b. Keep walking you are running out of time...")
while choice7.lower() != "b" and choice7.lower() != "a":
print "You cannot always run away from making decison. It is your obligation to decide."
choice7= raw_input("Quick, make a decision, we don't have that much time left: a. Get the knife b. Keep going...")
#go to kitch
if choice7.lower() == "a":
raw_input("You open the door of the kitchen, you hear the exhasut fan running.")
raw_input("Weird, you do not remember when did you turn on the fan.")
raw_input("You saw two people standing in your kitchen, you recognize one of them.")
raw_input("'Sarah!' You yell at your ex-fiancee, but she doesn't seem to recognize you.")
raw_input("They knock you down, 'The other one must be BOB', you realize")
raw_input("They turn off the exhaust fan, you smell sulfur.")
raw_input("BOB punches you in your stomach, you are going to loose consciousness. You know they are going to blow up your house.")
raw_input("Your mind starts to wonder. You are confused why Sarah wants to kill you, you loved her so much that you even bought her a really expensive engagement ring.")
raw_input("Maybe it is because she finds out your secret, the secret you hide from the world.")
raw_input("If you could go back in time, before Sarah broke up with you in the park, things could end up differently...")
raw_input("'This is by far the biggest clue we have found!'")
raw_input("'Guys, I think we are on the right track, but we still need more information.'")
raw_input("'We should try a slightly different path, maybe we will find a better answer...'")
#keep walking
if choice7.lower() == "b":
raw_input("Forget about the knife, BOB is probably watching your action somewhere, you do not want to make him angry.")
raw_input("You enter your bedroom.")
raw_input("...")
raw_input("There is no one in your bedroom, you thought BOB will be there.")
raw_input("You open your drawer. Beside the from the dice, you also see a golden ring.")
raw_input("That was your engagment ring, your fiancee just broke up with you. What a stupid way it is to break up in your favorite park.")
raw_input("You text BOB: 'I have find the dice.")
raw_input("...")
raw_input("'Great! The game we are going to play is really simple. You just need to roll the dice and text me the number. I will also roll my six-sided dice, but my dice is slightly different than yours, there are only three different numbers. I can't tell you what those numbers are, but if the number you rolled out is the same with the number I rolled out, you will loose the game.'")
choice8= raw_input("You decide to: a. Roll the dice b. Runaway...")
while choice8.lower() != "b" and choice8.lower() != "a":
print "You cannot always run away from making decison. It is your obligation to decide."
choice8= raw_input("This is a really important! Make a decision!: a. Roll the dice b. Runaway...")
if choice8.lower() == "b":
raw_input("You runaway, everything is insane! You have to get help!")
raw_input("BANG...")
raw_input("You have been shot. You fall on the floor. You saw two people, one is holding the gun, and the other one is Sarah, you ex-fiancee.")
raw_input("The one holding the gun must be BOB. You assumed that Sarah hires BOB to kill you, becasue she found out your deepest secret. You were planning to kill her and burry her, but she has already taken action before you do...")
raw_input("...")
raw_input("'It ends here?'")
raw_input("'These are enough to proof subject's intention, but we don't know where did the subject burry the victim")
raw_input("'Run BOB again, we need to find a better path.")
raw_input("...")
raw_input("You are sitting in your living room, just checking your phone. There is nothing big happening around your area, it is very safe and peaceful here. Actually, this is why you chose live here, a small and quiet neighborhood, a lot better than the one you had.")
raw_input("But today is too quiet......")
if choice8.lower() == "a":
raw_input("You roll the dice.")
raw_input("...")
import random
# Outcome table for BOB's dice duel. BOB's die only shows 1, 2 or 6; the
# player's die shows 1-6. The player loses exactly when the two rolls match,
# so the table is True on the diagonal and False everywhere else.
directions2 = {
    (bob_face, player_face): bob_face == player_face
    for bob_face in ("1", "2", "6")
    for player_face in ("1", "2", "3", "4", "5", "6")
}


def game3(bob, dice):
    """Return True when the player loses, i.e. BOB's roll equals the player's."""
    return directions2[(bob, dice)]
def bob():
    """Roll BOB's die: it can only land on one of three faces."""
    faces = ("1", "2", "6")
    return random.choice(faces)
def dice():
    """Roll the player's ordinary six-sided die, returning the face as a string."""
    faces = ("1", "2", "3", "4", "5", "6")
    return random.choice(faces)
# Rebind the names to the concrete rolls: from here on `bob` and `dice` hold
# the rolled face strings (the roll functions are intentionally shadowed and
# can no longer be called).
bob = bob()
dice = dice()
# True means the two rolls matched, i.e. the player loses the game.
result2 = game3(bob, dice)
raw_input("It is " + dice)
raw_input("You text BOB, 'I rolled out " + dice)
raw_input("...")
if result2 is True:
raw_input("'Oh~ How unlucky, I also rolled out " + bob + " I guess you lose. Try to run, someone is coming for you~")
raw_input("The bedroom door is opened, you saw someone holding a baseball bat.")
raw_input("It is Sarah, your ex-fiancee.")
raw_input("You are in extreme terror, you are afraid of Sarah, just like she is someone who has just risen from death. You cannot move.")
raw_input("She hits you with the bat, as she hits harder, her face deforms. She starts to look like other people, some of them you recognize, some of them seems familiar. Those faces all look rotten and grievous.")
raw_input("You are beaten to a point, even the weapon she holds starts to change. Gun, arrow, metal rod, knife...")
raw_input("...")
raw_input("'That's it? There must be more!")
raw_input("'I guess this is not the right path to the answer, but we find out the weapons used by the subject.'")
raw_input("'If we pick a different path, we might find out where he burried his 30th victim...'")
elif result2 is False:
raw_input("I rolled out " + bob + " How lucky! Now, go and see what is under your bed, I left you a weapon. You will need that for later. Someone is coming for you~")
raw_input("You look under your bed, there is a baseball bat.")
raw_input("Your bedroom door is opened.")
raw_input("You saw Sarah, your ex-fiancee, also holding a baseball bat.")
raw_input("She tries to attack you, but you block her attack, and knock her down to the floor.")
raw_input("You lose control, you start hitting her until she is covered with blood.")
raw_input("What have you done! You just killed your ex-fiancee!")
raw_input("You receive a text message 'Opps, seems like you killed Sarah~ Now, you have to find somewhere | |
<gh_stars>10-100
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import re
from django.conf import settings
from django.conf.urls import url
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import InvalidPage, Paginator
from django.forms import model_to_dict
from django.urls import reverse
from django.db import transaction
from django.http import Http404
from future import standard_library
from geonode.api.api import ProfileResource
from geonode.api.authorization import GeoNodeAuthorization
from geonode.api.resourcebase_api import (CommonMetaApi, LayerResource,
MapResource, CommonModelApi)
from geonode.maps.models import MapLayer
from geonode.people.models import Profile
from geonode.security.utils import get_visible_resources
from guardian.shortcuts import get_objects_for_user
from taggit.models import Tag
from tastypie import fields, http
from tastypie.authorization import Authorization
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.resources import ModelResource
from tastypie.utils import trailing_slash
from cartoview.app_manager.models import App, AppInstance, AppStore, AppType
from cartoview.apps_handler.config import CartoviewApp
from cartoview.log_handler import get_logger
from .installer import AppInstaller, RestartHelper
from .utils import populate_apps
logger = get_logger(__name__)
standard_library.install_aliases()
class LayerFilterExtensionResource(LayerResource):
    """Layer API resource extended with `permission` and `geom_type` filters.

    `geom_type` is deliberately named differently from GeoNode's own `type`
    filter (vector/raster) so the two do not collide.
    """

    class Meta(LayerResource.Meta):
        resource_name = "layers"
        filtering = dict(LayerResource.Meta.filtering, typename=ALL)

    def build_filters(self, filters=None, **kwargs):
        """Carry the custom `permission`/`geom_type` params into the ORM filters."""
        if filters is None:
            filters = {}
        orm_filters = super(LayerFilterExtensionResource, self).build_filters(
            filters, **kwargs)
        # NOTE: `geom_type` (not `type`) so we don't override the geonode
        # vector/raster type filter.
        for param in ('permission', 'geom_type'):
            if param in filters:
                orm_filters[param] = filters[param]
        return orm_filters

    def apply_filters(self, request, applicable_filters):
        """Pop the custom filters and apply them on top of the base queryset."""
        perm = applicable_filters.pop('permission', None)
        # NOTE: renamed from `type` to `geom_type` to avoid clobbering the
        # geonode vector/raster filter.
        geom_type = applicable_filters.pop('geom_type', None)
        qs = super(LayerFilterExtensionResource, self).apply_filters(
            request, applicable_filters)
        if geom_type:
            qs = qs.filter(
                attribute_set__attribute_type__icontains=geom_type)
        if perm is not None:
            try:
                allowed = get_objects_for_user(request.user,
                                               perm).values('id')
            except BaseException:
                # Some permission codenames require the model class hint.
                allowed = get_objects_for_user(
                    request.user, perm, klass=qs).values('id')
            qs = qs.filter(id__in=allowed)
        return qs
class GeonodeMapLayerResource(ModelResource):
    """Minimal tastypie resource exposing GeoNode MapLayer objects."""
    class Meta(object):
        # distinct() guards against duplicate rows from joined queries.
        queryset = MapLayer.objects.distinct()
class AppStoreResource(ModelResource):
    """Tastypie resource over the configured app stores."""
    class Meta:
        # Echo the saved representation back in POST/PUT responses.
        always_return_data = True
        # Base tastypie Authorization: no fine-grained permission checks.
        authorization = Authorization()
        queryset = AppStore.objects.all()
class AppResource(ModelResource):
    """REST resource exposing installed Cartoview apps.

    Beyond the model fields, several computed fields are derived from the
    app's entry in the CartoviewApp registry (``bundle.obj.config``): display
    order, active/pending state, category names and instance count. The
    resource also registers custom endpoints for install, uninstall,
    activate, suspend, reorder and server restart.
    """
    # Related / computed fields serialized alongside the App model.
    store = fields.ForeignKey(AppStoreResource, 'store', full=False, null=True)
    order = fields.IntegerField()
    active = fields.BooleanField()
    pending = fields.BooleanField()
    categories = fields.ListField()
    default_config = fields.DictField(default={})
    app_instance_count = fields.IntegerField()

    def dehydrate_order(self, bundle):
        # Display order lives in the CartoviewApp registry entry, if any.
        carto_app = bundle.obj.config
        if carto_app:
            return carto_app.order
        return 0

    def dehydrate_default_config(self, bundle):
        # Fall back to an empty dict when no default config is stored.
        if bundle.obj.default_config:
            return bundle.obj.default_config
        return {}

    def dehydrate_active(self, bundle):
        # Active only when the registry entry exists, is not pending,
        # and is flagged active.
        active = False
        if bundle.obj.config and not bundle.obj.config.pending:
            active = bundle.obj.config.active
        return active

    def dehydrate_pending(self, bundle):
        # NOTE(review): unlike dehydrate_order/dehydrate_active, this does not
        # guard against a missing registry entry; if CartoviewApp.objects.get
        # can return None this raises AttributeError -- confirm.
        app = bundle.obj
        cartoview_app = CartoviewApp.objects.get(app.name)
        return cartoview_app.pending

    def dehydrate_categories(self, bundle):
        # Flatten the category relation to a plain list of names.
        return [category.name for category in bundle.obj.category.all()]

    def dehydrate_app_instance_count(self, bundle):
        return bundle.obj.appinstance_set.all().count()

    class Meta():
        queryset = App.objects.all().order_by('order')
        filtering = {
            "id": ALL,
            "name": ALL,
            "title": ALL,
            "store": ALL_WITH_RELATIONS,
            "single_instance": ALL
        }
        can_edit = True

    def _build_url_exp(self, view, single=False):
        # Build a URL pattern for a custom action; single=True adds a <pk>
        # segment so the action targets one specific app.
        name = view + "_app"
        if single:
            exp = r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/%s%s$" % (
                self._meta.resource_name,
                view,
                trailing_slash(),
            )
        else:
            exp = r"^(?P<resource_name>%s)/%s%s$" % (self._meta.resource_name,
                                                     view, trailing_slash())
        return url(exp, self.wrap_view(view), name=name)

    def prepend_urls(self):
        # Custom endpoints registered ahead of the default CRUD routes.
        return [
            url(r"^(?P<resource_name>%s)/install%s$" %
                (self._meta.resource_name, trailing_slash()),
                self.wrap_view('install'),
                name="bulk_install"),
            url(r"^(?P<resource_name>%s)/restart-server%s$" %
                (self._meta.resource_name, trailing_slash()),
                self.wrap_view('restart_server'),
                name="restart_server"),
            self._build_url_exp('install'),
            self._build_url_exp('reorder'),
            self._build_url_exp('uninstall', True),
            self._build_url_exp('suspend', True),
            self._build_url_exp('activate', True),
        ]

    def get_err_response(self,
                         request,
                         message,
                         response_class=http.HttpApplicationError):
        """Serialize *message* as an error payload using *response_class*."""
        data = {
            'error_message': message,
        }
        return self.error_response(
            request, data, response_class=response_class)

    def install(self, request, **kwargs):
        """Install requested apps.
        expected post data structure:
        {"apps":[
            {
                "app_name":<str>,
                "store_id":<number>,
                "version":<str>,
            },
        ],
        "restart":<bool>
        }
        return json contains a list of apps with status and message ex:
        [
            {
                "app_name":<str>,
                "success":<bool>,
                "message":<str>,
            }
        ]
        """
        # from builtins import basestring
        self.method_check(request, allowed=['post'])
        self.is_authenticated(request)
        self.throttle_check(request)
        # Installing apps is restricted to active staff members.
        if not (request.user.is_active and request.user.is_staff):
            return self.get_err_response(request,
                                         "this action require staff member",
                                         http.HttpForbidden)
        data = json.loads(request.body)
        apps = data.get("apps", [])
        restart = data.get("restart", False)
        response_data = []
        for app in apps:
            app_name = app.get("app_name")
            store_id = app.get("store_id")
            version = app.get("version")
            app_result = {"app_name": app_name, "success": True, "message": ""}
            # try:
            # Each app installs atomically; a failure rolls back its DB changes.
            with transaction.atomic():
                installer = AppInstaller(app_name, store_id, version,
                                         request.user)
                installer.install(restart=False)
                app_result["message"] = "App Installed Successfully"
                response_data.append(app_result)
            # except Exception as ex:
            #     logger.error(ex)
            #     app_result["success"] = False
            #     app_result["message"] = "{0}".format(ex)
            #     response_data.append(app_result)
        # Restart once after the whole batch rather than per app.
        if restart:
            RestartHelper.restart_server()
        return self.create_response(
            request, response_data, response_class=http.HttpAccepted)

    def restart_server(self, request, **kwargs):
        """Restart the application server (staff only)."""
        # from builtins import basestring
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        if not (request.user.is_active and request.user.is_staff):
            return self.get_err_response(request,
                                         "this action require staff member",
                                         http.HttpForbidden)
        RestartHelper.restart_server()
        return self.create_response(
            request, {"message": "Server Will be Restarted"},
            response_class=http.HttpAccepted)

    def uninstall(self, request, **kwargs):
        # The URL is registered but the action is currently a no-op.
        pass

    def set_active(self, active, request, **kwargs):
        """Toggle an app's active flag and refresh the installed-app registry."""
        self.method_check(request, allowed=['post'])
        self.is_authenticated(request)
        self.throttle_check(request)
        try:
            bundle = self.build_bundle(
                data={'pk': kwargs['pk']}, request=request)
            app = self.cached_obj_get(
                bundle=bundle, **self.remove_api_resource_names(kwargs))
            app.set_active(active)
            populate_apps()
        except ObjectDoesNotExist:
            # Unknown pk: report the resource as gone.
            return http.HttpGone()
        self.log_throttled_access(request)
        return self.create_response(request, {'success': True})

    def suspend(self, request, **kwargs):
        # Deactivate the app identified by kwargs['pk'].
        return self.set_active(False, request, **kwargs)

    def activate(self, request, **kwargs):
        # Activate the app identified by kwargs['pk'].
        return self.set_active(True, request, **kwargs)

    def reorder(self, request, **kwargs):
        """Persist a new display order for apps.

        Accepts either a form-encoded ``apps`` value ("id1,id2,...") or a
        JSON body ``{"apps": [...]}``; an app's order is its 1-based position
        in the list.
        """
        ids_list = request.POST.get("apps", None)
        if ids_list is not None:
            ids_list = ids_list.split(",")
        else:
            ids_list = json.loads(request.body)["apps"]
        for i in range(0, len(ids_list)):
            app = App.objects.get(id=ids_list[i])
            app.order = i + 1
            app.save()
            # Mirror the new order into the CartoviewApp registry entry.
            cartoview_app = CartoviewApp.objects.get(app.name)
            if cartoview_app:
                cartoview_app.order = app.order
                cartoview_app.commit()
            # Flush the registry once, after the last app has been updated.
            if i == (len(ids_list) - 1):
                CartoviewApp.save()
        self.log_throttled_access(request)
        return self.create_response(request, {'success': True})
class AppTypeResource(ModelResource):
    """App categories, each serialized with its full list of member apps."""
    # full=True embeds the complete app representations in the response.
    apps = fields.ToManyField(
        AppResource, attribute='apps', full=True, null=True)

    class Meta(object):
        queryset = AppType.objects.all()
class AppInstanceResource(CommonModelApi):
launch_app_url = fields.CharField(null=True, blank=True, use_in='all')
edit_url = fields.CharField(null=True, blank=True)
app = fields.ForeignKey(AppResource, 'app', full=True, null=True)
map = fields.ForeignKey(MapResource, 'related_map', full=True, null=True)
owner = fields.ForeignKey(
ProfileResource, 'owner', full=True, null=True, blank=True)
keywords = fields.ListField(null=True, blank=True)
class Meta(CommonMetaApi):
    # NOTE(review): this binds the *same* dict object as
    # CommonMetaApi.filtering, so the update() below also mutates the parent
    # class's filtering used by other resources -- confirm this sharing is
    # intentional (a copy via dict(CommonMetaApi.filtering) would isolate it).
    filtering = CommonMetaApi.filtering
    always_return_data = True
    filtering.update({'app': ALL_WITH_RELATIONS, 'featured': ALL})
    queryset = AppInstance.objects.distinct().order_by('-date')
    # Hide unpublished instances when site-wide resource publishing is on.
    if settings.RESOURCE_PUBLISHING:
        queryset = queryset.filter(is_published=True)
    resource_name = 'appinstances'
    allowed_methods = ['get', 'post', 'put']
    excludes = ['csw_anytext', 'metadata_xml']
    authorization = GeoNodeAuthorization()
def get_object_list(self, request):
    """Return app instances, hiding those whose app is installed but inactive."""
    inactive_app_ids = [
        app.id for app in App.objects.all()
        if app.config and not app.config.active
    ]
    hidden_instance_ids = [
        instance.id
        for instance in AppInstance.objects.filter(
            app__id__in=inactive_app_ids)
    ]
    base_queryset = super(AppInstanceResource, self).get_object_list(request)
    return base_queryset.exclude(id__in=hidden_instance_ids)
def format_objects(self, objects):
    """Serialize *objects* to plain dicts, adding owner and app-URL fields."""
    # hack needed because dehydrate does not seem to work in CommonModelApi
    formatted = []
    for obj in objects:
        record = model_to_dict(obj, fields=self.VALUES)
        owner = obj.owner
        record['owner__username'] = owner.username
        record['owner_name'] = owner.get_full_name() or owner.username
        if obj.app is not None:
            record['launch_app_url'] = reverse(
                "%s.view" % obj.app.name, args=[obj.pk])
            record['edit_url'] = reverse(
                "%s.edit" % obj.app.name, args=[obj.pk])
        formatted.append(record)
    return formatted
def dehydrate_owner(self, bundle):
    # Serialize the owner as a plain username instead of a nested profile.
    return bundle.obj.owner.username
def dehydrate_config(self, bundle):
    """Return the instance's config as a parsed dict.

    The stored value is a JSON string. Previously the empty case returned
    ``json.dumps({})`` -- the literal string "{}" -- while the non-empty
    case returned a parsed dict, so API consumers saw two different types.
    Both branches now yield a dict.
    """
    if bundle.obj.config:
        return json.loads(bundle.obj.config)
    return {}
def dehydrate_launch_app_url(self, bundle):
    """URL that opens this instance in its app's viewer, or None if app-less."""
    app = bundle.obj.app
    if app is None:
        return None
    return reverse("%s.view" % app.name, args=[bundle.obj.pk])
def dehydrate_edit_url(self, bundle):
    """Edit URL for the instance; only exposed to its owner."""
    obj = bundle.obj
    if obj.owner != bundle.request.user:
        return None
    if obj.app is None:
        return None
    return reverse("%s.edit" % obj.app.name, args=[obj.pk])
def hydrate_owner(self, bundle):
    # The payload carries a bare username; resolve it to a Profile object,
    # creating the profile when the username is not known yet.
    owner, created = Profile.objects.get_or_create(
        username=bundle.data['owner'])
    bundle.data['owner'] = owner
    return bundle
def dehydrate_keywords(self, bundle):
    # Expose the taggit keywords as a plain list of strings.
    return bundle.obj.keyword_list()
def obj_create(self, bundle, **kwargs):
    """
    An ORM-specific implementation of ``obj_create``.

    Builds a new AppInstance owned by the requesting user, resolves the
    target app from the ``appName`` field of the payload, applies any extra
    kwargs as attributes, hydrates the remaining fields and saves.
    """
    bundle.obj = AppInstance()
    bundle.obj.owner = bundle.request.user
    app_name = bundle.data['appName']
    bundle.obj.app = App.objects.get(name=app_name)
    # Extra routing kwargs (if any) are applied directly to the new object.
    for key, value in list(kwargs.items()):
        setattr(bundle.obj, key, value)
    bundle = self.full_hydrate(bundle)
    return self.save(bundle)
def get_search(self, request, **kwargs):
    """Haystack-backed search endpoint for app instances.

    Applies permission/visibility filtering (unless SKIP_PERMS_FILTER is
    set), facets the results, paginates them according to the ``limit``
    and ``offset`` query parameters and returns a tastypie-style payload.

    Fixes over the previous version:
    - ``int(request.GET.get('limit'), 0)`` raised TypeError whenever
      ``limit`` was absent (explicit base with a non-string argument);
    - the page number used true division (``__future__ division`` is in
      effect), producing a float;
    - ``int(getattr(request.GET, 'offset', 0))`` always evaluated to 0
      because QueryDict keys are not attributes;
    - ``map(...)`` in the payload is not JSON-serializable on Python 3.
    """
    self.method_check(request, allowed=['get'])
    self.is_authenticated(request)
    self.throttle_check(request)
    # Get the list of objects that matches the filter
    sqs = self.build_haystack_filters(request.GET)
    if not settings.SKIP_PERMS_FILTER:
        filter_set = get_objects_for_user(request.user,
                                          'base.view_resourcebase')
        filter_set = get_visible_resources(
            filter_set,
            request.user if request else None,
            admin_approval_required=settings.ADMIN_MODERATE_UPLOADS,
            unpublished_not_visible=settings.RESOURCE_PUBLISHING,
            private_groups_not_visibile=settings.GROUP_PRIVATE_RESOURCES)
        filter_set_ids = filter_set.values_list('id')
        # Do the query using the filterset and the query term. Facet the
        # results
        if len(filter_set) > 0:
            sqs = sqs.filter(id__in=filter_set_ids).facet('type').facet(
                'owner').facet('keywords').facet('regions') \
                .facet('category')
        else:
            sqs = None
    else:
        sqs = sqs.facet('type').facet('owner').facet('keywords').facet(
            'regions').facet('category')
    # Parse pagination params once; fall back to the configured page size
    # when the client sends no (or a zero) limit.
    limit = int(request.GET.get('limit') or 0) or settings.CLIENT_RESULTS_LIMIT
    offset = int(request.GET.get('offset') or 0)
    if sqs:
        # Build the Facet dict
        facets = {}
        for facet in sqs.facet_counts()['fields']:
            facets[facet] = {}
            for item in sqs.facet_counts()['fields'][facet]:
                facets[facet][item[0]] = item[1]
        # Paginate the results (floor division keeps the page number an int).
        paginator = Paginator(sqs, limit)
        try:
            page = paginator.page(offset // limit + 1)
        except InvalidPage:
            raise Http404("Sorry, no results on that page.")
        previous_page = page.previous_page_number() if page.has_previous() else 1
        next_page = page.next_page_number() if page.has_next() else 1
        total_count = sqs.count()
        objects = page.object_list
    else:
        next_page = 0
        previous_page = 0
        total_count = 0
        facets = {}
        objects = []
    object_list = {
        "meta": {
            "limit": settings.CLIENT_RESULTS_LIMIT,
            "next": next_page,
            "offset": offset,
            "previous": previous_page,
            "total_count": total_count,
            "facets": facets,
        },
        # Materialize to a list so the payload is JSON-serializable on py3.
        "objects": [self.get_haystack_api_fields(x) for x in objects],
    }
    self.log_throttled_access(request)
    return self.create_response(request, object_list)
def get_haystack_api_fields(self, haystack_object):
    """Stored haystack fields minus the internal *_exact / *_sortable copies."""
    stored_fields = haystack_object.get_stored_fields()
    return {
        name: value
        for name, value in stored_fields.items()
        if not re.search('_exact$|_sortable$', name)
    }
def prepend_urls(self):
    # Expose the /search endpoint only when haystack search is enabled.
    if settings.HAYSTACK_SEARCH:
        return [
            url(r"^(?P<resource_name>%s)/search%s$" %
                (self._meta.resource_name, trailing_slash()),
                self.wrap_view('get_search'),
                name="api_get_search"),
        ]
    else:
        return []
def build_haystack_filters(self, parameters):
from haystack.inputs import Raw
from haystack.query import SearchQuerySet, SQ # noqa
sqs = None
# Retrieve Query Params
# Text search
query = parameters.get('q', None)
# Types and subtypes to filter (map, layer, vector, etc)
type_facets = parameters.getlist("type__in", [])
# If coming from explore page, | |
inputfile1
)
]
],
err_file,
'a'
)
# convert time matrix to dictionary (both time matrices should be identical here)
tdict = matrix2hdict(time1)
tkey = list(tdict.keys())[0]
# convert data matrices to dictionaries
hdict1 = matrix2hdict(mat1)
hdict2 = matrix2hdict(mat2)
# Dictionaries of absolute and relative differences
abs_diffs = {}
rel_diffs = {}
for key in horder:
abs_diffs[key] = list(map(abs_diff, hdict1[key], hdict2[key]))
rel_diffs[key] = list(map(rel_diff, hdict1[key], hdict2[key]))
err_dict = {}
for key in horder:
err_dict[key] = {}
(abs_thresh, rel_thresh) = thresh_dict.lookup(key)
max_abs_diff = max(abs_diffs[key])
index_max_abs_diff = abs_diffs[key].index(max_abs_diff)
err_dict[key]['abs_thresh'] = abs_thresh
err_dict[key]['max_abs_diff'] = max_abs_diff
err_dict[key]['rel_diff_of_max_abs_diff'] = rel_diffs[key][index_max_abs_diff]
err_dict[key]['time_of_max_abs_diff'] = tdict[tkey][index_max_abs_diff]
err_dict[key]['count_of_small_abs_diff'] = sum(1 for x in abs_diffs[key] if 0.0 < x <= abs_thresh)
err_dict[key]['count_of_big_abs_diff'] = sum(1 for x in abs_diffs[key] if x > abs_thresh)
max_rel_diff = max(rel_diffs[key])
index_max_rel_diff = rel_diffs[key].index(max_rel_diff)
err_dict[key]['rel_thresh'] = rel_thresh
err_dict[key]['max_rel_diff'] = max_rel_diff
err_dict[key]['abs_diff_of_max_rel_diff'] = abs_diffs[key][index_max_rel_diff]
err_dict[key]['time_of_max_rel_diff'] = tdict[tkey][index_max_rel_diff]
if rel_thresh > 0:
err_dict[key]['count_of_small_rel_diff'] = sum(1 for x in rel_diffs[key] if 0.0 < x <= rel_thresh)
err_dict[key]['count_of_big_rel_diff'] = sum(1 for x in rel_diffs[key] if x > rel_thresh)
else:
err_dict[key]['count_of_small_rel_diff'] = 0
err_dict[key]['count_of_big_rel_diff'] = 0
if rel_thresh > 0:
err_dict[key]['count_of_small_abs_rel_diff'] = sum(
1 for x, y in zip(abs_diffs[key], rel_diffs[key]) if 0 < x <= abs_thresh or 0 < y <= rel_thresh
)
err_dict[key]['count_of_big_abs_rel_diff'] = sum(
1 for x, y in zip(abs_diffs[key], rel_diffs[key]) if x > abs_thresh and y > rel_thresh
)
else:
err_dict[key]['count_of_small_abs_rel_diff'] = err_dict[key]['count_of_small_abs_diff']
err_dict[key]['count_of_big_abs_rel_diff'] = err_dict[key]['count_of_big_abs_diff']
num_small = sum(err_dict[key]['count_of_small_abs_rel_diff'] for key in horder)
num_big = sum(err_dict[key]['count_of_big_abs_rel_diff'] for key in horder)
diff_type = 'All Equal'
if num_big > 0:
diff_type = 'Big Diffs'
elif num_small > 0:
diff_type = 'Small Diffs'
num_records = len(tdict[tkey])
input_file_path_tokens = inputfile1.split(os.sep)
# if it's the first pass, create the file with the header;
# also the null-pointer-ish check allows skipping the summary_csv file if the filename is blank
if summary_csv:
if not os.path.isfile(summary_csv):
with open(summary_csv, 'w') as f:
f.write("CaseName,FileName,Status,#Records\n")
with open(summary_csv, 'a') as f:
f.write(
"%s,%s,%s,%s records compared\n" % (
input_file_path_tokens[-2], input_file_path_tokens[-1], diff_type, num_records
)
)
# We are done
if diff_type == 'All Equal':
return diff_type, num_records, num_big, num_small
# Which columns had diffs?
dhorder = [h for h in horder if
err_dict[h]['count_of_small_abs_diff'] > 0 or err_dict[h]['count_of_big_abs_diff'] > 0 or err_dict[h][
'count_of_small_rel_diff'] > 0 or err_dict[h]['count_of_big_rel_diff'] > 0]
# Find the largest overall absolute diff
max_max_abs_diff = max(err_dict[key]['max_abs_diff'] for key in dhorder)
key_of_max_max_abs_diff = [key for key in dhorder if err_dict[key]['max_abs_diff'] == max_max_abs_diff][0]
rel_diff_of_max_max_abs_diff = err_dict[key_of_max_max_abs_diff]['rel_diff_of_max_abs_diff']
time_of_max_max_abs_diff = err_dict[key_of_max_max_abs_diff]['time_of_max_abs_diff']
# Find the largest overall relative diff
max_max_rel_diff = max(err_dict[key]['max_rel_diff'] for key in dhorder)
key_of_max_max_rel_diff = [key for key in dhorder if err_dict[key]['max_rel_diff'] == max_max_rel_diff][0]
abs_diff_of_max_max_rel_diff = err_dict[key_of_max_max_rel_diff]['abs_diff_of_max_rel_diff']
time_of_max_max_rel_diff = err_dict[key_of_max_max_rel_diff]['time_of_max_rel_diff']
# put the time column back
abs_diffs[tkey] = tdict[tkey]
rel_diffs[tkey] = tdict[tkey]
# Summarize the input files
summary_dict1 = make_summary_dict(tdict, hdict1)
summary_dict2 = make_summary_dict(tdict, hdict2)
# Flatten summaries out to dictionaries of lists rather than dictionaries of dictionaries
summary_dict12 = dict_of_dicts2dict_of_lists(summary_dict1, horder, list(summary_labels))
summary_dict12[tkey] = [sl + ':' for sl in list(summary_labels)]
summary_dict22 = dict_of_dicts2dict_of_lists(summary_dict2, horder, list(summary_labels))
summary_dict22[tkey] = [sl + ':' for sl in list(summary_labels)]
# Diff the flattend summaries
abs_diff_summary_dict = {}
rel_diff_summary_dict = {}
for key in dhorder:
abs_diff_summary_dict[key] = map(abs_diff, summary_dict12[key], summary_dict22[key])
rel_diff_summary_dict[key] = map(rel_diff, summary_dict12[key], summary_dict22[key])
# Prepend time key to header order list
thorder = [tkey] + horder
tdhorder = [tkey] + dhorder
# Convert the absolute and relative diff dictionaries to matrices and write them to files
abs_diff_mat = hdict2matrix(tdhorder, abs_diffs)
# print("Trying to write to %s " % abs_diff_file)
mycsv.writecsv(abs_diff_mat, abs_diff_file)
rel_diff_mat = hdict2matrix(tdhorder, rel_diffs)
mycsv.writecsv(rel_diff_mat, rel_diff_file)
# Write the error file header
mycsv.writecsv(
[
[],
[
'Max absolute diff: %s, field: %s, time: %s, relative: %s' % (
str(max_max_abs_diff),
str(key_of_max_max_abs_diff),
str(time_of_max_max_abs_diff),
str(rel_diff_of_max_max_abs_diff))
]
], err_file, 'a'
)
mycsv.writecsv(
[
[],
[
'Max relative diff: %s, field: %s, time: %s, absolute: %s' % (
str(max_max_rel_diff),
str(key_of_max_max_rel_diff),
str(time_of_max_max_rel_diff),
str(abs_diff_of_max_max_rel_diff))
]
], err_file, 'a'
)
# Convert the error dictionary to a matrix and write to the error
# file. Need to convert it from a nested dictionary to a
# dictionary of lists first.
err_dict2 = dict_of_dicts2dict_of_lists(err_dict, horder, list(error_labels))
err_dict2[tkey] = [el + ':' for el in list(error_labels)]
err_mat = hdict2matrix(tdhorder, err_dict2)
mycsv.writecsv([[], []] + err_mat, err_file, 'a')
# Convert the summaries to matrices and write them out to the error file
summary_mat1 = hdict2matrix(thorder, summary_dict12)
mycsv.writecsv([[], [], ['Summary of %s' % (inputfile1,)], []] + summary_mat1, err_file, 'a')
summary_mat2 = hdict2matrix(thorder, summary_dict22)
mycsv.writecsv([[], [], ['Summary of %s' % (inputfile2,)], []] + summary_mat2, err_file, 'a')
# Convert the absolute and relative differences of the summaries and write them to the error file
abs_diff_summary_dict[tkey] = [sl + ':' for sl in list(summary_labels)]
abs_diff_summary_mat = hdict2matrix(tdhorder, abs_diff_summary_dict)
mycsv.writecsv([[], [], ['Absolute difference in Summary of %s and Summary of %s' % (inputfile1, inputfile2)],
[]] + abs_diff_summary_mat, err_file, 'a')
rel_diff_summary_dict[tkey] = [sl + ':' for sl in list(summary_labels)]
rel_diff_summary_mat = hdict2matrix(tdhorder, rel_diff_summary_dict)
mycsv.writecsv([[], [], ['Relative difference in Summary of %s and Summary of %s' % (inputfile1, inputfile2)],
[]] + rel_diff_summary_mat, err_file, 'a')
return diff_type, num_records, num_big, num_small
def main(argv=None): # pragma: no cover
    """Command-line entry point for the csv math-diff tool.

    Expects six positional operands: the two input csv files followed by
    the absolute-diff, relative-diff, error, and summary output files.
    Returns 0 on success and -1 on any usage error.
    """
    if argv is None:
        argv = sys.argv
    try:
        opts, args = getopt.getopt(argv[1:], "ho:v", ["help", "output="])
    except getopt.error as msg:
        info(sys.argv[0].split("/")[-1] + ": " + str(msg) + "\n\t for help use --help")
        return -1
    prog_name = os.path.basename(sys.argv[0])
    # Exactly six operands are required; bail out otherwise.
    if len(args) != 6:
        info('%s: incorrect operands: Try %s -h for more info' % (prog_name, prog_name))
        return -1
    csv1, csv2, abs_diff_file, rel_diff_file, err_file, csvsummary = args
    # Inputs must be plain .csv files; the generated Map/Table/Screen
    # variants are rejected.
    for infile in (csv1, csv2):
        if (not infile.endswith('.csv') or infile.endswith('Map.csv')
                or infile.endswith('Table.csv') or infile.endswith('Screen.csv')):
            info('%s: input file <%s> with improper extension' % (prog_name, infile))
            return -1
    # Load diffing threshold dictionary
    thresh_dict = ThreshDict(os.path.join(script_dir, 'math_diff.config'))
    math_diff(thresh_dict, csv1, csv2, abs_diff_file, rel_diff_file, err_file, csvsummary)
    return 0
# Script entry point: propagate main()'s return code as the process exit status.
if __name__ == "__main__": # pragma: no cover
    sys.exit(main())
# TODO document what happens when there is a time mismatch.
# how the program will respond when the headers of the two csv file do not match
# ------------------------------------------------------------------------------
# The csv files are in the following format:
# "time", "h2", "h3", "h4"
# "t1", 1 , 2 , 4
# "t2", 11 , 22 , 44
# "t3", 111, 222, 444
#
# In the first line
# "h2", "h3", "h4"
# are considered the headers of the columns
#
# When we compare two files, it is assumed that the headers of the two files will match.
# In case the headers do not match mathdiff.py still has to respond in an intelligent way.
#
# We have the following four possibilities:
# 1. identical headers
# file1 = "h2", "h3", "h4"
# file2 = "h2", "h3", "h4"
# this is straightforward: the program will report the outputs in the same order:
# output = "h2", "h3", "h4"
# output warning = None
# 2. shuffled headers
# file1 = "h2", "h3", "h4"
# file2 = "h3", "h4", "h2"
# the program will unshuffle the columns of file2 to match that of file1
# output = "h2", "h3", "h4"
# output warning = None
# 3. renamed headers
# file1 = "h2", "h3", "h4"
# file2 = "hh3", "hh4", "hh2"
# if both the files have the same number of columns and they don't happen to be shuffled,
# the program will assume that the headers in file2 have been renamed
# output = "h2", "h3", "h4"
# output warning = warning printed to terminal and to error.csv file
# 4. mismatched headers
# file1 = "h2", "h3", "h4", "h5", "h6"
# file2 = "h2", "h3", "h4", "h7"
# the number of columns in file1 and file2 do not match.
# The program will report on all the columns in file1 and file2
# output = "h2", "h3", "h4", "h5", "h6", "h7"
# columns "h5", "h6", "h7" will report an ERROR
#
#
#
#
# ----------------------------------------------------------------------------------
# data structure for mathdiff.py - to be read if you are planning to update | |
RegenerateCount=None, RoundRobinPacketOrdering=None, RouteMesh=None, SrcDestMesh=None, State=None, Suspend=None, TrafficItemType=None, TrafficType=None, TransmitMode=None, TransportLdpPreference=None, TransportRsvpTePreference=None, UseControlPlaneFrameSize=None, UseControlPlaneRate=None, Warnings=None):
"""Finds and retrieves trafficItem resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve trafficItem resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all trafficItem resources from the server.
Args
----
- AllowSelfDestined (bool): If true, this helps to send traffic from routes on an Ixia port to other routes on the same Ixia port.
- BiDirectional (bool): If true, this enables traffic to be sent in forward and reverse destination.
- EgressEnabled (bool): Enables the egress.
- EnableDynamicMplsLabelValues (bool): Enables the dynamic MPLS label values.
- EnableMacsecEgressOnlyAutoConfig (bool):
- Enabled (bool): If true, this enables the selected traffic item.
- Errors (list(str)): Displays the errors.
- FlowGroupCount (number): Indicates the number of flow groups.
- FrerDuplicateElimination (bool):
- HasOpenFlow (bool): Indicates whether or not this trafficItem has openflow.
- HostsPerNetwork (number): The number of emulated hosts for the traffic stream.
- InterAsBgpPreference (str(one | two)): This attribute is deprecated. Use labelPreferences attribute instead.
- InterAsLdpPreference (str(one | two)): This attribute is deprecated. Use labelPreferences attribute instead.
- LabelPreferences (list(dict(labelCategory:str[interAsRegionLsp | interAsRegionLspClassic | ipTransportLsp | transportLspClassic | vpnTransportLsp],labelPreferenceInput:str[auto | custom | none],labelProviderList:list[str[bgpLuSr | bgpLuSrInterAs | bgpv6LuSr | isisSr | ldp | ospfSr | ospfv3Sr | rsvp | targetedLdpInterAs]]))): List of label preferences per Label Category defined as List[Label Category, Label Category input type, List of Label Providers in the preferred order]
- MaxNumberOfVpnLabelStack (number): Signifies the maximum number of VPN label stack
- MergeDestinations (bool): If true, merges the traffic flow in the destination ranges.
- MulticastForwardingMode (str(loadBalancing | replication)):
- Name (str): The name of the traffic item.
- NumVlansForMulticastReplication (number): Set the number of vlans for multicast replication
- OrdinalNo (number): Signifies the ordinal number
- OriginatorType (str(endUser | quickTest)): Indicates who created this trafficItem.
- RegenerateCount (number):
- RoundRobinPacketOrdering (bool): This option enables Round Robin Packet Ordering within endpoints across Rx ports.
- RouteMesh (str(fullMesh | oneToOne)): The traffic flow type between each pair of source route endpoint and destination route endpoint.
- SrcDestMesh (str(fullMesh | manyToMany | none | oneToOne)): Select the options to set the traffic mesh type between the Source Endpoint and Destination endpoint.
- State (str): (Read only) A read-only field which indicates the current state of the traffic item.
- Suspend (bool): Suspends all traffic on this stream.
- TrafficItemType (str(application | applicationLibrary | l2L3 | quick)): Helps to configure and edit a traffic item that is sent across Ixia ports.
- TrafficType (str(atm | avb1722 | avbRaw | ethernetVlan | fc | fcoe | frameRelay | hdlc | ipv4 | ipv4ApplicationTraffic | ipv6 | ipv6ApplicationTraffic | ppp | raw)): Helps to select the type of traffic endpoint to be configured.
- TransmitMode (str(interleaved | sequential)): The transmit mode for this traffic item
- TransportLdpPreference (str(one | two)): This attribute is deprecated. Use labelPreferences attribute instead.
- TransportRsvpTePreference (str(one | two)): This attribute is deprecated. Use labelPreferences attribute instead.
- UseControlPlaneFrameSize (bool):
- UseControlPlaneRate (bool):
- Warnings (list(str)): Displays the warnings.
Returns
-------
- self: This instance with matching trafficItem resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
    """Retrieves a single instance of trafficItem data from the server.

    Args
    ----
    - href (str): An href to the instance to be retrieved

    Returns
    -------
    - self: This instance with the trafficItem resources from the server available through an iterator or index

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Delegate the fetch to the base-class REST helper.
    resource = self._read(href)
    return resource
def ConvertToRaw(self):
    """Executes the convertToRaw operation on the server.

    Converts a non-raw traffic item to a raw traffic item.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # The operation takes only this item's href as its single argument.
    return self._execute('convertToRaw', payload={'Arg1': self.href}, response_object=None)
def Duplicate(self, *args, **kwargs):
    """Executes the duplicate operation on the server.

    Duplicates a specific traffic item.

    duplicate(Arg2=number)
    ----------------------
    - Arg2 (number): The number of times to duplicate the traffic item.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self.href}
    # Positional arguments map onto Arg2, Arg3, ... in order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword arguments are passed through under their own names.
    payload.update(kwargs)
    return self._execute('duplicate', payload=payload, response_object=None)
def DuplicateItems(self):
    """Executes the duplicateItems operation on the server.

    Duplicates a list of traffic items.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Note: Arg1 is this object itself (a list-style operation), not self.href.
    return self._execute('duplicateItems', payload={'Arg1': self}, response_object=None)
def Generate(self):
    """Executes the generate operation on the server.

    Generate traffic for specific traffic items.

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Arg1 is this object itself (list-style operation).
    return self._execute('generate', payload={'Arg1': self}, response_object=None)
def PauseStatelessTraffic(self, *args, **kwargs):
    """Executes the pauseStatelessTraffic operation on the server.

    Pause or Resume stateless traffic.

    pauseStatelessTraffic(Arg2=bool)
    --------------------------------
    - Arg2 (bool): If true, it will pause running traffic. If false, it will resume previously paused traffic.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    # Positional arguments map onto Arg2, Arg3, ... in order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword arguments are passed through under their own names.
    payload.update(kwargs)
    return self._execute('pauseStatelessTraffic', payload=payload, response_object=None)
def ResolveAptixiaEndpoints(self):
    """Executes the resolveAptixiaEndpoints operation on the server.

    Resolves /vport/protocolStack/. endpoints being used by a specific traffic item.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Arg1 is this object itself (list-style operation).
    return self._execute('resolveAptixiaEndpoints', payload={'Arg1': self}, response_object=None)
def StartDefaultLearning(self):
    """Executes the startDefaultLearning operation on the server.

    Starts default learning for a list of traffic items.

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Arg1 is this object itself (list-style operation).
    return self._execute('startDefaultLearning', payload={'Arg1': self}, response_object=None)
def StartLearning(self, *args, **kwargs):
"""Executes the startLearning operation on the server.
Sends learning frames.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
startLearning(Arg2=number, Arg3=number, Arg4=number)
----------------------------------------------------
- Arg2 (number): The framesize of the learning frame.
- Arg3 (number): The framecount of the learning frames.
- Arg4 (number): The frames per second of the learning frames.
startLearning(Arg2=number, Arg3=number, Arg4=number, Arg5=bool, Arg6=bool, Arg7=bool)
-------------------------------------------------------------------------------------
- Arg2 (number): The framesize of the learning frame.
- Arg3 (number): The framecount of the learning frames.
- Arg4 (number): The frames per second of the learning frames.
- Arg5 (bool): Send gratuitous ARP frames.
- Arg6 (bool): Send MAC frames.
- Arg7 (bool): Send Fast Path frames.
startLearning(Arg2=number, Arg3=number, Arg4=number, Arg5=bool, Arg6=bool, Arg7=bool, Arg8=bool)
------------------------------------------------------------------------------------------------
- Arg2 (number): The framesize of the learning frame.
- Arg3 (number): The framecount of the learning frames.
- Arg4 (number): The frames per second of the learning frames.
- Arg5 (bool): Send gratuitous ARP frames.
- Arg6 (bool): Send MAC frames.
- | |
# -*- coding: utf-8 -*-
from PyQt4 import QtTest
from acq4.devices.OptomechDevice import OptomechDevice
from .FilterWheelTaskTemplate import Ui_Form
from acq4.devices.Microscope import Microscope
from acq4.util.SequenceRunner import SequenceRunner
from acq4.devices.Device import *
from acq4.devices.Device import TaskGui
from acq4.util.Mutex import Mutex
from acq4.util.Thread import Thread
import acq4.util.debug as debug
import acq4.pyqtgraph as pg
import time
from collections import OrderedDict
# Changes:
# signal signatures changed
# Filter is just object, not OptoMech
class FilterWheel(Device, OptomechDevice):
    """Optical filter wheel device for swapping FilterSet devices.

    The Filter wheel device class adds control and display for a filter wheel that selects between
    many filters or filter sets. Filters must be defined in the configuration prior to the
    FilterWheel; see FilterSet for more information.

    * Maintains list of the filters in the wheel positions with their description
    * Support for selecting a filter wheel position
    * Support for filter wheel implementation during task : specific filter wheel position during one task, different positions as task sequence

    Configuration examples:

    FilterWheel:
        driver: 'FilterWheel'
        parentDevice: 'Microscope'
        slots:
            # These are the names of FilterSet devices that have been defined elsewhere
            0: "DIC_FilterCube"
            1: "EGFP_FilterCube"
            2: "EYFP_FilterCube"
    """

    sigFilterChanged = QtCore.Signal(object, object)  # self, Filter
    sigFilterWheelSpeedChanged = QtCore.Signal(object, object)  # self, speed

    def __init__(self, dm, config, name):
        Device.__init__(self, dm, config, name)

        # Recursive mutex guarding access to filter bookkeeping and futures.
        self.lock = Mutex(QtCore.QMutex.Recursive)

        self._filters = OrderedDict()        # slot index -> FilterSet device (or None)
        self._slotNames = OrderedDict()      # slot index -> display name
        self._slotIndicators = OrderedDict() # slot index -> (keypad device, key)

        nPos = self.getPositionCount()
        ports = config.get('ports', None)
        for k in range(nPos):  ## Set value for each filter
            slot = config['slots'].get(str(k))
            if slot is None:
                self._filters[k] = None
                self._slotNames[k] = "empty"
                continue
            if isinstance(slot, str):
                # We are only naming this slot; no actual filter device is defined here
                self._filters[k] = None
                self._slotNames[k] = slot
            elif isinstance(slot, dict):
                filtname = slot['device']
                filt = dm.getDevice(filtname)
                self._filters[k] = filt
                self._slotNames[k] = slot.get('name', filt.name())
                # All filters in the wheel must expose the same optical ports.
                devports = filt.ports()
                if ports is None:
                    ports = devports
                elif set(ports) != set(devports):
                    raise Exception("FilterSet %r does not have the expected ports (%r vs %r)" % (filt, devports, ports))
            else:
                raise TypeError("Slot definition must be str or dict; got: %r" % slot)

            # NOTE(review): when *slot* is a str this is a substring test, not a
            # dict-key test -- presumably hotkeys are only meant for dict-style
            # slot definitions; confirm.
            if 'hotkey' in slot:
                dev = dm.getDevice(slot['hotkey']['device'])
                key = slot['hotkey']['key']
                dev.addKeyCallback(key, self._hotkeyPressed, (k,))
                self._slotIndicators[k] = (dev, key)
                # todo: connect to key

        config['ports'] = ports
        OptomechDevice.__init__(self, dm, config, name)

        self._lastFuture = None    # most recent FilterWheelFuture, cancelled on new moves
        self._lastPosition = None  # last observed position, for change detection

        # polling thread just checks position regularly; this causes sigFilterChanged to be emitted
        # whenever a change is detected
        pollInterval = config.get('pollInterval', 0.1)
        if pollInterval is not None:
            self.fwThread = FilterWheelPollThread(self, interval=pollInterval)
            self.fwThread.start()

        dm.sigAbortAll.connect(self.stop)

        if 'initialSlot' in config:
            self.setPosition(config['initialSlot'])

    def listFilters(self):
        """Return a dict of available filters.
        """
        # BUG FIX: previously used self.filterWheelLock, which is never
        # defined anywhere; __init__ creates the mutex as self.lock.
        with self.lock:
            return self._filters.copy()

    def slotNames(self):
        """Return a dict of names for each slot in the wheel.
        """
        return self._slotNames.copy()

    def getFilter(self, position=None):
        """Return the Filter at *position*.

        If *position* is None, then return the currently active Filter."""
        if position is None:
            position = self.getPosition()
        return self._filters[position]

    def getPositionCount(self):
        """Return the number of filter positions.

        The number returned indicates all available positions, regardless of
        the presence or absence of a filter in each position.
        """
        raise NotImplementedError("Method must be implemented in subclass")

    def setPosition(self, pos):
        """Set the filter wheel position and return a FilterWheelFuture instance
        that can be used to wait for the move to complete.
        """
        with self.lock:
            # Cancel any move still in flight before starting a new one.
            fut = self._lastFuture
            if fut is not None and not fut.isDone():
                fut.cancel()
            self._lastFuture = self._setPosition(pos)
            return self._lastFuture

    def _setPosition(self, pos):
        """Must be implemented in subclass to request device movement and
        return a FilterWheelFuture.

        Example::

            def _setPosition(self, pos):
                self.device.setPosition(pos)  # actually ask device to move
                return FilterWheelFuture(self, pos)
        """
        raise NotImplementedError("Method must be implemented in subclass")

    def _hotkeyPressed(self, dev, changes, pos):
        # Keypad callback registered in __init__; jump to the bound slot.
        self.setPosition(pos)

    def getPosition(self):
        """Return the current position of the filter wheel.
        """
        pos = self._getPosition()
        # Emit change notifications only when the position actually changed.
        if pos != self._lastPosition:
            self._lastPosition = pos
            self._positionChanged(pos)
        return pos

    def _getPosition(self):
        raise NotImplementedError("Method must be implemented in subclass")

    def _positionChanged(self, pos):
        # Update the active subdevice, notify listeners, and light up the
        # keypad indicator for the active slot.
        filt = self.getFilter(pos)
        self.setCurrentSubdevice(filt)
        self.sigFilterChanged.emit(self, filt)
        for k, indicator in self._slotIndicators.items():
            dev, key = indicator
            if k == pos:
                dev.setBacklight(key, blue=1, red=1)
            else:
                dev.setBacklight(key, blue=0, red=0)

    def isMoving(self):
        """Return whether the filter wheel is currently moving.
        """
        raise NotImplementedError("Method must be implemented in subclass")

    def stop(self):
        """Immediately stop the filter wheel.
        """
        self._stop()
        with self.lock:
            fut = self._lastFuture
            if fut is not None:
                fut.cancel()

    def _stop(self):
        raise NotImplementedError("Method must be implemented in subclass")

    def setSpeed(self, speed):
        raise NotImplementedError("Method must be implemented in subclass")

    def getSpeed(self):
        raise NotImplementedError("Method must be implemented in subclass")

    def speedChanged(self, speed):
        """Subclasses should call this method when the filterwheel speed has changed.
        """
        # BUG FIX: emit the signal declared on this class; there is no
        # sigSpeedChanged attribute (the signal is sigFilterWheelSpeedChanged).
        self.sigFilterWheelSpeedChanged.emit(self, speed)

    def createTask(self, cmd, parentTask):
        return FilterWheelTask(self, cmd, parentTask)

    def taskInterface(self, taskRunner):
        return FilterWheelTaskGui(self, taskRunner)

    def deviceInterface(self, win):
        return FilterWheelDevGui(self)
class FilterWheelFuture(object):
    """Tracks an in-progress filter wheel move and reports completion or errors.

    Args:
        dev: the FilterWheel device executing the move.
        position: the target slot index.
    """

    def __init__(self, dev, position):
        self.dev = dev
        self.position = position
        self._wasInterrupted = False
        self._done = False
        self._error = None

    def wasInterrupted(self):
        """Return True if the move was interrupted before completing.
        """
        return self._wasInterrupted

    def cancel(self):
        """Mark this move as interrupted; has no effect if it already finished."""
        if self.isDone():
            return
        self._wasInterrupted = True
        self._error = "Filter change was cancelled"

    def _atTarget(self):
        # True when the wheel currently reports the requested position.
        return self.dev.getPosition() == self.position

    def isDone(self):
        """Return True if the move has completed or was interrupted.
        """
        if self._wasInterrupted or self._done:
            return True
        if self.dev.isMoving():
            return False

        # Wheel stopped: either we reached the target or the move failed.
        if self._atTarget():
            self._done = True
            return True
        else:
            self._wasInterrupted = True
            self._error = "Filter wheel did not reach target"
            return True

    def errorMessage(self):
        """Return a string description of the reason for a move failure,
        or None if there was no failure (or if the reason is unknown).
        """
        return self._error

    def wait(self, timeout=None, updates=False):
        """Block until the move has completed, has been interrupted, or the
        specified timeout has elapsed.

        If *updates* is True, process Qt events while waiting.
        If the move did not complete, raise an exception.
        """
        # BUG FIX: previously called ptime.time(), but ptime is never imported
        # in this module (NameError at runtime); use the stdlib time module.
        start = time.time()
        while (timeout is None) or (time.time() < start + timeout):
            if self.isDone():
                break
            if updates is True:
                QtTest.QTest.qWait(100)
            else:
                time.sleep(0.1)

        if not self.isDone():
            err = self.errorMessage()
            if err is None:
                raise RuntimeError("Timeout waiting for filter wheel change")
            else:
                raise RuntimeError("Move did not complete: %s" % err)
class FilterWheelTask(DeviceTask):
    """Task that moves the filter wheel to a configured position before a
    task run starts; the move itself is the only work this task performs.
    """

    def __init__(self, dev, cmd, parentTask):
        DeviceTask.__init__(self, dev, cmd, parentTask)
        self.dev = dev
        self.cmd = cmd
        self.parentTask = parentTask

    def configure(self):
        # BUG FIX: FilterWheel creates its mutex as self.lock (there is no
        # filterWheelLock attribute) and exposes the current position via
        # getPosition() (currentFWPosition is never assigned anywhere).
        with self.dev.lock:
            # take the first character of string and convert it to int
            requiredPos = int(self.cmd['filterWheelPosition'][0])
            if self.dev.getPosition() != requiredPos:
                self.dev.setPosition(requiredPos)

    def start(self):
        pass

    def stop(self, abort):
        pass

    def isDone(self):
        # The move is requested synchronously in configure(); nothing to wait on.
        return True
class FilterWheelTaskGui(TaskGui):
    """Task interface allowing a single filter position, or a sequence of
    positions, to be selected for task runs.
    """

    def __init__(self, dev, taskRunner):
        TaskGui.__init__(self, dev, taskRunner)

        self.ui = Ui_Form()
        self.ui.setupUi(self)
        self.dev = dev

        filters = self.dev.listFilters()
        filterList = self.generatFilterList(filters)
        for i in range(len(filterList)):
            self.ui.filterCombo.addItem('%s' % filterList[i][1])

        self.ui.sequenceCombo.addItem('off')
        self.ui.sequenceCombo.addItem('list')
        self.ui.sequenceListEdit.hide()

        self.ui.sequenceCombo.currentIndexChanged.connect(self.sequenceChanged)
        self.ui.sequenceListEdit.editingFinished.connect(self.sequenceChanged)

        ## Create state group for saving/restoring state
        self.stateGroup = pg.WidgetGroup([
            (self.ui.filterCombo,),
            (self.ui.sequenceCombo,),
            (self.ui.sequenceListEdit,),
        ])

    def generateTask(self, params=None):
        """Build the task command dict; *params* selects a sequence entry."""
        state = self.stateGroup.state()
        if params is None or 'filterWheelPosition' not in params:
            target = state['filterCombo']
        else:
            target = self.filterTaskList[params['filterWheelPosition']]
        task = {}
        task['recordState'] = True
        task['filterWheelPosition'] = target  # state['filterCombo']
        return task

    def saveState(self, saveItems=False):
        state = self.stateGroup.state()
        return state

    def restoreState(self, state):
        self.stateGroup.setState(state)
        # The sequence editor is only visible when sequencing is enabled.
        self.ui.sequenceListEdit.setVisible(state['sequenceCombo'] != 'off')
        self.sequenceChanged()

    def storeConfiguration(self):
        state = self.saveState(saveItems=True)
        self.dev.writeConfigFile(state, 'lastConfig')

    def loadConfiguration(self):
        state = self.dev.readConfigFile('lastConfig')
        self.restoreState(state)

    def listSequence(self):
        """Return the sequence parameter space ('list' mode) or [] ('off')."""
        if self.ui.sequenceCombo.currentIndex() == 1:
            filt = self.getFilterList()
            return OrderedDict([('filterWheelPosition', filt)])
        else:
            return []

    def sequenceChanged(self):
        self.filterTaskList = None
        self.sigSequenceChanged.emit(self.dev.name())
        if self.ui.sequenceCombo.currentIndex() == 1:
            self.ui.sequenceListEdit.show()
        else:
            self.ui.sequenceListEdit.hide()

    def getFilterList(self):
        """Parse the comma-separated, 1-based position list from the sequence
        editor and return the matching entries from self.filterList.
        """
        self.filterTaskList = []
        text = self.ui.sequenceListEdit.text()
        if text == '':
            return self.filterTaskList
        # BUG FIX: map() returns a lazy iterator on Python 3, so the previous
        # len(pos)/pos[i] usage raised TypeError; build a real list instead.
        positions = [int(p) for p in text.split(',')]
        for p in positions:
            self.filterTaskList.append(self.filterList[p - 1])
        return self.filterTaskList

    def generatFilterList(self, filt):
        """Build [[1-based position, filter name], ...] from the dict returned
        by FilterWheel.listFilters().

        NOTE(review): assumes every slot holds a filter device; an empty (None)
        slot would raise AttributeError here -- confirm against configs.
        """
        self.filterList = []
        for i in range(len(filt)):
            self.filterList.append([(i + 1), filt[i].name()])
        return self.filterList
class FilterWheelDevGui(QtGui.QWidget):
def __init__(self, dev):
QtGui.QWidget.__init__(self)
self.dev = dev
self.layout = QtGui.QGridLayout()
self.setLayout(self.layout)
self.positionBtnLayout = QtGui.QGridLayout()
self.layout.addLayout(self.positionBtnLayout, 0, 0)
self.positionBtnLayout.setContentsMargins(0, 0, 0, 0)
self.positionGroup = QtGui.QButtonGroup()
self.positionButtons = []
cols = 3
slotNames = self.dev.slotNames()
for | |
from __future__ import annotations
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from desdeo_problem.Problem import MOProblem
from desdeo_tools.interaction.request import BaseRequest, SimplePlotRequest
from desdeo_tools.scalarization.ASF import AugmentedGuessASF, MaxOfTwoASF, PointMethodASF, SimpleASF, StomASF
from desdeo_tools.scalarization.Scalarizer import Scalarizer
from desdeo_tools.solver.ScalarSolver import ScalarMethod, ScalarMinimizer
from desdeo_mcdm.interactive.InteractiveMethod import InteractiveMethod
from desdeo_mcdm.utilities.solvers import payoff_table_method
class NimbusException(Exception):
    """Raised when an error related to NIMBUS is encountered.
    """

    pass
class NimbusClassificationRequest(BaseRequest):
    """A request to handle the classification of objectives in the synchronous NIMBUS method.

    Args:
        method (NIMBUS): The instance of the NIMBUS method the request should be initialized for.
        ref (np.ndarray): Objective values used as a reference the decision maker is classifying the objectives.

    Attributes:
        self._valid_classifications (List[str]): The valid classifications. Defaults is ['<', '<=', '=', '>=', '0']
    """

    def __init__(self, method: NIMBUS, ref: np.ndarray):
        msg = (
            "Please classify each of the objective values in one of the following categories:"
            "\n\t1. values should improve '<'"
            "\n\t2. values should improve until some desired aspiration level is reached '<='"
            "\n\t3. values with an acceptable level '='"
            "\n\t4. values which may be impaired until some upper bound is reached '>='"
            "\n\t5. values which are free to change '0'"
            "\nProvide the aspiration levels and upper bounds as a vector. For categories 1, 3, and 5,"
            "the value in the vector at the objective's position is ignored. Suppy also the number of maximum"
            "solutions to be generated."
        )

        self._method = method
        self._valid_classifications = ["<", "<=", "=", ">=", "0"]

        content = {
            "message": msg,
            "objective_values": ref,
            "classifications": [None],
            "levels": [None],
            "number_of_solutions": 1,
        }

        super().__init__("classification_preference", "required", content=content)

    def validator(self, response: Dict) -> None:
        """Validates a dictionary containing the response of a decision maker. Should contain the keys
        'classifications', 'levels', and 'number_of_solutions'.

        'classifications' should be a list of strings, where the number of
        elements is equal to the number of objectives being classified, and
        the elements are found in `_valid_classifications`. 'levels' should
        have either aspiration levels or bounds for each objective depending
        on that objective's classification. 'number_of_solutions' should be
        an integer between 1 and 4 indicating the number of intermediate solutions to be
        computed.

        Args:
            response (Dict): See the documentation for `validator`.

        Raises:
            NimbusException: Some discrepancy is encountered in the parsing of the response.
        """
        # All required entries must be present before contents are checked.
        if "classifications" not in response:
            raise NimbusException("'classifications' entry missing.")
        if "levels" not in response:
            raise NimbusException("'levels' entry missing.")
        if "number_of_solutions" not in response:
            raise NimbusException("'number_of_solutions' entry missing.")

        # check the classifications
        is_valid_cls = map(lambda x: x in self._valid_classifications, response["classifications"],)
        if not all(list(is_valid_cls)):
            # BUG FIX: corrected the misspelled "classificaiton" in the message.
            raise NimbusException(f"Invalid classification found in {response['classifications']}")

        # check the levels
        # BUG FIX: squeeze() returns a 0-d array for single-objective problems,
        # which has no len(); atleast_1d keeps the result sized.
        if len(np.atleast_1d(np.array(response["levels"]).squeeze())) != self._method._problem.n_of_objectives:
            raise NimbusException(f"Wrong number of levels supplied in {response['levels']}")

        improve_until_inds = np.where(np.array(response["classifications"]) == "<=")[0]
        impaire_until_inds = np.where(np.array(response["classifications"]) == ">=")[0]

        if len(improve_until_inds) > 0:
            # some objectives classified to be improved until some level
            if not np.all(
                np.array(response["levels"])[improve_until_inds] >= self._method._ideal[improve_until_inds]
            ) or not np.all(
                np.array(response["levels"])[improve_until_inds] <= self._method._nadir[improve_until_inds]
            ):
                raise NimbusException("Given levels must be between the nadir and ideal points!")

        if len(impaire_until_inds) > 0:
            # some objectives classified to be impaired until some level
            if not np.all(
                np.array(response["levels"])[impaire_until_inds] >= self._method._ideal[impaire_until_inds]
            ) or not np.all(
                np.array(response["levels"])[impaire_until_inds] <= self._method._nadir[impaire_until_inds]
            ):
                raise NimbusException("Given levels must be between the nadir and ideal points!")

        # check maximum number of solutions
        if response["number_of_solutions"] > 4 or response["number_of_solutions"] < 1:
            raise NimbusException("The number of solutions must be between 1 and 4.")

    @BaseRequest.response.setter
    def response(self, response: Dict):
        self.validator(response)
        self._response = response
class NimbusSaveRequest(BaseRequest):
    """A request to handle archiving of the solutions computed with NIMBUS.

    Args:
        solution_vectors (List[np.ndarray]): A list of numpy arrays each representing a decision variable vector.
        objective_vectors (List[np.ndarray]): A list of numpy arrays each representing an objective vector.

    Note:
        The objective vector at position 'i' in `objective_vectors` should correspond to the decision variables at
        position 'i' in `solution_vectors`.
    """

    def __init__(
        self, solution_vectors: List[np.ndarray], objective_vectors: List[np.ndarray],
    ):
        msg = (
            "Please specify which solutions shown you would like to save for later viewing. Supply the "
            "indices of such solutions as a list, or supply an empty list if none of the shown soulutions "
            "should be saved."
        )
        content = {
            "message": msg,
            "solutions": solution_vectors,
            "objectives": objective_vectors,
            "indices": [],
        }

        super().__init__("classification_preference", "required", content=content)

    def validator(self, response: Dict) -> None:
        """Validates a response dictionary. The dictionary should contain the keys 'indices'.

        'indices' should be a list of integers representing an index to the
        lists `solutions_vectors` and `objective_vectors`.

        Args:
            response (Dict): See the documentation for `validator`.

        Raises:
            NimbusException: Some discrepancy is encountered in the parsing of `response`.
        """
        if "indices" not in response:
            raise NimbusException("'indices' entry missing")

        if not response["indices"]:
            # nothing to save, continue to next state
            return

        # NOTE(review): the min < 0 test duplicates the bounds check below;
        # kept so the "Invalid indices" message still fires first for
        # negative indices, preserving existing behavior.
        if len(response["indices"]) > len(self.content["objectives"]) or np.min(response["indices"]) < 0:
            # wrong number of indices
            raise NimbusException(f"Invalid indices {response['indices']}")

        if np.max(response["indices"]) >= len(self.content["objectives"]) or np.min(response["indices"]) < 0:
            # out of bounds index
            # BUG FIX: corrected the misspelled "Incides" in the error message.
            raise NimbusException(f"Indices {response['indices']} out of bounds.")

    @BaseRequest.response.setter
    def response(self, response: Dict):
        self.validator(response)
        self._response = response
class NimbusIntermediateSolutionsRequest(BaseRequest):
    """A request to handle the computation of intermediate points between two previously computed points.

    Args:
        solution_vectors (List[np.ndarray]): A list of numpy arrays each representing a decision variable vector.
        objective_vectors (List[np.ndarray]): A list of numpy arrays each representing an objective vector.

    Note:
        The objective vector at position 'i' in `objective_vectors` should correspond to the decision variables at
        position 'i' in `solution_vectors`. Only the two first entries in each of the lists is relevant. The
        rest is ignored.
    """

    def __init__(
        self, solution_vectors: List[np.ndarray], objective_vectors: List[np.ndarray],
    ):
        msg = (
            "Would you like to see intermediate solutions between two previusly computed solutions? "
            "If so, please supply two indices corresponding to the solutions."
        )
        content = {
            "message": msg,
            "solutions": solution_vectors,
            "objectives": objective_vectors,
            "indices": [],
            "number_of_desired_solutions": 0,
        }

        super().__init__("classification_preference", "required", content=content)

    def validator(self, response: Dict):
        """Validates a response dictionary. The dictionary should contain the keys 'indices' and 'number_of_solutions'.

        'indices' should be a list of integers representing an index to the
        lists `solutions_vectors` and `objective_vectors`. 'number_of_solutions' should be an integer greater or equal
        to 1.

        Args:
            response (Dict): See the documentation for `validator`.

        Raises:
            NimbusException: Some discrepancy is encountered in the parsing of `response`.
        """
        if "indices" not in response:
            raise NimbusException("'indices' entry missing.")
        if "number_of_desired_solutions" not in response:
            raise NimbusException("'number_of_desired_solutions' entry missing.")

        if response["number_of_desired_solutions"] < 0:
            raise NimbusException(f"Invalid number of desired solutions {response['number_of_desired_solutions']}.")

        # BUG FIX: these two error messages were swapped relative to their
        # conditions (and misspelled "soutions"); each now describes the
        # situation that actually triggered it.
        if not response["indices"] and response["number_of_desired_solutions"] > 0:
            raise NimbusException("No indices supplied yet the number of desired solutions is greater than zero.")

        if response["indices"] and response["number_of_desired_solutions"] == 0:
            raise NimbusException("Indices supplied yet the number of desired solutions is zero.")

        if not response["indices"]:
            return

        if np.max(response["indices"]) >= len(self.content["objectives"]) or np.min(response["indices"]) < 0:
            # indices out of bounds
            raise NimbusException(f"Invalid indices {response['indices']}")

    @BaseRequest.response.setter
    def response(self, response: Dict):
        self.validator(response)
        self._response = response
class NimbusMostPreferredRequest(BaseRequest):
"""A request to handle the indication of a preferred point.
Args:
solution_vectors (List[np.ndarray]): A list of numpy arrays each representing a decision variable vector.
objective_vectors (List[np.ndarray]): A list of numpy arrays each representing an objective vector.
Note:
The objective vector at position 'i' in `objective_vectors` should correspond to the decision variables at
position 'i' in `solution_vectors`. Only the two first entries in each of the lists is relevant. The preferred
solution will be selected from `objective_vectors`.
"""
def __init__(
self, solution_vectors: List[np.ndarray], objective_vectors: List[np.ndarray],
):
msg = "Please select your most preferred solution and whether you would like to continue. "
content = {
"message": msg,
"solutions": solution_vectors,
"objectives": objective_vectors,
"index": -1,
"continue": True,
}
super().__init__("classification_preference", "required", content=content)
def validator(self, response: Dict):
"""Validates a response dictionary. The dictionary should contain the keys 'index' and 'continue'.
'index' is an integer and should indicate the index of the preferred solution is `objective_vectors`.
'continue' is a boolean and indicates whether to stop or continue the iteration of Synchronous NIMBUS.
Args:
response (Dict): See the documentation for `validator`.
Raises:
NimbusException: Some discrepancy is encountered in the parsing of `response`.
"""
if "index" not in response:
raise NimbusException(f"'index' entry missing.")
if "continue" not in response:
raise NimbusException(f"'continue' entry missing.")
if not | |
##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import bz2
import collections
import threading
import time
import zlib
from contextlib import closing
from zope import interface
from relstorage._util import get_memory_usage
from relstorage._util import byte_display
from relstorage._util import timer as _timer
from relstorage._util import log_timed as _log_timed
from relstorage._compat import OidTMap_intersection
from relstorage._compat import OID_TID_MAP_TYPE as OidTMap
from relstorage._compat import iteroiditems
from relstorage.interfaces import Int
from relstorage.cache.interfaces import IStateCache
from relstorage.cache.interfaces import IPersistentCache
from relstorage.cache.interfaces import MAX_TID
from relstorage.cache.interfaces import CacheConsistencyError
from relstorage.cache.lru_cffiring import CFFICache
from relstorage.cache.persistence import sqlite_connect
from relstorage.cache.persistence import sqlite_files
from relstorage.cache.persistence import FAILURE_TO_OPEN_DB_EXCEPTIONS
from relstorage.cache.local_database import Database
logger = __import__('logging').getLogger(__name__)
# pylint:disable=too-many-lines
class ICachedValue(interface.Interface):
    """
    Data stored in the cache for a single OID.
    This may be a single ``(state, tid)`` pair, or it may be multiple
    such pairs, representing evolution of the object.
    Memory and time efficiency both matter. These objects do not know
    their own OID, just the state and tid.
    .. rubric:: Freezing objects
    For any given OID, one TID may be frozen. It may then be looked up
    without knowing its actual TID (using a TID of ``None``).
    This is useful for objects that do
    not change. Invalidations of frozen states happen automatically
    during the MVCC vacuuming process:
    - Freezing happens after a poll, when we have determined that
    an object has not changed within the range of transactions
    visible to all database viewers. The index entry is removed,
    and we begin looking for it at None.
    At this time, any older states cached for the object are removed.
    By definition, there can be no newer states, so only one state is
    accessible. (Of course, if we've just completed a transaction and
    not yet polled, then that's not strictly true; there could be
    cached data from the future not yet visible to any viewers.)
    - If an object previously frozen is changed, we see that in
    our index and won't ask for frozen states anymore.
    If we then load the new state from the DB, we cache it, leaving it
    with two cached states. Older viewers unaware of the change and accessing the database
    prior to it, can still use the frozen
    revision.
    Eventually that index entry reaches the end of its lifetime. If
    the object has not changed again, we will freeze it. This will
    discard the older frozen value and replace it with a new one. If
    it has changed again, we will invalidate the cache for anything
    older than that TID, which includes the first frozen state.
    The implementation and usage of frozen objects is contained entirely within
    this module. Clients are then responsible for making sure that the
    returned tid, if any, is within their viewing range.
    """
    # The interface methods below deliberately reuse numeric operators
    # (%, <<=, +=, -=) so implementations can be plain tuples/lists with
    # overridden dunders; hence the unusual signatures.
    # pylint:disable=no-self-argument,unexpected-special-method-signature,inherit-non-class
    # pylint:disable=arguments-differ
    weight = Int(
        description=u"""The cost (size) of this cached value.""")
    max_tid = Int(
        description=u"""The newest TID cached for the object."""
    )
    newest_value = interface.Attribute(u"The ``(state, tid)`` pair that is the newest.")
    def __mod__(tid):
        """
        Return the ``(state, tid)`` for the given TID.
        A special value of None matches any previously frozen TID.
        If no TID matches, returns None.
        We use the % operator because ``__getitem__`` was taken.
        """
    def __ilshift__(tid):
        """
        Mark the given TID, if it exists, as frozen.
        Returns a new value to store in the cache. If it returns None,
        this entry is removed from the cache (because the TID didn't match,
        and must have been older.)
        """
    def __iadd__(value):
        """
        Add the ``(state, tid)`` (another ICachedValue) to the list of
        cached entries. Return the new value to place in the cache.
        """
    def __isub__(tid):
        """
        Remove the tid, and anything older, from the list of cached entries.
        Return the new value to place in the cache. Return None if all values
        were removed and the cached entry should be removed.
        """
@interface.implementer(ICachedValue)
class _MultipleValues(list):
    """Several cached ``(state, tid)`` entries for one OID, kept in a list."""
    __slots__ = ()
    frozen = False

    def __init__(self, *values):
        list.__init__(self, values)

    @property
    def weight(self):
        # Total byte size of all non-empty cached states.
        total = 0
        for entry in self:
            if entry[0]:
                total += len(entry[0])
        return total

    @property
    def max_tid(self):
        return max(entry[1] for entry in self)

    @property
    def newest_value(self):
        newest = (None, -1)
        for entry in self:
            if entry[1] > newest[1]:
                newest = entry
        return newest

    def __mod__(self, tid):
        # Delegate to each entry; the first one that matches wins.
        for entry in self:
            match = entry % tid
            if match is not None:
                return match
        return None

    def __ilshift__(self, tid):
        # Works like invalidation of everything older than *tid*; whatever
        # remains is frozen at *tid* if that exact TID is present.
        remaining = [entry for entry in self if entry[1] >= tid]
        if not remaining:
            return None
        if len(remaining) == 1:
            survivor = remaining[0]
            survivor <<= tid
            return survivor
        self[:] = remaining
        for i in range(len(self)):
            if self[i][1] == tid:
                self[i] <<= tid
                break
        return self

    def __iadd__(self, value):
        list.append(self, value)
        return self

    def __isub__(self, tid):
        remaining = [entry for entry in self if entry[1] > tid]
        if not remaining:
            del self[:]
            return None
        if len(remaining) == 1:
            return remaining[0]
        self[:] = remaining
        return self
@interface.implementer(ICachedValue)
class _SingleValue(collections.namedtuple('_ValueBase', ('state_pickle', 'tid'))):
    """Exactly one cached ``(state, tid)`` pair for an OID."""
    __slots__ = ()
    # TODO: Maybe we should represent simple values as just byte strings;
    # the key will match the TID in that case.
    frozen = False

    @property
    def max_tid(self):
        return self.tid

    @property
    def newest_value(self):
        return self

    @property
    def weight(self):
        return len(self.state_pickle) if self.state_pickle else 0

    def __mod__(self, tid):
        # Only an exact TID match is visible; a frozen lookup (None) misses.
        return self if tid == self.tid else None

    def __ilshift__(self, tid):
        if self.tid > tid:
            # We are newer than the freeze point: keep unchanged.
            return self
        if self.tid == tid:
            return _FrozenValue(*self)
        # Older than the freeze point: discard from the cache.
        return None

    def __iadd__(self, value):
        if value == self:
            return value # Let us become frozen if desired.
        if value[1] == self[1] and value[0] != self[0]:
            raise CacheConsistencyError(
                "Detected two different values for same TID",
                self,
                value
            )
        return _MultipleValues(self, value)

    def __isub__(self, tid):
        return None if tid <= self.tid else self
class _FrozenValue(_SingleValue):
    """A single cached value that also answers lookups for a TID of ``None``."""
    __slots__ = ()
    frozen = True

    def __mod__(self, tid):
        # A frozen entry matches its own TID as well as the wildcard None.
        if tid is None or tid == self.tid:
            return self
        return None

    def __ilshift__(self, tid):
        # This can be reached when two transaction views loaded the same
        # object concurrently and both stored it; stay frozen only when the
        # TID actually matches, otherwise drop the entry.
        if tid == self.tid:
            return self
        return None
@interface.implementer(IStateCache,
IPersistentCache)
class LocalClient(object):
# pylint:disable=too-many-public-methods,too-many-instance-attributes
# Use the same markers as zc.zlibstorage (well, one marker)
# to automatically avoid double-compression
_compression_markers = {
'zlib': (b'.z', zlib.compress),
'bz2': (b'.b', bz2.compress),
'none': (None, None)
}
_decompression_functions = {
b'.z': zlib.decompress,
b'.b': bz2.decompress
}
# What multiplier of the number of items in the cache do we apply
# to determine when to age the frequencies?
_age_factor = 10
# When did we last age?
_aged_at = 0
_next_age_at = 1000
_hits = 0
_misses = 0
_sets = 0
_cache = None
_cache_type = CFFICache
# Things copied from self._cache
_peek = None
_cache_mru = None
def __init__(self, options,
prefix=None):
self._lock = threading.Lock()
self.options = options
self.prefix = prefix or ''
# XXX: The calc for limit is substantially smaller
# The real MB value is 1024 * 1024 = 1048576
self.limit = int(1000000 * options.cache_local_mb)
self._value_limit = options.cache_local_object_max
# The underlying data storage. It maps ``{oid: value}``,
# where ``value`` is an :class:`ICachedValue`.
#
# Keying off of OID directly | |
elif event.key == 'e':
self.expsub()
elif event.key == 'P': # PCA
self.plotscan(self.scannum,data=efuncs(self.plane),flag=False,logscale=True)
self.PCAflag = True
elif event.key == 'q':
self.close()
elif event.key == 'Q':
self.close(write=False)
elif event.key == '.':
self.maparrow(round(event.xdata),round(event.ydata))
elif event.key == 'f':
self.footprint(round(event.xdata),round(event.ydata))
elif event.key == 'F':
self.footprint(round(event.xdata),round(event.ydata),scatter=True)
elif event.key == 'R': # reverse order of boxes
self.rectangles[self.scannum].reverse()
elif event.key == 'r': # redraw
self.plotscan(self.scannum)
elif event.key == 'M': # flag highest point
self.flags[self.scannum,:,:].flat[self.plane.argmax()] += 1
self.plane.flat[self.plane.argmax()] = 0
elif event.key == 'm': # flag lowest point
self.flags[self.scannum,:,:].flat[self.plane.argmin()] += 1
self.plane.flat[self.plane.argmin()] = 0
elif event.key == 'd':
self.flag_box(self.x1,self.y1,self.x2,self.y2,'d')
elif event.key == 't' or event.key == 'T':
if self._lastkey == 't' or self._lastkey == 'T':
self._y2 = numpy.ceil(event.ydata)
if event.key == 'T':
self.unflag_times(self._y1,self._y2)
elif event.key =='t':
self.flag_times(self._y1,self._y2)
self._lastkey = None
set_lastkey = False
else:
self._y1 = numpy.floor(event.ydata)
elif event.key == 's' or event.key == 'w': # "whole" scan
self.flags[self.scannum,:,:] += 1
elif event.key == 'S' or event.key == 'W':
self.flags[self.scannum,:,:] -= (self.flags[self.scannum,:,:] > 0)
elif event.key == 'b':
self.flag_bolo(event.xdata,event.key)
elif event.key == 'B':
self.unflag_bolo(event.xdata,event.key)
elif event.key == 'c':
self.toggle_currentscan()
elif event.key == 'C':
self.plot_column(event.xdata)
elif event.key == 'L':
self.plot_line(event.ydata)
elif event.key == 'z':
self.powerspec()
elif event.key == 'Z':
self.powerspec_whole(event.xdata)
elif event.key == 'j':
self.timestream_whole(event.xdata)
elif event.key == 'a':
if self._lastkey == 'a':
self._y2 = round(event.ydata)
self.skymovie = self.footmovie(self._y1,self._y2)
self._lastkey = None
set_lastkey = False
else:
self._y1 = round(event.ydata)
self.footprint(round(event.xdata),round(event.ydata),scatter=True)
elif event.key == 'o':
self.bolomap(event.xdata)
elif event.key == 'v':
x,y = round(event.xdata),round(event.ydata)
vpt = self.data[self.scannum,y,x]
fpt = self.flags[self.scannum,y,x]
xmap = self.tstomap[self.scannum,y,x] / self.map.shape[1]
ymap = self.tstomap[self.scannum,y,x] % self.map.shape[1]
print "Value at %i,%i: %f Flagged=%i Maps to: %i,%i" % (x,y,vpt,fpt,xmap,ymap)
elif event.key == '?':
print self.help
if set_lastkey:
self._lastkey = event.key
    def find_all_points(self,x,y):
        """Print diagnostics for every timestream sample mapping onto map
        pixel (x, y): weighted/unweighted averages, median, MAD, stddev,
        then a per-sample table of the relevant timestream values.
        """
        # Collapse the 2-D map coordinate into the 1-D index stored in tstomap.
        mappoint = y * self.map.shape[1] + x
        self.timepoints = nonzero(self.tstomap == mappoint)
        # Weighted mean of the mapped timestream over the matching samples.
        wtavg = (self.mapped_timestream[self.timepoints]*self.weight[self.timepoints]).sum() / self.weight[self.timepoints].sum()
        # not a real thing wtsclavg = (self.mapped_timestream[self.timepoints]*self.weight[self.timepoints]*self.scalearr[self.timepoints]).sum() / (self.weight[self.timepoints]*self.scalearr[self.timepoints]).sum()
        uwtavg = self.mapped_timestream[self.timepoints].mean()
        medavg = median(self.mapped_timestream[self.timepoints])
        # Robust (median absolute deviation) and standard scatter estimates.
        Hmad = MAD(self.mapped_timestream[self.timepoints])
        Hstd = std(self.mapped_timestream[self.timepoints])
        print ""
        print "Location: %i,%i" % (x,y)
        print "Map value: %f Weighted average: %f Unweighted Average: %f Median: %f" % (self.map[y,x],wtavg,uwtavg,medavg)
        print "MAD: %f StdDev: %f" % (Hmad,Hstd)
        print "scan,bolo,time: %12s%12s%12s%12s%12s%12s%12s%12s" % ('mapped','mapped_astr','astro','noise','residual','flags','weight','scale')
        for ii,jj,kk in transpose(self.timepoints):
            if self.flags[ii,jj,kk]:
                # Flagged samples: print blanks, showing only noise and flag count.
                print "%4i,%4i,%4i: %12s%12s%12s%12f%12s%12i%12s%12s" % (ii,kk,jj,"","","",self.noisemap[y,x],"",self.flags[ii,jj,kk],"","")
            else:
                print "%4i,%4i,%4i: %12f%12f%12f%12f%12f%12i%12f%12f" % (ii,kk,jj,
                    self.mapped_timestream[ii,jj,kk],
                    self.mapped_astrosignal[ii,jj,kk],
                    self.astrosignal[ii,jj,kk],
                    self.noisemap[y,x],
                    self.noise[ii,jj,kk],
                    self.flags[ii,jj,kk],
                    self.weight[ii,jj,kk],
                    self.scalearr[ii,jj,kk])
def expsub(self,piecewise=False):
for ii in xrange(self.nbolos):
if hasattr(self.plane,'mask'):
if self.plane.mask[:,ii].sum() < self.plane.shape[0] - 2:
self.plane[:,ii] = expsub_line(self.plane[:,ii], piecewise=piecewise)
else:
self.plane[:,ii] = expsub_line(self.plane[:,ii], piecewise=piecewise)
self.plotscan(self.scannum, data=self.plane, flag=False)
def polysub(self):
for ii in xrange(self.nbolos):
if hasattr(self.plane,'mask'):
if self.plane.mask[:,ii].sum() < self.plane.shape[0] - 2:
self.plane[:,ii] = polysub_line(self.plane[:,ii])
else:
self.plane[:,ii] = polysub_line(self.plane[:,ii])
self.plotscan(self.scannum, data=self.plane, flag=False)
    def hist_all_points(self,x,y,clear=True,timestream='mapped_timestream'):
        """Histogram every timestream sample mapping to map pixel (x, y) and
        overplot the weighted mean, unweighted mean, median (with MAD/std
        bands), the model value, and the map value.
        Raises KeyError if `timestream` is not a key of self.tsplot_dict.
        """
        mappoint = y * self.map.shape[1] + x
        self.timepoints = nonzero(self.tstomap == mappoint)
        if timestream in self.tsplot_dict.keys():
            TS = self.lookup(timestream) #tsplot_dict[timestream]()
        else:
            raise KeyError("Timestream %s is not valid" % (timestream))
        # Statistics over the samples that land in this pixel.
        wtavg = (TS[self.timepoints]*self.weight[self.timepoints]).sum() / self.weight[self.timepoints].sum()
        uwtavg = TS[self.timepoints].mean()
        medavg = median(TS[self.timepoints])
        Hmad = MAD(TS[self.timepoints])
        Hstd = std(TS[self.timepoints])
        datapts = TS[self.tstomap==mappoint]
        self.plotfig=figure(4)
        self.plotfig.clear()
        # Keep only finite (non-NaN, unmasked) samples for the histogram.
        OK = asarray(datapts == datapts)
        if hasattr(datapts,'mask'):
            OK *= (datapts.mask == False)
        n,bins,patches = hist(asarray(datapts[OK]),histtype='step',color='k',linewidth=2)
        vlines(wtavg,0,max(n),color='k',linestyles=':',label="Weighted: %0.4g $\\pm$ %0.4g" % (wtavg,Hstd))
        #vlines(wtavg,0,max(n),color='k',linestyles=':',label="Std: %0.4g" % Hstd)
        fill_betweenx([0,max(n)],[wtavg-Hstd]*2,[wtavg+Hstd]*2,color='k',alpha=0.1,label="Std: %0.4g" % Hstd)
        vlines(uwtavg,0,max(n),color='b',linestyles='-.',label="Unweighted: %0.4g" % uwtavg)
        vlines(medavg,0,max(n),color='g',linestyles='--',label="Median: %0.4g $\\pm$ %0.4g" % (medavg,Hmad))
        fill_betweenx([0,max(n)],[medavg-Hmad]*2,[medavg+Hmad]*2,color='g',alpha=0.1,label="MAD: %0.4g" % Hmad)
        vlines(self.model[y,x],0,max(n),color='purple',linestyles='--',label="Model: %0.4g" % Hmad)
        # Invisible collection used purely to get the map value into the legend.
        Ctemp = matplotlib.collections.CircleCollection([0],facecolors='k',edgecolors='k')
        Ctemp.set_label('Map Value: %0.4g' % (self.map[y,x]))
        self.plotfig.axes[0].add_collection(Ctemp)
        L=legend(loc='best')
        L.draggable(True)
        title("%s pixel %i,%i" % (self.filename,x,y))
        xlabel('Flux (Jy or Volts)')
def tsarrow(self,x,y):
if self.debug: print "tsarrow at %f,%f" % (x,y)
# xy = [clickX,clickY]
# this took a little thinking:
# the Y axis has HUGE variation, X has small....
mappoint = y * self.map.shape[1] + x
self.timepoints = nonzero(self.tstomap == mappoint)
matchpts = list(nonzero(self.timepoints[0] == self.scannum))[0]
# print mappoint,clickX,clickY,self.timepoints,outer(xy,self.map.shape)
# for i in outer(xy,self.map.shape).ravel():
# print i," : ",nonzero(self.tstomap==mappoint)
# print matchpts,mappoint,self.timepoints
if self.connected:
for a in self.arrows:
a.set_visible(False)
for a in self.arrows:
self.arrows.remove(a)
for i in list(matchpts):
if self.debug: print "i shape: ",i.shape, " matchpts ",matchpts
i = int(i)
t,b = self.timepoints[1][i],self.timepoints[2][i]
# print "T,b,i ",t,b,i
# print "Does t = []?",t == []
# print "Is t >= 0?",t >= 0
# arrow = FancyArrow(t-5,b-5,5,5)
# self.datafig.axes[0].add_patch(arrow)
figure(self.fignum)
ax = self.datafig.axes[0]
# redundant? self.datafig.sca(self.datafig.axes[0])
#arrow = self.datafig.axes[0].arrow(t-5,b-5,5,5)
a1 = ax.arrow(b-3,t-3,6,6,head_width=0,facecolor='black')
a2 = ax.arrow(b-3,t+3,6,-6,head_width=0,facecolor='black')
a1.set_visible(True)
a2.set_visible(True)
# print a,t,b
self.arrows.append(a1)
self.arrows.append(a2)
self._refresh()
def maparrow(self,tsx,tsy):
# scanpoint = self.scannum*self.flags.shape[1]*self.flags.shape[2]\
# + y*self.flags.shape[0] + x
# print tsx,tsy
mappoint = self.tstomap[self.scannum,tsy,tsx]
x,y = mappoint / self.map.shape[1],mappoint % self.map.shape[1]
for a in self.maparrows:
a.set_visible(False)
for a in self.maparrows:
self.maparrows.remove(a)
figure(0)
ax = self.mapfig.axes[0]
a1 = ax.arrow(y+2,x+2,-4,-4,head_width=0,facecolor='black',
length_includes_head=True,head_starts_at_zero=False)
a2 = ax.arrow(y-2,x+2,4,-4,head_width=0,facecolor='black',
length_includes_head=True,head_starts_at_zero=False)
a1.set_visible(True)
a2.set_visible(True)
self.maparrows.append(a1)
self.maparrows.append(a2)
self._refresh()
def toggle_currentscan(self):
if self.currentscan == 0:
xarr = self.tstomap[self.scannum,:,:] / self.map.shape[1]
yarr = self.tstomap[self.scannum,:,:] % self.map.shape[1]
x0,x1 = xarr.min(),xarr.max()
y0,y1 = yarr.min(),yarr.max()
self.mapfig.axes[0].axis([y0,y1,x0,x1])
self.currentscan = 1
self.mapcursor=Cursor(gca(),useblit=True,color='black',linewidth=1)
elif self.currentscan == 1:
self.mapfig.axes[0].axis([0,self.map.shape[1],0,self.map.shape[0]])
self.currentscan = 0
self.mapcursor=Cursor(gca(),useblit=True,color='black',linewidth=1)
def showrects(self):
ax = gca()
for p in self.rectangles[self.scannum]:
p.set_transform(ax.transData)
ax.add_patch(p)
def showlines(self):
ax = gca()
for l in self.lines[self.scannum]:
l.set_transform(ax.transData)
ax.add_line(l)
def reset(self):
""" Reset flags after the update function is called.
Mouse is tracked separately.
"""
self.limits_changed = 0
self.got_draw = False
def mouse_up_event(self, event):
if event.inaxes is None: return
self.mouse_up = True
self.x2 = event.xdata
self.y2 = event.ydata
self.event = event
tb = get_current_fig_manager().toolbar
if tb.mode=='' and not self.PCAflag:
self.flag_box(self.x1,self.y1,self.x2,self.y2,event.button)
# if abs(self.x2-self.x1) > 1 or abs(self.y2-self.y1) > 1:
# else:
# self.flagpoint(self.x1,self.y1,event.button)
def mouse_down_event(self, event):
if event.inaxes is None: return
self.mouse_up = False
self.x1 = event.xdata
self.y1 = event.ydata
def powerspec(self):
self.powerspectra = real(fft(masktozero(self.plane),axis=0) * conj(fft(masktozero(self.plane),axis=0)))
self.plotscan(self.scannum,data=self.powerspectra,flag=False,logscale=True)
ylabel('Frequency')
self.powerspec_plotted = True
def powerspec_whole(self, bolonum=0, recompute=False, timestream='data',
clear=True, fignum=4, logx=False, logy=True, color='k', **kwargs):
if self.powerspectra_whole is None or recompute:
if timestream == 'data':
wholedata = reshape(self.data,[self.data.shape[0]*self.data.shape[1],self.data.shape[2]])
elif self.tsplot_dict.has_key(timestream):
data = self.lookup(timestream) #tsplot_dict[timestream]()
wholedata = reshape(data,[data.shape[0]*data.shape[1],data.shape[2]])
else:
raise KeyError("Timestream %s is not valid." % timestream)
if hasattr(wholedata,'data'): wholedata = wholedata.data
wholedata[wholedata!=wholedata] = 0
self.powerspectra_whole = real(fft(wholedata,axis=0) * conj(fft(wholedata,axis=0)))
datashape = self.powerspectra_whole.shape[0]
self.plotfig=figure(fignum)
if clear: self.plotfig.clear()
if logy:
if logx:
plotcmd = loglog
else:
plotcmd = semilogy
else:
plotcmd = plot
plotcmd(fftfreq(datashape,d=self.sample_interval)[0:datashape/2],
self.powerspectra_whole[0:datashape/2,bolonum],
linewidth=0.5,color=color, **kwargs)
xlabel("Frequency (Hz)")
def atmo_to_astro_power(self, recompute=False, clear=True, fignum=4,
logx=False, logy=True, color='k', atmo='ac_bolos', **kwargs):
"""
Plot ratio of atmospheric to astrophysical power spectrum
"""
if recompute or 'powerspectra_whole_astro' not in self.__dict__:
data = self.lookup('astrosignal').filled(0)
wholedata = reshape(data,[data.shape[0]*data.shape[1],data.shape[2]])
wholedata[wholedata!=wholedata] = 0
self.powerspectra_whole_astro = real(fft(wholedata,axis=0) * conj(fft(wholedata,axis=0)))
data = self.lookup(atmo).filled(0)
wholedata = reshape(data,[data.shape[0]*data.shape[1],data.shape[2]])
wholedata[wholedata!=wholedata] = 0
self.powerspectra_whole_atmo = real(fft(wholedata,axis=0) * conj(fft(wholedata,axis=0)))
datashape = self.powerspectra_whole_astro.shape[0]
self.plotfig=figure(fignum)
if clear: self.plotfig.clear()
if logy:
if logx:
plotcmd = loglog
else:
plotcmd = semilogy
else:
plotcmd = plot
plotcmd(fftfreq(datashape,d=self.sample_interval)[0:datashape/2],
self.powerspectra_whole_astro.mean(axis=1)[0:datashape/2] / self.powerspectra_whole_atmo.mean(axis=1)[0:datashape/2],
linewidth=0.5,color=color, **kwargs)
xlabel("Frequency (Hz)")
def broken_powerfit(self, bolonum=0, plbreak=2, doplot=True, logx=True,
psd=True, replotspec=True, defaultplot=False, p0in=None, p1in=None,
p2in=None, **kwargs):
"""
psd : bool
Divide Y-axis by frequency when plotting to turn power spectrum into
power spectral density?
"""
if replotspec or (self.powerspectra_whole is None):
self.powerspec_whole(bolonum=bolonum,logx=True,**kwargs)
datashape = self.powerspectra_whole.shape[0]
xfreq = fftfreq(datashape,d=self.sample_interval)[1:datashape/2]
powerspectra_half = self.powerspectra_whole[1:datashape/2,bolonum]
p0 = polyfit(log10(xfreq[(xfreq<0.02)]),log10(powerspectra_half[(xfreq<0.02)]),1)
p1 = polyfit(log10(xfreq[(xfreq<plbreak)*(xfreq>0.02)]),log10(powerspectra_half[(xfreq<plbreak)*(xfreq>0.02)]),1)
p2 = polyfit(log10(xfreq[xfreq>=plbreak]),log10(powerspectra_half[xfreq>=plbreak]),1)
# renormalize so that the high-frequency matches the low
p2nought = p2[1]
p2[1] = log10(10**p1[1]*(plbreak**(p1[0]-p2[0])))
def f(x,p):
if psd:
return 10**(p[1])*x**(p[0]) * x
else:
return 10**(p[1])*x**(p[0])
if None not in (p1in,p2in,p0in):
plot(xfreq[xfreq<0.02],f(xfreq[xfreq<0.02],p0in),
color='r',
label="$10^{%0.3f} \\nu^{%0.3f}$" % (p0in[1],p0in[0]+psd),
linewidth=3,
alpha=0.5,
linestyle="-")
d, = plot(xfreq[(xfreq<plbreak)*(xfreq>=0.02)],
f(xfreq[(xfreq<plbreak)*(xfreq>=0.02)],p1in),
color='r',
label="$10^{%0.3f} \\nu^{%0.3f}$" % (p1in[1],p1in[0]+psd),
linestyle="--",
linewidth=5,
alpha=0.5,
)
d.set_dashes([12,12]) # make dot length > dot width
d,= plot(xfreq[xfreq>=plbreak],f(xfreq[xfreq>=plbreak],p2in),
color='r',
label="$10^{%0.3f} \\nu^{%0.3f}$" % (p2in[1],p2in[0]+psd),
linestyle=":",
linewidth=5,
alpha=0.5,
)
d.set_dashes([5,5]) # make dot length = dot width
if doplot:
P = plot(xfreq[xfreq<0.02],f(xfreq[xfreq<0.02],p0),
label="$10^{%0.3f} \\nu^{%0.3f}$" % (p0[1],p0[0]+psd),
)[0]
plot(xfreq[(xfreq<plbreak)*(xfreq>=0.02)],
f(xfreq[(xfreq<plbreak)*(xfreq>=0.02)],p1),
color=P.get_color(),
label="$10^{%0.3f} \\nu^{%0.3f}$" % (p1[1],p1[0]+psd),
linewidth=3,
alpha=0.5)
plot(xfreq[xfreq>=plbreak],f(xfreq[xfreq>=plbreak],p2),
color=P.get_color(),
label="$10^{%0.3f} \\nu^{%0.3f}$" % (p2[1],p2[0]+psd),
linewidth=3,
alpha=0.5,
)
print "Best powerlaw fit: P = 10^%0.3f freq^%0.3f { freq < %0.2f" % (p1[1],p1[0],plbreak)
print " 10^%0.3f freq^%0.3f { freq >= %0.2f" % (p2[1],p2[0],plbreak)
print " 10^%0.3f freq^%0.3f { freq < %0.2f" % (p0[1],p0[0],0.02)
print "Reminder: the high-frequency end is forced to meet the low-freqency. Scale was originally 10^%0.3f" % (p2nought)
noise_scale = (self.powerspectra_whole[1:datashape/2,0]/f(xfreq,p2)*(xfreq>=2.5))[(9>xfreq)*(xfreq>=2.5)].std()
print "The Gaussian stddev should be %f and the mean should be 1" | |
None , None ] )
if 76 - 76: iII111i % OOooOOo / OoooooooOO . I1IiiI % OoO0O00 % i1IIi
oOo00Oo0o00oo = struct . unpack ( oOo0ooO0O0oo , packet [ : OO00OO ] ) [ 0 ]
packet = packet [ OO00OO : : ]
o00OOo00 -= OO00OO
ii1I1 = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
ii1I1 . afi = socket . ntohs ( oOo00Oo0o00oo )
ii1I1 . mask_len = OOOoo0O0
ii1I1 . instance_id = self . instance_id
iII = self . addr_length ( )
if ( o00OOo00 < iII ) : return ( [ None , None ] )
if 95 - 95: Oo0Ooo - O0 / I1ii11iIi11i . I1IiiI / o0oOOo0O0Ooo % OoOoOO00
packet = ii1I1 . unpack_address ( packet )
if ( packet == None ) : return ( [ None , None ] )
if 38 - 38: OoOoOO00 % OoooooooOO . oO0o - OoooooooOO + I11i
return ( [ packet , ii1I1 ] )
if 18 - 18: OoooooooOO + ooOoO0o * OoOoOO00 - OoO0O00
if 42 - 42: oO0o % OoOoOO00 - oO0o + I11i / i11iIiiIii
def lcaf_decode_eid ( self , packet ) :
oOo0ooO0O0oo = "BBB"
OO00OO = struct . calcsize ( oOo0ooO0O0oo )
if ( len ( packet ) < OO00OO ) : return ( [ None , None ] )
if 74 - 74: OoO0O00 - II111iiii - ooOoO0o % i1IIi
if 42 - 42: i11iIiiIii / O0
if 8 - 8: I1Ii111
if 51 - 51: i11iIiiIii
if 1 - 1: iIii1I11I1II1 . i1IIi . i11iIiiIii % I1ii11iIi11i
O0ooO , oOo0ooo00OoO , OOo000OOoOO = struct . unpack ( oOo0ooO0O0oo ,
packet [ : OO00OO ] )
if 58 - 58: i11iIiiIii * i11iIiiIii - OoO0O00
if ( OOo000OOoOO == LISP_LCAF_INSTANCE_ID_TYPE ) :
return ( [ self . lcaf_decode_iid ( packet ) , None ] )
elif ( OOo000OOoOO == LISP_LCAF_MCAST_INFO_TYPE ) :
packet , ii1I1 = self . lcaf_decode_sg ( packet )
return ( [ packet , ii1I1 ] )
elif ( OOo000OOoOO == LISP_LCAF_GEO_COORD_TYPE ) :
oOo0ooO0O0oo = "BBBBH"
OO00OO = struct . calcsize ( oOo0ooO0O0oo )
if ( len ( packet ) < OO00OO ) : return ( None )
if 8 - 8: i11iIiiIii * OoOoOO00 . o0oOOo0O0Ooo
oO0OO0o0oo0o , oOo0ooo00OoO , OOo000OOoOO , ooooOo00O , ii1iII1i1iiIi = struct . unpack ( oOo0ooO0O0oo , packet [ : OO00OO ] )
if 27 - 27: I1ii11iIi11i + Ii1I % I1Ii111
if 20 - 20: Oo0Ooo
if ( OOo000OOoOO != LISP_LCAF_GEO_COORD_TYPE ) : return ( None )
if 33 - 33: oO0o - OoOoOO00 - i11iIiiIii + I1Ii111 + iIii1I11I1II1
ii1iII1i1iiIi = socket . ntohs ( ii1iII1i1iiIi )
packet = packet [ OO00OO : : ]
if ( ii1iII1i1iiIi > len ( packet ) ) : return ( None )
if 2 - 2: OoooooooOO + IiII / iII111i . iIii1I11I1II1 * OoOoOO00
oO0o0oO0O = lisp_geo ( "" )
self . instance_id = 0
self . afi = LISP_AFI_GEO_COORD
self . address = oO0o0oO0O
packet = oO0o0oO0O . decode_geo ( packet , ii1iII1i1iiIi , ooooOo00O )
self . mask_len = self . host_mask_len ( )
if 84 - 84: OOooOOo
return ( [ packet , None ] )
if 68 - 68: I1Ii111
if 92 - 92: oO0o * Ii1I / OoO0O00 % II111iiii
if 54 - 54: oO0o + I11i - OoO0O00
if 86 - 86: OoooooooOO
if 51 - 51: i11iIiiIii
if 91 - 91: OOooOOo
class lisp_elp_node():
    """One hop (reencapsulation point) in an Explicit Locator Path."""
    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.probe = False
        self.strict = False
        self.eid = False
        self.we_are_last = False
    def copy_elp_node(self):
        """Return a copy of this ELP node (address copied, flags duplicated)."""
        elp_node = lisp_elp_node()
        # BUGFIX: copy_address() is a method of the lisp_address stored in
        # .address, not of lisp_elp_node itself (which defines no such method
        # and has no base class) -- the old call raised AttributeError.
        elp_node.address.copy_address(self.address)
        elp_node.probe = self.probe
        elp_node.strict = self.strict
        elp_node.eid = self.eid
        elp_node.we_are_last = self.we_are_last
        return (elp_node)
if 100 - 100: i11iIiiIii / iIii1I11I1II1 + Oo0Ooo + OoO0O00 - iII111i
if 8 - 8: i11iIiiIii . O0 + o0oOOo0O0Ooo * oO0o + II111iiii
if 61 - 61: ooOoO0o / ooOoO0o
class lisp_elp ( ) :
def __init__ ( self , name ) :
self . elp_name = name
self . elp_nodes = [ ]
self . use_elp_node = None
self . we_are_last = False
if 51 - 51: iIii1I11I1II1 / oO0o * I1Ii111 + i1IIi
if 96 - 96: Oo0Ooo + oO0o - Oo0Ooo - OoOoOO00 % OOooOOo . iIii1I11I1II1
def copy_elp ( self ) :
iI1ii1I1i = lisp_elp ( self . elp_name )
iI1ii1I1i . use_elp_node = self . use_elp_node
iI1ii1I1i . we_are_last = self . we_are_last
for ii1iIiIIiIIii in self . elp_nodes :
iI1ii1I1i . elp_nodes . append ( ii1iIiIIiIIii . copy_elp_node ( ) )
if 93 - 93: iIii1I11I1II1 % OoooooooOO
return ( iI1ii1I1i )
if 6 - 6: II111iiii / oO0o - OOooOOo . O0 - o0oOOo0O0Ooo
if 72 - 72: iIii1I11I1II1 / OoooooooOO * ooOoO0o / ooOoO0o % O0 + IiII
def print_elp ( self , want_marker ) :
II1iIiiI = ""
for ii1iIiIIiIIii in self . elp_nodes :
O0Oo = ""
if ( want_marker ) :
if ( ii1iIiIIiIIii == self . use_elp_node ) :
O0Oo = "*"
elif ( ii1iIiIIiIIii . we_are_last ) :
O0Oo = "x"
if 5 - 5: I1IiiI + iII111i % OoOoOO00
if 19 - 19: i11iIiiIii . Oo0Ooo . OoOoOO00 - I1IiiI
II1iIiiI += "{}{}({}{}{}), " . format ( O0Oo ,
ii1iIiIIiIIii . address . print_address_no_iid ( ) ,
"r" if ii1iIiIIiIIii . eid else "R" , "P" if ii1iIiIIiIIii . probe else "p" ,
"S" if ii1iIiIIiIIii . strict else "s" )
if 85 - 85: I11i - OoO0O00 % iIii1I11I1II1 . iII111i + ooOoO0o . Oo0Ooo
return ( II1iIiiI [ 0 : - 2 ] if II1iIiiI != "" else "" )
if 87 - 87: iII111i
if 86 - 86: IiII - I11i
def select_elp_node ( self ) :
ooOoOo0 , oO0oOOOOOOoOO , O0OoO0o = lisp_myrlocs
iI11I = None
if 26 - 26: I1ii11iIi11i / Oo0Ooo
for ii1iIiIIiIIii in self . elp_nodes :
if ( ooOoOo0 and ii1iIiIIiIIii . address . is_exact_match ( ooOoOo0 ) ) :
iI11I = self . elp_nodes . index ( ii1iIiIIiIIii )
break
if 28 - 28: OoO0O00 / I1ii11iIi11i % OOooOOo % I1IiiI + Ii1I
if ( oO0oOOOOOOoOO and ii1iIiIIiIIii . address . is_exact_match ( oO0oOOOOOOoOO ) ) :
iI11I = self . elp_nodes . index ( ii1iIiIIiIIii )
break
if 6 - 6: o0oOOo0O0Ooo % OOooOOo
if 71 - 71: oO0o + II111iiii * O0 / i11iIiiIii * o0oOOo0O0Ooo
if 85 - 85: o0oOOo0O0Ooo - I1Ii111
if 90 - 90: OoO0O00 * I1Ii111 * iII111i * Ii1I + OoOoOO00 / iII111i
if 63 - 63: o0oOOo0O0Ooo * I1Ii111
if 9 - 9: ooOoO0o . O0 + II111iiii . OoooooooOO
if 97 - 97: O0 / OoOoOO00 / ooOoO0o
if ( iI11I == None ) :
self . use_elp_node = self . elp_nodes [ 0 ]
ii1iIiIIiIIii . we_are_last = False
return
if 11 - 11: II111iiii . i11iIiiIii - Ii1I . IiII
if 10 - 10: OOooOOo * OoooooooOO
if 12 - 12: II111iiii - O0 . i1IIi % oO0o % OoooooooOO
if 36 - 36: IiII * OoOoOO00 - iIii1I11I1II1 + II111iiii
if 65 - 65: I1IiiI * I11i . I1Ii111 % I1ii11iIi11i + O0
if 91 - 91: OoooooooOO % I1Ii111 * OoO0O00 - OoOoOO00
if ( self . | |
# from sql server
def get_availability(con):
    """Load the full "availability" view over ``con`` as a DataFrame."""
    query = 'SELECT * FROM "availability"'
    return pandas.read_sql(query, con, index_col=None)
# read in the availability view
print("Reading in availability view")
availdb = get_availability(con)
# use for fake data / demo purposes to make processing quicker for development and debugging
# '''
print('NOW shortening availability view')
availdb = availdb.head(100)  # NOTE(review): dev-only truncation -- disable (via the triple-quote toggles) for production runs
# '''
# Helper Function for availability ratio plot that returns the number of availability periods in a given hour from availability view
def avail_dev_in_hour(hour, pd_df):
    """Count availability periods in ``pd_df`` that overlap a given UTC hour.

    A row counts when its start falls in ``hour``, its end falls in
    ``hour``, or it spans the whole hour (starts before, ends after).
    Rows with a missing start or end timestamp are skipped (the original
    ``break`` aborted the whole count at the first NaN, silently ignoring
    every later row, contradicting the "count all observations" intent).

    :param hour: hour of day (0-23) to test against
    :param pd_df: availability view with epoch-second 'start_time'/'end_time'
    :return: number of qualifying rows
    """
    count = 0
    for t_s, t_e in zip(pd_df['start_time'], pd_df['end_time']):
        # skip (rather than abort on) rows with a missing timestamp
        if numpy.isnan(t_s) or numpy.isnan(t_e):
            continue
        start_hr = datetime.datetime.utcfromtimestamp(t_s).hour
        end_hr = datetime.datetime.utcfromtimestamp(t_e).hour
        if start_hr == hour:            # starting hour is during interval
            count = count + 1
        elif start_hr < hour and end_hr > hour:  # spans the whole interval
            count = count + 1
        elif end_hr == hour:            # ending hour is in interval
            count = count + 1
    return count
print("Generating availability ratio plot...")
# plot_availability_ratios: compares each provider's availability-per-device ratio to a required standard line
def plot_availability_ratios(availdb):
    """Build a figure of hourly availability-per-device ratios per company.

    For each company the trace is (# availability periods overlapping each
    hour) / (# unique deployed devices), for hours 0-23, plus a dashed
    "Required Standard" reference line all companies are compared to.

    :param availdb: availability view DataFrame with 'company_name',
        'device_id', 'start_time' and 'end_time' columns
    :return: plotly Figure
    """
    hours = list(range(0, 24, 1))
    traces = []  # one ratio trace per company
    for co in availdb['company_name'].unique():
        # extract this company's availability rows and its unique device count
        co_avail = availdb.loc[availdb['company_name'] == co]
        num_devices = len(co_avail['device_id'].unique())
        avail_per_hour = [avail_dev_in_hour(h, co_avail) for h in hours]
        # guard against dividing by zero: no deployed devices means no
        # availability rows either, so the ratio is simply 0 for every hour
        if num_devices == 0:
            co_avail_ratio = [0.0] * 24
        else:
            co_avail_ratio = [float(c) / num_devices for c in avail_per_hour]
        traces.append(go.Scatter(
            x=hours,
            y=co_avail_ratio,
            name='{} Availability Ratio'.format(co)
        ))
    # required standard trace the companies are compared against
    traces.append(go.Scatter(
        x=hours,
        y=[2] * 24,  # for real data, adjust this '[2]' to the required standard ratio of availability per device per hour
        mode='lines',
        name='Required Standard',
        line=dict(
            color='red',
            width=4,
            dash='dash')
    ))
    layout = dict(title='Availabilities Per Device',
                  xaxis=dict(title='Hour of Day'),
                  yaxis=dict(title='Availabilities Per Device'),
                  )
    return go.Figure(data=traces, layout=layout)
avail_per_dev_fig = plot_availability_ratios(availdb)
# Function returns double bar chart of the numer of trips starting and ending in each council district
# tdb: trips dataframs
def plot_cd_start_and_ends(tdb):
    """Grouped bar chart of trips starting vs. ending per council district.

    Uses the module-level ``cd_array`` (row sums = trips leaving a
    district, column sums = trips entering it) for districts 1-15.
    NOTE(review): the surrounding comments call cd_array "16 x 16" but
    only indices 0-14 are summed here -- confirm whether index 15 is an
    intentionally-excluded "outside city" bucket.

    :param tdb: trips dataframe (kept for interface parity)
    :return: plotly Figure
    """
    districts = [x for x in range(1, 16, 1)]

    def num_trips_leaving_cd(cdnum):
        # row sum of cd_array for this district
        return sum(cd_array[cdnum - 1][i] for i in range(15))

    def num_trips_entering_cd(cdnum):
        # column sum of cd_array for this district
        return sum(cd_array[i][cdnum - 1] for i in range(15))

    total_cd_starts = [num_trips_leaving_cd(i) for i in range(1, 16)]
    total_cd_ends = [num_trips_entering_cd(i) for i in range(1, 16)]
    trace = go.Bar(
        y=total_cd_starts,
        x=districts,
        name="trip starts",
    )
    trace2 = go.Bar(
        y=total_cd_ends,
        x=districts,
        name="trip ends",
    )
    layout = go.Layout(
        barmode='group',
        title="Number of Trips Starting and Ending Per Council District",
        yaxis={"title": "Counts"},
        xaxis={"title": "Council District"},
    )
    return go.Figure(data=[trace, trace2], layout=layout)
trip_starts_v_ends_fig = plot_cd_start_and_ends(tdb)
####################################################### plot trips per day of week
# Helper Function: returns trip database observations that occur between 2 specified days (both datetime objects, inclusive)
def obs_in_days(firstday, lastday, pd_df):
    """Return the rows of ``pd_df`` whose trip start falls between two dates.

    The trip start is read from the first feature's 'timestamp' property
    of each row's 'route' GeoJSON; both bounds are inclusive datetimes.
    """
    def starts_within(route):
        started = datetime.datetime.utcfromtimestamp(
            route['features'][0]['properties']['timestamp'])
        return (started >= firstday) & (started <= lastday)

    mask = [starts_within(pd_df['route'][i]) for i in range(len(pd_df))]
    return pd_df.loc[mask].reset_index()
# Helper Function extracts the days of each trip for plotting trips taken per day of week
def get_days_of_trips(tripsdf):
    """Return the weekday name (e.g. 'Monday') of each trip's UTC start time."""
    weekday_names = []
    for i in range(len(tripsdf)):
        ts = tripsdf['route'][i]['features'][0]['properties']['timestamp']
        weekday_names.append(
            calendar.day_name[datetime.datetime.utcfromtimestamp(ts).weekday()])
    return weekday_names
# Helper Function counts the frequency of each day given a list of days, to be used for plotting trips per day of week
def count_days(day, dayvec):
    """Return how many entries of ``dayvec`` equal ``day``."""
    return sum(1 for d in dayvec if d == day)
# Function returns a double bar plot for the number of trips taken per day of week for each company *can select legend to view one company at a time
def plot_trips_per_weekdays(trips_df):
    """Grouped bar chart of trips per day of week, one trace per company.

    Select a legend entry to view one company at a time.

    NOTE(review): iterates the module-level ``companies`` collection (the
    local definition is commented out in the original source) -- confirm
    it is defined before this function is called.

    :param trips_df: trips dataframe with 'company_name' and 'route' columns
    :return: plotly Figure
    """
    traces = []
    # add a trace counting trips per day of week for each company
    for co in companies:
        df = trips_df.loc[trips_df['company_name'] == co].reset_index()
        trips_by_day = get_days_of_trips(df)
        # count trips per weekday, in Monday..Sunday calendar order
        day_names = [x for x in calendar.day_name]
        counts = [count_days(day, trips_by_day) for day in day_names]
        traces.append(go.Bar(
            y=counts,
            x=day_names,
            name=co
        ))
    layout = go.Layout(
        barmode='group',
        title="Trips Per Day of Week",
        yaxis={"title": "Number of Trips"}
    )
    return go.Figure(data=traces, layout=layout)
trips_per_weekday_fig = plot_trips_per_weekdays(tdb )
################################################################### Plot drop offs by provider
# show map of provider drop offs
print("Generating map of provider drop offs...")
token = '<KEY>' # mapbox token -- NOTE(review): unused here; plot_dropoffs defines its own local token
# Function returns a plot of device Drop Offs ( default plot is for plotting all companies' drop offs, but can plot 1 specified company)
def plot_dropoffs(scdb):
    """Map device drop-off locations from the status-change view.

    Filters ``scdb`` to rows whose event_type is 'available', groups them
    by drop-off reason, and plots 'service_start' and 'user_drop_off'
    points on a dark mapbox centered on Los Angeles.  Other reasons are
    not drawn (COLORS has entries ready if more traces are added).

    :param scdb: status-change DataFrame with 'company_name', 'event_type',
        'reason' and 'location' (stringified [lon, lat]) columns
    :return: plotly Figure
    """
    token = '<KEY>' # mapbox access token
    # keep only 'available' status-change events (i.e. drop offs)
    avail_boolvec = [scdb['event_type'][i] == 'available' for i in range(len(scdb))]
    avail_scdb = scdb.loc[avail_boolvec].reset_index()
    # 'location' is stored as a string literal "[lon, lat]" -- parse it safely
    points = [literal_eval(avail_scdb['location'][i]) for i in avail_scdb['location'].index]
    reasons = [avail_scdb['reason'][i] for i in range(len(avail_scdb))]
    # build a lat/lon/reason frame for grouping the traces
    df = {'lat': [], 'lon': [], 'reason': []}
    for p in points:
        lon, lat = p[0], p[1]
        df['lat'].append(lat)
        df['lon'].append(lon)
    for r in reasons:
        df['reason'].append(r)
    startdb = pandas.DataFrame.from_dict(df)
    COLORS = dict(out_of_service_area_drop_off = 'green',service_start = 'RGB(255,69,0)', user_drop_off = 'RGB(131,111,255)', rebalance_drop_off = 'rgb(2,136,209)', maintenance_drop_off ='rgb(211,47,47)' )
    traces = []
    for reason, dff in startdb.groupby('reason'):
        if reason == 'service_start':
            reason_text = "Initial Service Dropoff"
            traces.append(dict(
                type='scattermapbox',
                lon=dff['lon'],
                lat=dff['lat'],
                name=reason_text,
                text=reason_text,
                marker=dict(
                    size=11,
                    opacity=1,
                    color=COLORS[reason],
                ),
            ))
        if reason == 'user_drop_off':
            reason_text = "Constituent Device Drop Off"
            traces.append(dict(
                type='scattermapbox',
                lon=dff['lon'],
                lat=dff['lat'],
                name=reason_text,
                text=reason_text,
                marker=dict(
                    color=COLORS[reason]
                ),
            ))
    lay = go.Layout()
    lay['hovermode'] = 'closest'
    lay['autosize'] = True
    lay['mapbox']['accesstoken'] = token
    lay['mapbox']['zoom'] = 11
    # center the map on Los Angeles
    lay['mapbox']['center'] = dict(
        lon=-118.33,
        lat=34.017)
    lay['mapbox']['bearing'] = 0
    lay['mapbox']['style'] = "dark"
    lay['margin'] = dict(
        l=35,
        r=35,
        b=35,
        t=45
    )
    # name a company in the title only when exactly one is present
    if len(scdb['company_name'].unique()) == 1:
        co_label = scdb['company_name'].unique()[0] + " "
    else:
        co_label = ""
    lay['title'] = 'Location of {}Drop Offs'.format(co_label)
    return go.Figure(data=traces, layout=lay)
dropoffs_fig = plot_dropoffs(scdb) # when the demo .head() truncation is active, only 1 provider may be present
#################################################################### create bar chart for trips per neighborhood in a cd
# read in neighborhoods bounds
print("reading in neighborhoods files ...")
area = fiona.open("../data/shapefiles/la_neighborhoods.shp")
original = pyproj.Proj(area.crs, preserve_units=True)  # shapefile's native projection
dest = pyproj.Proj(init='epsg:4326')  # reproject to WGS84 lon/lat
# hood_dict['hoods'][i] is the reprojected polygon for hood_dict['names'][i]
hood_dict={}
hood_dict['hoods']=[]
hood_dict['names'] = []
for a in area:
    hood_dict['names'].append(a['properties']['COMTY_NAME'])
    neighborhood = read_poly(a['geometry']['coordinates'],original,dest)
    hood_dict['hoods'].append(neighborhood)
# create a dictionary to use for sankey and bar charts:
# 'hood_names' is a list of lists where the 1st indice has the list of all neighborhoods inside cd 1
# 'hood_bounds' is a list of lists where the 1st indice has the corresponding multipolygon bounds for all neighborhoods inside cd 1
hoods_in_cd = {'hood_names':[],'hood_bounds':[]} # use this to make sankeys for hoods in each cd
for i in range(15):
    curcd_hood_names = []
    curcd_hood_bounds = []
    # a neighborhood belongs to council district i+1 when their bounds intersect
    for j in range(len(hood_dict['hoods'])):
        if hood_dict['hoods'][j].intersects(all_bounds[i]):
            curcd_hood_names.append(hood_dict['names'][j])
            curcd_hood_bounds.append(hood_dict['hoods'][j])
    hoods_in_cd['hood_names'].append(curcd_hood_names)
    hoods_in_cd['hood_bounds'].append(curcd_hood_bounds)
# Function returns an array where the width and height is the number of neighborhoods in a given council disrict, & values are the between neighborhoods
# tdb: trips data | |
<filename>AREM/PeakModel.py
# Time-stamp: <2011-03-01 18:21:42 <NAME>>
"""Description: Empirical model for peak lengths
Copyright (c) 2008,2009 <NAME>, <NAME> <<EMAIL>>
Copyright (c) 2010,2011 <NAME> <<EMAIL>>
This code is free software; you can redistribute it and/or modify it
under the terms of the Artistic License (see the file COPYING included
with the distribution).
@status: beta
@version: $Revision$
@originalauthor: <NAME>, <NAME>
@originalcontact: <EMAIL>
Modifications to probabilistically align reads to regions with highest
enrichment performed by <NAME>. Repackaged as "AREM" in accordance
with copyright restrictions.
@author: <NAME>, <NAME>, <NAME>
@contact: <EMAIL>, <EMAIL>, <EMAIL>
Changes to this file since original release of MACS 1.4 (summer wishes):
December/January 2011
* Updated names (AREM, not MACS14)
* Use alignment probabilities for multi-reads
* Exclude multi-reads from empirical modeling
"""
import sys, time, random
def median(nums):
    """Calculate Median.

    Uses floor division (``//``) for indexing so the function behaves the
    same under Python 2 and 3, and a float divisor so the median of an
    even-length integer list is not silently truncated (the original
    ``/2`` truncated to an int under Python 2).

    Parameters:
    nums: list of numbers
    Return Value:
    median value
    """
    p = sorted(nums)
    l = len(p)
    if l % 2 == 0:
        # even count: average the two middle values
        return (p[l // 2] + p[l // 2 - 1]) / 2.0
    else:
        return p[l // 2]
class NotEnoughPairsException(Exception):
    """Raised when too few paired peaks exist to build the shift model."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
class PeakModel:
"""Peak Model class.
"""
def __init__ (self, opt=None, treatment=None, max_pairnum=500, gz = 0, umfold=30, lmfold=10, bw=200, ts = 25, bg=0):
    """Configure the peak model and build it immediately.

    Parameters come from ``opt`` when given (an options object exposing
    gsize/umfold/lmfold/tsize/bw plus info/debug/warn logging callables);
    otherwise the keyword fallbacks are used and log output goes to stderr.

    opt         -- options object (overrides the keyword defaults)
    treatment   -- track object holding the treatment tag positions
    max_pairnum -- cap on the number of peak pairs used for modeling
    gz, umfold, lmfold, bw, ts, bg -- fallbacks for genome size, upper/
        lower mfold, bandwidth, tag size and background when opt is None
    """
    self.treatment = treatment
    if opt:
        self.gz = opt.gsize
        self.umfold = opt.umfold
        self.lmfold = opt.lmfold
        self.tsize = opt.tsize
        self.bw = opt.bw
        self.info = opt.info
        self.debug = opt.debug
        self.warn = opt.warn
        # NOTE(review): error handler is bound to opt.warn -- looks like a
        # typo for opt.error; confirm before relying on self.error.
        self.error = opt.warn
        # NOTE(review): self.bg is only assigned in the else-branch below,
        # so it is missing when opt is supplied -- TODO confirm intended.
    else:
        self.gz = gz
        self.umfold = umfold
        self.lmfold = lmfold
        self.tsize = ts
        self.bg = bg
        self.bw = bw
        # without an opt object, send all log levels to stderr
        self.info = lambda x: sys.stderr.write(x+"\n")
        self.debug = lambda x: sys.stderr.write(x+"\n")
        self.warn = lambda x: sys.stderr.write(x+"\n")
        self.error = lambda x: sys.stderr.write(x+"\n")
    self.max_pairnum = max_pairnum
    # model outputs, populated by build()
    self.summary = ""
    self.plus_line = None
    self.minus_line = None
    self.shifted_line = None
    self.d = None
    self.scan_window = None
    self.min_tags = None
    self.peaksize = None
    self.build()
def build (self):
    """Build the model.

    prepare self.d, self.scan_window, self.plus_line,
    self.minus_line and self.shifted_line to use.

    Raises NotEnoughPairsException when fewer than 100 paired peaks
    are found across all chromosomes.
    """
    self.peaksize = 2*self.bw
    self.min_tags = float(self.treatment.total) * self.lmfold * self.peaksize / self.gz /2 # mininum unique hits on single strand
    self.max_tags = float(self.treatment.total) * self.umfold * self.peaksize / self.gz /2 # maximum unique hits on single strand
    self.debug("#2 tags required in model: %.2f min, %.2f max" % (self.min_tags, self.max_tags))
    # use treatment data to build model
    paired_peakpos = self._paired_peaks ()
    # select up to 1000 pairs of peaks to build model
    # Jake - odd that he selects the first 1000 rather than at random.
    num_paired_peakpos = 0
    num_paired_peakpos_remained = self.max_pairnum
    num_paired_peakpos_picked = 0
    # NOTE(review): mutating the dict while iterating .keys() is safe on
    # Python 2 (keys() returns a list copy) but would raise on Python 3.
    for c in paired_peakpos.keys():
        num_paired_peakpos +=len(paired_peakpos[c])
        if num_paired_peakpos_remained == 0:
            # budget exhausted: drop this chromosome entirely
            paired_peakpos.pop(c)
        else:
            # keep only as many pairs as the remaining budget allows
            paired_peakpos[c] = paired_peakpos[c][:num_paired_peakpos_remained]
            num_paired_peakpos_remained -= len(paired_peakpos[c])
            num_paired_peakpos_picked += len(paired_peakpos[c])
    self.info("#2 number of paired peaks: %d" % (num_paired_peakpos))
    if num_paired_peakpos < 100:
        self.error("Too few paired peaks (%d) so I can not build the model! Broader your MFOLD range parameter may erase this error. If it still can't build the model, please use --nomodel and --shiftsize 100 instead." % (num_paired_peakpos))
        self.error("Process for pairing-model is terminated!")
        raise NotEnoughPairsException("No enough pairs to build model")
    elif num_paired_peakpos < self.max_pairnum:
        self.warn("Fewer paired peaks (%d) than %d! Model may not be build well! Lower your MFOLD parameter may erase this warning. Now I will use %d pairs to build model!" % (num_paired_peakpos,self.max_pairnum,num_paired_peakpos_picked))
    self.debug("Use %d pairs to build the model." % (num_paired_peakpos_picked))
    self._paired_peak_model(paired_peakpos)
def __str__ (self):
    """For debug...

    Renders the min/max tag thresholds, fragment size d and scan window.
    Only meaningful after build() has populated those attributes.
    """
    # NOTE(review): the template's leading whitespace is part of the output
    return """
Summary of Peak Model:
Baseline: %d
Upperline: %d
Fragment size: %d
Scan window size: %d
""" % (self.min_tags,self.max_tags,self.d,self.scan_window)
def _paired_peak_model (self, paired_peakpos):
    """Use paired peak positions and treatment tag positions to build the model.
    Modify self.(d, model_shift size and scan_window size. and extra, plus_line, minus_line and shifted_line for plotting).

    NOTE(review): relies on Python 2 semantics throughout -- the bare
    ``print`` statement and the integer results of the ``/2`` divisions.
    """
    # accumulate tag projections in a window of +/- peaksize around each pair center
    window_size = 1+2*self.peaksize
    self.plus_line = [0]*window_size
    self.minus_line = [0]*window_size
    for chrom in paired_peakpos.keys():
        paired_peakpos_chrom = paired_peakpos[chrom]
        tags_plus, tags_minus = self.treatment.get_locations_by_chr(chrom)
        index_plus, index_minus = self.treatment.get_indexes_by_chr(chrom)
        # every paired peak has plus line and minus line
        # add plus_line
        self.plus_line = self._model_add_line (paired_peakpos_chrom, tags_plus, index_plus, self.plus_line)
        # add minus_line
        self.minus_line = self._model_add_line (paired_peakpos_chrom, tags_minus, index_minus, self.minus_line)
    # find top: collect every window index at the strand-wise maximum
    plus_tops = []
    minus_tops = []
    plus_max = max(self.plus_line)
    minus_max = max(self.minus_line)
    for i in range(window_size):
        if self.plus_line[i] == plus_max:
            plus_tops.append(i)
        if self.minus_line[i] == minus_max:
            minus_tops.append(i)
    # fragment size d = distance between the median plus and minus maxima
    self.d = minus_tops[len(minus_tops)/2] - plus_tops[len(plus_tops)/2] + 1
    print 'plus_tops: %s\nminus_tops: %s\nd: %s\nwindow_size: %s' %(plus_tops, minus_tops, self.d, window_size)
    shift_size = self.d/2
    # find the median point
    #plus_median = median(self.plus_line)
    #minus_median = median(self.minus_line)
    self.scan_window = max(self.d,self.tsize)*2
    # a shifted model: shift plus tags right and minus tags left by d/2, then sum
    self.shifted_line = [0]*window_size
    plus_shifted = [0]*shift_size
    plus_shifted.extend(self.plus_line[:-1*shift_size])
    minus_shifted = self.minus_line[shift_size:]
    minus_shifted.extend([0]*shift_size)
    for i in range(window_size):
        self.shifted_line[i]=minus_shifted[i]+plus_shifted[i]
    return True
def _model_add_line (self, pos1, pos2, index2, line):
    """Project each pos in pos2 which is included in
    [pos1-self.peaksize,pos1+self.peaksize] to the line.

    pos1   -- sorted paired-peak center positions
    pos2   -- sorted tag positions for one strand
    index2 -- alignment index parallel to pos2; nonzero entries mark
              multi-aligning reads, which are excluded from the model
    line   -- accumulator list of length 1+2*peaksize, updated in place
    """
    i1 = 0 # index for pos1
    i2 = 0 # index for pos2
    i2_prev = 0 # index for pos2 in previous pos1
    # [pos1-self.peaksize,pos1+self.peaksize]
    # region
    i1_max = len(pos1)
    i2_max = len(pos2)
    last_p2 = -1  # NOTE(review): never read again -- leftover variable
    flag_find_overlap = False
    # two-pointer sweep over the two sorted position lists
    while i1<i1_max and i2<i2_max:
        p1 = pos1[i1]
        p2 = pos2[i2]
        if p1-self.peaksize > p2 or index2[i2] != 0: # move pos2, skip multi-aligning reads
            i2 += 1
        elif p1+self.peaksize < p2: # move pos1
            i1 += 1
            i2 = i2_prev # search minus peaks from previous index
            flag_find_overlap = False
        else: # overlap!
            if not flag_find_overlap:
                flag_find_overlap = True
                i2_prev = i2 # only the first index is recorded
            # project a tag-sized interval centered on the tag onto the
            # window; self.tsize/2 is integer division (Python 2)
            for i in range(p2-p1+self.peaksize-self.tsize/2,p2-p1+self.peaksize+self.tsize/2):
                if i>=0 and i<len(line):
                    line[i]+=1
            i2+=1
    return line
def _paired_peaks (self):
    """Call paired peaks from fwtrackI object.

    Naively calls peaks on each strand per chromosome, then pairs the
    plus/minus peaks; chromosomes with peaks on only one strand are
    dropped.  Return paired peaks center positions as {chrom: [pos, ...]}.
    """
    # NOTE(review): assumes get_chr_names() returns a list (Python 2);
    # a dict key view would have no .sort().
    chrs = self.treatment.get_chr_names()
    chrs.sort()
    paired_peaks_pos = {}
    for chrom in chrs:
        self.debug("Chromosome: %s" % (chrom))
        # tags/indexes are (plus, minus) per-strand pairs
        tags = self.treatment.get_locations_by_chr(chrom)
        indexes = self.treatment.get_indexes_by_chr(chrom)
        plus_peaksinfo = self._naive_find_peaks (tags[0], indexes[0])
        self.debug("Number of unique tags on + strand: %d" % (len(tags[0])))
        self.debug("Number of peaks in + strand: %d" % (len(plus_peaksinfo)))
        minus_peaksinfo = self._naive_find_peaks (tags[1], indexes[1])
        self.debug("Number of unique tags on - strand: %d" % (len(tags[1])))
        self.debug("Number of peaks in - strand: %d" % (len(minus_peaksinfo)))
        if not plus_peaksinfo or not minus_peaksinfo:
            # need peaks on both strands to form pairs
            self.debug("Chrom %s is discarded!" % (chrom))
            continue
        else:
            paired_peaks_pos[chrom] = self._find_pair_center (plus_peaksinfo, minus_peaksinfo)
            self.debug("Number of paired peaks: %d" %(len(paired_peaks_pos[chrom])))
    return paired_peaks_pos
def _find_pair_center (self, pluspeaks, minuspeaks):
    """Pair plus-strand peaks with nearby minus-strand peaks and return
    the center position of each accepted pair.

    A pair is accepted when the peaks lie within self.peaksize of each
    other, their tag counts differ by less than a factor of 2, and the
    plus peak is upstream of the minus peak.  (pp+mp)/2 is integer
    division under Python 2.
    """
    ip = 0 # index for plus peaks
    im = 0 # index for minus peaks
    im_prev = 0 # index for minus peaks in previous plus peak
    pair_centers = []
    ip_max = len(pluspeaks)
    im_max = len(minuspeaks)
    flag_find_overlap = False
    # two-pointer sweep over the two sorted peak lists
    while ip<ip_max and im<im_max:
        (pp,pn) = pluspeaks[ip] # for (peakposition, tagnumber in peak)
        (mp,mn) = minuspeaks[im]
        if pp-self.peaksize > mp: # move minus
            im += 1
        elif pp+self.peaksize < mp: # move plus
            ip += 1
            im = im_prev # search minus peaks from previous index
            flag_find_overlap = False
        else: # overlap!
            if not flag_find_overlap:
                flag_find_overlap = True
                im_prev = im # only the first index is recorded
            if float(pn)/mn < 2 and float(pn)/mn > 0.5: # number tags in plus and minus peak region are comparable...
                if pp < mp:
                    pair_centers.append((pp+mp)/2)
                    #self.debug ( "distance: %d, minus: %d, plus: %d" % (mp-pp,mp,pp))
            im += 1
    return pair_centers
def _naive_find_peaks (self, taglist, indexlist ):
"""Naively call peaks based on tags counting.
Do NOT include reads with multiple alignments. This exclusion filters
down through all of the model building phase.
Return peak positions and the tag number in peak region by a tuple list [(pos,num)].
"""
peak_info = [] # store peak pos in every peak region and
# unique tag number in every peak region
if len(taglist)<2:
return peak_info
first_pos = None
for i in xrange(len(taglist)):
# get first non-multi read
if indexlist[i] == 0:
first_pos = i
break
if first_pos is None:
return peak_info
current_tag_list = [taglist[first_pos]]
for i in range(first_pos+1, len(taglist)):
if indexlist[i] != 0: # filter out multi reads in building model
continue
pos = taglist[i]
# Jake - This step seems incorrect. a stretch of bw*2 is considered,
# always originating at the next read. Say the stretch divides a
# peak in half and is most potent in the middle. Then the counts
| |
"""
function used to update the search results from join with one column to join with both this column and time column
:param search_results: list of "DatamartSearchResult"
:return: list of "DatamartSearchResult"
:return:
"""
# find time columns first
# get time ranges on supplied data
time_columns_left = list()
for i in range(self.supplied_dataframe.shape[1]):
if type(self.supplied_data) is d3m_Dataset:
each_selector = (self.res_id, ALL_ELEMENTS, i)
else:
each_selector = (ALL_ELEMENTS, i)
each_column_metadata = self.supplied_data.metadata.query(each_selector)
if "semantic_types" not in each_column_metadata:
self._logger.warning("column No.{} {} do not have semantic type on metadata!".
format(str(i), str(self.supplied_dataframe.columns[i])))
continue
if TIME_SEMANTIC_TYPE in each_column_metadata['semantic_types']:
# if we got original time granularity from metadata, use it directly
time_column = self.supplied_dataframe.iloc[:, i]
if 'time_granularity' in each_column_metadata.keys():
granularity_d3m_format = each_column_metadata['time_granularity']
granularity = Utils.map_d3m_granularity_to_value(granularity_d3m_format['unit'])
else:
try:
granularity_datamart_format = Utils.get_time_granularity(time_column)
granularity = Utils.map_granularity_to_value(granularity_datamart_format)
except ValueError:
self._logger.error("Can't continue because unable to get the time granularity on column No.{} {}".
format(str(i), str(self.supplied_dataframe.columns[i])))
continue
self._logger.info("Get the time granularity of column No.{} {} as {}".
format(str(i), str(self.supplied_dataframe.columns[i]), str(granularity)))
if "datetime" not in time_column.dtype.name:
time_column = pd.to_datetime(time_column)
time_columns_left.append({
"granularity": granularity,
"start_time": min(time_column),
"end_time": max(time_column),
"column_number": i,
})
# get time ranges on search results
time_columns_right = list()
for each_search_result in search_results:
if each_search_result.search_type == "general":
for i in range(each_search_result.d3m_metadata.query((ALL_ELEMENTS,))['dimension']['length']):
each_column_metadata = each_search_result.d3m_metadata.query((ALL_ELEMENTS, i))
# TODO: it seems our current system can't handle multiple time data's condition
if TIME_SEMANTIC_TYPE in each_column_metadata['semantic_types']:
time_information_query = self.augmenter.get_dataset_time_information(each_search_result.id())
if len(time_information_query) == 0:
self._logger.warning("Detect timestamp on dataset {} {} but no time information was found!"
.format(each_search_result.id(),
each_search_result.search_result['title']['value']))
continue
time_columns_right.append({
"granularity": int(time_information_query[0]['time_granularity']['value']),
"start_time": pd.Timestamp(time_information_query[0]['start_time']['value']),
"end_time": pd.Timestamp(time_information_query[0]['end_time']['value']),
"column_number": i,
"dataset_id": each_search_result.id()
})
# only keep the datasets that has overlaped time range and same time granularity
can_consider_datasets = defaultdict(list)
for left_time_info in time_columns_left:
for right_time_info in time_columns_right:
left_range = [left_time_info['start_time'], left_time_info['end_time']]
right_range = [right_time_info['start_time'], right_time_info['end_time']]
# ensure the format are correct
for i in range(len(left_range)):
if isinstance(left_range[i], pd.Timestamp):
left_range[i] = left_range[i].tz_localize('UTC')
elif isinstance(left_range[i], str):
left_range[i] = pd.Timestamp(left_range[i])
# TODO: if time granularity different but time range overlap? should we consider it or not
if left_time_info['granularity'] >= right_time_info['granularity'] and Utils.overlap(left_range, right_range):
can_consider_datasets[right_time_info['dataset_id']].append(
{
"left_column_number": left_time_info["column_number"],
"right_dataset_id": right_time_info['dataset_id'],
"right_join_column_number": right_time_info['column_number'],
"right_join_start_time": right_time_info['start_time'],
"right_join_end_time": right_time_info['end_time'],
"right_join_time_granularity": right_time_info['granularity']
})
filtered_search_result = []
for each_search_result in search_results:
if each_search_result.search_type == "general":
if each_search_result.id() in can_consider_datasets:
for each_combine in can_consider_datasets[each_search_result.id()]:
each_search_result_copied = copy.copy(each_search_result)
# update join pairs information
right_index = None
right_join_column_name = each_search_result.search_result['variableName']['value']
for i in range(each_search_result.d3m_metadata.query((ALL_ELEMENTS,))['dimension']['length']):
each_column_metadata = each_search_result.d3m_metadata.query((ALL_ELEMENTS, i))
if each_column_metadata['name'] == right_join_column_name:
right_index = i
break
if len(each_search_result.query_json['variables'].keys()) > 1:
self._logger.warning("Mutiple variables join results update for time related not supported yet!")
left_join_column_name = list(each_search_result.query_json['variables'].keys())[0]
left_index = self.supplied_dataframe.columns.tolist().index(left_join_column_name)
# right_index = right_df.columns.tolist().index(right_join_column_name)
original_left_index_column = DatasetColumn(resource_id=self.res_id, column_index=left_index)
original_right_index_column = DatasetColumn(resource_id=None, column_index=right_index)
left_columns = [
DatasetColumn(resource_id=self.res_id, column_index=each_combine["left_column_number"]),
original_left_index_column
]
right_columns = [
DatasetColumn(resource_id=None, column_index=each_combine["right_join_column_number"]),
original_right_index_column
]
updated_join_pairs = [TabularJoinSpec(left_columns=[left_columns], right_columns=[right_columns])]
each_search_result_copied.set_join_pairs(updated_join_pairs)
# update the search result with time information
time_search_keyword = TIME_COLUMN_MARK + "____" + right_join_column_name
each_search_result_copied.query_json['keywords'].append(time_search_keyword)
each_search_result_copied.search_result['start_time'] = str(each_combine["right_join_start_time"])
each_search_result_copied.search_result['end_time'] = str(each_combine["right_join_end_time"])
each_search_result_copied.search_result['time_granularity'] = str(
each_combine["right_join_time_granularity"])
filtered_search_result.append(each_search_result_copied)
return filtered_search_result
@singleton
class Datamart(object):
"""
ISI implement of datamart
"""
def __init__(self, connection_url: str = None) -> None:
    """Set up logging, resolve the datamart endpoint URL, create the augmenter.

    A caller-supplied ``connection_url`` takes precedence; otherwise the
    ``DATAMART_URL_ISI`` environment variable (falling back to the package
    default) is used.
    """
    self._logger = logging.getLogger(__name__)
    if connection_url:
        self._logger.info("Using user-defined connection url as " + connection_url)
        self.connection_url = connection_url
    else:
        self.connection_url = os.getenv('DATAMART_URL_ISI', DEFAULT_DATAMART_URL)
    self._logger.debug("Current datamart connection url is: " + self.connection_url)
    self.augmenter = Augment()
    self.supplied_dataframe = None
def search(self, query: 'DatamartQuery') -> DatamartQueryCursor:
    """Search the datamart from a query specification alone.

    The specification can constrain datasets by keywords, named entities,
    temporal ranges, and geospatial ranges.  A cursor is returned
    immediately; results are fetched lazily from it.

    Parameters
    ----------
    query : DatamartQuery
        Query specification.

    Returns
    -------
    DatamartQueryCursor
        A cursor pointing to search results.
    """
    cursor = DatamartQueryCursor(augmenter=self.augmenter,
                                 search_query=[query],
                                 supplied_data=None,
                                 connection_url=self.connection_url,
                                 need_run_wikifier=False)
    return cursor
def search_with_data(self, query: 'DatamartQuery', supplied_data: container.Dataset, **kwargs) \
-> DatamartQueryCursor:
"""
Search using on a query and a supplied dataset.
This method is a "smart" search, which leaves the Datamart to determine how to evaluate the relevance of search
result with regard to the supplied data. For example, a Datamart may try to identify named entities and date
ranges in the supplied data and search for companion datasets which overlap.
To manually specify query constraints using columns of the supplied data, use the `search_with_data_columns()`
method and `TabularVariable` constraints.
Datamart implementations should return a DatamartQueryCursor immediately.
Parameters
----------
query : DatamartQuery
Query specification
supplied_data : container.Dataset
The data you are trying to augment.
kwargs : dict
Some extra control parameters. For example:
need_wikidata: (Default is True) If set to Ture, the program will run wikifier on supplied data and find possible
Q nodes, then search for possible attributes with those Q nodes and search for vectors
augment_with_time: (Default is False) If set to True, a pair with two columns will be searched, only data with
both join columns like [time, key] will be considered
consider_time: (Default is True) If set to True, no time columns on datamart will be considered as candidates.
This control parameter will be useless if augment_with_time was True
consider_wikifier_columns_only: (Default is False) If set to True, only columns with Q nodes will be considered
as join candiadates
Returns
-------
DatamartQueryCursor
A cursor pointing to search results containing possible companion datasets for the supplied data.
"""
# update v2019.10.24, add keywords search in search queries
if query.keywords:
query_keywords = []
for each in query.keywords:
translator = str.maketrans(string.punctuation, ' ' * len(string.punctuation))
words_processed = str(each).lower().translate(translator).split()
query_keywords.extend(words_processed)
else:
query_keywords = None
need_wikidata = kwargs.get("need_wikidata", True)
consider_wikifier_columns_only = kwargs.get("consider_wikifier_columns_only", False)
augment_with_time = kwargs.get("augment_with_time", False)
consider_time = kwargs.get("consider_time", True)
if consider_time is False and augment_with_time is True:
self._logger.warning("Augment with time is set to be true! consider_time parameter will be useless.")
# add some special search query in the first search queries
if not need_wikidata:
search_queries = [DatamartQuery(search_type="geospatial")]
need_run_wikifier = False
else:
need_run_wikifier = None
search_queries = [DatamartQuery(search_type="wikidata"),
DatamartQuery(search_type="vector"),
DatamartQuery(search_type="geospatial")]
# try to update with more correct metadata if possible
updated_result = MetadataCache.check_and_get_dataset_real_metadata(supplied_data)
if updated_result[0]: # [0] store whether it success find the metadata
supplied_data = updated_result[1]
if type(supplied_data) is d3m_Dataset:
res_id, self.supplied_dataframe = d3m_utils.get_tabular_resource(dataset=supplied_data, resource_id=None)
else:
raise ValueError("Incorrect supplied data type as " + str(type(supplied_data)))
# if query is None:
# if not query given, try to find the Text columns from given dataframe and use it to find some candidates
can_query_columns = []
for each in range(len(self.supplied_dataframe.columns)):
if type(supplied_data) is d3m_Dataset:
selector = (res_id, ALL_ELEMENTS, each)
else:
selector = (ALL_ELEMENTS, each)
each_column_meta = supplied_data.metadata.query(selector)
# try to parse each column to DateTime type. If success, add new semantic type, otherwise do nothing
try:
pd.to_datetime(self.supplied_dataframe.iloc[:, each])
new_semantic_type = {"semantic_types": (TIME_SEMANTIC_TYPE, ATTRIBUTE_SEMANTIC_TYPE)}
supplied_data.metadata = supplied_data.metadata.update(selector, new_semantic_type)
except:
pass
if TEXT_SEMANTIC_TYPE in each_column_meta["semantic_types"] \
or TIME_SEMANTIC_TYPE in each_column_meta["semantic_types"]:
can_query_columns.append(each)
if len(can_query_columns) == 0:
self._logger.warning("No column can be used for augment with datamart!")
for each_column_index in can_query_columns:
column_formated = DatasetColumn(res_id, each_column_index)
tabular_variable = TabularVariable(columns=[column_formated], relationship=ColumnRelationship.CONTAINS)
each_search_query = self.generate_datamart_query_from_data(supplied_data=supplied_data,
data_constraints=[tabular_variable])
# if we get keywords from input search query, add it
if query_keywords:
each_search_query.keywords_search = query_keywords
search_queries.append(each_search_query)
return DatamartQueryCursor(augmenter=self.augmenter, search_query=search_queries, supplied_data=supplied_data,
need_run_wikifier=need_run_wikifier, connection_url=self.connection_url,
consider_wikifier_columns_only=consider_wikifier_columns_only,
augment_with_time=augment_with_time,
consider_time=consider_time)
def search_with_data_columns(self, query: 'DatamartQuery', supplied_data: container.Dataset,
data_constraints: typing.List['TabularVariable']) -> DatamartQueryCursor:
"""
Search using a query which can include constraints on supplied data columns (TabularVariable).
This search is similar to the "smart" search provided by `search_with_data()`, but caller must manually specify
constraints using columns from the supplied data; Datamart will not automatically analyze it to determine
relevance or joinability.
Use of the query spec enables callers to compose their own "smart search" implementations.
Datamart implementations should return a DatamartQueryCursor immediately.
Parameters
----------
query : DatamartQuery
Query specification
supplied_data : container.Dataset
The data you are trying to augment.
data_constraints : list
List of `TabularVariable` constraints referencing the supplied data.
Returns
-------
DatamartQueryCursor
A cursor pointing to search results containing possible companion datasets for the supplied data.
"""
# put entities of all given columns from "data_constraints" into the query's variable part and run the query
# try to update with more correct metadata if possible
| |
import numpy as np
from chainer import cuda, Function, gradient_check, Variable, optimizers, serializers, utils, initializers
from chainer import Link, Chain, ChainList
import chainer.links as L
import chainer.functions as F
from lib.utils import *
from lib.functions import *
import time
class Darknet19(Chain):
    """
    Darknet19 classification backbone.

    - It takes a (224, 224, 3) or (448, 448, 3) sized image as input
      (conv1 expects 3 input channels; the original docstring's
      "(448, 448, 4)" was a typo).
    - Produces one score per class via conv19 (4 outputs here).
    """

    def __init__(self):
        # NOTE(review): the original created ``initializers.HeNormal()`` but
        # never passed it to any layer (unlike YOLOv2 below, which uses
        # ``initialW=``).  The unused local is removed; layers keep Chainer's
        # default initialization, exactly as before.
        super(Darknet19, self).__init__(
            ##### common layers for both pretrained layers and yolov2 #####
            conv1=L.Convolution2D(3, 32, ksize=3, stride=1, pad=1, nobias=True),
            bn1=L.BatchNormalization(32, use_beta=False),
            bias1=L.Bias(shape=(32,)),
            conv2=L.Convolution2D(32, 64, ksize=3, stride=1, pad=1, nobias=True),
            bn2=L.BatchNormalization(64, use_beta=False),
            bias2=L.Bias(shape=(64,)),
            conv3=L.Convolution2D(64, 128, ksize=3, stride=1, pad=1, nobias=True),
            bn3=L.BatchNormalization(128, use_beta=False),
            bias3=L.Bias(shape=(128,)),
            conv4=L.Convolution2D(128, 64, ksize=1, stride=1, pad=0, nobias=True),
            bn4=L.BatchNormalization(64, use_beta=False),
            bias4=L.Bias(shape=(64,)),
            conv5=L.Convolution2D(64, 128, ksize=3, stride=1, pad=1, nobias=True),
            bn5=L.BatchNormalization(128, use_beta=False),
            bias5=L.Bias(shape=(128,)),
            conv6=L.Convolution2D(128, 256, ksize=3, stride=1, pad=1, nobias=True),
            bn6=L.BatchNormalization(256, use_beta=False),
            bias6=L.Bias(shape=(256,)),
            conv7=L.Convolution2D(256, 128, ksize=1, stride=1, pad=0, nobias=True),
            bn7=L.BatchNormalization(128, use_beta=False),
            bias7=L.Bias(shape=(128,)),
            conv8=L.Convolution2D(128, 256, ksize=3, stride=1, pad=1, nobias=True),
            bn8=L.BatchNormalization(256, use_beta=False),
            bias8=L.Bias(shape=(256,)),
            conv9=L.Convolution2D(256, 512, ksize=3, stride=1, pad=1, nobias=True),
            bn9=L.BatchNormalization(512, use_beta=False),
            bias9=L.Bias(shape=(512,)),
            conv10=L.Convolution2D(512, 256, ksize=1, stride=1, pad=0, nobias=True),
            bn10=L.BatchNormalization(256, use_beta=False),
            bias10=L.Bias(shape=(256,)),
            conv11=L.Convolution2D(256, 512, ksize=3, stride=1, pad=1, nobias=True),
            bn11=L.BatchNormalization(512, use_beta=False),
            bias11=L.Bias(shape=(512,)),
            conv12=L.Convolution2D(512, 256, ksize=1, stride=1, pad=0, nobias=True),
            bn12=L.BatchNormalization(256, use_beta=False),
            bias12=L.Bias(shape=(256,)),
            conv13=L.Convolution2D(256, 512, ksize=3, stride=1, pad=1, nobias=True),
            bn13=L.BatchNormalization(512, use_beta=False),
            bias13=L.Bias(shape=(512,)),
            conv14=L.Convolution2D(512, 1024, ksize=3, stride=1, pad=1, nobias=True),
            bn14=L.BatchNormalization(1024, use_beta=False),
            bias14=L.Bias(shape=(1024,)),
            conv15=L.Convolution2D(1024, 512, ksize=1, stride=1, pad=0, nobias=True),
            bn15=L.BatchNormalization(512, use_beta=False),
            bias15=L.Bias(shape=(512,)),
            conv16=L.Convolution2D(512, 1024, ksize=3, stride=1, pad=1, nobias=True),
            bn16=L.BatchNormalization(1024, use_beta=False),
            bias16=L.Bias(shape=(1024,)),
            conv17=L.Convolution2D(1024, 512, ksize=1, stride=1, pad=0, nobias=True),
            bn17=L.BatchNormalization(512, use_beta=False),
            bias17=L.Bias(shape=(512,)),
            conv18=L.Convolution2D(512, 1024, ksize=3, stride=1, pad=1, nobias=True),
            bn18=L.BatchNormalization(1024, use_beta=False),
            bias18=L.Bias(shape=(1024,)),
            ###### new layer: output count (4) must track the number of classes
            conv19=L.Convolution2D(1024, 4, ksize=1, stride=1, pad=0),
        )
        self.train = False
        self.finetune = False

    def __call__(self, x):
        """Forward pass: conv/bn/bias stacks with leaky ReLU and 2x2 max pooling,
        ending with global average pooling to one score vector per image."""
        batch_size = x.data.shape[0]

        def act(i, h):
            # conv -> batchnorm -> learned bias -> leaky ReLU (slope 0.1).
            # BUGFIX: layer 14 originally used slope=1 (i.e. identity),
            # deviating from the 0.1 used by every other layer here and in
            # YOLOv2 below; debug prints around it suggest a leftover edit.
            conv = getattr(self, 'conv%d' % i)
            bn = getattr(self, 'bn%d' % i)
            bias = getattr(self, 'bias%d' % i)
            return F.leaky_relu(bias(bn(conv(h), finetune=self.finetune)), slope=0.1)

        ##### common layers
        h = act(1, x)
        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
        h = act(2, h)
        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
        for i in (3, 4, 5):
            h = act(i, h)
        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
        for i in (6, 7, 8):
            h = act(i, h)
        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
        for i in (9, 10, 11, 12, 13):
            h = act(i, h)
        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
        for i in (14, 15, 16, 17, 18):
            h = act(i, h)
        ###### new layer
        h = self.conv19(h)
        # Global average pooling over the remaining spatial extent.
        h = F.average_pooling_2d(h, h.data.shape[-1], stride=1, pad=0)
        # reshape to (batch, n_classes)
        y = F.reshape(h, (batch_size, -1))
        return y
class Darknet19Predictor(Chain):
    """Wraps a Darknet19 backbone and computes loss/accuracy for training."""

    def __init__(self, predictor):
        super(Darknet19Predictor, self).__init__(predictor=predictor)

    def __call__(self, x, t):
        """Forward *x* and score it against target *t*.

        Two target formats are supported:
        - 2-D one-hot targets: softmax the logits and use mean squared
          error; accuracy is computed against the argmax of *t*.
        - 1-D integer labels: softmax cross entropy on the raw logits.

        Returns (y, loss, accuracy) where y is the (possibly softmaxed)
        network output.
        """
        y = self.predictor(x)
        if t.ndim == 2:
            # One-hot label: squared error on the probability vector.
            y = F.softmax(y)
            loss = F.mean_squared_error(y, t)
            accuracy = F.accuracy(y, t.data.argmax(axis=1).astype(np.int32))
            return y, loss, accuracy
        # Plain class-index label: standard cross entropy.
        loss = F.softmax_cross_entropy(y, t)
        accuracy = F.accuracy(y, t)
        return y, loss, accuracy

    def predict(self, x):
        """Return class probabilities for input *x*."""
        return F.softmax(self.predictor(x))
##################################################
class YOLOv2(Chain):
    """
    YOLOv2 detection network.

    - It takes a (416, 416, 3) sized image as input.
    - Outputs a (batch, n_boxes * (5 + n_classes), H, W) prediction map:
      per anchor box 4 coordinates + 1 objectness + n_classes scores.
    """

    def __init__(self, n_classes, n_boxes):
        # He-normal initialization for all convolution weights.
        initialW = initializers.HeNormal()
        super(YOLOv2, self).__init__(
            conv1=L.Convolution2D(3, 32, ksize=3, stride=1, pad=1, nobias=True, initialW=initialW),
            bn1=L.BatchNormalization(32, use_beta=False, eps=2e-5),
            bias1=L.Bias(shape=(32,)),
            conv2=L.Convolution2D(32, 64, ksize=3, stride=1, pad=1, nobias=True, initialW=initialW),
            bn2=L.BatchNormalization(64, use_beta=False, eps=2e-5),
            bias2=L.Bias(shape=(64,)),
            conv3=L.Convolution2D(64, 128, ksize=3, stride=1, pad=1, nobias=True, initialW=initialW),
            bn3=L.BatchNormalization(128, use_beta=False, eps=2e-5),
            bias3=L.Bias(shape=(128,)),
            conv4=L.Convolution2D(128, 64, ksize=1, stride=1, pad=0, nobias=True, initialW=initialW),
            bn4=L.BatchNormalization(64, use_beta=False, eps=2e-5),
            bias4=L.Bias(shape=(64,)),
            conv5=L.Convolution2D(64, 128, ksize=3, stride=1, pad=1, nobias=True, initialW=initialW),
            bn5=L.BatchNormalization(128, use_beta=False, eps=2e-5),
            bias5=L.Bias(shape=(128,)),
            conv6=L.Convolution2D(128, 256, ksize=3, stride=1, pad=1, nobias=True, initialW=initialW),
            bn6=L.BatchNormalization(256, use_beta=False, eps=2e-5),
            bias6=L.Bias(shape=(256,)),
            conv7=L.Convolution2D(256, 128, ksize=1, stride=1, pad=0, nobias=True, initialW=initialW),
            bn7=L.BatchNormalization(128, use_beta=False, eps=2e-5),
            bias7=L.Bias(shape=(128,)),
            conv8=L.Convolution2D(128, 256, ksize=3, stride=1, pad=1, nobias=True, initialW=initialW),
            bn8=L.BatchNormalization(256, use_beta=False, eps=2e-5),
            bias8=L.Bias(shape=(256,)),
            conv9=L.Convolution2D(256, 512, ksize=3, stride=1, pad=1, nobias=True, initialW=initialW),
            bn9=L.BatchNormalization(512, use_beta=False, eps=2e-5),
            bias9=L.Bias(shape=(512,)),
            conv10=L.Convolution2D(512, 256, ksize=1, stride=1, pad=0, nobias=True, initialW=initialW),
            bn10=L.BatchNormalization(256, use_beta=False, eps=2e-5),
            bias10=L.Bias(shape=(256,)),
            conv11=L.Convolution2D(256, 512, ksize=3, stride=1, pad=1, nobias=True, initialW=initialW),
            bn11=L.BatchNormalization(512, use_beta=False, eps=2e-5),
            bias11=L.Bias(shape=(512,)),
            conv12=L.Convolution2D(512, 256, ksize=1, stride=1, pad=0, nobias=True, initialW=initialW),
            bn12=L.BatchNormalization(256, use_beta=False, eps=2e-5),
            bias12=L.Bias(shape=(256,)),
            conv13=L.Convolution2D(256, 512, ksize=3, stride=1, pad=1, nobias=True, initialW=initialW),
            bn13=L.BatchNormalization(512, use_beta=False, eps=2e-5),
            bias13=L.Bias(shape=(512,)),
            conv14=L.Convolution2D(512, 1024, ksize=3, stride=1, pad=1, nobias=True, initialW=initialW),
            bn14=L.BatchNormalization(1024, use_beta=False, eps=2e-5),
            bias14=L.Bias(shape=(1024,)),
            conv15=L.Convolution2D(1024, 512, ksize=1, stride=1, pad=0, nobias=True, initialW=initialW),
            bn15=L.BatchNormalization(512, use_beta=False, eps=2e-5),
            bias15=L.Bias(shape=(512,)),
            conv16=L.Convolution2D(512, 1024, ksize=3, stride=1, pad=1, nobias=True, initialW=initialW),
            bn16=L.BatchNormalization(1024, use_beta=False, eps=2e-5),
            bias16=L.Bias(shape=(1024,)),
            conv17=L.Convolution2D(1024, 512, ksize=1, stride=1, pad=0, nobias=True, initialW=initialW),
            bn17=L.BatchNormalization(512, use_beta=False, eps=2e-5),
            bias17=L.Bias(shape=(512,)),
            conv18=L.Convolution2D(512, 1024, ksize=3, stride=1, pad=1, nobias=True, initialW=initialW),
            bn18=L.BatchNormalization(1024, use_beta=False, eps=2e-5),
            bias18=L.Bias(shape=(1024,)),
            # Detection head below.
            # NOTE(review): bn19-bn22 do not pass eps=2e-5 like bn1-bn18 do;
            # likely an oversight if the library default differs — confirm
            # against the installed Chainer version before normalizing.
            conv19=L.Convolution2D(1024, 1024, ksize=3, stride=1, pad=1, nobias=True, initialW=initialW),
            bn19=L.BatchNormalization(1024, use_beta=False),
            bias19=L.Bias(shape=(1024,)),
            conv20=L.Convolution2D(1024, 1024, ksize=3, stride=1, pad=1, nobias=True, initialW=initialW),
            bn20=L.BatchNormalization(1024, use_beta=False),
            bias20=L.Bias(shape=(1024,)),
            # conv21 compresses the high-resolution (512-channel) skip feature to 64 channels.
            conv21=L.Convolution2D(512, 64, ksize=1, stride=1, pad=0, nobias=True, initialW=initialW),
            bn21=L.BatchNormalization(64, use_beta=False),
            bias21=L.Bias(shape=(64,)),
            # 1024 deep channels + 64*4 reorganized skip channels are concatenated.
            conv22=L.Convolution2D(1024 + 64 * 4, 1024, ksize=3, stride=1, pad=1, nobias=True, initialW=initialW),
            bn22=L.BatchNormalization(1024, use_beta=False),
            bias22=L.Bias(shape=(1024,)),
            # Final prediction layer is zero-initialized.
            conv23=L.Convolution2D(1024, n_boxes * (5 + n_classes), ksize=1,
                                   stride=1, pad=0, nobias=True,
                                   initialW=initializers.Constant(0)),
            bias23=L.Bias(shape=(n_boxes * (5 + n_classes),)),
        )
        self.finetune = False
        self.n_boxes = n_boxes
        self.n_classes = n_classes

    def __call__(self, x):
        """Forward pass returning the raw detection map for input batch x."""
        h = F.leaky_relu(self.bias1(self.bn1(self.conv1(x), finetune=self.finetune)), slope=0.1)
        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
        h = F.leaky_relu(self.bias2(self.bn2(self.conv2(h), finetune=self.finetune)), slope=0.1)
        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
        h = F.leaky_relu(self.bias3(self.bn3(self.conv3(h), finetune=self.finetune)), slope=0.1)
        h = F.leaky_relu(self.bias4(self.bn4(self.conv4(h), finetune=self.finetune)), slope=0.1)
        h = F.leaky_relu(self.bias5(self.bn5(self.conv5(h), finetune=self.finetune)), slope=0.1)
        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
        h = F.leaky_relu(self.bias6(self.bn6(self.conv6(h), finetune=self.finetune)), slope=0.1)
        h = F.leaky_relu(self.bias7(self.bn7(self.conv7(h), finetune=self.finetune)), slope=0.1)
        h = F.leaky_relu(self.bias8(self.bn8(self.conv8(h), finetune=self.finetune)), slope=0.1)
        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
        h = F.leaky_relu(self.bias9(self.bn9(self.conv9(h), finetune=self.finetune)), slope=0.1)
        h = F.leaky_relu(self.bias10(self.bn10(self.conv10(h), finetune=self.finetune)), slope=0.1)
        h = F.leaky_relu(self.bias11(self.bn11(self.conv11(h), finetune=self.finetune)), slope=0.1)
        h = F.leaky_relu(self.bias12(self.bn12(self.conv12(h), finetune=self.finetune)), slope=0.1)
        h = F.leaky_relu(self.bias13(self.bn13(self.conv13(h), finetune=self.finetune)), slope=0.1)
        # Keep the conv13 output as the passthrough (skip) feature before downsampling.
        high_resolution_feature = h
        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
        h = F.leaky_relu(self.bias14(self.bn14(self.conv14(h), finetune=self.finetune)), slope=0.1)
        h = F.leaky_relu(self.bias15(self.bn15(self.conv15(h), finetune=self.finetune)), slope=0.1)
        h = F.leaky_relu(self.bias16(self.bn16(self.conv16(h), finetune=self.finetune)), slope=0.1)
        h = F.leaky_relu(self.bias17(self.bn17(self.conv17(h), finetune=self.finetune)), slope=0.1)
        h = F.leaky_relu(self.bias18(self.bn18(self.conv18(h), finetune=self.finetune)), slope=0.1)
        h = F.leaky_relu(self.bias19(self.bn19(self.conv19(h), finetune=self.finetune)), slope=0.1)
        h = F.leaky_relu(self.bias20(self.bn20(self.conv20(h), finetune=self.finetune)), slope=0.1)
        # Compress the skip feature, space-to-depth it, and fuse with the deep path.
        h2 = high_resolution_feature
        h2 = F.leaky_relu(self.bias21(self.bn21(self.conv21(h2), finetune=self.finetune)), slope=0.1)
        h2 = reorg(h2)
        h = F.concat((h2, h), axis=1)
        h = F.leaky_relu(self.bias22(self.bn22(self.conv22(h), finetune=self.finetune)), slope=0.1)
        h = self.bias23(self.conv23(h))
        return h
class YOLOv2Predictor(Chain):
    def __init__(self, predictor):
        """Wrap a YOLOv2 network with training/prediction bookkeeping."""
        super(YOLOv2Predictor, self).__init__(predictor=predictor)
        # Anchor box priors as (w, h) pairs — presumably in grid-cell units;
        # the values match the standard YOLOv2 VOC anchors (TODO confirm).
        self.anchors = [[0.57273, 0.677385], [1.87446, 2.06253], [3.33843, 5.47434], [7.88282, 3.52778], [9.77052, 9.16828]]
        # Detection confidence threshold.
        self.thresh = 0.7
        # Below this IoU/confidence, predictions are ignored (TODO confirm exact use in __call__).
        self.ignore_thresh = 0.1
        # Counter of samples processed so far.
        self.seen = 0
        # Sample count below which training is treated as the "unstable" warm-up phase.
        self.unstable_seen = 15000
def __call__(self, input_x, t, ignore_t):
if isinstance(input_x, chainer.Variable):
device = cuda.get_device(input_x.data)
| |
<reponame>jossef/power-scanner<gh_stars>1-10
__author__ = '<NAME>'
def get_ftp_banner_info(banner):
    """Return (server, operating_system) matched from an FTP banner.

    Matches case-insensitively against the module-level ``ftp_servers``
    hint table; returns (None, None) when no hint matches.
    """
    # Lower the banner's case in order to get a case-insensitive match
    banner = banner.lower()
    # Single pass instead of the original any() + generator double scan.
    # Also replaces the Python-2-only iteritems()/.next() idioms with
    # items()/return, which behaves identically on Python 2 and 3.
    for hint, os in ftp_servers.items():
        if hint in banner:
            return hint, os
    return None, None
def get_smtp_banner_info(banner):
    """Return (server, operating_system) matched from an SMTP banner.

    Matches case-insensitively against the module-level ``smtp_servers``
    hint table; returns (None, None) when no hint matches.
    """
    # Lower the banner's case in order to get a case-insensitive match
    banner = banner.lower()
    # Single pass instead of the original any() + generator double scan.
    # Also replaces the Python-2-only iteritems()/.next() idioms with
    # items()/return, which behaves identically on Python 2 and 3.
    for hint, os in smtp_servers.items():
        if hint in banner:
            return hint, os
    return None, None
def get_http_banner_info(banner):
    """Return (server, operating_system) guessed from an HTTP Server banner.

    The banner is first looked up verbatim in ``known_banner_web_servers``;
    the OS is then inferred from the windows/linux/mac hint lists.  Unknown
    banners are returned as the server name themselves, possibly flagged as
    hosting-protected.
    """
    # Lower the banner's case in order to get a case-insensitive match
    banner = banner.lower()
    server = known_banner_web_servers.get(banner, None)
    operating_system = None

    # If we successfully matched a known server, try to pin down the OS.
    if server:
        if any(item in banner for item in windows_hints):
            operating_system = 'windows'
        elif any(item in banner for item in linux_hints):
            # BUGFIX: the original iterated ``item in banner`` (booleans), so
            # ``distribution`` became True/False — producing "linux (False)" —
            # instead of the matching hint text.  It also used the Python-2-only
            # generator ``.next()`` method.
            distribution = next(item for item in linux_hints if item in banner)
            operating_system = 'linux ({0})'.format(distribution)
        elif any(item in banner for item in mac_os_hints):
            operating_system = 'mac os'
    # Otherwise, let's try to guess using hints
    else:
        if any(item in banner for item in hosting_hints):
            operating_system = 'filtered (hosting protection)'
        # Unknown server: report the raw (lowered) banner itself.
        # NOTE(review): indentation of this line was ambiguous in the source;
        # this placement preserves the dict-matched name when one was found.
        server = banner

    return server, operating_system
# -------------------------------------------------------------------
# Static hard-coded data below (in real life should be more dynamic..)
# -- -- -- -- -- -- -- --
# Most info has been scraped from http://www.computec.ch/projekte/httprecon/?s=database&t=head_existing&f=banner
known_banner_web_servers = {
'0w/0.8c': '0w 0.8c',
'webstar/2.0 id/33333': '4d webstar 2.0',
'webstar/2.1.1 id/33333': '4d webstar 2.1.1',
'webstar/3.0.2 id/878810': '4d webstar 3.0.2',
'webstar/4.2(ssl) id/79106': '4d webstar 4.2',
'webstar/4.5(ssl) id/878810': '4d webstar 4.5',
'4d_webstar_s/5.3.1 (macos x)': '4d webstar 5.3.1',
'4d_webstar_s/5.3.3 (macos x)': '4d webstar 5.3.3',
'4d_webstar_s/5.4.0 (macos x)': '4d webstar 5.4.0',
'aidex/1.1 (win32)': 'aidex mini-webserver 1.1',
'naviserver/2.0 aolserver/2.3.3': 'aolserver 2.3.3',
'aolserver/3.3.1+ad13': 'aolserver 3.3.1',
'aolserver 3.4.2': 'aolserver 3.4.2',
'aolserver/3.4.2 sp/1': 'aolserver 3.4.2',
'aolserver/3.5.10': 'aolserver 3.4.2',
'aolserver/3.5.0': 'aolserver 3.5.0',
'aolserver/4.0.10': 'aolserver 4.0.10',
'aolserver/4.0.10a': 'aolserver 4.0.10a',
'aolserver/4.0.11a': 'aolserver 4.0.11a',
'aolserver/4.5.0': 'aolserver 4.5.0',
'abyss/2.0.0.20-x2-win32 abysslib/2.0.0.20': 'abyss 2.0.0.20 x2',
'abyss/2.4.0.3-x2-win32 abysslib/2.4.0.3': 'abyss 2.4.0.3 x2',
'abyss/2.5.0.0-x1-win32 abysslib/2.5.0.0': 'abyss 2.5.0.0 x1',
'abyss/2.5.0.0-x2-linux abysslib/2.5.0.0': 'abyss 2.5.0.0 x2',
'abyss/2.5.0.0-x2-macos x abysslib/2.5.0.0': 'abyss 2.5.0.0 x2',
'abyss/2.5.0.0-x2-win32 abysslib/2.5.0.0': 'abyss 2.5.0.0 x2',
'abyss/2.6.0.0-x2-linux abysslib/2.6.0.0': 'abyss 2.6.0.0 x2',
'allegroserve/1.2.50': 'allegroserve 1.2.50',
'anti-web v3.0.7 (fear and loathing on the www)': 'anti-web httpd 3.0.7',
'antiweb/4.0beta13': 'anti-web httpd 4.0beta13',
'apache/1.2.6': 'apache 1.2.6',
'apache/1.3.12 (unix) php/3.0.14': 'apache 1.3.12',
'apache/1.3.17 (win32)': 'apache 1.3.17',
'apache/1.3.26 (linux/suse) mod_ssl/2.8.10 openssl/0.9.6g php/4.2.2': 'apache 1.3.26',
'apache/1.3.26 (unitedlinux) mod_python/2.7.8 python/2.2.1 php/4.2.2': 'apache 1.3.26',
'apache/1.3.26 (unix)': 'apache 1.3.26',
'apache/1.3.26 (unix) debian gnu/linux php/4.1.2': 'apache 1.3.26',
'apache/1.3.26 (unix) debian gnu/linux mod_ssl/2.8.9 openssl/0.9.6g': 'apache 1.3.26',
'mit web server apache/1.3.26 mark/1.5 (unix) mod_ssl/2.8.9': 'apache 1.3.26',
'apache/1.3.27 (linux/suse) mod_ssl/2.8.12 openssl/0.9.6i php/4.3.1': 'apache 1.3.27',
'apache/1.3.27 (turbolinux) mod_throttle/3.1.2 mod_ruby/0.9.7 ruby/1.6.4': 'apache 1.3.27',
'apache/1.3.27 (unix) (red-hat/linux)': 'apache 1.3.27',
'apache/1.3.27 (unix) (red-hat/linux) mod_python/2.7.8 python/1.5.2': 'apache 1.3.27',
'apache/1.3.27 (unix) (red-hat/linux) mod_ssl/2.8.12 openssl/0.9.6b': 'apache 1.3.27',
'apache/1.3.27 (unix) php/4.3.1': 'apache 1.3.27',
'apache/1.3.27 (unix) mod_perl/1.27': 'apache 1.3.27',
'apache/1.3.27 (win32)': 'apache 1.3.27',
'apache/1.3.28 (unix) mod_perl/1.27 php/4.3.3': 'apache 1.3.28',
'apache/1.3.29 (debian gnu/linux) mod_perl/1.29': 'apache 1.3.29',
'apache/1.3.29 (unix)': 'apache 1.3.29',
'apache/1.3.31 (unix)': 'apache 1.3.31',
'anu_webapp': 'apache 1.3.33',
'apache/1.3.33 (darwin) php/5.2.1': 'apache 1.3.33',
'apache/1.3.33 (darwin) mod_ssl/2.8.24 openssl/0.9.7l mod_jk/1.2.25': 'apache 1.3.33',
'apache/1.3.33 (debian gnu/linux) php/4.3.10-20 mod_perl/1.29': 'apache 1.3.33',
'apache/1.3.33 (debian gnu/linux) php/4.3.8-9 mod_ssl/2.8.22': 'apache 1.3.33',
'apache/1.3.33 (debian gnu/linux) mod_gzip/1.3.26.1a php/4.3.10-22': 'apache 1.3.33',
'apache/1.3.33 (debian gnu/linux) mod_python/2.7.10 python/2.3.4': 'apache 1.3.33',
'apache/1.3.33 (openpkg/2.4) mod_gzip/1.3.26.1a php/4.3.11 mod_watch/3.17': 'apache 1.3.33',
'apache/1.3.33 (unix) php/4.3.10 frontpage/5.0.2.2510': 'apache 1.3.33',
'apache/1.3.33 (unix) mod_auth_passthrough/1.8 mod_bwlimited/1.4': 'apache 1.3.33',
'apache/1.3.33 (unix) mod_fastcgi/2.4.2 mod_gzip/1.3.26.1a mod_ssl/2.8.22': 'apache 1.3.33',
'apache/1.3.33 (unix) mod_perl/1.29': 'apache 1.3.33',
'apache/1.3.33 (unix) mod_ssl/2.8.22 openssl/0.9.7d php/4.3.10': 'apache 1.3.33',
'apache/1.3.34': 'apache 1.3.34',
'apache/1.3.34 (debian) authmysql/4.3.9-2 mod_ssl/2.8.25 openssl/0.9.8c': 'apache 1.3.34',
'apache/1.3.34 (debian) php/4.4.4-8+etch4': 'apache 1.3.34',
'apache/1.3.34 (debian) php/5.2.0-8+etch7 mod_ssl/2.8.25 openssl/0.9.8c': 'apache 1.3.34',
'apache/1.3.34 (unix) (gentoo) mod_fastcgi/2.4.2': 'apache 1.3.34',
'apache/1.3.34 (unix) (gentoo) mod_perl/1.30': 'apache 1.3.34',
'apache/1.3.34 (unix) (gentoo) mod_ssl/2.8.25 openssl/0.9.7e': 'apache 1.3.34',
'apache/1.3.34 (unix) php/4.4.2 mod_perl/1.29 dav/1.0.3 mod_ssl/2.8.25': 'apache 1.3.34',
'apache/1.3.34 (unix) mod_jk/1.2.15 mod_perl/1.29 mod_gzip/1.3.26.1a': 'apache 1.3.34',
'apache/1.3.35 (unix)': 'apache 1.3.35',
'apache/1.3.27 (unix) (red-hat/linux) mod_perl/1.26 php/4.3.3': 'apache 1.3.37',
'apache/1.3.37 (unix) frontpage/5.0.2.2635 mod_ssl/2.8.28 openssl/0.9.7m': 'apache 1.3.37',
'apache/1.3.37 (unix) php/4.3.11': 'apache 1.3.37',
'apache/1.3.37 (unix) php/4.4.7 mod_throttle/3.1.2 frontpage/5.0.2.2635': 'apache 1.3.37',
'apache/1.3.37 (unix) php/5.1.2': 'apache 1.3.37',
'apache/1.3.37 (unix) php/5.2.0': 'apache 1.3.37',
'apache/1.3.37 (unix) php/5.2.1': 'apache 1.3.37',
'apache/1.3.37 (unix) php/5.2.3 mod_auth_passthrough/1.8': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_auth_passthrough/1.8 mod_log_bytes/1.2': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_perl/1.29': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_perl/1.30 mod_ssl/2.8.28 openssl/0.9.7e-p1': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_ssl/2.8.28 openssl/0.9.7d': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_ssl/2.8.28 openssl/0.9.8d': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_throttle/3.1.2 dav/1.0.3 mod_fastcgi/2.4.2': 'apache 1.3.37',
'apache/1.3.37 ben-ssl/1.57 (unix) mod_gzip/1.3.26.1a mod_fastcgi/2.4.2': 'apache 1.3.37',
'apache/1.3.37.fb1': 'apache 1.3.37',
'apache/1.3.39 (unix) dav/1.0.3 mod_auth_passthrough/1.8': 'apache 1.3.39',
'apache/1.3.39 (unix) php/4.4.7': 'apache 1.3.39',
'apache/1.3.39 (unix) php/5.2.3 mod_bwlimited/1.4': 'apache 1.3.39',
'apache/1.3.39 (unix) php/5.2.5 dav/1.0.3 mod_ssl/2.8.30 openssl/0.9.7c': 'apache 1.3.39',
'apache/1.3.39 (unix) mod_auth_passthrough/1.8 mod_log_bytes/1.2': 'apache 1.3.39',
'apache/1.3.39 (unix) mod_fastcgi/2.4.2 mod_auth_passthrough/1.8': 'apache 1.3.39',
'apache/1.3.39 ben-ssl/1.57 (unix) mod_perl/1.30 frontpage/5.0.2.2624': 'apache 1.3.39',
'apache/1.3.41 (unix) php/5.2.8': 'apache 1.3.41',
'apache/2.0.45 (unix) mod_jk2/2.0.3-dev': 'apache 2.0.45',
'apache/2.0.45 (unix) mod_perl/1.99_09-dev perl/v5.6.1 covalent_auth/2.3': 'apache 2.0.45',
'apache/2.0.46 (centos)': 'apache 2.0.46',
'apache/2.0.46 (red hat)': 'apache 2.0.46',
'apache/2.0.46 (white box)': 'apache 2.0.46',
'apache/2.0.48 (redhat 9/server4you)': 'apache 2.0.48',
'apache/2.0.49 (linux/suse)': 'apache 2.0.49',
'apache/2.0.49 (unix) php/4.3.9': 'apache 2.0.49',
'apache/2.0.50 (linux/suse)': 'apache 2.0.50',
'apache/2.0.50 (ydl)': 'apache 2.0.50',
'apache/2.0.51 (fedora)': 'apache 2.0.51',
'apache/2.0.52 (centos)': 'apache 2.0.52',
'apache/2.0.52 (fedora)': 'apache 2.0.52',
'apache/2.0.52 (red hat)': 'apache 2.0.52',
'apache/2.0.52 (unix)': 'apache 2.0.52',
'apache/2.0.52 (unix) dav/2 php/4.4.1': 'apache 2.0.52',
'apache/2.0.52 (win32)': 'apache 2.0.52',
'apache/2.0.52 (win32) mod_ssl/2.0.52 openssl/0.9.7e mod_auth_sspi/1.0.1': 'apache 2.0.52',
'apache/2.0.53 (linux/suse)': 'apache 2.0.53',
'apache/2.0.54 (debian gnu/linux) dav/2 svn/1.1.4': 'apache 2.0.54',
'apache/2.0.54 (debian gnu/linux) php/4.3.10-18': 'apache 2.0.54',
'apache/2.0.54 (debian gnu/linux) php/4.3.10-22 mod_ssl/2.0.54': 'apache 2.0.54',
'apache/2.0.54 (debian gnu/linux) php/5.1.2': 'apache 2.0.54',
'apache/2.0.54 (debian gnu/linux) mod_jk/1.2.14 php/5.2.4-0.dotdeb.0 with': 'apache 2.0.54',
'apache/2.0.54 (debian gnu/linux) mod_ssl/2.0.54 openssl/0.9.7e php/4.4.6': 'apache 2.0.54',
'apache/2.0.54 (fedora)': 'apache 2.0.54',
'apache/2.0.54 (linux/suse)': 'apache 2.0.54',
'apache/2.0.54 (netware) mod_jk/1.2.14': 'apache 2.0.54',
'apache/2.0.54 (unix) php/4.4.7 mod_ssl/2.0.54 openssl/0.9.7e': 'apache 2.0.54',
'apache/2.0.55': 'apache 2.0.55',
'apache/2.0.55 (freebsd) php/5.2.3 with suhosin-patch': 'apache 2.0.55',
'apache/2.0.55 (ubuntu) dav/2 php/4.4.2-1.1 mod_ssl/2.0.55 openssl/0.9.8b': 'apache 2.0.55',
'apache/2.0.55 (ubuntu) php/5.1.2': 'apache 2.0.55',
'apache/2.0.55 (unix) dav/2 mod_ssl/2.0.55 openssl/0.9.8a php/4.4.4': 'apache 2.0.55',
'apache/2.0.55 (unix) mod_ssl/2.0.55 openssl/0.9.7i mod_jk/1.2.15': 'apache 2.0.55',
'apache/2.0.55 (unix) mod_ssl/2.0.55 openssl/0.9.8a jrun/4.0': 'apache 2.0.55',
'apache/2.0.58 (unix)': 'apache 2.0.58',
'apache/2.0.58 (win32) php/5.1.4': 'apache 2.0.58',
'apache/2.0.58 (win32) php/5.1.5': 'apache 2.0.58',
'apache/2.0.59 (freebsd) dav/2 php/5.2.1 with suhosin-patch': 'apache 2.0.59',
'apache/2.0.59 (freebsd) mod_fastcgi/2.4.2 php/4.4.4 with suhosin-patch': 'apache 2.0.59',
'apache/2.0.59 (netware) mod_jk/1.2.15': 'apache 2.0.59',
'apache/2.0.59 (unix) mod_ssl/2.0.59 openssl/0.9.7e mod_jk/1.2.15': 'apache 2.0.59',
'apache/2.0.59 (unix) mod_ssl/2.0.59 openssl/0.9.8d mod_fastcgi/2.4.2': 'apache 2.0.59',
'apache/2.0.63 (red hat)': 'apache 2.0.63',
'apache/2.2.0 (freebsd) mod_ssl/2.2.0 openssl/0.9.7e-p1 dav/2 php/5.1.2': 'apache 2.2.0',
'apache/2.2.11 (freebsd)': 'apache 2.2.11',
'apache/2.2.2 (fedora)': 'apache 2.2.2',
'apache/2.2.3 (centos)': 'apache 2.2.3',
'apache/2.2.3 (debian) dav/2 svn/1.4.2 mod_python/3.2.10 python/2.4.4': 'apache 2.2.3',
'apache/2.2.3 (debian) php/4.4.4-8+etch4': 'apache 2.2.3',
'apache/2.2.3 (debian) php/5.2.0-8+etch7': 'apache 2.2.3',
'apache/2.2.3 (debian) php/5.2.0-8+etch7 mod_ssl/2.2.3 openssl/0.9.8c': 'apache 2.2.3',
'apache/2.2.3 (debian) php/5.2.0-8+etch7 mod_ssl/2.2.3 openssl/0.9.8e': 'apache 2.2.3',
'apache/2.2.3 (debian) php/5.2.0-8+etch9': 'apache 2.2.3',
'apache/2.2.3 (debian) mod_fastcgi/2.4.2 php/5.2.0-8+etch7 mod_ssl/2.2.3': 'apache 2.2.3',
'apache/2.2.3 (debian) mod_jk/1.2.18 php/5.2.0-8+etch5~pu1 mod_ssl/2.2.3': 'apache 2.2.3',
'apache/2.2.3 (debian) mod_jk/1.2.18 php/5.2.0-8+etch7': 'apache 2.2.3',
'apache/2.2.3 (debian) mod_ssl/2.2.3 openssl/0.9.8c php/5.2.4': 'apache 2.2.3',
'apache/2.2.3 (linux/suse)': 'apache 2.2.3',
'apache/2.2.3 (mandriva linux/prefork-1mdv2007.0)': 'apache 2.2.3',
'apache/2.2.3 (red hat)': 'apache 2.2.3',
'apache/2.2.3 (unix) php/5.2.1': 'apache 2.2.3',
'apache/2.2.4 (debian) php/4.4.4-9+lenny1 mod_ssl/2.2.4 openssl/0.9.8e': 'apache 2.2.4',
'apache/2.2.4 (fedora)': 'apache 2.2.4',
'apache/2.2.4 (fedora) mod_ssl/2.2.4 openssl/0.9.8b dav/2': 'apache 2.2.4',
'apache/2.2.4 (freebsd)': 'apache 2.2.4',
'apache/2.2.4 (unix) dav/2 php/5.2.1rc3-dev mod_ruby/1.2.5': 'apache 2.2.4',
'apache/2.2.4 (unix) mod_ssl/2.2.4 openssl/0.9.7e dav/2 svn/1.4.2': 'apache 2.2.4',
'apache/2.2.4 (win32)': 'apache 2.2.4',
'apache/2.2.6 (debian) dav/2 php/4.4.4-9 mod_ssl/2.2.6 openssl/0.9.8g': 'apache 2.2.6',
'apache/2.2.6 (debian) dav/2 svn/1.4.4 mod_python/3.3.1 python/2.4.4': 'apache 2.2.6',
'apache/2.2.6 (debian) php/5.2.4-2 with suhosin-patch mod_ssl/2.2.6': 'apache 2.2.6',
'apache/2.2.6 (freebsd) mod_ssl/2.2.6 openssl/0.9.8e dav/2': 'apache 2.2.6',
'apache/2.2.6 (unix) mod_ssl/2.2.6 openssl/0.9.7a dav/2 mod_mono/1.2.4': 'apache 2.2.6',
'apache/2.2.6 (unix) mod_ssl/2.2.6 openssl/0.9.7a mod_jk/1.2.25': 'apache 2.2.6',
'apache/2.2.6 (unix) mod_ssl/2.2.6 openssl/0.9.8b dav/2 php/5.2.5 with': 'apache 2.2.6',
'apache': 'apache 2.2.8',
'apache/2.2.8 (freebsd) mod_ssl/2.2.8 openssl/0.9.8g dav/2 php/5.2.5': 'apache 2.2.8',
'apache/2.2.8 (unix) mod_ssl/2.2.8 openssl/0.9.8g': 'apache 2.2.8',
'apache/2.2.8 (unix)': 'apache 2.2.9',
'apache/2.3.0-dev (unix)': 'apache 2.3.0',
'araneida/0.84': 'araneida 0.84',
'\'s webserver': 'ashleys webserver',
'badblue/2.4': 'badblue 2.4',
'badblue/2.5': 'badblue 2.5',
'badblue/2.6': 'badblue 2.6',
'badblue/2.7': 'badblue 2.7',
'barracudaserver.com (posix)': 'barracudadrive 3.9.1',
'basehttp/0.3 python/2.4.4': 'basehttpserver | |
<reponame>VisualComputingInstitute/CROWDBOT_perception<filename>reid/scripts/triplet_reid/datasets/lip.py<gh_stars>1-10
"""
There are multiple folders. One for LIP (What we want), one for Fashion Design (Ac), and one for multiple people (CIHP)
Folder Structure
Testing_images/Testing_images/testing_images: Test images
TrainVal_images/TrainVal_images/train_images: train images, ignore text files
TrainVal_images/TrainVal_images/val_images: train images, ignore text files
TrainVal_parsing_annotations/TrainVal_images/train_images: Train segmetation map
TrainVal_parsing_annotations/TrainVal_images/val_images: Val segmentation map
TrainVal_pose_annotations: JSON and CSV files of pose annotations,
loaded from source with caching.
"""
import pandas as pd
from logger import get_logger
import os
from .pose_dataset import JointInfo
from datasets import register_dataset
from datasets.utils import HeaderItem
from datasets.pose_dataset import PoseDataset
from builders import transform_builder
import numpy as np
from settings import Config
from evaluation import Evaluation
import torch
from writers.dummy import DummyWriter
from writers.memory import MemoryWriter
from utils import cache_result_on_disk
from metrics import calculate_pckh
from metrics import calc_seg_score
from transforms.flip_lr_with_pairs import FliplrWithPairs
import imgaug as ia
from metrics import fast_hist
def make_joint_info():
    """Build the 16-joint MPII-style skeleton description used by LIP.

    Returns a ``JointInfo`` holding abbreviated and full joint names,
    with ``stick_figure_edges`` set to the limb/torso connections used
    when visualizing a pose.
    """
    abbreviations = [
        'r_ank', 'r_kne', 'r_hip', 'l_hip', 'l_kne', 'l_ank', 'b_pelv', 'b_spine',
        'b_neck', 'b_head', 'r_wri', 'r_elb', 'r_sho', 'l_sho', 'l_elb', 'l_wri']
    descriptions = [
        'right ankle', 'right knee', 'right hip', 'left hip', 'left knee',
        'left ankle', 'pelvis', 'spine', 'neck', 'head', 'right wrist',
        'right elbow', 'right shoulder', 'left shoulder', 'left elbow',
        'left wrist']
    info = JointInfo(abbreviations, descriptions)
    ids = info.ids
    # pairs of joint ids that are connected by a bone when drawing
    info.stick_figure_edges = [
        (ids.l_sho, ids.l_elb), (ids.r_sho, ids.r_elb), (ids.l_elb, ids.l_wri),
        (ids.r_elb, ids.r_wri), (ids.l_hip, ids.l_kne), (ids.r_hip, ids.r_kne),
        (ids.l_kne, ids.l_ank), (ids.r_kne, ids.r_ank), (ids.b_neck, ids.b_head),
        (ids.b_pelv, ids.b_spine)]
    return info
# LIP part-segmentation label map: pixel value in the annotation PNG -> part
# name. Ids 14-19 come in Left/Right pairs that must be swapped when an image
# is mirrored (see make_seg_info / SegInfo.pairs).
CLASSES = {
    0: "Background",
    1: "Hat",
    2: "Hair",
    3: "Glove",
    4: "Sunglasses",
    5: "UpperClothes",
    6: "Dress",
    7: "Coat",
    8: "Socks",
    9: "Pants",
    10: "Jumpsuits",
    11: "Scarf",
    12: "Skirt",
    13: "Face",
    14: "Left-arm",
    15: "Right-arm",
    16: "Left-leg",
    17: "Right-leg",
    18: "Left-shoe",
    19: "Right-shoe"
}
class SegInfo(object):
    """Plain container for segmentation metadata.

    A namedtuple would be natural here, but instances must survive
    pickling inside the on-disk cache, so a simple class is used instead.

    Attributes:
        id_to_label: dict mapping class id -> human-readable label.
        pairs: dict mapping left-part class id <-> right-part class id,
            used to relabel parts when an image is horizontally flipped.
    """

    def __init__(self, id_to_label, pairs):
        self.id_to_label = id_to_label
        self.pairs = pairs
def make_seg_info():
    """Create the SegInfo for LIP: the id->label map plus left/right pairs.

    The pair mapping is symmetric: every 'Left-*' class id maps to its
    'Right-*' counterpart and vice versa, so a horizontal flip can relabel
    body parts with a single lookup per pixel value.
    """
    id_to_label = CLASSES
    label_to_id = {name: idx for idx, name in id_to_label.items()}

    mirror = dict()
    for name, idx in label_to_id.items():
        if name.startswith('Left'):
            partner = 'Right' + name[len('Left'):]
        elif name.startswith('Right'):
            partner = 'Left' + name[len('Right'):]
        else:
            # label has no sided counterpart (e.g. 'Hat', 'Face')
            continue
        mirror[idx] = label_to_id[partner]
    return SegInfo(id_to_label, mirror)
# Column layout of the LIP pose-annotation CSVs: the image id followed by one
# (x, y, visibility) triple per joint, in the same joint order used by
# make_joint_info().
COLS = ["image_id",
        "r_ank_x", "r_ank_y", "r_ank_v",
        "r_kne_x", "r_kne_y", "r_kne_v",
        "r_hip_x", "r_hip_y", "r_hip_v",
        "l_hip_x", "l_hip_y", "l_hip_v",
        "l_kne_x", "l_kne_y", "l_kne_v",
        "l_ank_x", "l_ank_y", "l_ank_v",
        "b_pel_x", "b_pel_y", "b_pel_v",
        "b_spi_x", "b_spi_y", "b_spi_v",
        "b_nec_x", "b_nec_y", "b_nec_v",
        "b_hea_x", "b_hea_y", "b_hea_v",
        "r_wri_x", "r_wri_y", "r_wri_v",
        "r_elb_x", "r_elb_y", "r_elb_v",
        "r_sho_x", "r_sho_y", "r_sho_v",
        "l_sho_x", "l_sho_y", "l_sho_v",
        "l_elb_x", "l_elb_y", "l_elb_v",
        "l_wri_x", "l_wri_y", "l_wri_v"]
@cache_result_on_disk('cached/lip', [0, 1], forced=False)
def make_dataset(data_path, split="train"):
    """
    Makes the LIP dataset.

    Reads the pose-annotation CSV for the requested split, keeps only the
    entries whose image and segmentation files actually exist on disk, and
    returns ``(data, header, info)``:
      data   -- list of dicts with 'path', 'coords' (16x2 array of x/y per
                joint), 'seg_path' and 'head_size' (head-to-neck distance,
                used for PCKh evaluation)
      header -- field descriptions for the loader
      info   -- joint and segmentation metadata

    Raises:
        NotImplementedError: for split == 'test' (the test split ships
            without pose/segmentation annotations).
        ValueError: for an unknown split name.
    """
    logger = get_logger()
    if split == "train":
        img_data_path = os.path.join(data_path, 'train_images')
        seg_data_path = os.path.join(data_path, 'TrainVal_parsing_annotations', 'train_segmentations')
        pose_anno_path = os.path.join(data_path, 'TrainVal_pose_annotations', 'lip_train_set.csv')
    elif split == "val":
        img_data_path = os.path.join(data_path, 'val_images')
        seg_data_path = os.path.join(data_path, 'TrainVal_parsing_annotations', 'val_segmentations')
        pose_anno_path = os.path.join(data_path, 'TrainVal_pose_annotations', 'lip_val_set.csv')
    elif split == "test":
        # TODO the test split has no annotations; supporting it would need
        # a different return shape (images only).
        raise NotImplementedError
    else:
        # previously an unknown split fell through to a NameError below
        raise ValueError("Unknown split '{}'".format(split))
    pose_anno = pd.read_csv(pose_anno_path, header=0, names=COLS)
    joint_info = make_joint_info()
    data = []
    for index, datum in pose_anno.iterrows():
        image_id = datum['image_id'][:-len('.jpg')]
        img_path = os.path.join(img_data_path, image_id + '.jpg')
        if not os.path.isfile(img_path):
            logger.warning('File %s was not found', img_path)
            continue
        seg_path = os.path.join(seg_data_path, image_id + '.png')
        if not os.path.isfile(seg_path):
            logger.warning('File %s was not found', seg_path)
            continue
        # The remaining columns are (x, y, visibility) triples per joint.
        # BUG FIX: `datum[1:]` is a pandas Series and Series.reshape was
        # removed from pandas — convert to a float ndarray first (missing
        # joints are NaN, hence float).
        coords = np.asarray(datum.iloc[1:], dtype=np.float64).reshape(-1, 3)
        # drop the visibility column, keep (x, y)
        coords = coords[:, [0, 1]]
        # TODO Is this correct: head size for PCKh as head-to-neck distance
        head_size = np.linalg.norm(coords[joint_info.ids.b_head] - coords[joint_info.ids.b_neck])
        d = {
            'path': img_path,
            'coords': coords,
            'seg_path': seg_path,
            'head_size': head_size
        }
        data.append(d)
    header = {
        'path': HeaderItem((), ""),
        'coords': HeaderItem((), ""),
        'seg': HeaderItem((), "")
    }
    seg_info = make_seg_info()
    info = {
        'joint_info': joint_info,
        'num_joints': joint_info.n_joints,
        'seg_info': seg_info,
        'num_seg_classes': len(CLASSES)
    }
    return data, header, info
@register_dataset('lip')
class Lip(PoseDataset):
    """Look Into Person (LIP) dataset.

    Serves images together with 16-joint pose keypoints and a per-pixel
    body-part segmentation map, applying an optional horizontal flip
    (which swaps left/right joints and part labels) followed by the
    configured augmentation pipeline.
    """
    def __init__(self, data, header, info, flip_prob, *args, **kwargs):
        # data/header/info come from make_dataset(); flip_prob is the
        # probability of mirroring a sample in __getitem__.
        super().__init__("lip", data, header, info, *args, **kwargs)
        seg_info = info['seg_info']
        joint_info = info['joint_info']
        self.flip_prob = flip_prob
        # The flip transform lives outside self.transform because it must
        # also swap left/right keypoint ids and segmentation labels, not
        # just mirror pixels.
        self.flip_transform = FliplrWithPairs(p=flip_prob,
                                              keypoint_pairs=joint_info.mirror_mapping_pairs,
                                              segmentation_pairs=seg_info.pairs)
    def __getitem__(self, index):
        """Load and augment one sample.

        Returns the datum dict extended with 'img', normalized 'coords',
        'seg' (int64 label map) and the original 'height'/'width'.
        """
        datum = self.data[index]
        datum = datum.copy()  # do not mutate the cached dataset entry
        img = self.loader_fn(datum['path'])
        shape = img.shape
        coords = datum['coords']
        # image is a 3 channel png with identical channels
        seg = np.array(self.loader_fn(datum['seg_path']))[:, :, 0]
        if self.transform is not None:
            # flip transform is outside the pipeline
            # segmentation label flipping is not yet supported
            # do before possible normalization
            num_seg_classes = self.info['num_seg_classes']
            if self.flip_prob > 0:
                # only execute if the probability is greater 0
                # if the image will be flipped is decided by augmenter
                det_flip = self.flip_transform.to_deterministic()
                #det_flip = self.flip_transform
                # apply the SAME (deterministic) flip to image, seg map and
                # keypoints so the three stay aligned
                img = det_flip.augment_image(img)
                seg = ia.SegmentationMapOnImage(seg, shape=seg.shape, nb_classes=num_seg_classes)
                seg = det_flip.augment_segmentation_maps(seg).get_arr_int()
                keypoints_on_image = ia.KeypointsOnImage.from_coords_array(coords, shape=shape)
                keypoints_on_image = det_flip.augment_keypoints([keypoints_on_image])
                coords = keypoints_on_image[0].get_coords_array()
            # NOTE(review): return value discarded — presumably this fixes
            # the pipeline's random state in place; confirm.
            self.transform.to_deterministic()
            img = self.transform.augment_image(img)
            seg = self.transform.augment_segmentation(seg, num_seg_classes)
            # the shape of the original image
            coords = self.transform.augment_keypoint(coords, shape)
            # the shape of the augmented image
            coords = self.normalize_pose_keypoints(coords, img.shape)
        # we need to save the shape to restore the orginal coordinates
        datum['height'] = shape[0]
        datum['width'] = shape[1]
        datum['coords'] = coords
        datum['img'] = img
        # TODO why long?? Otherwise error in loss
        datum['seg'] = np.array(seg, dtype=np.int64)
        return datum
    def __len__(self):
        # number of samples that survived the existence checks in make_dataset
        return len(self.data)
    @staticmethod
    def build(cfg, *args, **kwargs):
        """Factory used by the dataset registry.

        Builds the transform pipeline and the dataset from a config dict;
        data is read from Config.LIP_DATA.
        """
        split = cfg['split']
        evaluate = cfg.get('evaluate', 'both')
        #default to zero to avoid messing up validation
        flip_prob = cfg.get('flip_prob', 0.0)
        data_dir = Config.LIP_DATA
        data, header, info = make_dataset(data_dir, split)
        transform = transform_builder.build(cfg['transform'], info)
        dataset = Lip(data, header, info, flip_prob, transform, *args, **kwargs)
        # TODO very temporay solution
        # Looking for a better solution building the evaluation
        # to avoid passing too many parameters.
        dataset.evaluate_mode = evaluate
        return dataset
    def get_evaluation(self, model):
        """Return the evaluation object matching self.evaluate_mode
        ('pose', 'segmentation', or anything else -> both)."""
        pose = segmentation = False
        if self.evaluate_mode == 'pose':
            pose = True
        elif self.evaluate_mode == 'segmentation':
            segmentation = True
        else:
            pose = segmentation = True
        joint_info = self.info['joint_info']
        num_seg_classes = self.info['num_seg_classes']
        if pose and segmentation:
            print("LIP: Pose and Segmentation Evaluation started")
            return LipPoseSegmentationEvaluation(model, joint_info, num_seg_classes)
        elif pose:
            print("LIP: Pose Evaluation started")
            joint_info = self.info['joint_info']
            return LipPoseEvaluation(model, joint_info)
        elif segmentation:
            print("LIP: Segmentation Evaluation started")
            return LipSegmentationEvaluation(model, num_seg_classes)
        raise RuntimeError("Not the expected outputs available")
class LipSegmentationEvaluation(Evaluation):
    """Accumulates a pixel-wise confusion matrix for LIP part segmentation."""

    def __init__(self, model, num_classes):
        super().__init__("Lip")
        self.num_classes = num_classes
        # running (ground truth x prediction) confusion matrix
        self.hist = np.zeros((num_classes, num_classes))

    def get_writer(self, output_path):
        # nothing needs to be persisted: scores accumulate in self.hist
        self.writer = DummyWriter()
        return self.writer

    def before_saving(self, endpoints, data):
        # Change to Update and remove get_writer function?
        predicted = torch.argmax(endpoints['sem-logits'], dim=1).detach().cpu().numpy()
        assert predicted.shape[0] == 1  # evaluation assumes batch size one
        hypothesis = predicted[0]
        reference = data['seg'].detach().cpu().numpy()[0]
        self.hist += fast_hist(reference.flatten(), hypothesis.flatten(), self.num_classes)
        return {}

    def score(self):
        return calc_seg_score(self.hist)
class LipPoseEvaluation(Evaluation):
    """PCKh evaluation of predicted LIP poses against ground truth."""

    def __init__(self, model, joint_info):
        super().__init__("Lip")
        self.joint_info = joint_info

    def get_writer(self, output_path):
        # predictions are buffered in memory and scored at the end
        self.writer = MemoryWriter()
        return self.writer

    def before_saving(self, endpoints, data):
        # keep the prediction plus everything needed to undo normalization
        return {
            "pose": endpoints['pose'].cpu(),
            "coords": data['coords'],
            "head_size": data['head_size'],
            "height": data['height'],
            "width": data['width']
        }

    @staticmethod
    def _score(pose, coords, height, width, head_size, ids):
        # work on copies: the in-place scaling below must not mutate inputs
        prediction = pose.copy()
        truth = coords.copy()
        # keypoints are normalized to [0, 1]; scale back to pixels so the
        # PCKh distances are in the same units as head_size
        for arr in (prediction, truth):
            arr[:, :, 0] *= width[:, None]
            arr[:, :, 1] *= height[:, None]
        # TODO ignore head not visible in evaluation
        dist = np.linalg.norm(prediction - truth, axis=2)
        pck_all, pck_joint = calculate_pckh(dist, head_size)
        sn = "PCKh {} @ {}"
        # per-group joint ids, averaged over left/right (or head/neck)
        groups = {
            "Head": ('b_head', 'b_neck'),
            "Shoulder": ('l_sho', 'r_sho'),
            "Elbow": ('l_elb', 'r_elb'),
            "Wrist": ('l_wri', 'r_wri'),
            "Hip": ('l_hip', 'r_hip'),
            "Knee": ('l_kne', 'r_kne'),
            "Ankle": ('l_ank', 'r_ank'),
        }
        score = {}
        for threshold, per_joint in pck_joint.items():
            for group_name, (first, second) in groups.items():
                score[sn.format(threshold, group_name)] = \
                    (per_joint[ids[first]] + per_joint[ids[second]]) / 2
        for threshold, value in pck_all.items():
            score[sn.format(threshold, "All")] = value
        return score

    def score(self):
        buffered = self.writer.data
        stacked = {key: np.concatenate(buffered[key])
                   for key in ('height', 'width', 'head_size', 'pose', 'coords')}
        return self._score(stacked['pose'], stacked['coords'],
                           stacked['height'], stacked['width'],
                           stacked['head_size'], self.joint_info.ids)
class LipPoseSegmentationEvaluation(Evaluation):
def __init__(self, model, joint_info, num_seg_classes):
super().__init__("Lip")
self.pose = LipPoseEvaluation(model, joint_info)
self.seg = LipSegmentationEvaluation(model, num_seg_classes)
def get_writer(self, output_path):
self.writer = MemoryWriter()
self.seg.writer = self.writer
self.pose.writer = self.writer
return self.writer
def before_saving(self, endpoints, data):
pose_data = self.pose.before_saving(endpoints, data)
seg_data = self.seg.before_saving(endpoints, data)
return {**pose_data, | |
<filename>api/exporter/views.py
import json
import requests
import base64
import re
import csv
from django.views import View
from django.http import JsonResponse
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q, Max
from django.db import transaction
from django.utils import timezone
from django.conf import settings
from .models import Category, Exporter, Release, Official
from headtoken.models import Token
from user.models import Bucket, Star
from user.utils import login_check, admin_decorator
# Base URL of GitHub's repository REST API; "<owner>/<repo>" paths are appended.
api_url = 'https://api.github.com/repos/'
# Matches markdown image tags ![alt](relative/path) so relative image links in
# a README can be rewritten to absolute raw.githubusercontent.com URLs.
PATTERN = r"!\[(\w*|\s|\w+( \w+)*)\]\(([^,:!]*|\/[^,:!]*\.\w+|\w*.\w*)\)"
class CategoryView(View):
    """CRUD endpoints for exporter categories.

    Categories are mirrored in two places: the local database and the
    ``exporter_list.csv`` file kept both on local disk and in the hub's
    GitHub repository. Mutating endpoints therefore rewrite the CSV and
    push it via the GitHub contents API before touching the database.
    """

    def get_contents(self, headers):
        """Fetch the blob SHA of exporter_list.csv from the hub repository.

        Returns {'sha': <sha>} on 200, {'sha': None} on 404 (a null sha
        makes the subsequent PUT create the file), or the string
        'GITHUB_GET_REPO_ERROR' for any other status.
        """
        repo = f"{settings.ORGANIZATION}/exporterhub.io"
        url = f"https://api.github.com/repos/{repo}/contents/api/exporter_list.csv"
        result = requests.get(url, headers=headers)
        data = result.json()
        if result.status_code == 200:
            contents = {
                'sha' : data['sha']
            }
        elif result.status_code == 404:
            contents = {
                'sha' : None
            }
        else:
            contents = "GITHUB_GET_REPO_ERROR"
        return contents

    def get(self, request):
        """List all categories, ordered by name."""
        categories = Category.objects.all().order_by('name')
        data = {
            "categories": [{
                "category_id" : category.id,
                "category_name": category.name
            } for category in categories]
        }
        return JsonResponse(data, status=200)

    @admin_decorator
    def post(self, request):
        """Create a new category; 400 if the name already exists."""
        data = json.loads(request.body)
        category, is_create = Category.objects.get_or_create(
            name = data['category']
        )
        if not is_create:
            return JsonResponse({'message':'EXISTING_CATEGORY'}, status=400)
        return JsonResponse({'message':'SUCCESS'}, status=201)

    @admin_decorator
    @transaction.atomic
    def patch(self, request):
        """Merge one category into another.

        Rewrites exporter_list.csv (locally and on GitHub) so every row in
        ``category_id`` is re-labelled with ``feature_category_id``, then
        moves the exporters and deletes the old category in the database.
        """
        data = json.loads(request.body)
        category_id = data['category_id']
        feature_category_id = data['feature_category_id']
        user = request.user
        token = user.github_token
        repo = f"{settings.ORGANIZATION}/exporterhub.io"
        url = f"https://api.github.com/repos/{repo}/contents/api/exporter_list.csv"
        responses = []
        response = ''
        # BUG FIX: ``.exists`` without parentheses is a bound method and is
        # always truthy, so these guards could never fire before.
        # (The 'EXISTING_CATEGORY' message is misleading but kept for API
        # compatibility.)
        if not Category.objects.filter(id=category_id).exists():
            return JsonResponse({'message':'EXISTING_CATEGORY'}, status=400)
        if not Category.objects.filter(id=feature_category_id).exists():
            return JsonResponse({'message':'EXISTING_CATEGORY'}, status=400)
        category = Category.objects.get(id=category_id)
        feature_category = Category.objects.get(id=feature_category_id)
        with open('exporter_list.csv', 'r') as file:
            reader = [row for row in csv.reader(file)]
        with open('exporter_list.csv', 'w', newline='') as file:
            writer = csv.writer(file)
            for row in reader:
                # re-label rows that belong to the merged category
                if row[4] == category.name:
                    row[4] = feature_category.name
                writer.writerow(row)
                responses.append([row[0], row[1], row[2], row[3], row[4], '\n'])
        csv_info = self.get_contents(headers={'Authorization' : 'token ' + token})
        if csv_info == 'GITHUB_GET_REPO_ERROR':
            return JsonResponse({'message': 'GITHUB_API_FAIL'}, status=400)
        # serialize the rewritten rows into the space-separated blob format
        # the hub repository stores
        for detail in responses:
            response += ' '.join(detail)
        contents = json.dumps({
            'sha' : csv_info['sha'],
            'message' : 'wip',
            'content' : base64.b64encode(response.encode('utf-8')).decode('utf-8')
        })
        result = requests.put(url, data=contents, headers={'Authorization': 'token ' + token, 'Content-Type':'application/vnd.github.v3+json'})
        if result.status_code == 200:
            # only mutate the database once the GitHub push succeeded
            Exporter.objects.filter(category_id = category_id).update(category_id=feature_category_id)
            Category.objects.filter(id=category_id).delete()
            return JsonResponse({'message':'SUCCESS'}, status=200)
        else:
            return JsonResponse({'message': 'GITHUB_REPO_API_ERROR'}, status=404)

    @admin_decorator
    @transaction.atomic
    def delete(self, request, category_id):
        """Delete a category, its exporters, and its rows in the CSV."""
        user = request.user
        token = user.github_token
        repo = f"{settings.ORGANIZATION}/exporterhub.io"
        url = f"https://api.github.com/repos/{repo}/contents/api/exporter_list.csv"
        content = []
        response = ''
        # BUG FIX: check existence (with the method actually *called*) BEFORE
        # Category.objects.get(), which would otherwise raise DoesNotExist;
        # previously ``.exists`` was never invoked and the get() ran first.
        if not Category.objects.filter(id=category_id).exists():
            return JsonResponse({'message':'EXISTING_CATEGORY'}, status=400)
        category = Category.objects.get(id=category_id)
        with open('exporter_list.csv', 'r') as file:
            reader = [row for row in csv.reader(file)]
        with open('exporter_list.csv', 'w', newline='') as file:
            writer = csv.writer(file)
            for row in reader:
                # drop rows that belong to the deleted category
                if row[4] == category.name:
                    continue
                writer.writerow(row)
                content.append([row[0], row[1], row[2], row[3], row[4], '\n'])
        csv_info = self.get_contents(headers={'Authorization' : 'token ' + token})
        if csv_info == 'GITHUB_GET_REPO_ERROR':
            return JsonResponse({'message': 'GITHUB_API_FAIL'}, status=400)
        for detail in content:
            response += ' '.join(detail)
        contents = json.dumps({
            'sha' : csv_info['sha'],
            'message' : 'wip',
            'content' : base64.b64encode(response.encode('utf-8')).decode('utf-8')
        })
        result = requests.put(url, data=contents, headers={'Authorization': 'token ' + token, 'Content-Type':'application/vnd.github.v3+json'})
        if result.status_code == 200:
            # only mutate the database once the GitHub push succeeded
            Exporter.objects.filter(category_id=category_id).delete()
            Category.objects.filter(id=category_id).delete()
            return JsonResponse({'message':'SUCCESS'}, status=200)
        else:
            return JsonResponse({'message': 'GITHUB_REPO_API_ERROR'}, status=404)
class ExporterView(View):
    """Collection endpoint: list, register, delete and re-categorize exporters."""

    def get_repo(self, github_token, repo_url):
        """Fetch repository metadata, README and releases from GitHub.

        Returns a data dict on success, 'INVALID_TOKEN' on a 401, and None
        (implicitly) for non-GitHub URLs or any other failure status.
        """
        headers = {'Authorization' : 'token ' + github_token}
        if 'https://github.com/' in repo_url:
            repo_api_url = api_url + repo_url.replace('https://github.com/','')
            readme_api_url = repo_api_url + '/readme'
            release_api_url = repo_api_url + '/releases'
            repo = requests.get(repo_api_url, headers=headers)
            if repo.status_code == 200:
                repo_data = repo.json()
                readme = requests.get(readme_api_url, headers=headers)
                release = requests.get(release_api_url, headers=headers)
                readme_data = readme.json()
                release_data = release.json()
                data = {
                    "name" : repo_data["name"],
                    "logo_url" : repo_data["owner"]["avatar_url"],
                    "stars" : repo_data["stargazers_count"],
                    "description" : repo_data["description"],
                    "readme_url" : repo_url+"/blob/master/README.md",
                    # GitHub returns README content base64-encoded
                    "readme" : readme_data["content"],
                    "release" : [{
                        "release_version": release["tag_name"],
                        "release_date" : release["created_at"],
                        "release_url" : release["html_url"]
                    } for release in release_data]
                }
                return data
            elif repo.status_code == 401:
                return 'INVALID_TOKEN'
    @login_check
    def get(self, request):
        """List exporters; filterable by ?category= and ?type=, sortable by
        ?sort=popular|recent|trending (default: popular)."""
        try:
            user = request.user
            category = request.GET.get('category')
            official_type = request.GET.get('type')
            sort = request.GET.get('sort', 'popular')
            # query-param value -> ORDER BY expression
            sort_dict = {
                'popular' : '-stars',
                'recent' : 'date',
                'trending': '-view_count'
            }
            q = Q()
            if category:
                q.add(Q(category__name__icontains=category), Q.AND)
            if official_type:
                q.add(Q(official__name__istartswith=official_type), Q.AND)
            if sort == 'recent':
                # 'recent' sorts by the latest release date, which needs an
                # aggregate rather than a plain column
                exporters = Exporter.objects.select_related('category', 'official').prefetch_related('release_set')\
                    .filter(q).annotate(recent=Max('release__date')).order_by('-recent')
            else:
                exporters = Exporter.objects.select_related('category', 'official').filter(q).order_by(sort_dict[sort])
            data = {
                "exporters": [{
                    "exporter_id" : exporter.id,
                    "name" : exporter.name,
                    "logo_url" : exporter.logo_url,
                    "category" : exporter.category.name,
                    "official" : exporter.official.name,
                    "stars" : exporter.stars,
                    # per-user flags only exist for authenticated requests
                    "is_star" : user.starred_exporters.filter(id=exporter.id).exists() if user else False,
                    "is_bucket" : user.added_exporters.filter(id=exporter.id).exists() if user else False,
                    "is_new" : (timezone.now() - exporter.created_at).days <= 7,
                    "repository_url" : exporter.repository_url,
                    "description" : exporter.description,
                }for exporter in exporters]
            }
            return JsonResponse(data, status=200)
        except KeyError:
            return JsonResponse({'message': 'KEY_ERROR'}, status=400)
        except Exception as e:
            return JsonResponse({'message':f"{e}"}, status=400)
    @admin_decorator
    def post(self, request):
        """Register a new exporter from a GitHub repository URL.

        Pulls metadata/README/releases via get_repo, rewrites relative
        markdown image links to absolute raw.githubusercontent.com URLs,
        stores the exporter and its releases, and appends a row to the
        local exporter_list.csv.
        """
        try:
            user = request.user
            data = json.loads(request.body)
            repo_url = data["repo_url"]
            category = data["category"]
            app_name = data["title"]
            if not(repo_url and category and app_name):
                return JsonResponse({'message': 'FILL_THE_BLANK'}, status=400)
            if Exporter.objects.filter(repository_url=repo_url).exists():
                return JsonResponse({'message':'EXISTING_REPOSITORY'}, status=400)
            # repos under the prometheus org are considered official
            official = Official.objects.get(name='Official') if "prometheus/" in repo_url else Official.objects.get(name='Unofficial')
            repo_info = self.get_repo(github_token=user.github_token, repo_url=repo_url)
            if repo_info == 'INVALID_TOKEN':
                return JsonResponse({'message':'INVALID_TOKEN'}, status=401)
            elif repo_info:
                readme = base64.b64decode(repo_info["readme"]).decode('utf-8')
                # rewrite relative image paths found by PATTERN so the README
                # renders outside of GitHub
                matches = re.findall(PATTERN, readme)
                repo_name = repo_url.replace('https://github.com/','')
                for match in matches:
                    for element in match:
                        if '.' in element:
                            readme = readme.replace(element,f"https://raw.githubusercontent.com/{repo_name}/master/{element}")
                exporter = Exporter.objects.create(
                    category = Category.objects.get(name=category),
                    official = official,
                    name = repo_info["name"],
                    logo_url = repo_info["logo_url"],
                    stars = repo_info["stars"],
                    repository_url = repo_url,
                    description = repo_info["description"],
                    readme_url = repo_info["readme_url"],
                    readme = readme.encode('utf-8'),
                    app_name = app_name
                )
                # store releases oldest-first
                release = sorted(repo_info["release"], key=lambda x: x["release_date"])
                for info in release:
                    Release(
                        exporter_id = exporter.id,
                        release_url = info["release_url"],
                        version = info["release_version"],
                        date = info["release_date"]
                    ).save()
                file = open("exporter_list.csv", 'a', newline='')
                writer = csv.writer(file)
                writer.writerow([app_name, repo_info["name"], repo_url, 1 if "prometheus/" in repo_url else 0, category])
                file.close()
                return JsonResponse({'message':'SUCCESS'}, status=201)
            # get_repo returned None: not a GitHub URL or the repo fetch failed
            return JsonResponse({'message':'WRONG_REPOSITORY'}, status=400)
        except KeyError:
            return JsonResponse({'message':'KEY_ERROR'}, status=400)
        except Category.DoesNotExist:
            return JsonResponse({'message':'NO_CATEGORY'}, status=400)
        except Official.DoesNotExist:
            return JsonResponse({'message':'OFFICIAL_OBJECT_DOES_NOT_EXIST'}, status=410)
    @admin_decorator
    def delete(self, request):
        """Delete one exporter (and its releases) by ?exporter-id=."""
        try:
            exporter_id = request.GET['exporter-id']
            exporter = Exporter.objects.get(id=exporter_id)
            release = Release.objects.filter(exporter_id=exporter_id)
            if release.exists():
                release.delete()
            exporter.delete()
            return JsonResponse({'message':'SUCCESS'}, status=200)
        except Exporter.DoesNotExist:
            return JsonResponse({'message':'NO_EXPORTER'}, status=400)
        except KeyError:
            return JsonResponse({'message':'KEY_ERROR'}, status=400)
    @admin_decorator
    def patch(self, request):
        """Move one exporter (?exporter-id=) to the category named in the body."""
        try:
            exporter_id = request.GET['exporter-id']
            data = json.loads(request.body)
            category = data['category']
            exporter = Exporter.objects.get(id=exporter_id)
            exporter.category = Category.objects.get(name=category)
            exporter.save()
            return JsonResponse({'message':'SUCCESS'}, status=200)
        except Exporter.DoesNotExist:
            return JsonResponse({'message':'NO_EXPORTER'}, status=400)
        except Category.DoesNotExist:
            return JsonResponse({'message':'NO_CATEGORY'}, status=400)
        except KeyError:
            return JsonResponse({'message':'KEY_ERROR'}, status=400)
class ExporterDetailView(View):
    """Detail endpoint for a single exporter, kept in sync with GitHub."""

    def check_starred(self, user, exporter, headers, repo_info):
        """Sync the local Star record with the user's star state on GitHub.

        GitHub answers 204 when the repo is starred and 404 when it is not;
        any other status makes this return 'ERROR' (None on success).
        """
        result = requests.get(f'https://api.github.com/user/starred/{repo_info}', headers=headers)
        if result.status_code == 204 and not Star.objects.filter(user=user, exporter=exporter).exists():
            Star.objects.create(user=user, exporter=exporter)
        elif result.status_code == 404 and Star.objects.filter(user=user, exporter=exporter).exists():
            Star.objects.filter(user=user, exporter=exporter).delete()
        elif result.status_code != 204 and result.status_code != 404:
            return 'ERROR'
    @login_check
    def get(self, request, exporter_id):
        """Return the full exporter record.

        Side effects: bumps view_count (used by 'trending' sorting) and
        refreshes the star count from GitHub on every call.
        """
        try:
            user = request.user
            exporter = Exporter.objects.select_related('category', 'official').prefetch_related('release_set').get(id=exporter_id)
            repo_info = exporter.repository_url.replace('https://github.com/', '')
            exporter.view_count += 1
            exporter.save()
            # anonymous visitors fall back to a stored service token
            github_token = user.github_token if user else Token.objects.last().token
            headers = {'Authorization' : 'token ' + github_token}
            # check starred by user at github
            if user:
                if self.check_starred(user=user, exporter=exporter, headers=headers, repo_info=repo_info) == 'ERROR':
                    return JsonResponse({'message': 'GITHUB_API_FAIL_AT_CHECK_STARRED'}, status=400)
            get_star_counts = requests.get(f'https://api.github.com/repos/{repo_info}', headers=headers)
            if get_star_counts.status_code != 200:
                return JsonResponse({'message': 'GITHUB_GET_STAR_COUNT_API_FAIL'}, status=400)
            exporter.stars = get_star_counts.json()['stargazers_count']
            exporter.save()
            # the fork URL only exists when the user added this exporter to
            # their bucket
            if user and user.added_exporters.filter(id=exporter.id).exists():
                forked_repository_url = Bucket.objects.get(user_id=user.id, exporter_id=exporter.id).forked_repository_url
            else:
                forked_repository_url = None
            data = {
                'exporter_id' : exporter.id,
                'name' : exporter.name,
                'logo_url' : exporter.logo_url,
                'category' : exporter.category.name,
                'official' : exporter.official.name,
                'title' : exporter.app_name,
                'stars' : exporter.stars,
                'is_star' : user.starred_exporters.filter(id=exporter.id).exists() if user else False,
                'is_bucket' : user.added_exporters.filter(id=exporter.id).exists() if user else False,
                'is_new' : (timezone.now() - exporter.created_at).days <= 7,
                'repository_url' : exporter.repository_url,
                'forked_repository_url' : forked_repository_url,
                'description' : exporter.description,
                'readme' : exporter.readme.decode('utf-8'),
                # epoch sentinel when the exporter has no releases
                'recent_release' : exporter.release_set.order_by('date').last().date if exporter.release_set.filter().exists() else '1970-01-01',
                'release' : [{
                    'release_version': release.version,
                    'release_date' : release.date,
                    'release_url' : release.release_url
                } for release in exporter.release_set.all()],
            }
            return JsonResponse({'data': data}, status=200)
        except Exporter.DoesNotExist:
            return JsonResponse({'message':'NO_EXPORTER'}, status=400)
class ExporterTabView(View):
    def get_yaml(self, app_name, content_type, file_type, headers):
        """Fetch every .yaml file for an exporter from the hub repository.

        Lists contents/<app_name>/<app_name>_<content_type>/ and downloads
        each *.yaml entry. Returns a list of dicts (content, url, sha) on
        success, a single dict of Nones when the directory is missing
        (404), or the string "GITHUB_GET_REPO_ERROR" for any other status.

        NOTE(review): file_type is unused here — the signature appears to
        be kept parallel with get_csv; confirm before removing.
        """
        repo = f"{settings.ORGANIZATION}/exporterhub.io"
        url = f"https://api.github.com/repos/{repo}/contents/contents/{app_name}/{app_name}_{content_type}/"
        yaml_file = requests.get(url, headers=headers)
        data = yaml_file.json()
        yaml_contents = []
        if yaml_file.status_code == 200:
            # data is a directory listing; download each yaml file in turn
            for each_yaml in data:
                yaml_name = each_yaml['name']
                if '.yaml' in yaml_name:
                    yaml_url = f"https://api.github.com/repos/{repo}/contents/contents/{app_name}/{app_name}_{content_type}/{yaml_name}"
                    result = requests.get(yaml_url, headers=headers)
                    if result.status_code == 200:
                        yaml_data = result.json()
                        yaml_contents.append(
                            {
                                'yaml_file_content' : base64.b64decode(yaml_data['content']).decode('utf-8'),
                                'yaml_url' : yaml_url,
                                'yaml_sha' : yaml_data['sha']
                            }
                        )
                    else:
                        # silently skip files that fail to download
                        pass
            return yaml_contents
        elif yaml_file.status_code == 404:
            # the exporter has no directory of this content type yet
            yaml_contents = {
                'yaml_file_content' : None,
                'yaml_url' : None,
                'yaml_sha' : None,
            }
            return yaml_contents
        else:
            yaml_contents = "GITHUB_GET_REPO_ERROR"
            return yaml_contents
def get_csv(self, app_name, content_type, file_type, headers):
repo = f"{settings.ORGANIZATION}/exporterhub.io"
url = f"https://api.github.com/repos/{repo}/contents/contents/{app_name}/{app_name}_{content_type}/{app_name}_{content_type}.{file_type}"
result = requests.get(url, headers=headers)
data = result.json()
csv_files = []
if result.status_code == 200:
content = base64.b64decode(data['content']).decode('utf-8')
details | |
<gh_stars>0
import csv
import vamp
import soundfile as sf
import argparse
import pathlib
import sys
import numpy as np
from warnings import warn
from typing import List
import simfile
class SingleBeatTimestampData(object):
    """One detected beat: a timestamp plus its position-in-bar label.

    The label encodes the beat's index within a 4/4 bar as one of the
    strings '1' through '4'.
    """

    _VALID_LABELS = frozenset(("1", "2", "3", "4"))

    def __init__(self, timestamp: float = None, label: str = None):
        self.timestamp = timestamp
        self.label = label

    def set_timestamp(self, timestamp: float):
        """Store a non-negative timestamp; reject negative values."""
        if timestamp < 0.:
            raise ValueError("Invalid timestamp {} (is it negative?)".format(timestamp))
        self.timestamp = timestamp

    def set_label(self, label: str):
        """Store a beat label, which must be '1', '2', '3' or '4'."""
        if label not in self._VALID_LABELS:
            raise ValueError("Invalid beat label {}, should be '1', '2', '3', '4'".format(label))
        self.label = label


class BeatsTimestampData(object):
    """A sequence of beats plus the unit their timestamps are expressed in.

    The unit is 'samples' or 'seconds', and 'unset' until data arrives;
    'unset' can never be assigned explicitly via set_timestamp_type.
    """

    VALID_TIMESTAMP_TYPES = ['samples', 'seconds']
    TIMESTAMP_UNSET_TYPE = 'unset'

    def __init__(self, data: List[SingleBeatTimestampData] = None, timestamp_type: str = None):
        self.beats = [] if data is None else data
        self.timestamp_type = self.TIMESTAMP_UNSET_TYPE if timestamp_type is None else timestamp_type

    def set_data(self, data: List[SingleBeatTimestampData]):
        self.beats = data

    def set_timestamp_type(self, timestamp_type: str):
        """Set the timestamp unit; only 'samples' or 'seconds' are accepted."""
        if timestamp_type == self.TIMESTAMP_UNSET_TYPE:
            raise ValueError("Cannot set timestamp_type to {} here, "
                             "it must be one of the following options: "
                             "'{}'".format(self.TIMESTAMP_UNSET_TYPE, "', '".join(self.VALID_TIMESTAMP_TYPES) ))
        if timestamp_type not in self.VALID_TIMESTAMP_TYPES:
            raise ValueError("Invalid timestamp type '{}'".format(timestamp_type))
        self.timestamp_type = timestamp_type
class BPMsData(object):
    """Tempo data: BPM values plus the beat indices at which each one starts.

    Both lists default to empty; None sentinels (rather than mutable
    defaults) keep instances independent.
    """

    def __init__(self, bpms: List[float] = None, beat_markers: List[int] = None):
        self.bpms = [] if bpms is None else bpms
        self.beat_markers = [] if beat_markers is None else beat_markers

    def set_bpms(self, bpms: List[float]):
        self.bpms = bpms

    def set_beat_markers(self, beat_markers: List[int]):
        self.beat_markers = beat_markers
class AudioBeatsToBPMs(object):
SEC_DIFF_TOLERANCE = 1e-8
MIN_FIRST_BEAT_SEC_FOR_WARN = 10.
PLUGIN_IDENTIFIER = "qm-vamp-plugins:qm-barbeattracker" # https://vamp-plugins.org/plugin-doc/qm-vamp-plugins.html
RUN_FROM_CANDIDATES = {"audio_input", "audio_path", "beats_path"}
    def __init__(self, audio: np.ndarray = None, sampling_rate: int = None, input_audio_path=None,
                 input_beats_path=None, input_beats_sampling_rate=0,
                 input_simfile_path=None, output_simfile_path=None, output_txt_path=None,
                 output_beat_markers_bpms_csv_path=None, overwrite_input_simfile=False,
                 alternate_plugin_identifier=None):
        """Configure a beats-to-BPMs extraction run.

        The beat source is chosen by
        _verify_initialization_and_set_running_order with override order:
        input beats CSV > in-memory audio array > audio file path.

        Args:
            audio: raw audio samples already in memory (optional).
            sampling_rate: sampling rate of `audio`; overridden when
                input_beats_sampling_rate is non-zero.
            input_audio_path: path of an audio file to load and analyse.
            input_beats_path: path of a CSV with precomputed beat timestamps.
            input_beats_sampling_rate: non-zero means the beats CSV holds
                sample indices at this rate instead of seconds.
            input_simfile_path, output_simfile_path, output_txt_path,
            output_beat_markers_bpms_csv_path: optional I/O locations.
            overwrite_input_simfile: write results back onto the input simfile.
            alternate_plugin_identifier: overrides the default Vamp
                bar/beat-tracker plugin id (PLUGIN_IDENTIFIER).
        """
        self.audio = audio
        self.sampling_rate = sampling_rate
        self.input_audio_path = pathlib.Path(input_audio_path) if input_audio_path is not None else None
        self.input_beats_path = pathlib.Path(input_beats_path) if input_beats_path is not None else None
        if input_beats_sampling_rate:
            # a non-zero rate means the beats CSV is in samples, not seconds
            self.sampling_rate = input_beats_sampling_rate
            self.input_beats_in_samples = True
        else:
            self.input_beats_in_samples = False
        self.input_simfile_path = pathlib.Path(input_simfile_path) if input_simfile_path is not None else None
        self.output_simfile_path = pathlib.Path(output_simfile_path) if output_simfile_path is not None else None
        self.output_txt_path = pathlib.Path(output_txt_path) if output_txt_path is not None else None
        self.output_beat_markers_bpms_csv_path = pathlib.Path(output_beat_markers_bpms_csv_path) \
            if output_beat_markers_bpms_csv_path is not None else None
        self.overwrite_input_simfile = overwrite_input_simfile
        self.plugin_identifier = alternate_plugin_identifier if alternate_plugin_identifier is not None else \
            self.PLUGIN_IDENTIFIER
        # set by _verify_initialization_and_set_running_order below
        self.run_from = None
        self._verify_initialization_and_set_running_order()
        # results populated later by the extraction steps
        self.beats_timestamp_data = BeatsTimestampData()
        self.bpms_data = BPMsData()
        self.offset = 0.
        self.simfile_bpms = None
def _verify_initialization_and_set_running_order(self):
    """Validate the combination of inputs and decide which source to run from.

    Sets self.run_from to one of RUN_FROM_CANDIDATES. Raises ValueError on an
    invalid/ambiguous configuration, and may prompt the user interactively
    before allowing an existing simfile to be overwritten.
    """
    # Override order: input beats path > input audio array > input audio path
    if self.input_audio_path is not None:
        if not self.input_audio_path.is_file():
            raise ValueError("{} is not a valid file path for the input audio".format(self.input_audio_path))
    if self.input_beats_path is not None:
        self.run_from = "beats_path"
        if not self.input_beats_path.is_file():
            raise ValueError("{} is not a valid file path for the input beats CSV".format(self.input_beats_path))
        if self.input_audio_path is not None:
            warn("WARNING: Will not load audio from {} because you have specified an existing file path {} "
                 "from which the beat timestamps will be extracted.".format(self.input_audio_path,
                                                                            self.input_beats_path))
        if self.audio is not None:
            warn("WARNING: Will not compute beat timestamps from the input audio, because you have specified "
                 "an existing file path {} from which the beat timestamps "
                 "will be extracted.".format(self.input_beats_path))
    else:  # No input CSV of beats
        # BUG FIX: previously an audio array passed WITHOUT input_audio_path
        # fell through to the "you must ..." error because run_from was only
        # set when both the array and the path were present. An audio array
        # alone is a valid input; the path is only needed for the warning.
        if self.audio is not None:
            self.run_from = "audio_input"
            if self.input_audio_path is not None:
                warn("WARNING: Will not load audio from {} because you have passed an audio array in the "
                     "initialization of this object.".format(self.input_audio_path))
        elif self.input_audio_path is not None:
            self.run_from = "audio_path"
        else:
            raise ValueError("You must do one of the following things: initialize the audio array, "
                             "set the input audio path, or set the input beats path.")
    if self.overwrite_input_simfile:
        if self.input_simfile_path is None:
            raise ValueError("Cannot specify --overwrite_input_simfile without --input_simfile_path")
        elif self.output_simfile_path is not None and self.output_simfile_path != self.input_simfile_path:
            raise ValueError("Ambiguous input: cannot specify both --overwrite_input_simfile and "
                             "--output_simfile_path, unless the input simfile path is the same as the output")
        else:
            # Interactive confirmation; re-prompts on any answer other than y/n.
            resolved = False
            while not resolved:
                user_response = input("WARNING: Are you sure you want to overwrite the "
                                      "existing #OFFSET and #BPMS fields in the input simfile {}? "
                                      "(y/n) ".format(self.input_simfile_path))
                if user_response.lower() in ["y", "yes"]:
                    resolved = True
                    self.output_simfile_path = self.input_simfile_path
                elif user_response.lower() in ["n", "no"]:
                    print("Stopping program.")
                    sys.exit()
    else:  # If --overwrite_input_simfile is not specified
        if self.output_simfile_path is not None:
            if self.input_simfile_path is None:
                raise ValueError("Cannot specify --output_simfile_path without --input_simfile_path")
            elif self.output_simfile_path.exists():
                # Interactive confirmation before clobbering an existing output simfile.
                resolved = False
                while not resolved:
                    user_response = input("WARNING: The output simfile path {} already exists. "
                                          "Are you sure you want to overwrite the "
                                          "existing #OFFSET and #BPMS fields in this simfile? "
                                          "(y/n) ".format(self.output_simfile_path))
                    if user_response.lower() in ["y", "yes"]:
                        resolved = True
                    elif user_response.lower() in ["n", "no"]:
                        print("Stopping program.")
                        sys.exit()
    # Final sanity check that one of the branches above chose a run mode.
    if self.run_from not in self.RUN_FROM_CANDIDATES:
        raise ValueError("Invalid run configuration {}, must be one of the options "
                         "'{}'".format(self.run_from, "', '".join(self.RUN_FROM_CANDIDATES)))
def load_audio_from_path(self, input_audio_path=None):
    """Load audio (and its sampling rate) from self.input_audio_path.

    :param input_audio_path: optional new path; replaces self.input_audio_path.
    :raises ValueError: if no path is set or the path is not an existing file.
    """
    # BUG FIX: a freshly passed path was previously stored as-is (no
    # pathlib.Path conversion, unlike __init__) and the elif chain then
    # skipped both the None check and the is_file() validation for it.
    if input_audio_path is not None:
        self.input_audio_path = pathlib.Path(input_audio_path)
    if self.input_audio_path is None:
        raise ValueError("No input audio path has been specified!")
    if not self.input_audio_path.is_file():
        raise ValueError("Input audio path {} isn't a file".format(self.input_audio_path))
    self.audio, self.sampling_rate = sf.read(self.input_audio_path, always_2d=True)
    self.audio = self.audio.T  # [channels, data]
    print("Audio loaded from {}".format(self.input_audio_path))
def calculate_beat_timestamps_from_vamp_plugin(self, return_beats=False):
    """Run the configured Vamp plugin over the loaded audio to find beats.

    :param return_beats: if True, also return the resulting BeatsTimestampData.
    :raises ValueError: if no audio has been loaded yet.
    """
    if self.audio is None:
        raise ValueError("No audio loaded!")
    # Drain the plugin's generator into a concrete list of beat dicts.
    beat_dicts = list(vamp.process_audio(self.audio, self.sampling_rate, self.plugin_identifier))
    # Vamp plugin timestamps are reported in seconds.
    self.beats_timestamp_data = self._convert_beats_data_from_dicts_to_BeatsTimestampData(beat_dicts, 'seconds')
    if return_beats:
        return self.beats_timestamp_data
def load_beat_timestamps_from_path(self):
    """Read beat timestamps from the input CSV and infer their units.

    The first row decides whether timestamps are 'seconds' or 'samples':
    without --samples they are assumed seconds (with a warning if the first
    beat is suspiciously late); with --samples, a fractional first value
    overrides the flag and the data is treated as seconds anyway.

    :raises ValueError: if no input beats path is set or it is not a file.
    """
    if self.input_beats_path is None:
        raise ValueError("No input beats path specified!")
    elif not self.input_beats_path.is_file():
        raise ValueError("Invalid path to input beats file {}".format(self.input_beats_path))
    else:
        timestamp_type: str = 'unset'
        with open(self.input_beats_path, "r") as infile:
            read_data = []
            csvread = csv.reader(infile)
            row = next(csvread)
            first_beat_time = float(row[0])
            if not self.input_beats_in_samples:
                timestamp_type = 'seconds'
                if first_beat_time > self.MIN_FIRST_BEAT_SEC_FOR_WARN:
                    warn("WARNING: Your first timestamp {} from the input CSV is at greater than {} seconds. "
                         "Are you sure the units are in seconds and not samples? "
                         "If they are samples, please specify the sampling rate using the flag "
                         "--samples; for instance --samples 48000, or "
                         "otherwise you will get very inaccurate BPMs.".format(first_beat_time,
                                                                               self.MIN_FIRST_BEAT_SEC_FOR_WARN))
            else:
                if int(first_beat_time) != first_beat_time:
                    # BUG FIX: the warning contained a {} placeholder but
                    # .format() was never called, so "{}" was printed literally.
                    warn("The first beat time {} is detected to be in units of seconds, but you specified the "
                         "sampling rate, which will not be used in the computation.".format(first_beat_time))
                    timestamp_type = 'seconds'
                    self.input_beats_in_samples = False
                else:
                    timestamp_type = 'samples'
            # Keep the first row along with the remainder of the file.
            read_data.append(row)
            for row in csvread:
                read_data.append(row)
        self.beats_timestamp_data = self._convert_beats_data_from_lists_to_BeatsTimestampData(read_data,
                                                                                              timestamp_type)
def convert_timestamps_to_bpms(self):
    """Convert stored beat timestamps into (beat marker, BPM) pairs.

    Populates self.bpms_data (a new BPM entry is appended only when the
    inter-beat gap changes) and sets self.offset to the negated time of the
    first beat, in seconds.

    NOTE(review): if the beats list is empty, the plugin is re-run inside the
    try block, but the else branch (the actual conversion) is then skipped —
    confirm whether callers are expected to invoke this method a second time.
    """
    if len(self.beats_timestamp_data.beats) == 0:
        try:
            self.calculate_beat_timestamps_from_vamp_plugin()
        except ValueError:
            raise ValueError("Beats data is empty; "
                             "did you load an audio file or an input CSV of beat information?")
    else:
        if self.beats_timestamp_data.timestamp_type == 'samples':
            # Offset is the first beat's position converted from samples to seconds.
            first_beat_samples = int(self.beats_timestamp_data.beats[0].timestamp)
            self.offset = - first_beat_samples / self.sampling_rate
            beat_marker = 0
            last_beat_diff = 0
            for beat in self.beats_timestamp_data.beats[1:]:
                second_beat_samples = int(beat.timestamp)
                beat_diff = second_beat_samples - first_beat_samples
                # Only record a new BPM when the inter-beat gap changes.
                if last_beat_diff != beat_diff:
                    # samples/beat -> beats/second -> beats/minute
                    bpm = self.sampling_rate / beat_diff * 60
                    self.bpms_data.bpms.append(bpm)
                    self.bpms_data.beat_markers.append(beat_marker)
                first_beat_samples = second_beat_samples
                beat_marker += 1
                last_beat_diff = beat_diff
        elif self.beats_timestamp_data.timestamp_type == 'seconds':
            beat_marker = 0
            last_beat_diff = 0
            first_beat_sec = float(self.beats_timestamp_data.beats[0].timestamp)
            self.offset = - first_beat_sec
            for beat in self.beats_timestamp_data.beats[1:]:
                second_beat_sec = float(beat.timestamp)
                beat_diff = second_beat_sec - first_beat_sec
                # Float gaps are compared with a tolerance instead of !=.
                if abs(last_beat_diff - beat_diff) > self.SEC_DIFF_TOLERANCE:
                    bpm = 60 / beat_diff
                    self.bpms_data.bpms.append(bpm)
                    self.bpms_data.beat_markers.append(beat_marker)
                first_beat_sec = second_beat_sec
                beat_marker += 1
                last_beat_diff = beat_diff
        else:
            raise ValueError("Invalid timestamp_type: {}".format(self.beats_timestamp_data.timestamp_type))
@staticmethod
def _convert_beats_data_from_dicts_to_BeatsTimestampData(beats_dicts: List[dict], timestamp_type: str):
    """Wrap raw plugin output dicts into a BeatsTimestampData container."""
    beats = []
    for beat_dict in beats_dicts:
        beats.append(SingleBeatTimestampData(timestamp=beat_dict['timestamp'], label=beat_dict['label']))
    return BeatsTimestampData(beats, timestamp_type)
@staticmethod
def _convert_beats_data_from_lists_to_BeatsTimestampData(beats_lists: List, timestamp_type: str):
    """Wrap [timestamp, label] rows into a BeatsTimestampData container."""
    beats = []
    for entry in beats_lists:
        beats.append(SingleBeatTimestampData(timestamp=entry[0], label=entry[1]))
    return BeatsTimestampData(beats, timestamp_type)
@staticmethod
def _convert_beats_data_from_dicts_to_lists(beats_dicts: List[dict]):
    """Flatten beat dicts into [timestamp, label] pairs."""
    converted = []
    for beat_dict in beats_dicts:
        converted.append([beat_dict['timestamp'], beat_dict['label']])
    return converted
@staticmethod
def _convert_beats_data_from_lists_to_dicts(beats_list: List):
    """Expand [timestamp, label] pairs into beat dicts."""
    converted = []
    for entry in beats_list:
        converted.append({'timestamp': entry[0], 'label': entry[1]})
    return converted
def convert_bpms_to_simfile_format(self):
    """Render the (beat marker, BPM) pairs as a simfile #BPMS field value."""
    pairs = zip(self.bpms_data.beat_markers, self.bpms_data.bpms)
    self.simfile_bpms = ",".join("{}={}\n".format(marker, bpm) for marker, bpm in pairs)
def write_output_csv(self):
    """Dump the (beat marker, BPM) pairs as rows of a two-column CSV file."""
    with open(self.output_beat_markers_bpms_csv_path, "w") as outfile:
        writer = csv.writer(outfile)
        writer.writerows(zip(self.bpms_data.beat_markers, self.bpms_data.bpms))
def write_output_txt_oneline(self):
    """
    Write out a text file that only contains the two lines that you're gonna
    stick into the Stepmania .sm file (or .ssc).
    Sample output:
    #OFFSET:-0.0234;
    #BPMS:0.000=132.000,149.000=66.000,173.000=132.000;
    :return:
    """
    # Lazily compute the #BPMS field value if it hasn't been rendered yet.
    if self.simfile_bpms is None:
        self.convert_bpms_to_simfile_format()
    lines = [
        "#OFFSET:{};\n".format(self.offset),
        "#BPMS:" + self.simfile_bpms + ";",
    ]
    with open(self.output_txt_path, "w") as outfile:
        outfile.writelines(lines)
def write_output_simfile(self):
sm = simfile.open(str(self.input_simfile_path))
| |
#Misc
import time, os, sys, pdb
from glob import glob
from fnmatch import fnmatch
#Base
import numpy as np
import pandas as pd
#Save
import json
import scipy.io as sio
import h5py
#User
from utilities import *
#Plot
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.gridspec as gridspec
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.collections import LineCollection
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
from matplotlib.backends.backend_pdf import PdfPages
# Colors! xkcd color names used to build the per-state palettes below.
color_names=['windows blue','red','amber','faded green','dusty purple','orange','steel blue','pink','greyish',
'mint','clay','light cyan','forest green','pastel purple','salmon','dark brown','lavender','pale green',
'dark red','gold','dark teal','rust','fuchsia','pale orange','cobalt blue','mahogany','cloudy blue',
'dark pastel green','dust','electric lime','fresh green','light eggplant','nasty green']
# Two aliases over the same palette; `cc` is the short name used in plotting code.
color_palette = sns.xkcd_palette(color_names)
cc = sns.xkcd_palette(color_names)
# Global seaborn look-and-feel for all figures in this module.
sns.set_style("darkgrid")
sns.set_context("notebook")
#-------------------------------------------------------------------------------
# Dictionaries for decoding trial summary data (integer codes <-> labels)
Cond_Dict = {0:'100-0', 1:'80-20', 2:'60-40', 3:'Control', 4:'1% Abs-Conc',5:'0.1% Abs-Conc'}
Port_Dict = {2:'Left', 1:'Right'}
Resp_Dict = {1:'Correct',0:'Incorrect'}
Turn_Dict = {2:'Left', 1:'Right'}
# Inverse mappings (label -> integer code)
Cond_InvDict = {'100-0':0, '80-20':1, '60-40':2, 'Control':3, '1% Abs-Conc':4, '0.1% Abs-Conc':5}
Port_InvDict = {'Left':2, 'Right':1}
Resp_InvDict = {'Correct':1,'Incorrect':0}
# Default output directory for generated figures.
Plot_Dir = './plots/'
#-------------------------------------------------------------------------------
def rgb2gray(rgb):
    """Collapse the RGB channels of an image to grayscale (ITU-R BT.601 luma weights)."""
    luma_weights = [0.299, 0.587, 0.114]
    return np.dot(rgb[..., :3], luma_weights)
#-------------------------------------------------------------------------------
def gradient_cmap(gcolors, nsteps=256, bounds=None):
    """Build a LinearSegmentedColormap that interpolates through `gcolors`.

    :param gcolors: sequence of RGB (or RGBA) tuples.
    :param nsteps: number of quantization levels in the colormap.
    :param bounds: optional positions in [0, 1] for each color; defaults to
        an even spacing.
    """
    if bounds is None:
        bounds = np.linspace(0, 1, len(gcolors))
    channels = {'red': [], 'green': [], 'blue': [], 'alpha': []}
    for pos, col in zip(bounds, gcolors):
        channels['red'].append((pos, col[0], col[0]))
        channels['green'].append((pos, col[1], col[1]))
        channels['blue'].append((pos, col[2], col[2]))
        # Colors without an explicit alpha are treated as fully opaque.
        alpha = col[3] if len(col) == 4 else 1.
        channels['alpha'].append((pos, alpha, alpha))
    cdict = {name: tuple(stops) for name, stops in channels.items()}
    return LinearSegmentedColormap('grad_colormap', cdict, nsteps)
#-------------------------------------------------------------------------------
def shuffle_colors(used_states):
    """Return (color list, seaborn palette) reordered to follow sorted used_states.

    :param used_states: iterable of integer state ids used as indices into
        the module-level color_names list.
    """
    # BUG FIX: the mask is zipped against color_names below, so it must be
    # sized from color_names; it was previously sized from `colors`, a name
    # not defined in this module's visible scope (presumably a star-import).
    mask = np.zeros(len(color_names), dtype=bool)
    mask[used_states] = True
    # Keep only the color names belonging to used states, in palette order...
    color_names_shortened = [cn for cn, s in zip(color_names, mask) if s]
    # ...then reorder them to match the sorted order of used_states.
    color_names_shuffled = [x for _, x in sorted(zip(used_states, color_names_shortened), key=lambda pair: pair[0])]
    colors_shf = sns.xkcd_palette(color_names_shuffled)
    cc = [rgb for rgb in colors_shf]
    return cc, colors_shf
#-------------------------------------------------------------------------------
def get_colors(N_used_states):
    """Return the first N_used_states xkcd colors as (plain list, seaborn palette)."""
    palette = sns.xkcd_palette(color_names[:N_used_states])
    return list(palette), palette
#-------------------------------------------------------------------------------
def plot_z_samples(zs,used_states,xmax=None,
                   plt_slice=None,
                   N_iters=None,
                   title=None,
                   ax=None,
                   pdf=None):
    """Render a stack of discrete state sequences as a heatmap.

    :param zs: 2D array-like of state ids, one row per trial.
    :param used_states: integer ids of the states actually used by the model.
    :param xmax: optional upper x-limit (frames).
    :param plt_slice: (start, stop) tuple of frames to display; defaults to all.
    :param N_iters: number of rows to label on the y extent; defaults to len(zs).
    :param ax: optional existing axes to draw into.
    :param pdf: optional PdfPages object; the figure is appended to it.
    """
    if ax is None:
        fig = plt.figure(figsize=(10, 5))
        ax = fig.add_subplot(111)
    else:
        # BUG FIX: `fig` was undefined when a caller supplied `ax`, making
        # pdf.savefig(fig) below raise NameError; use the axes' parent figure.
        fig = ax.figure
    zs = np.array(zs)
    if plt_slice is None:
        plt_slice = (0, zs.shape[1])
    if N_iters is None:
        N_iters = zs.shape[0]
    # How many states were discovered?
    K = len(used_states)
    cc, colors_shf = shuffle_colors(used_states)
    # Plot the state sequences as a heatmap
    im = ax.imshow(zs[:, slice(*plt_slice)], aspect='auto', vmin=0, vmax=K - 1,
                   cmap=gradient_cmap(color_palette), interpolation="nearest",
                   extent=plt_slice + (N_iters, 0))
    # Legend: one proxy patch per used state.
    # NOTE(review): `colors` is not defined in this module's visible scope
    # (presumably a star-import); confirm it matches the heatmap palette above.
    patches = [mpatches.Patch(color=colors[int(s)], label="State {:.0f}".format(s)) for s in used_states]
    plt.legend(handles=patches, loc=4)
    ax.set_ylabel("Trial")
    ax.set_xlabel("Frame #")
    if xmax is not None:
        plt.xlim(0, xmax)
    if title is not None:
        ax.set_title(title)
    if pdf is not None:
        pdf.savefig(fig)
    # Close the figures
    plt.close('all')
#-------------------------------------------------------------------------------
def plot_MAP_estimates(trMAPs,trMASKs,used_states,trsum,args,mouseID,apply_mask=True,Plot_Dir=None,fname=None):
    """Plot MAP-estimated state sequences for all trials, then per condition,
    into a multi-page PDF.

    :param trMAPs: list of per-trial MAP state sequences (mutated if apply_mask).
    :param trMASKs: per-trial boolean masks of frames to blank out.
    :param used_states: integer ids of the states used by the model.
    :param trsum: trial summary DataFrame with 'port'/'cond'/'resp' columns.
    :param args: model configuration dict ('model', 'K', 'G' or 'Nmax').
    :param mouseID: identifier used in titles and the output filename.
    :param apply_mask: if True, set masked frames of each MAP sequence to NaN.
    :param Plot_Dir: output directory (defaults to './plots').
    :param fname: optional output PDF filename.
    """
    # Get number of trials from trial summary data frame
    nTrials = len(trsum)
    timestr = time.strftime('%Y%m%d_%H%M')
    if Plot_Dir is None:
        Plot_Dir = './plots'
    # Create a pdf document for plots
    if fname is None:
        # BUG FIX: `is` compared string identity, not equality; use `==`.
        if args['model'] == 'AR-HDP-HMM':
            fname = 'MAP_StateSeq_{}_K{:.0f}G{:.0f}_{}.pdf'.format(mouseID,args['K'],args['G'],timestr)
        else:
            fname = 'MAP_StateSeq_{}_N{}_{}.pdf'.format(mouseID,args['Nmax'],timestr)
    pdfdoc = PdfPages(os.path.join(Plot_Dir,fname))
    # Apply mask to MAP state sequences (mutates the caller's arrays)
    if apply_mask:
        for MAP,mask in zip(trMAPs,trMASKs):
            MAP[mask] = np.nan
    # Plot all MAP sequences on one plot, padded with NaNs to a common length
    trMAPs_sorted = sorted(trMAPs,key=len,reverse=True)
    max_tsteps = len(trMAPs_sorted[0])
    MAP_padded = np.empty((nTrials,max_tsteps))
    MAP_padded[:] = np.nan
    for ii, MAP in enumerate(trMAPs_sorted):
        MAP_padded[ii][:len(MAP)] = MAP
    # Get a list of trial lengths
    MAP_ts = [len(trMAPs[i]) for i in range(nTrials)]
    MAP_ts.sort(key=int,reverse=True)
    # x-limit: 1.5x the median trial length, rounded to the nearest 50 frames
    xmax = int(50*round(1.5*np.median(MAP_ts)/50))
    if xmax > max_tsteps:
        xmax = max_tsteps
    # Plot the discrete state sequence of the collection of trials
    title = 'MAP estimates for all trials of {}'.format(mouseID)
    plot_z_samples(MAP_padded,used_states,xmax,title = title,pdf = pdfdoc)
    # One heatmap per (response, condition, active-odor-port) combination
    for resp in Resp_Dict:
        for iCond in Cond_Dict:
            for lr in Port_Dict:
                mask = np.zeros(nTrials, dtype=bool)
                # Find the indices of the trial list matching this combination
                indy = np.where((trsum['port'] == lr) & (trsum['cond'] == iCond) & (trsum['resp'] == resp))
                mask[indy] = True
                # Continue onto next condition if no trials exist
                if sum(mask) == 0:
                    continue
                # Create a new list based on only that condition, longest first
                cond_MAPs = [trMAPs[i] for i in range(nTrials) if mask[i]]
                cond_MAPs.sort(key=len,reverse=True)
                max_tsteps = len(cond_MAPs[0])
                # Get a list of trial lengths for this condition
                cond_ts = [len(trMAPs[i]) for i in range(nTrials) if mask[i]]
                cond_ts.sort(key=int,reverse=True)
                xmax = int(50*round(1.5*np.median(cond_ts)/50))
                if xmax > max_tsteps:
                    xmax = max_tsteps
                # Pad with NaNs so all trials of this condition share one heatmap
                MAP_padded = np.empty((sum(mask),max_tsteps))
                MAP_padded[:] = np.nan
                for ii, MAP in enumerate(cond_MAPs):
                    MAP_padded[ii][:len(MAP)] = MAP
                title = '{} Condition, MAP estimates for {} {} trials of the {} Active Odor Port for {}'.format(Cond_Dict[iCond],sum(mask),Resp_Dict[resp],Port_Dict[lr],mouseID)
                plot_z_samples(MAP_padded,used_states,xmax,title = title,pdf = pdfdoc)
    # Close PDF file
    pdfdoc.close()
#-------------------------------------------------------------------------------
def plot_trans_matrix(trans_matrix,state_usages,dis_k,title=None,pdf=None):
    """Plot a transition-matrix heatmap next to a state-usage bar chart.

    :param trans_matrix: square array of state transition probabilities.
    :param state_usages: list of state-usage arrays (one per sample/fit).
    :param dis_k: number of discovered states; usages longer than this get
        error bars.
    :param title: optional figure suptitle.
    :param pdf: optional PdfPages object; the figure is appended to it.
    """
    # Convert numpy arrays into Pandas DataFrames
    # NOTE(review): the heatmap below uses vmin=0, vmax=1 although the data is
    # log-transformed (values <= 0) — confirm whether log or raw probabilities
    # were intended here.
    tm = pd.DataFrame(np.log(trans_matrix))
    su = pd.DataFrame(state_usages)
    # Plotting Properties
    fig = plt.figure(figsize=(10,5))
    gs = gridspec.GridSpec(1,2)
    # NOTE(review): FontProperties is not imported in this module's visible
    # header (presumably via `from utilities import *`); `fp` is also unused
    # below — confirm origin/intent.
    fp = FontProperties()
    fp.set_weight("bold")
    if len(state_usages)>dis_k:
        # Multiple usage samples: show standard-deviation error bars
        ci = 'sd'
    else:
        # Only 1 array of state usages
        ci = None
    # Draw a heatmap with the numeric values in each cell
    cmap = sns.cubehelix_palette(light=1, as_cmap=True)
    ax1 = fig.add_subplot(gs[0,0])
    sns.heatmap(tm, cmap=cmap,annot=True, fmt=".2f",vmin=0, vmax=1, linewidths=.5, ax=ax1,square=True,cbar_kws={'label': 'Probability'})
    ax1.set_title('log(Transition Probability Matrix)')
    # Plot overall state_usages as horizontal bars, one color per state
    colors = sns.xkcd_palette(color_names)
    ax2 = fig.add_subplot(gs[0,1])
    ax2 = sns.barplot(data =su,ci = ci,orient='h',palette=colors)
    ax2.set_xlabel('Probability')
    ax2.set_ylabel('State')
    ax2.set_title('State Usage')
    # Set Super Title
    if title is not None:
        fig.suptitle(title)
    else:
        fig.suptitle('Overall ARHMM Fit')
    if pdf is not None:
        pdf.savefig(fig)
    #Close figures
    plt.close('all')
def construct_trans_matrices(arhmm,trMAPs,trans_matrix_mean,trsum,args,Plot_Dir=None):
nTrials = len(trsum)
used_states = sorted(arhmm.used_states)
dis_k = len(used_states)
##======= Construct transition matrices & State Usages ======##
# 3-3: Using the MAP-seq's in different conditions, calculate conditional
# state-usages (% time-steps in each state) in each different condition.
# 3-4: Using MAP-seqs, calculate the transition matrix for each condition
timestr = time.strftime('%Y%m%d_%H%M')
if Plot_Dir is None:
Plot_Dir = './plots'
if args['model'] is 'AR-HDP-HMM':
fname = 'TransitionMatrices_A{:.0f}_K{:.0f}G{:.0f}_{}.pdf'.format(args['A'],args['K'],args['G'],timestr)
else:
fname = 'TransitionMatrices_{}_N{}_{}.pdf'.format(args['mouseID'],args['Nmax'],timestr)
pdfdoc = PdfPages(os.path.join(Plot_Dir,fname))
# Plot the mean transition matrix calculated from the Gibbs samples
plot_trans_matrix(trans_matrix_mean, [arhmm.state_usages], dis_k,title='Transition Matrix calculated from Gibbs samples',pdf = pdfdoc)
# Plot the transition matrix & state usage for the overall ARHMM fit
plot_trans_matrix(arhmm.trans_distn.trans_matrix, [arhmm.state_usages], dis_k,pdf = pdfdoc)
pdfdoc.close()
trans_matrices = [[],[]]
# Loop over responses
for resp in Resp_Dict:
# Loop over active odor port
for lr in Port_Dict:
# Calculate transition matrix per condition per response
cond_trans_matrix = np.zeros((len(Cond_Dict),dis_k,dis_k))
# Loop over conditions
for iCond in Cond_Dict:
# Reset Mask to False
mask = np.zeros(nTrials, dtype=bool)
# Find the indices of the trsum that correspond to the condition
indy = np.where((trsum['cond'] == iCond) & (trsum['port'] == lr) & (trsum['resp'] == resp))
mask[indy] = True
# Continue onto next condition if no trials exist
if sum(mask) == 0:
continue
# Create a new list based on only that condition
cond_MAPs = [trMAPs[i].copy() for i in range(nTrials) if mask[i]]
# Calculate condition state usages
cond_state_usages = np.array([[sum(MAP == s)/len(MAP) for s in used_states] for MAP in cond_MAPs])
# Loop through the trials of this condition/response type
for iTrial, MAP in enumerate(cond_MAPs):
for t in range(len(MAP)-1):
# Get the state at time t & t+1
s1,s2 = MAP[t],MAP[t+1]
# Get the indices associated with used_states
i1 = np.where(used_states == s1)
i2 = np.where(used_states == s2)
cond_trans_matrix[iCond,i1,i2] += 1
# Divide each row by the number of transitions from that state
tot_trans = np.sum(cond_trans_matrix[iCond],axis = 1)
for i,rowsum in enumerate(tot_trans):
if rowsum == 0:
cond_trans_matrix[iCond,i,:] = 0
else:
cond_trans_matrix[iCond,i,:] = cond_trans_matrix[iCond,i,:]/rowsum
title = '{} Condition, {} Active Odor Port, {} Response'.format(Cond_Dict[iCond],Port_Dict[lr],Resp_Dict[resp])
# Plot transition matrix and state usage for | |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import SitesConfiguration
from .operations import GroupsOperations
from .operations import SitesSiteOperations
from .operations import SitesOperations
from .operations import SitesContentTypesOperations
from .operations import SitesListsOperations
from .operations import SitesListsActivitiesOperations
from .operations import SitesListsActivitiesListItemOperations
from .operations import SitesListsActivitiesListItemVersionsOperations
from .operations import SitesListsContentTypesOperations
from .operations import SitesListsItemsOperations
from .operations import SitesListsItemsActivitiesOperations
from .operations import SitesListsItemsActivitiesListItemOperations
from .operations import SitesListsItemsVersionsOperations
from .operations import SitesOnenoteNotebooksOperations
from .operations import SitesOnenoteNotebooksSectionGroupsParentNotebookOperations
from .operations import SitesOnenoteNotebooksSectionGroupsSectionsOperations
from .operations import SitesOnenoteNotebooksSectionGroupsSectionsPagesOperations
from .operations import SitesOnenoteNotebooksSectionGroupsSectionsPagesParentNotebookOperations
from .operations import SitesOnenoteNotebooksSectionGroupsSectionsPagesParentSectionOperations
from .operations import SitesOnenoteNotebooksSectionGroupsSectionsParentNotebookOperations
from .operations import SitesOnenoteNotebooksSectionsOperations
from .operations import SitesOnenoteNotebooksSectionsPagesOperations
from .operations import SitesOnenoteNotebooksSectionsPagesParentNotebookOperations
from .operations import SitesOnenoteNotebooksSectionsPagesParentSectionOperations
from .operations import SitesOnenoteNotebooksSectionsParentNotebookOperations
from .operations import SitesOnenoteNotebooksSectionsParentSectionGroupParentNotebookOperations
from .operations import SitesOnenoteNotebooksSectionsParentSectionGroupSectionsOperations
from .operations import SitesOnenotePagesOperations
from .operations import SitesOnenotePagesParentNotebookOperations
from .operations import SitesOnenotePagesParentNotebookSectionGroupsParentNotebookOperations
from .operations import SitesOnenotePagesParentNotebookSectionGroupsSectionsOperations
from .operations import SitesOnenotePagesParentNotebookSectionGroupsSectionsPagesOperations
from .operations import SitesOnenotePagesParentNotebookSectionGroupsSectionsParentNotebookOperations
from .operations import SitesOnenotePagesParentNotebookSectionsOperations
from .operations import SitesOnenotePagesParentNotebookSectionsPagesOperations
from .operations import SitesOnenotePagesParentNotebookSectionsParentNotebookOperations
from .operations import SitesOnenotePagesParentNotebookSectionsParentSectionGroupParentNotebookOperations
from .operations import SitesOnenotePagesParentNotebookSectionsParentSectionGroupSectionsOperations
from .operations import SitesOnenotePagesParentSectionOperations
from .operations import SitesOnenotePagesParentSectionPagesOperations
from .operations import SitesOnenotePagesParentSectionParentNotebookOperations
from .operations import SitesOnenotePagesParentSectionParentNotebookSectionGroupsParentNotebookOperations
from .operations import SitesOnenotePagesParentSectionParentNotebookSectionGroupsSectionsOperations
from .operations import SitesOnenotePagesParentSectionParentNotebookSectionsOperations
from .operations import SitesOnenotePagesParentSectionParentSectionGroupParentNotebookOperations
from .operations import SitesOnenotePagesParentSectionParentSectionGroupParentNotebookSectionsOperations
from .operations import SitesOnenotePagesParentSectionParentSectionGroupSectionsOperations
from .operations import SitesOnenoteSectionGroupsParentNotebookOperations
from .operations import SitesOnenoteSectionGroupsParentNotebookSectionsOperations
from .operations import SitesOnenoteSectionGroupsParentNotebookSectionsPagesOperations
from .operations import SitesOnenoteSectionGroupsParentNotebookSectionsPagesParentNotebookOperations
from .operations import SitesOnenoteSectionGroupsParentNotebookSectionsPagesParentSectionOperations
from .operations import SitesOnenoteSectionGroupsParentNotebookSectionsParentNotebookOperations
from .operations import SitesOnenoteSectionGroupsSectionsOperations
from .operations import SitesOnenoteSectionGroupsSectionsPagesOperations
from .operations import SitesOnenoteSectionGroupsSectionsPagesParentNotebookOperations
from .operations import SitesOnenoteSectionGroupsSectionsPagesParentNotebookSectionsOperations
from .operations import SitesOnenoteSectionGroupsSectionsPagesParentSectionOperations
from .operations import SitesOnenoteSectionGroupsSectionsParentNotebookOperations
from .operations import SitesOnenoteSectionGroupsSectionsParentNotebookSectionsOperations
from .operations import SitesOnenoteSectionsOperations
from .operations import SitesOnenoteSectionsPagesOperations
from .operations import SitesOnenoteSectionsPagesParentNotebookOperations
from .operations import SitesOnenoteSectionsPagesParentNotebookSectionGroupsParentNotebookOperations
from .operations import SitesOnenoteSectionsPagesParentNotebookSectionGroupsSectionsOperations
from .operations import SitesOnenoteSectionsPagesParentNotebookSectionsOperations
from .operations import SitesOnenoteSectionsPagesParentSectionOperations
from .operations import SitesOnenoteSectionsParentNotebookOperations
from .operations import SitesOnenoteSectionsParentNotebookSectionGroupsParentNotebookOperations
from .operations import SitesOnenoteSectionsParentNotebookSectionGroupsSectionsOperations
from .operations import SitesOnenoteSectionsParentNotebookSectionsOperations
from .operations import SitesOnenoteSectionsParentSectionGroupParentNotebookOperations
from .operations import SitesOnenoteSectionsParentSectionGroupParentNotebookSectionsOperations
from .operations import SitesOnenoteSectionsParentSectionGroupSectionsOperations
from .operations import SitesPagesOperations
from .operations import UsersOperations
from .. import models
class Sites(object):
"""Sites.
:ivar groups: GroupsOperations operations
:vartype groups: sites.aio.operations.GroupsOperations
:ivar sites_site: SitesSiteOperations operations
:vartype sites_site: sites.aio.operations.SitesSiteOperations
:ivar sites: SitesOperations operations
:vartype sites: sites.aio.operations.SitesOperations
:ivar sites_content_types: SitesContentTypesOperations operations
:vartype sites_content_types: sites.aio.operations.SitesContentTypesOperations
:ivar sites_lists: SitesListsOperations operations
:vartype sites_lists: sites.aio.operations.SitesListsOperations
:ivar sites_lists_activities: SitesListsActivitiesOperations operations
:vartype sites_lists_activities: sites.aio.operations.SitesListsActivitiesOperations
:ivar sites_lists_activities_list_item: SitesListsActivitiesListItemOperations operations
:vartype sites_lists_activities_list_item: sites.aio.operations.SitesListsActivitiesListItemOperations
:ivar sites_lists_activities_list_item_versions: SitesListsActivitiesListItemVersionsOperations operations
:vartype sites_lists_activities_list_item_versions: sites.aio.operations.SitesListsActivitiesListItemVersionsOperations
:ivar sites_lists_content_types: SitesListsContentTypesOperations operations
:vartype sites_lists_content_types: sites.aio.operations.SitesListsContentTypesOperations
:ivar sites_lists_items: SitesListsItemsOperations operations
:vartype sites_lists_items: sites.aio.operations.SitesListsItemsOperations
:ivar sites_lists_items_activities: SitesListsItemsActivitiesOperations operations
:vartype sites_lists_items_activities: sites.aio.operations.SitesListsItemsActivitiesOperations
:ivar sites_lists_items_activities_list_item: SitesListsItemsActivitiesListItemOperations operations
:vartype sites_lists_items_activities_list_item: sites.aio.operations.SitesListsItemsActivitiesListItemOperations
:ivar sites_lists_items_versions: SitesListsItemsVersionsOperations operations
:vartype sites_lists_items_versions: sites.aio.operations.SitesListsItemsVersionsOperations
:ivar sites_onenote_notebooks: SitesOnenoteNotebooksOperations operations
:vartype sites_onenote_notebooks: sites.aio.operations.SitesOnenoteNotebooksOperations
:ivar sites_onenote_notebooks_section_groups_parent_notebook: SitesOnenoteNotebooksSectionGroupsParentNotebookOperations operations
:vartype sites_onenote_notebooks_section_groups_parent_notebook: sites.aio.operations.SitesOnenoteNotebooksSectionGroupsParentNotebookOperations
:ivar sites_onenote_notebooks_section_groups_sections: SitesOnenoteNotebooksSectionGroupsSectionsOperations operations
:vartype sites_onenote_notebooks_section_groups_sections: sites.aio.operations.SitesOnenoteNotebooksSectionGroupsSectionsOperations
:ivar sites_onenote_notebooks_section_groups_sections_pages: SitesOnenoteNotebooksSectionGroupsSectionsPagesOperations operations
:vartype sites_onenote_notebooks_section_groups_sections_pages: sites.aio.operations.SitesOnenoteNotebooksSectionGroupsSectionsPagesOperations
:ivar sites_onenote_notebooks_section_groups_sections_pages_parent_notebook: SitesOnenoteNotebooksSectionGroupsSectionsPagesParentNotebookOperations operations
:vartype sites_onenote_notebooks_section_groups_sections_pages_parent_notebook: sites.aio.operations.SitesOnenoteNotebooksSectionGroupsSectionsPagesParentNotebookOperations
:ivar sites_onenote_notebooks_section_groups_sections_pages_parent_section: SitesOnenoteNotebooksSectionGroupsSectionsPagesParentSectionOperations operations
:vartype sites_onenote_notebooks_section_groups_sections_pages_parent_section: sites.aio.operations.SitesOnenoteNotebooksSectionGroupsSectionsPagesParentSectionOperations
:ivar sites_onenote_notebooks_section_groups_sections_parent_notebook: SitesOnenoteNotebooksSectionGroupsSectionsParentNotebookOperations operations
:vartype sites_onenote_notebooks_section_groups_sections_parent_notebook: sites.aio.operations.SitesOnenoteNotebooksSectionGroupsSectionsParentNotebookOperations
:ivar sites_onenote_notebooks_sections: SitesOnenoteNotebooksSectionsOperations operations
:vartype sites_onenote_notebooks_sections: sites.aio.operations.SitesOnenoteNotebooksSectionsOperations
:ivar sites_onenote_notebooks_sections_pages: SitesOnenoteNotebooksSectionsPagesOperations operations
:vartype sites_onenote_notebooks_sections_pages: sites.aio.operations.SitesOnenoteNotebooksSectionsPagesOperations
:ivar sites_onenote_notebooks_sections_pages_parent_notebook: SitesOnenoteNotebooksSectionsPagesParentNotebookOperations operations
:vartype sites_onenote_notebooks_sections_pages_parent_notebook: sites.aio.operations.SitesOnenoteNotebooksSectionsPagesParentNotebookOperations
:ivar sites_onenote_notebooks_sections_pages_parent_section: SitesOnenoteNotebooksSectionsPagesParentSectionOperations operations
:vartype sites_onenote_notebooks_sections_pages_parent_section: sites.aio.operations.SitesOnenoteNotebooksSectionsPagesParentSectionOperations
:ivar sites_onenote_notebooks_sections_parent_notebook: SitesOnenoteNotebooksSectionsParentNotebookOperations operations
:vartype sites_onenote_notebooks_sections_parent_notebook: sites.aio.operations.SitesOnenoteNotebooksSectionsParentNotebookOperations
:ivar sites_onenote_notebooks_sections_parent_section_group_parent_notebook: SitesOnenoteNotebooksSectionsParentSectionGroupParentNotebookOperations operations
:vartype sites_onenote_notebooks_sections_parent_section_group_parent_notebook: sites.aio.operations.SitesOnenoteNotebooksSectionsParentSectionGroupParentNotebookOperations
:ivar sites_onenote_notebooks_sections_parent_section_group_sections: SitesOnenoteNotebooksSectionsParentSectionGroupSectionsOperations operations
:vartype sites_onenote_notebooks_sections_parent_section_group_sections: sites.aio.operations.SitesOnenoteNotebooksSectionsParentSectionGroupSectionsOperations
:ivar sites_onenote_pages: SitesOnenotePagesOperations operations
:vartype sites_onenote_pages: sites.aio.operations.SitesOnenotePagesOperations
:ivar sites_onenote_pages_parent_notebook: SitesOnenotePagesParentNotebookOperations operations
:vartype sites_onenote_pages_parent_notebook: sites.aio.operations.SitesOnenotePagesParentNotebookOperations
:ivar sites_onenote_pages_parent_notebook_section_groups_parent_notebook: SitesOnenotePagesParentNotebookSectionGroupsParentNotebookOperations operations
:vartype sites_onenote_pages_parent_notebook_section_groups_parent_notebook: sites.aio.operations.SitesOnenotePagesParentNotebookSectionGroupsParentNotebookOperations
:ivar sites_onenote_pages_parent_notebook_section_groups_sections: SitesOnenotePagesParentNotebookSectionGroupsSectionsOperations operations
:vartype sites_onenote_pages_parent_notebook_section_groups_sections: sites.aio.operations.SitesOnenotePagesParentNotebookSectionGroupsSectionsOperations
:ivar sites_onenote_pages_parent_notebook_section_groups_sections_pages: SitesOnenotePagesParentNotebookSectionGroupsSectionsPagesOperations operations
:vartype sites_onenote_pages_parent_notebook_section_groups_sections_pages: sites.aio.operations.SitesOnenotePagesParentNotebookSectionGroupsSectionsPagesOperations
:ivar sites_onenote_pages_parent_notebook_section_groups_sections_parent_notebook: SitesOnenotePagesParentNotebookSectionGroupsSectionsParentNotebookOperations operations
:vartype sites_onenote_pages_parent_notebook_section_groups_sections_parent_notebook: sites.aio.operations.SitesOnenotePagesParentNotebookSectionGroupsSectionsParentNotebookOperations
:ivar sites_onenote_pages_parent_notebook_sections: SitesOnenotePagesParentNotebookSectionsOperations operations
:vartype sites_onenote_pages_parent_notebook_sections: sites.aio.operations.SitesOnenotePagesParentNotebookSectionsOperations
:ivar sites_onenote_pages_parent_notebook_sections_pages: SitesOnenotePagesParentNotebookSectionsPagesOperations operations
:vartype sites_onenote_pages_parent_notebook_sections_pages: sites.aio.operations.SitesOnenotePagesParentNotebookSectionsPagesOperations
:ivar sites_onenote_pages_parent_notebook_sections_parent_notebook: SitesOnenotePagesParentNotebookSectionsParentNotebookOperations operations
:vartype sites_onenote_pages_parent_notebook_sections_parent_notebook: sites.aio.operations.SitesOnenotePagesParentNotebookSectionsParentNotebookOperations
:ivar sites_onenote_pages_parent_notebook_sections_parent_section_group_parent_notebook: SitesOnenotePagesParentNotebookSectionsParentSectionGroupParentNotebookOperations operations
:vartype sites_onenote_pages_parent_notebook_sections_parent_section_group_parent_notebook: sites.aio.operations.SitesOnenotePagesParentNotebookSectionsParentSectionGroupParentNotebookOperations
:ivar sites_onenote_pages_parent_notebook_sections_parent_section_group_sections: SitesOnenotePagesParentNotebookSectionsParentSectionGroupSectionsOperations operations
:vartype sites_onenote_pages_parent_notebook_sections_parent_section_group_sections: sites.aio.operations.SitesOnenotePagesParentNotebookSectionsParentSectionGroupSectionsOperations
:ivar sites_onenote_pages_parent_section: SitesOnenotePagesParentSectionOperations operations
:vartype sites_onenote_pages_parent_section: sites.aio.operations.SitesOnenotePagesParentSectionOperations
:ivar sites_onenote_pages_parent_section_pages: SitesOnenotePagesParentSectionPagesOperations operations
:vartype sites_onenote_pages_parent_section_pages: sites.aio.operations.SitesOnenotePagesParentSectionPagesOperations
:ivar sites_onenote_pages_parent_section_parent_notebook: SitesOnenotePagesParentSectionParentNotebookOperations operations
:vartype sites_onenote_pages_parent_section_parent_notebook: sites.aio.operations.SitesOnenotePagesParentSectionParentNotebookOperations
:ivar sites_onenote_pages_parent_section_parent_notebook_section_groups_parent_notebook: SitesOnenotePagesParentSectionParentNotebookSectionGroupsParentNotebookOperations operations
:vartype sites_onenote_pages_parent_section_parent_notebook_section_groups_parent_notebook: sites.aio.operations.SitesOnenotePagesParentSectionParentNotebookSectionGroupsParentNotebookOperations
:ivar sites_onenote_pages_parent_section_parent_notebook_section_groups_sections: SitesOnenotePagesParentSectionParentNotebookSectionGroupsSectionsOperations operations
:vartype sites_onenote_pages_parent_section_parent_notebook_section_groups_sections: sites.aio.operations.SitesOnenotePagesParentSectionParentNotebookSectionGroupsSectionsOperations
:ivar sites_onenote_pages_parent_section_parent_notebook_sections: SitesOnenotePagesParentSectionParentNotebookSectionsOperations operations
:vartype sites_onenote_pages_parent_section_parent_notebook_sections: sites.aio.operations.SitesOnenotePagesParentSectionParentNotebookSectionsOperations
:ivar sites_onenote_pages_parent_section_parent_section_group_parent_notebook: SitesOnenotePagesParentSectionParentSectionGroupParentNotebookOperations operations
:vartype sites_onenote_pages_parent_section_parent_section_group_parent_notebook: sites.aio.operations.SitesOnenotePagesParentSectionParentSectionGroupParentNotebookOperations
:ivar sites_onenote_pages_parent_section_parent_section_group_parent_notebook_sections: SitesOnenotePagesParentSectionParentSectionGroupParentNotebookSectionsOperations operations
:vartype sites_onenote_pages_parent_section_parent_section_group_parent_notebook_sections: sites.aio.operations.SitesOnenotePagesParentSectionParentSectionGroupParentNotebookSectionsOperations
:ivar sites_onenote_pages_parent_section_parent_section_group_sections: SitesOnenotePagesParentSectionParentSectionGroupSectionsOperations operations
:vartype sites_onenote_pages_parent_section_parent_section_group_sections: sites.aio.operations.SitesOnenotePagesParentSectionParentSectionGroupSectionsOperations
:ivar sites_onenote_section_groups_parent_notebook: SitesOnenoteSectionGroupsParentNotebookOperations operations
:vartype sites_onenote_section_groups_parent_notebook: sites.aio.operations.SitesOnenoteSectionGroupsParentNotebookOperations
:ivar sites_onenote_section_groups_parent_notebook_sections: SitesOnenoteSectionGroupsParentNotebookSectionsOperations operations
:vartype sites_onenote_section_groups_parent_notebook_sections: sites.aio.operations.SitesOnenoteSectionGroupsParentNotebookSectionsOperations
:ivar sites_onenote_section_groups_parent_notebook_sections_pages: SitesOnenoteSectionGroupsParentNotebookSectionsPagesOperations operations
:vartype sites_onenote_section_groups_parent_notebook_sections_pages: sites.aio.operations.SitesOnenoteSectionGroupsParentNotebookSectionsPagesOperations
:ivar sites_onenote_section_groups_parent_notebook_sections_pages_parent_notebook: SitesOnenoteSectionGroupsParentNotebookSectionsPagesParentNotebookOperations operations
:vartype sites_onenote_section_groups_parent_notebook_sections_pages_parent_notebook: sites.aio.operations.SitesOnenoteSectionGroupsParentNotebookSectionsPagesParentNotebookOperations
:ivar sites_onenote_section_groups_parent_notebook_sections_pages_parent_section: SitesOnenoteSectionGroupsParentNotebookSectionsPagesParentSectionOperations operations
:vartype sites_onenote_section_groups_parent_notebook_sections_pages_parent_section: sites.aio.operations.SitesOnenoteSectionGroupsParentNotebookSectionsPagesParentSectionOperations
:ivar sites_onenote_section_groups_parent_notebook_sections_parent_notebook: SitesOnenoteSectionGroupsParentNotebookSectionsParentNotebookOperations operations
:vartype sites_onenote_section_groups_parent_notebook_sections_parent_notebook: sites.aio.operations.SitesOnenoteSectionGroupsParentNotebookSectionsParentNotebookOperations
:ivar sites_onenote_section_groups_sections: SitesOnenoteSectionGroupsSectionsOperations operations
:vartype sites_onenote_section_groups_sections: sites.aio.operations.SitesOnenoteSectionGroupsSectionsOperations
:ivar sites_onenote_section_groups_sections_pages: SitesOnenoteSectionGroupsSectionsPagesOperations operations
:vartype sites_onenote_section_groups_sections_pages: sites.aio.operations.SitesOnenoteSectionGroupsSectionsPagesOperations
:ivar sites_onenote_section_groups_sections_pages_parent_notebook: SitesOnenoteSectionGroupsSectionsPagesParentNotebookOperations operations
:vartype sites_onenote_section_groups_sections_pages_parent_notebook: sites.aio.operations.SitesOnenoteSectionGroupsSectionsPagesParentNotebookOperations
:ivar sites_onenote_section_groups_sections_pages_parent_notebook_sections: SitesOnenoteSectionGroupsSectionsPagesParentNotebookSectionsOperations operations
:vartype sites_onenote_section_groups_sections_pages_parent_notebook_sections: sites.aio.operations.SitesOnenoteSectionGroupsSectionsPagesParentNotebookSectionsOperations
:ivar sites_onenote_section_groups_sections_pages_parent_section: SitesOnenoteSectionGroupsSectionsPagesParentSectionOperations operations
:vartype sites_onenote_section_groups_sections_pages_parent_section: sites.aio.operations.SitesOnenoteSectionGroupsSectionsPagesParentSectionOperations
:ivar sites_onenote_section_groups_sections_parent_notebook: SitesOnenoteSectionGroupsSectionsParentNotebookOperations operations
:vartype sites_onenote_section_groups_sections_parent_notebook: sites.aio.operations.SitesOnenoteSectionGroupsSectionsParentNotebookOperations
:ivar sites_onenote_section_groups_sections_parent_notebook_sections: SitesOnenoteSectionGroupsSectionsParentNotebookSectionsOperations operations
:vartype sites_onenote_section_groups_sections_parent_notebook_sections: sites.aio.operations.SitesOnenoteSectionGroupsSectionsParentNotebookSectionsOperations
:ivar sites_onenote_sections: SitesOnenoteSectionsOperations operations
:vartype sites_onenote_sections: sites.aio.operations.SitesOnenoteSectionsOperations
:ivar sites_onenote_sections_pages: SitesOnenoteSectionsPagesOperations operations
:vartype sites_onenote_sections_pages: sites.aio.operations.SitesOnenoteSectionsPagesOperations
:ivar sites_onenote_sections_pages_parent_notebook: SitesOnenoteSectionsPagesParentNotebookOperations operations
:vartype sites_onenote_sections_pages_parent_notebook: sites.aio.operations.SitesOnenoteSectionsPagesParentNotebookOperations
:ivar sites_onenote_sections_pages_parent_notebook_section_groups_parent_notebook: SitesOnenoteSectionsPagesParentNotebookSectionGroupsParentNotebookOperations operations
:vartype sites_onenote_sections_pages_parent_notebook_section_groups_parent_notebook: sites.aio.operations.SitesOnenoteSectionsPagesParentNotebookSectionGroupsParentNotebookOperations
:ivar sites_onenote_sections_pages_parent_notebook_section_groups_sections: SitesOnenoteSectionsPagesParentNotebookSectionGroupsSectionsOperations operations
:vartype sites_onenote_sections_pages_parent_notebook_section_groups_sections: sites.aio.operations.SitesOnenoteSectionsPagesParentNotebookSectionGroupsSectionsOperations
:ivar sites_onenote_sections_pages_parent_notebook_sections: SitesOnenoteSectionsPagesParentNotebookSectionsOperations operations
:vartype sites_onenote_sections_pages_parent_notebook_sections: sites.aio.operations.SitesOnenoteSectionsPagesParentNotebookSectionsOperations
:ivar sites_onenote_sections_pages_parent_section: SitesOnenoteSectionsPagesParentSectionOperations operations
:vartype sites_onenote_sections_pages_parent_section: sites.aio.operations.SitesOnenoteSectionsPagesParentSectionOperations
:ivar sites_onenote_sections_parent_notebook: SitesOnenoteSectionsParentNotebookOperations operations
:vartype sites_onenote_sections_parent_notebook: sites.aio.operations.SitesOnenoteSectionsParentNotebookOperations
:ivar sites_onenote_sections_parent_notebook_section_groups_parent_notebook: SitesOnenoteSectionsParentNotebookSectionGroupsParentNotebookOperations operations
:vartype sites_onenote_sections_parent_notebook_section_groups_parent_notebook: sites.aio.operations.SitesOnenoteSectionsParentNotebookSectionGroupsParentNotebookOperations
:ivar sites_onenote_sections_parent_notebook_section_groups_sections: SitesOnenoteSectionsParentNotebookSectionGroupsSectionsOperations operations
:vartype sites_onenote_sections_parent_notebook_section_groups_sections: sites.aio.operations.SitesOnenoteSectionsParentNotebookSectionGroupsSectionsOperations
:ivar sites_onenote_sections_parent_notebook_sections: SitesOnenoteSectionsParentNotebookSectionsOperations operations
:vartype sites_onenote_sections_parent_notebook_sections: sites.aio.operations.SitesOnenoteSectionsParentNotebookSectionsOperations
:ivar sites_onenote_sections_parent_section_group_parent_notebook: SitesOnenoteSectionsParentSectionGroupParentNotebookOperations operations
:vartype sites_onenote_sections_parent_section_group_parent_notebook: sites.aio.operations.SitesOnenoteSectionsParentSectionGroupParentNotebookOperations
:ivar sites_onenote_sections_parent_section_group_parent_notebook_sections: SitesOnenoteSectionsParentSectionGroupParentNotebookSectionsOperations operations
:vartype sites_onenote_sections_parent_section_group_parent_notebook_sections: sites.aio.operations.SitesOnenoteSectionsParentSectionGroupParentNotebookSectionsOperations
:ivar sites_onenote_sections_parent_section_group_sections: SitesOnenoteSectionsParentSectionGroupSectionsOperations operations
:vartype sites_onenote_sections_parent_section_group_sections: sites.aio.operations.SitesOnenoteSectionsParentSectionGroupSectionsOperations
:ivar sites_pages: SitesPagesOperations operations
:vartype sites_pages: sites.aio.operations.SitesPagesOperations
:ivar users: UsersOperations operations
:vartype users: sites.aio.operations.UsersOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param top: Show only the first n items.
:type top: int
:param skip: Skip the first n items.
:type skip: int
:param search: Search items by search phrases.
:type search: str
:param filter: Filter items by property values.
:type filter: str
:param count: Include count of items.
:type count: bool
:param str base_url: Service URL
"""
def __init__(
self,
credential: "AsyncTokenCredential",
top: Optional[int] = None,
skip: Optional[int] = None,
search: Optional[str] = None,
filter: Optional[str] = None,
count: Optional[bool] = None,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://graph.microsoft.com/beta'
self._config = SitesConfiguration(credential, top, skip, search, filter, count, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.groups = GroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_site = SitesSiteOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites = SitesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_content_types = SitesContentTypesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_lists = SitesListsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_lists_activities = SitesListsActivitiesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_lists_activities_list_item = SitesListsActivitiesListItemOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_lists_activities_list_item_versions = SitesListsActivitiesListItemVersionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_lists_content_types = SitesListsContentTypesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_lists_items = SitesListsItemsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_lists_items_activities = SitesListsItemsActivitiesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_lists_items_activities_list_item = SitesListsItemsActivitiesListItemOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_lists_items_versions = SitesListsItemsVersionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_notebooks = SitesOnenoteNotebooksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_notebooks_section_groups_parent_notebook = SitesOnenoteNotebooksSectionGroupsParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_notebooks_section_groups_sections = SitesOnenoteNotebooksSectionGroupsSectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_notebooks_section_groups_sections_pages = SitesOnenoteNotebooksSectionGroupsSectionsPagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_notebooks_section_groups_sections_pages_parent_notebook = SitesOnenoteNotebooksSectionGroupsSectionsPagesParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_notebooks_section_groups_sections_pages_parent_section = SitesOnenoteNotebooksSectionGroupsSectionsPagesParentSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_notebooks_section_groups_sections_parent_notebook = SitesOnenoteNotebooksSectionGroupsSectionsParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_notebooks_sections = SitesOnenoteNotebooksSectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_notebooks_sections_pages = SitesOnenoteNotebooksSectionsPagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_notebooks_sections_pages_parent_notebook = SitesOnenoteNotebooksSectionsPagesParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_notebooks_sections_pages_parent_section = SitesOnenoteNotebooksSectionsPagesParentSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_notebooks_sections_parent_notebook = SitesOnenoteNotebooksSectionsParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_notebooks_sections_parent_section_group_parent_notebook = SitesOnenoteNotebooksSectionsParentSectionGroupParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_notebooks_sections_parent_section_group_sections = SitesOnenoteNotebooksSectionsParentSectionGroupSectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_pages = SitesOnenotePagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_pages_parent_notebook = SitesOnenotePagesParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_pages_parent_notebook_section_groups_parent_notebook = SitesOnenotePagesParentNotebookSectionGroupsParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_pages_parent_notebook_section_groups_sections = SitesOnenotePagesParentNotebookSectionGroupsSectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_pages_parent_notebook_section_groups_sections_pages = SitesOnenotePagesParentNotebookSectionGroupsSectionsPagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_pages_parent_notebook_section_groups_sections_parent_notebook = SitesOnenotePagesParentNotebookSectionGroupsSectionsParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_pages_parent_notebook_sections = SitesOnenotePagesParentNotebookSectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_pages_parent_notebook_sections_pages = SitesOnenotePagesParentNotebookSectionsPagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_pages_parent_notebook_sections_parent_notebook = SitesOnenotePagesParentNotebookSectionsParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_pages_parent_notebook_sections_parent_section_group_parent_notebook = SitesOnenotePagesParentNotebookSectionsParentSectionGroupParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_pages_parent_notebook_sections_parent_section_group_sections = SitesOnenotePagesParentNotebookSectionsParentSectionGroupSectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_pages_parent_section = SitesOnenotePagesParentSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_pages_parent_section_pages = SitesOnenotePagesParentSectionPagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_pages_parent_section_parent_notebook = SitesOnenotePagesParentSectionParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_pages_parent_section_parent_notebook_section_groups_parent_notebook = SitesOnenotePagesParentSectionParentNotebookSectionGroupsParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sites_onenote_pages_parent_section_parent_notebook_section_groups_sections = SitesOnenotePagesParentSectionParentNotebookSectionGroupsSectionsOperations(
self._client, self._config, self._serialize, | |
HammerDriver, append_error_func: Callable[[str], None]) -> Optional[dict]:
"""Create a full config to run the output."""
power_input_only = HammerDriver.par_output_to_power_input(driver.project_config)
if power_input_only is None:
driver.log.error("Input config does not appear to contain valid par outputs")
return None
else:
return self.get_full_config(driver, power_input_only)
def sim_to_power_action(self, driver: HammerDriver, append_error_func: Callable[[str], None]) -> Optional[dict]:
    """Create a full power-analysis config from the project's simulation outputs.

    :param driver: HammerDriver holding the project configuration.
    :param append_error_func: Callback used to record error messages for the caller.
    :return: Full config dict for the power run, or None if the project config
             does not contain valid sim outputs.
    """
    power_input_only = HammerDriver.sim_output_to_power_input(driver.project_config)
    if power_input_only is None:
        # Report through the error callback as well as the log so that callers
        # collecting errors (not just log readers) see the failure.
        message = "Input config does not appear to contain valid sim outputs"
        driver.log.error(message)
        append_error_func(message)
        return None
    else:
        return self.get_full_config(driver, power_input_only)
def synthesis_to_formal_action(self, driver: HammerDriver, append_error_func: Callable[[str], None]) -> Optional[dict]:
    """Create a full formal-verification config from the project's synthesis outputs.

    :param driver: HammerDriver holding the project configuration.
    :param append_error_func: Callback used to record error messages for the caller.
    :return: Full config dict for the formal run, or None if the project config
             does not contain valid synthesis outputs.
    """
    formal_input_only = HammerDriver.synthesis_output_to_formal_input(driver.project_config)
    if formal_input_only is None:
        # Report through the error callback as well as the log so that callers
        # collecting errors (not just log readers) see the failure.
        message = "Input config does not appear to contain valid synthesis outputs"
        driver.log.error(message)
        append_error_func(message)
        return None
    else:
        return self.get_full_config(driver, formal_input_only)
def par_to_formal_action(self, driver: HammerDriver, append_error_func: Callable[[str], None]) -> Optional[dict]:
    """Create a full formal-verification config from the project's par outputs.

    :param driver: HammerDriver holding the project configuration.
    :param append_error_func: Callback used to record error messages for the caller.
    :return: Full config dict for the formal run, or None if the project config
             does not contain valid par outputs.
    """
    formal_input_only = HammerDriver.par_output_to_formal_input(driver.project_config)
    if formal_input_only is None:
        # Report through the error callback as well as the log so that callers
        # collecting errors (not just log readers) see the failure.
        message = "Input config does not appear to contain valid par outputs"
        driver.log.error(message)
        append_error_func(message)
        return None
    else:
        return self.get_full_config(driver, formal_input_only)
def create_synthesis_par_action(self, synthesis_action: CLIActionConfigType, par_action: CLIActionConfigType) -> CLIActionConfigType:
    """
    Create a parameterizable synthesis_par action for the CLIDriver.
    :param synthesis_action: synthesis action
    :param par_action: par action
    :return: Custom synthesis_par action
    """
    def syn_par_action(driver: HammerDriver, append_error_func: Callable[[str], None]) -> Optional[dict]:
        # Run synthesis first; bail out if it did not produce an output config.
        syn_output = synthesis_action(driver, append_error_func)
        if syn_output is None:
            append_error_func("Synthesis action in syn_par failed")
            return None
        # Translate the synthesis outputs into place-and-route inputs.
        converted = HammerDriver.synthesis_output_to_par_input(syn_output)
        assert converted is not None, "syn_output must be generated by CLIDriver"
        par_input = self.get_full_config(driver, converted)  # type: dict
        # Persist the par input next to the synthesis run for debugging/resuming.
        # TODO(edwardw): make these output filenames configurable?
        assert driver.syn_tool is not None, "Syn tool must exist since we ran synthesis_action successfully"
        dump_config_to_json_file(os.path.join(driver.syn_tool.run_dir, "par-input.json"), par_input)
        # Feed the freshly generated input into place-and-route and return its output.
        driver.update_project_configs([par_input])
        return par_action(driver, append_error_func)
    return syn_par_action
def create_synthesis_sim_action(self, synthesis_action: CLIActionConfigType, sim_action: CLIActionConfigType) -> CLIActionConfigType:
    """
    Create a parameterizable synthesis_sim action for the CLIDriver.
    :param synthesis_action: synthesis action
    :param sim_action: sim action
    :return: Custom synthesis_sim action
    """
    def syn_sim_action(driver: HammerDriver, append_error_func: Callable[[str], None]) -> Optional[dict]:
        # Run synthesis first; bail out if it did not produce an output config.
        syn_output = synthesis_action(driver, append_error_func)
        if syn_output is None:
            append_error_func("Synthesis action in syn_sim failed")
            return None
        # Translate the synthesis outputs into simulation inputs.
        converted = HammerDriver.synthesis_output_to_sim_input(syn_output)
        assert converted is not None, "syn_output must be generated by CLIDriver"
        sim_input = self.get_full_config(driver, converted)  # type: dict
        # Persist the sim input next to the synthesis run for debugging/resuming.
        assert driver.syn_tool is not None, "Syn tool must exist since we ran synthesis_action successfully"
        dump_config_to_json_file(os.path.join(driver.syn_tool.run_dir, "sim-input.json"), sim_input)
        # Feed the freshly generated input into simulation and return its output.
        driver.update_project_configs([sim_input])
        return sim_action(driver, append_error_func)
    return syn_sim_action
def create_par_sim_action(self, par_action: CLIActionConfigType, sim_action: CLIActionConfigType) -> CLIActionConfigType:
"""
Create a parameterizable par_sim action for the CLIDriver.
:param par_action: par action
:param sim_action: sim action
:return: Custom par_sim action
"""
def par_sim_action(driver: HammerDriver, append_error_func: Callable[[str], None]) -> Optional[dict]:
# Synthesis output.
par_output = par_action(driver, append_error_func)
if par_output is None:
append_error_func("PAR action in syn_sim failed")
return None
else:
# Generate sim input from the par output.
par_output_converted = HammerDriver.par_output_to_sim_input(par_output)
assert par_output_converted is not None, "par_output must be generated by CLIDriver"
sim_input = self.get_full_config(driver, par_output_converted) # type: dict
# Dump both par output and sim input for debugging/resuming.
assert driver.par_tool is not None, "PAR tool must exist since we ran par_action successfully"
dump_config_to_json_file(os.path.join(driver.par_tool.run_dir, "sim-input.json"), sim_input)
# Use new sim input and run simulation.
driver.update_project_configs([sim_input])
sim_output = sim_action(driver, append_error_func)
return sim_output
return par_sim_action
### Hierarchical stuff ###
@property
def all_hierarchical_actions(self) -> Dict[str, CLIActionConfigType]:
"""
Return a list of hierarchical actions if the given project configuration is a hierarchical design.
Set when the driver is first created in args_to_driver.
Create syn/synthesis-[block], par-[block], and /syn_par-[block].
:return: Dictionary of actions to use (could be empty).
"""
actions = {} # type: Dict[str, CLIActionConfigType]
if self.hierarchical_auto_action is not None:
actions.update({"auto": self.hierarchical_auto_action})
def add_variants(templates: List[str], block: str, action: CLIActionConfigType) -> None:
"""Just add the given action using the name templates."""
for template in templates:
name = template.format(block=block)
actions.update({name: action})
for module, action in self.hierarchical_synthesis_actions.items():
add_variants([
"syn-{block}",
"synthesis-{block}",
"syn_{block}",
"synthesis_{block}"
], module, action)
for module, action in self.hierarchical_par_actions.items():
add_variants([
"par-{block}",
"par_{block}"
], module, action)
for module, action in self.hierarchical_synthesis_par_actions.items():
add_variants([
"syn-par-{block}",
"syn_par-{block}",
"syn-par_{block}",
"syn_par_{block}"
], module, action)
for module, action in self.hierarchical_drc_actions.items():
add_variants([
"drc-{block}",
"drc_{block}"
], module, action)
for module, action in self.hierarchical_lvs_actions.items():
add_variants([
"lvs-{block}",
"lvs_{block}"
], module, action)
for module, action in self.hierarchical_sim_actions.items():
add_variants([
"sim-{block}",
"sim_{block}"
], module, action)
for module, action in self.hierarchical_power_actions.items():
add_variants([
"power-{block}",
"power_{block}"
], module, action)
for module, action in self.hierarchical_formal_actions.items():
add_variants([
"formal-{block}",
"formal_{block}"
], module, action)
return actions
def get_extra_hierarchical_synthesis_hooks(self, driver: HammerDriver) -> Dict[str, List[HammerToolHookAction]]:
"""
Return a list of extra hierarchical synthesis hooks in this project.
To be overridden by subclasses.
:return: Dictionary of (module name, list of hooks)
"""
return dict()
def get_extra_hierarchical_par_hooks(self, driver: HammerDriver) -> Dict[str, List[HammerToolHookAction]]:
"""
Return a list of extra hierarchical place and route hooks in this project.
To be overridden by subclasses.
:return: Dictionary of (module name, list of hooks)
"""
return dict()
def get_extra_hierarchical_drc_hooks(self, driver: HammerDriver) -> Dict[str, List[HammerToolHookAction]]:
"""
Return a list of extra hierarchical DRC hooks in this project.
To be overridden by subclasses.
:return: Dictionary of (module name, list of hooks)
"""
return dict()
def get_extra_hierarchical_lvs_hooks(self, driver: HammerDriver) -> Dict[str, List[HammerToolHookAction]]:
"""
Return a list of extra hierarchical LVS hooks in this project.
To be overridden by subclasses.
:return: Dictionary of (module name, list of hooks)
"""
return dict()
def get_extra_hierarchical_sim_hooks(self, driver: HammerDriver) -> Dict[str, List[HammerToolHookAction]]:
"""
Return a list of extra hierarchical sim hooks in this project.
To be overridden by subclasses.
:return: Dictionary of (module name, list of hooks)
"""
return dict()
def get_extra_hierarchical_power_hooks(self, driver: HammerDriver) -> Dict[str, List[HammerToolHookAction]]:
"""
Return a list of extra hierarchical power hooks in this project.
To be overridden by subclasses.
:return: Dictionary of (module name, list of hooks)
"""
return dict()
def get_extra_hierarchical_formal_hooks(self, driver: HammerDriver) -> Dict[str, List[HammerToolHookAction]]:
"""
Return a list of extra hierarchical formal hooks in this project.
To be overridden by subclasses.
:return: Dictionary of (module name, list of hooks)
"""
return dict()
# The following functions are present for further user customizability.
def get_hierarchical_synthesis_action(self, module: str) -> CLIActionConfigType:
"""
Get the action associated with hierarchical synthesis for the given module (in hierarchical flows).
"""
return self.hierarchical_synthesis_actions[module]
def set_hierarchical_synthesis_action(self, module: str, action: CLIActionConfigType) -> None:
"""
Set the action associated with hierarchical synthesis for the given module (in hierarchical flows).
"""
self.hierarchical_synthesis_actions[module] = action
def get_hierarchical_par_action(self, module: str) -> CLIActionConfigType:
"""
Get the action associated with hierarchical par for the given module (in hierarchical flows).
"""
return self.hierarchical_par_actions[module]
def set_hierarchical_par_action(self, module: str, action: CLIActionConfigType) -> None:
"""
Set the action associated with hierarchical par for the given module (in hierarchical flows).
"""
self.hierarchical_par_actions[module] = action
def set_hierarchical_drc_action(self, module: str, action: CLIActionConfigType) -> None:
"""
Set the action associated with hierarchical drc for the given module (in hierarchical flows).
"""
self.hierarchical_drc_actions[module] = action
def get_hierarchical_drc_action(self, module: str) -> CLIActionConfigType:
"""
Get the action associated with hierarchical drc for the given module (in hierarchical flows).
"""
return self.hierarchical_drc_actions[module]
def set_hierarchical_lvs_action(self, module: str, action: CLIActionConfigType) -> None:
"""
Set the action associated with hierarchical lvs for the given module (in hierarchical flows).
"""
self.hierarchical_lvs_actions[module] = action
def get_hierarchical_lvs_action(self, module: str) -> CLIActionConfigType:
"""
Get the action associated with hierarchical lvs for the given module (in hierarchical flows).
"""
return self.hierarchical_lvs_actions[module]
def get_hierarchical_synthesis_par_action(self, module: str) -> CLIActionConfigType:
"""
Get the action associated with hierarchical syn_par for the given module (in hierarchical flows).
"""
return self.hierarchical_synthesis_par_actions[module]
def set_hierarchical_synthesis_par_action(self, module: str, action: CLIActionConfigType) -> None:
"""
Set the action associated with hierarchical syn_par for the given module (in hierarchical flows).
"""
self.hierarchical_synthesis_par_actions[module] = action
def set_hierarchical_sim_action(self, module: str, action: CLIActionConfigType) -> None:
"""
Set the action associated with hierarchical sim for the given module (in hierarchical flows).
"""
self.hierarchical_sim_actions[module] = action
def get_hierarchical_sim_action(self, | |
ee -= 1
global ycL
ycL[order] = ee
self.add(DataText(linenumber, code, todisplay))
line = stream.readLine()
lino += 1
linenumber = lino
todisplay = line
lino -= 1
keepadd = False
elif line.startsWith("TRIBUTARY AREA"): #Tributary area
code = "BLK"
self.add(DataText(linenumber, code, todisplay))
line = stream.readLine()
lino += 1
linenumber = lino
todisplay = line
while line.startsWith("HOME") \
or line.startsWith("TSFDSN") \
or line.startsWith("FFFDSN") \
or line.startsWith("NLUSE") \
or line.startsWith("NGAGE") \
or line.startsWith(" ") \
or line.startsWith("BRANCH") \
or line[0:] == "" \
or line.startsWith("NODE") \
or line.startsWith(";") \
or line.startsWith("*"):
if stream.atEnd():
break
if not line.startsWith("BRANCH"):
code = "TRBP"
self.add(DataText(linenumber, code, todisplay))
line = stream.readLine()
lino += 1
linenumber = lino
todisplay = line
else:
tbnum = line[7:12]
if " " in tbnum:
tbnum.replace(" ", "")
code = "TBRA %s" % tbnum
self.add(DataText(linenumber, code, todisplay))
line = stream.readLine()
lino += 1
linenumber = lino
todisplay = line
while line.startsWith(";") or line.startsWith("*"):
self.add(DataText(linenumber, code, todisplay))
line = stream.readLine()
lino += 1
linenumber = lino
todisplay = line
if line.startsWith(" NODE") or line.startsWith("NODE"):
break
if line.startsWith(" NODE") or line.startsWith("NODE"):
maxee = len(line)
order = code[5:]
cc = 0
ee = cc + 1
while line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
while not line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
ee -= 1
global tnodeL
tnodeL[order] = ee
cc = ee
ee = cc + 1
while line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
while not line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
ee -= 1
global tgageL
tgageL[order] = ee
cc = ee
ee = cc + 1
while line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
while not line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
ee -= 1
global timprvL
timprvL[order] = ee
cc = ee
ee = cc + 1
while line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
while not line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
ee -= 1
global tfgrssL
tfgrssL[order] = ee
cc = ee
ee = cc + 1
while line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
while not line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
ee -= 1
global tmgrssL
tmgrssL[order] = ee
cc = ee
ee = cc + 1
while line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
while not line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
ee -= 1
global tsgrssL
tsgrssL[order] = ee
cc = ee
ee = cc + 1
while line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
while not line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
ee -= 1
global tforstL
tforstL[order] = ee
cc = ee
ee = cc + 1
while line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
while not line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
ee -= 1
global tagricL
tagricL[order] = ee
cc = ee
ee = cc + 1
while line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
cc = tagricL[order]
ee = tagricL[order] + 1
break
while not line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
ee -= 1
global ttotalL
ttotalL[order] = ee
self.add(DataText(linenumber, code, todisplay))
line = stream.readLine()
lino += 1
linenumber = lino
todisplay = line
while line.startsWith(" ") or line.startsWith("*") \
or line.startsWith(";") or line.startsWith("DTEN") \
or line.startsWith("DLAY") or line.startsWith("-1"):
self.add(DataText(linenumber, code, todisplay))
line = stream.readLine()
lino += 1
linenumber = lino
todisplay = line
lino -= 1
keepadd = False
#Network Matrix
elif "MATRIX" in line:
code = "BLK"
self.add(DataText(linenumber, code, todisplay))
line = stream.readLine()
lino += 1
linenumber = lino
todisplay = line
mxnum = 0
while not "-1" in line[:5]:
if line.startsWith(" CODE"):
code = "MHEAD"
else:
if line.startsWith(";") or line.startsWith("*"):
code = "MXC"
else:
cc = 0
ee = 1
while line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > 10:
break
while not line[cc:ee] == " ":
cc += 1
ee = cc + 1
if not line[cc:ee] == " ":
break
if ee > 10:
break
mtrxcd = line[:ee]
mtrxcd.replace(" ","")
mtrxcd.replace(" ","")
global mxstore
if not mtrxcd == "":
code = "MX%s" % mtrxcd
alsocode = code
#if mtrxcd == "5":
mxstore = str(line) # Here I am storing the contents of the line in 'mxstore'
if mtrxcd == "":
mxnum += 1 # This integer will identify matrix lines in interpret
mxnumb = str(mxnum)
global mxparent
mxparent[mxnumb] = mxstore # Here I am storing the contents of the parent line in 'mxparent'
code = "%s-%s" % (alsocode, mxnum) # and assigning a new code to later use in interpret
self.add(DataText(linenumber, code, todisplay))
line = stream.readLine()
lino += 1
linenumber = lino
todisplay = line
code = "MX"
self.add(DataText(linenumber, code, todisplay))
line = stream.readLine()
lino += 1
linenumber = lino
todisplay = line
while line.startsWith(";") or line.startsWith("*") or line == "":
self.add(DataText(linenumber, code, todisplay))
line = stream.readLine()
lino += 1
linenumber = lino
todisplay = line
if line.startsWith("BNODE"):
code = "MTXB"
self.add(DataText(linenumber, code, todisplay))
line = stream.readLine()
lino += 1
linenumber = lino
todisplay = line
lino -= 1
keepadd = False
elif "OUTPUT LOCATIONS" in line: #Special Output Locations
code = "BLK"
self.add(DataText(linenumber, code, todisplay))
line = stream.readLine()
lino += 1
linenumber = lino
todisplay = line
while not line.startsWith(" -1"):
code = "OUTL"
if "BRA" in line or "NODE" in line:
cc = 0
ee = cc + 1
maxee = len(line)
while line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
while not line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
global outbraL
outbraL = cc
global outbraT
outbraT = line[:cc]
while line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
while not line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
global outnodeL
outnodeL = cc
global outnodeT
outnodeT = line[outbraL:cc]
while line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
while not line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
global outhead1L
outhead1L = cc
global outhead1T
outhead1T = line[outnodeL:cc]
while line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
while not line[cc:ee] == " ":
cc += 1
ee = cc + 1
if ee > maxee:
break
global outhead2L
outhead2L = cc
global outhead2T
outhead2T = line[outhead1L:cc]
self.add(DataText(linenumber, code, todisplay))
line = stream.readLine()
lino += 1
linenumber = lino
todisplay = line
code = "OUTL"
self.add(DataText(linenumber, code, todisplay))
line = stream.readLine()
lino += 1
linenumber | |
#!/usr/bin/env python3
# Script to print pictures using the NanoLab Microcontroller
# Microcontroller Model: Micos 1860SMC Basic
# Made 2019, <NAME>
# sunyudong [at] outlook [dot] sg
# github.com/sunjerry019/photonLauncher
from PIL import Image
# Pillow reqs https://pillow.readthedocs.io/en/5.1.x/installation.html
# We use Pillow instead of OpenCV to reduce installation overhead
# This comes at the price of speed
# import micron
import numpy as np
import argparse
import os, sys
import micron
import platform
if platform.system() == "Linux":
from matplotlib import cm # For coloring of the cutting lines
import pickle
from extraFunctions import query_yes_no as qyn
import datetime
# CUSTOM ERROR FOR PICCONV:
class ImageError(Exception):
    """Raised when an input picture cannot be printed (e.g. it exceeds the stage limits)."""
    pass
class PicConv():
def __init__(self, filename, xscale = 1, yscale = 1, cut = 0, allowDiagonals = False, prioritizeLeft = False, flipHorizontally = False, flipVertically = False ,frames = False, simulate = False, simulateDrawing = False, micronInstance = None, shutterTime = 800, GUI_Object = None, takeoverControl = False):
# shutterTime in milliseconds
# Set micronInstance to False instead of None to prevent using of micron
self.filename = filename
self.scale = {
"x": xscale,
"y": yscale
}
assert cut in (0, 1), "Invalid cut value (0 or 1)"
self.cut = cut
self.dontcut = self.cut ^ 1
self.allowDiagonals = allowDiagonals
self.prioritizeLeft = prioritizeLeft
self.flipHorizontally = flipHorizontally
self.flipVertically = flipVertically
self.frames = frames if not simulate else True
self.simulate = simulate # for frames
self.simulateDrawing = simulateDrawing
self.controller = micronInstance
self.takeoverControl = takeoverControl
print("Using '{}' at scale {}".format(self.filename, self.scale))
print("Cutting {} parts".format(["Black", "White"][self.cut]))
if self.allowDiagonals:
print("Allow Diagonals")
self.fast_velocity = 400
self.estimatedTime = None
self.estimatedVelocity = None
self.shutterTime = shutterTime / 1000
self.GUI_Object = GUI_Object
    def convert(self):
        """Convert the 1-bit input image into an ordered list of cut lines.

        Loads self.filename, optionally flips it to match the stage
        orientation, then repeatedly (a) scans for the next uncut pixel and
        (b) crawls along its neighbours (order given by self.directions),
        marking pixels as visited. Each crawl becomes one entry in
        self.lines; self.output records the line number of every pixel.
        """
        self.image = Image.open(self.filename)
        # Sanity Checks
        assert self.image.mode == '1', "Your image has mode {}. Please use a 1-bit indexed (mode 1) image, see https://pillow.readthedocs.io/en/stable/handbook/concepts.html#bands. If using GIMP to convert picture to 1-bit index, ensure 'remove colour from palette' is unchecked. ".format(self.image.mode)
        # Check if size is within limits if talking directly stage
        if isinstance(self.controller, micron.Micos):
            # We just check if its within stage limits
            shape = (self.image.size[0] * self.scale["x"], self.image.size[1] * self.scale["y"])
            if self.GUI_Object:
                if abs(shape[0]) > abs(self.controller.stage.xlim[1] - self.controller.stage.xlim[0]) or abs(shape[1]) > abs(self.controller.stage.ylim[1] - self.controller.stage.ylim[0]):
                    self.GUI_Object.picConvWarn.emit("Image too big!", "Size ({}, {}) is bigger than stage limits\n\nX = [{}, {}],\nY = [{}, {}]".format(*shape, *self.controller.stage.xlim, *self.controller.stage.ylim))
                    raise ImageError("Image too big! Size ({}, {}) is bigger than stage limits -- X = [{}, {}], Y = [{}, {}]".format(*shape, *self.controller.stage.xlim, *self.controller.stage.ylim))
            else:
                assert abs(shape[0]) <= abs(self.controller.stage.xlim[1] - self.controller.stage.xlim[0]) and abs(shape[1]) <= abs(self.controller.stage.ylim[1] - self.controller.stage.ylim[0]), "Image too big for stage."
        self.imageArray = np.array(self.image, dtype=int)
        self.GUI_Object.pDialog.setLabelText("Loaded image into array") if self.GUI_Object else None
        # We flip the image about the horizontal and vertical to match with stage position, if necessary
        if self.flipVertically:
            self.imageArray = np.flipud(self.imageArray)
            self.GUI_Object.pDialog.setLabelText("Flipped image vertically") if self.GUI_Object else None
        if self.flipHorizontally:
            self.imageArray = np.fliplr(self.imageArray)
            self.GUI_Object.pDialog.setLabelText("Flipped image horizontally") if self.GUI_Object else None
        # 1 = White
        # 0 = Black
        # Main algorithm part
        # We have a function that finds the next point to cut
        # From that point, we start crawling around for neighbours based on some settings (self.directions)
        # If a right block was found, the right diagonals will be prioritized, and vice versa for the left
        # Prioritize left searches from left to right instead of right to left, helping with diagonals that points Southwest (SW)
        # Origin is top left corner
        # Direction offsets are (dy, dx) tuples.
        RIGHT = ( 0, 1)
        LEFT = ( 0, -1)
        UP = ( -1, 0)
        DOWN = ( 1, 0)
        DIRSE = ( 1, 1)
        DIRSW = ( 1, -1)
        DIRNE = ( -1, 1)
        DIRNW = ( -1, -1)
        # Var defs
        self.shape = np.shape(self.imageArray) # (y, x)
        self.lines = []
        # These are the directions we prioritize
        if self.prioritizeLeft:
            self.directions = [LEFT, RIGHT, DOWN, DIRSW, DIRNW, UP, DIRSE, DIRNE] if self.allowDiagonals else [LEFT, RIGHT, DOWN, UP]
        else:
            self.directions = [RIGHT, LEFT, DOWN, DIRSE, DIRNE, UP, DIRSW, DIRNW] if self.allowDiagonals else [RIGHT, LEFT, DOWN, UP]
        self.output = np.zeros_like(self.imageArray)
        # Func defs
        # NOTE: these helpers take self explicitly and are called as
        # find_next(self, pt) rather than as bound methods.
        def find_next(self, point):
            # Find the next starting point to cut
            # Row-major scan from 'point' onward; direction of the inner
            # scan depends on prioritizeLeft.
            y, x = point
            if self.prioritizeLeft:
                while y < self.shape[0]:
                    x = self.shape[1] - 1
                    while x > 0: # CANNOT USE >= 0
                        # NOTE(review): column 0 is never scanned here — looks
                        # deliberate given the comment above, but confirm.
                        if self.imageArray[y, x] == self.cut:
                            return (y, x)
                        x -= 1
                    y += 1
            else:
                while y < self.shape[0]:
                    x = 0
                    while x < self.shape[1]:
                        if self.imageArray[y, x] == self.cut:
                            return (y, x)
                        x += 1
                    y += 1
            return None
        def find_neighbour(self, point):
            # Find a neighbour of the current point to cut, if exists
            # Also re-orders self.directions to keep following the same
            # diagonal bias that was just taken.
            y, x = point
            for tup in self.directions:
                coord = (y + tup[0], x + tup[1])
                if coord[0] < 0 or coord[1] < 0:
                    continue # skip this one
                # print("At {}, Looking at {} = {}".format(point, tup, self.imageArray[coord]))
                try:
                    if self.imageArray[coord] == self.cut:
                        if self.allowDiagonals:
                            if self.prioritizeLeft:
                                if tup in (LEFT, DIRSW, DIRNW):
                                    self.directions = [LEFT, RIGHT, DOWN, DIRSW, DIRNW, UP, DIRSE, DIRNE]
                                elif tup in (RIGHT, DIRSE, DIRNE):
                                    self.directions = [LEFT, RIGHT, DOWN, DIRSE, DIRNE, UP, DIRSW, DIRNW]
                            else:
                                if tup in (LEFT, DIRSW, DIRNW):
                                    self.directions = [RIGHT, LEFT, DOWN, DIRSW, DIRNW, UP, DIRSE, DIRNE]
                                elif tup in (RIGHT, DIRSE, DIRNE):
                                    self.directions = [RIGHT, LEFT, DOWN, DIRSE, DIRNE, UP, DIRSW, DIRNW]
                        # We do not need to change otherwise
                        return coord
                except IndexError as e:
                    # Off the bottom/right edge of the array; try next direction.
                    pass
            return None
        if self.frames:
            # Frame-dumping variant: saves one PNG per cut line under results/.
            self.resultCount = 0
            if platform.system() == "Windows":
                os.system("del results/*")
            else:
                os.system("rm results/*")
            def print_result(self, final = False):
                # normalize the output
                # np.savetxt('test.csv', self.output, delimiter=',')
                _max = np.max(self.output)
                _output = self.output / _max if _max != 0 else self.output
                try:
                    # cm is only imported on Linux; NameError falls back to greyscale.
                    _im = Image.fromarray(np.uint8(cm.gist_ncar(_output)*255))
                except NameError as e:
                    _im = Image.fromarray(np.uint8((_output)*255))
                if not final:
                    _im.save("results/test-{}.png".format(self.resultCount))
                else:
                    _im.save("picconv_test.png")
                # https://stackoverflow.com/questions/10965417/how-to-convert-numpy-array-to-pil-image-applying-matplotlib-colormap
                self.resultCount += 1
        else:
            def print_result(self, final):
                # final is there just to eat up the parameter passed in
                # normalize the output
                # np.savetxt('test.csv', self.output, delimiter=',')
                _max = np.max(self.output)
                _output = self.output / _max if _max != 0 else self.output
                try:
                    _im = Image.fromarray(np.uint8(cm.gist_ncar(_output)*255))
                except NameError as e:
                    _im = Image.fromarray(np.uint8((_output)*255))
                _im.save("picconv_test.png")
                # https://stackoverflow.com/questions/10965417/how-to-convert-numpy-array-to-pil-image-applying-matplotlib-colormap
        startPt = (0, 0) if not self.prioritizeLeft else (0, self.shape[1] - 1)
        lineNo = 0
        currLine = []
        totalPtsToCrawl = np.sum(self.imageArray)
        if self.dontcut:
            # dontcut is 1 in the array
            totalPtsToCrawl = self.imageArray.size - totalPtsToCrawl
        crawledPts = 0
        # Outer loop: one iteration per cut line.
        while True:
            if len(currLine):
                self.lines.append(currLine)
                currLine = []
            lineNo += 1
            nextPt = find_next(self, startPt)
            if nextPt is None:
                break
            startPt = nextPt
            self.GUI_Object.pDialog_setLabelText("At point ({1:>3}:{0:>3})".format(*nextPt)) if self.GUI_Object else None
            print("At point ({1:>3}:{0:>3})".format(*nextPt), end='\r')
            # We first set the cell as visited
            self.imageArray[startPt] = self.dontcut
            self.output[startPt] = lineNo
            currLine.append(startPt)
            crawledPts += 1
            ptg = (crawledPts/totalPtsToCrawl) * 100
            self.GUI_Object.pDialog_setValue(ptg / 2) if self.GUI_Object else None
            # We start to crawl from this pixel
            # For each pixel, we find the closest neighbour in order of priority
            # and mark as cut
            while True:
                print("At point ({:>3}:{:>3}) / ({:.1f}%)".format(*nextPt, ptg), end='\r')
                # if nextPt[0] < 0 or nextPt[1] < 0:
                # print("")
                # Used for catching erroronous pixels
                nextPt = find_neighbour(self, nextPt)
                if nextPt is None:
                    break
                # print("{}/{}".format(crawledPts, totalPtsToCrawl))
                self.imageArray[nextPt] = self.dontcut
                self.output[nextPt] = lineNo
                currLine.append(nextPt)
                crawledPts += 1
                ptg = (crawledPts/totalPtsToCrawl) * 100
                self.GUI_Object.pDialog_setValue(ptg / 2) if self.GUI_Object else None
                # We print here because we prioritize the progress bar
                self.GUI_Object.pDialog_setLabelText("At point ({1:>3}:{0:>3})".format(*nextPt)) if self.GUI_Object else None
            if self.frames:
                print_result(self)
        print_result(self, final = True)
        # self.image.format, self.image.size, self.image.mode
        # print(self.imageArray)
        print("\nDone")
        if self.simulate and platform.system() == "Linux":
            os.system("./generateMovie.sh")
def draw(self, velocity, **kwargs):
assert isinstance(velocity, (int, float)), "velocity must be int or float"
if not isinstance(self.controller, micron.Micos) and self.controller is not False:
# initialize the stage
self.controller = micron.Micos(**kwargs)
gotController = isinstance(self.controller, micron.Micos)
if gotController:
if not self.GUI_Object or not self.takeoverControl:
self.controller.setvel(self.fast_velocity)
self.controller.shutter.close()
xlim = abs(self.controller.stage.xlim[1] - self.controller.stage.xlim[0])
ylim = abs(self.controller.stage.ylim[1] - self.controller.stage.ylim[0])
try:
assert self.shape[0] < ylim, "Image exceed y-limit"
assert self.shape[1] < xlim, "Image exceed x-limit"
except AssertionError as e:
raise AssertionError(e)
except Exception as e:
raise RuntimeError("Did you forget to load self.shape in form of y, x? Error: {}".format(e))
# do a rmove to the (0,0) of the image and let the user move the sample to match the (0,0) point
# checking if the image will exceed limits
dy, dx = self.shape[0] / 2, self.shape[1] / 2
self.controller.rmove(x = dx * self.scale["x"], y = dy * self.scale["y"])
# Estimate time if not yet estimated
if self.estimatedTime is None or velocity != self.estimatedVelocity:
self.estimateTime(velocity = velocity)
deltaTime = datetime.timedelta(seconds = self.estimatedTime)
if not self.GUI_Object or not self.takeoverControl:
print("Given {} sec / shutter movement:\nEstimated time required \t {}".format(self.shutterTime, deltaTime))
if not qyn("This is the (0,0) of the image. Confirm?"):
print("Exiting...")
sys.exit(1)
now = datetime.datetime.now()
finish = now + deltaTime
if self.GUI_Object:
self.OP_Status = "Given {} sec / shutter movement:\nEstimated time required \t {}, Est End = {}.".format(self.shutterTime, deltaTime, finish)
self.GUI_Object.setOperationStatus(self.OP_Status)
else:
print("Printing starting now \t {}".format(now.strftime('%Y-%m-%d %H:%M:%S')))
print("Given {} sec / shutter movement:\nEstimated time required \t {}".format(self.shutterTime, deltaTime))
print("Esimated to finish at \t {}".format(finish.strftime('%Y-%m-%d %H:%M:%S')))
# SVGCODE
svgLine = ["M 0,0"]
svgMap = ["m", "l"] # 0 = off = m, 1 = on = l
# / SVGCODE
# do a rmove to the first point of self.lines from (0,0) of the image
dy, dx = self.lines[0][0][0], self.lines[0][0][1]
svgLine.append("m {},{}".format(dx * self.scale["x"], dy * self.scale["y"]))
if gotController:
self.controller.rmove(x = dx * self.scale["x"], y = dy * self.scale["y"])
self.controller.setvel(velocity)
# then we print
totalLines = len(self.commands)
for i, cmd in enumerate(self.commands):
if not self.GUI_Object:
print(cmd, "{}/{}".format(i + 1, totalLines))
else:
self.GUI_Object.setOperationStatus(self.OP_Status + "\nAt Segment {}/{}".format(i + 1, totalLines))
state = cmd[0] # laser on state
rmoves = cmd[1]
if gotController:
if state:
self.controller.shutter.open()
else:
self.controller.setvel(self.fast_velocity)
for rmove in rmoves:
# SVGCODE
svgLine.append("{} {},{}".format(svgMap[state], rmove[1] | |
<gh_stars>0
# PyAlgoTrade
#
# Copyright 2011-2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import collections
import matplotlib.pyplot as plt
from matplotlib import ticker
import six
from pyalgotrade import broker
from pyalgotrade import warninghelpers
def get_last_value(dataSeries):
    """Return the most recent value in *dataSeries*, or None if it is empty."""
    try:
        return dataSeries[-1]
    except IndexError:
        return None
def _filter_datetimes(dateTimes, fromDate=None, toDate=None):
class DateTimeFilter(object):
def __init__(self, fromDate=None, toDate=None):
self.__fromDate = fromDate
self.__toDate = toDate
def includeDateTime(self, dateTime):
if self.__toDate and dateTime > self.__toDate:
return False
if self.__fromDate and dateTime < self.__fromDate:
return False
return True
dateTimeFilter = DateTimeFilter(fromDate, toDate)
return [x for x in dateTimes if dateTimeFilter.includeDateTime(x)]
def _post_plot_fun(subPlot, mplSubplot):
    """Apply shared post-plot styling: legend layout and plain Y-axis labels."""
    # Build the legend from every series name; switch to two columns once
    # it grows past three entries.
    seriesNames = list(subPlot.getAllSeries().keys())
    mplSubplot.legend(seriesNames, shadow=True, loc="best", fontsize='xx-small', framealpha=0, ncol=1 if len(seriesNames) <= 3 else 2, columnspacing=0)
    # Don't scale the Y axis (no offset notation).
    mplSubplot.yaxis.set_major_formatter(ticker.ScalarFormatter(useOffset=False))
class Series(object):
    """Base class for a plottable series of (dateTime, value) points."""

    def __init__(self):
        self.__values = {}

    def getColor(self):
        # None means "let the subplot pick the next palette color".
        return None

    def addValue(self, dateTime, value):
        self.__values[dateTime] = value

    def getValue(self, dateTime):
        return self.__values.get(dateTime, None)

    def getValues(self):
        return self.__values

    def getMarker(self):
        raise NotImplementedError()

    def needColor(self):
        raise NotImplementedError()

    def getWidth(self):
        return 1

    def getStyle(self):
        return 'solid'

    def getMarkerSize(self):
        return 5

    def plot(self, mplSubplot, dateTimes, color):
        values = [self.getValue(dateTime) for dateTime in dateTimes]
        mplSubplot.plot(dateTimes, values, color=color, marker=self.getMarker(), linewidth=self.getWidth(), linestyle=self.getStyle(), markeredgewidth=0, ms=self.getMarkerSize())
class BuyMarker(Series):
    """Green upward triangle marking buy orders."""

    def getColor(self):
        return 'g'

    def getMarker(self):
        return "^"

    def needColor(self):
        return True
class SellMarker(Series):
    """Red downward triangle marking sell orders."""

    def getColor(self):
        return 'r'

    def getMarker(self):
        return "v"

    def needColor(self):
        return True
class CustomMarker(Series):
    """Series with a user-configurable marker (defaults to a point)."""

    def __init__(self):
        super(CustomMarker, self).__init__()
        self.__marker = "."

    def needColor(self):
        return True

    def setMarker(self, marker):
        self.__marker = marker

    def getMarker(self):
        return self.__marker
class LineMarker(Series):
    """Default line series with a configurable marker (defaults to a point)."""

    def __init__(self):
        super(LineMarker, self).__init__()
        self.__marker = "."

    def needColor(self):
        return True

    def setMarker(self, marker):
        self.__marker = marker

    def getMarker(self):
        return self.__marker
class SecondaryMarker(Series):
    """De-emphasized series: thin line and small markers."""

    def __init__(self):
        super(SecondaryMarker, self).__init__()
        self.__marker = "."

    def needColor(self):
        return True

    def setMarker(self, marker):
        self.__marker = marker

    def getMarker(self):
        return self.__marker

    def getWidth(self):
        return 0.1

    def getMarkerSize(self):
        return 2
class InstrumentMarker(Series):
    """Series of instrument bars, reduced to a single price per datetime."""

    def __init__(self):
        super(InstrumentMarker, self).__init__()
        # None -> use bar.getPrice(); True -> adjusted close; False -> close.
        self.__useAdjClose = None
        self.__marker = "."

    def needColor(self):
        return True

    def setMarker(self, marker):
        self.__marker = marker

    def getMarker(self):
        return self.__marker

    def setUseAdjClose(self, useAdjClose):
        # Force close/adj_close instead of price.
        self.__useAdjClose = useAdjClose

    def getValue(self, dateTime):
        # If not using candlesticks, reduce the stored bar to a closing price.
        bar = Series.getValue(self, dateTime)
        if bar is None:
            return None
        if self.__useAdjClose is None:
            return bar.getPrice()
        return bar.getAdjClose() if self.__useAdjClose else bar.getClose()
class HistogramMarker(Series):
    """Series rendered as a bar chart; bar colors may depend on the value."""

    def needColor(self):
        return True

    def getColorForValue(self, value, default):
        # Subclasses may override to color bars per value.
        return default

    def plot(self, mplSubplot, dateTimes, color):
        validDateTimes = []
        values = []
        colors = []
        for dateTime in dateTimes:
            value = self.getValue(dateTime)
            if value is None:
                continue
            validDateTimes.append(dateTime)
            values.append(value)
            colors.append(self.getColorForValue(value, color))
        mplSubplot.bar(validDateTimes, values, color=colors)
class MACDMarker(HistogramMarker):
    """MACD histogram: green bars for non-negative values, red otherwise."""

    def getColorForValue(self, value, default):
        return "g" if value >= 0 else "r"
class Subplot(object):
""" """
colors = ['b', 'c', 'm', 'y', 'k']
    def __init__(self):
        self.__series = {}  # Series by name.
        self.__callbacks = {}  # Maps a function to a Series.
        self.__nextColor = 0  # Index into Subplot.colors for auto-assigned colors.
def __getColor(self, series):
ret = series.getColor()
if ret is None:
ret = Subplot.colors[self.__nextColor % len(Subplot.colors)]
self.__nextColor += 1
return ret
def isEmpty(self):
return len(self.__series) == 0
def getAllSeries(self):
return self.__series
def addDataSeries(self, label, dataSeries, defaultClass=LineMarker):
"""Add a DataSeries to the subplot.
:param label: A name for the DataSeries values.
:type label: string.
:param dataSeries: The DataSeries to add.
:type dataSeries: :class:`pyalgotrade.dataseries.DataSeries`.
"""
callback = lambda bars: get_last_value(dataSeries)
self.__callbacks[callback] = self.getSeries(label, defaultClass)
return callback
def addAndProcessDataSeries(self, label, dataSeries, defaultClass=LineMarker):
callback = self.addDataSeries(label, dataSeries, defaultClass)
series = self.__callbacks[callback]
datetimes = dataSeries.getDateTimes()
for i in range(0, len(datetimes)):
series.addValue(datetimes[i], dataSeries[i])
def addCallback(self, label, callback, defaultClass=LineMarker):
"""Add a callback that will be called on each bar.
:param label: A name for the series values.
:type label: string.
:param callback: A function that receives a :class:`pyalgotrade.bar.Bars` instance as a parameter and returns a number or None.
"""
self.__callbacks[callback] = self.getSeries(label, defaultClass)
def addLine(self, label, level):
"""Add a horizontal line to the plot.
:param label: A label.
:type label: string.
:param level: The position for the line.
:type level: int/float.
"""
self.addCallback(label, lambda x: level)
def onBars(self, bars):
dateTime = bars.getDateTime()
for cb, series in six.iteritems(self.__callbacks):
series.addValue(dateTime, cb(bars))
def getSeries(self, name, defaultClass=LineMarker):
try:
ret = self.__series[name]
except KeyError:
ret = defaultClass()
self.__series[name] = ret
return ret
def getCustomMarksSeries(self, name):
return self.getSeries(name, CustomMarker)
def plot(self, mplSubplot, dateTimes, postPlotFun=_post_plot_fun):
for series in self.__series.values():
color = None
if series.needColor():
color = self.__getColor(series)
series.plot(mplSubplot, dateTimes, color)
postPlotFun(self, mplSubplot)
class InstrumentSubplot(Subplot):
    """A Subplot responsible for plotting an instrument."""
    def __init__(self, instrument, plotBuySell):
        super(InstrumentSubplot, self).__init__()
        self.__instrument = instrument
        self.__plotBuySell = plotBuySell  # mark buy/sell executions on the chart
        self.__instrumentSeries = self.getSeries(instrument, InstrumentMarker)
    def setUseAdjClose(self, useAdjClose):
        # Forwarded to the instrument series (close vs. adjusted close).
        self.__instrumentSeries.setUseAdjClose(useAdjClose)
    def onBars(self, bars):
        # Record this instrument's bar (when present) at the bar's datetime.
        super(InstrumentSubplot, self).onBars(bars)
        bar = bars.getBar(self.__instrument)
        if bar:
            dateTime = bars.getDateTime()
            self.__instrumentSeries.addValue(dateTime, bar)
    def onOrderEvent(self, broker_, orderEvent):
        # Mark (partially) filled orders for this instrument as buy/sell
        # points at their execution price.
        order = orderEvent.getOrder()
        if self.__plotBuySell and orderEvent.getEventType() in (broker.OrderEvent.Type.PARTIALLY_FILLED, broker.OrderEvent.Type.FILLED) and order.getInstrument() == self.__instrument:
            action = order.getAction()
            execInfo = orderEvent.getEventInfo()
            if action in [broker.Order.Action.BUY, broker.Order.Action.BUY_TO_COVER]:
                self.getSeries("Buy", BuyMarker).addValue(execInfo.getDateTime(), execInfo.getPrice())
            elif action in [broker.Order.Action.SELL, broker.Order.Action.SELL_SHORT]:
                self.getSeries("Sell", SellMarker).addValue(execInfo.getDateTime(), execInfo.getPrice())
class StrategyPlotter(object):
    """Class responsible for plotting a strategy execution.

    :param strat: The strategy to plot.
    :type strat: :class:`pyalgotrade.strategy.BaseStrategy`.
    :param plotAllInstruments: Set to True to get a subplot for each instrument available.
    :type plotAllInstruments: boolean.
    :param plotBuySell: Set to True to get the buy/sell events plotted for each instrument available.
    :type plotBuySell: boolean.
    :param plotPortfolio: Set to True to get the portfolio value (shares + cash) plotted.
    :type plotPortfolio: boolean.
    """
    def __init__(self, strat, plotAllInstruments=True, plotBuySell=True, plotPortfolio=True):
        # Every datetime ever seen; later used as the shared x-axis.
        self.__dateTimes = set()
        self.__plotAllInstruments = plotAllInstruments
        self.__plotBuySell = plotBuySell
        self.__barSubplots = {}  # InstrumentSubplot by instrument.
        self.__namedSubplots = collections.OrderedDict()  # user subplots, insertion order
        self.__portfolioSubplot = None
        if plotPortfolio:
            self.__portfolioSubplot = Subplot()
        # Hook into the strategy so data is collected while it runs.
        strat.getBarsProcessedEvent().subscribe(self.__onBarsProcessed)
        strat.getBroker().getOrderUpdatedEvent().subscribe(self.__onOrderEvent)
def __checkCreateInstrumentSubplot(self, instrument):
if instrument not in self.__barSubplots:
self.getInstrumentSubplot(instrument)
    def __onBarsProcessed(self, strat, bars):
        """Collect data from every subplot each time the strategy processes bars."""
        dateTime = bars.getDateTime()
        # Remember every datetime so all subplots share the same x-axis.
        self.__dateTimes.add(dateTime)
        if self.__plotAllInstruments:
            for instrument in bars.getInstruments():
                self.__checkCreateInstrumentSubplot(instrument)
        # Notify named subplots.
        for subplot in self.__namedSubplots.values():
            subplot.onBars(bars)
        # Notify bar subplots.
        for subplot in self.__barSubplots.values():
            subplot.onBars(bars)
        # Feed the portfolio evolution subplot.
        if self.__portfolioSubplot:
            self.__portfolioSubplot.getSeries("Portfolio").addValue(dateTime, strat.getBroker().getEquity())
            # This is in case additional dataseries were added to the portfolio subplot.
            self.__portfolioSubplot.onBars(bars)
    def __onOrderEvent(self, broker_, orderEvent):
        # Notify BarSubplots so they can mark buys/sells on the price chart.
        for subplot in self.__barSubplots.values():
            subplot.onOrderEvent(broker_, orderEvent)
    def getSubplots(self):
        """Return the dict of per-instrument (bar) subplots, keyed by instrument."""
        return self.__barSubplots
def getInstrumentSubplot(self, instrument):
"""Returns the InstrumentSubplot for a given instrument
:rtype: :class:`InstrumentSubplot`.
"""
try:
ret = self.__barSubplots[instrument]
except KeyError:
ret = InstrumentSubplot(instrument, self.__plotBuySell)
self.__barSubplots[instrument] = ret
return ret
def getOrCreateSubplot(self, name):
"""Returns a Subplot by name. If the subplot doesn't exist, it gets created.
:param name: The name of the Subplot to get or create.
:type name: string.
:rtype: :class:`Subplot`.
"""
try:
ret = self.__namedSubplots[name]
except KeyError:
ret = Subplot()
self.__namedSubplots[name] = ret
return ret
    def getPortfolioSubplot(self):
        """Returns the subplot where the portfolio values get plotted.

        May be None when the plotter was built with plotPortfolio=False.

        :rtype: :class:`Subplot`.
        """
        return self.__portfolioSubplot
    def __buildFigureImpl(self, fromDateTime=None, toDateTime=None, postPlotFun=_post_plot_fun):
        """Build the matplotlib Figure with one stacked, shared-x axes per subplot."""
        # Restrict the shared x-axis to the requested window, sorted ascending.
        dateTimes = _filter_datetimes(self.__dateTimes, fromDateTime, toDateTime)
        dateTimes.sort()
        # Plot order: instrument (bar) subplots, then named ones, portfolio last.
        subplots = []
        subplots.extend(self.__barSubplots.values())
        subplots.extend(self.__namedSubplots.values())
        if self.__portfolioSubplot is not None:
            subplots.append(self.__portfolioSubplot)
        # Build each subplot.
        fig, axes = plt.subplots(nrows=len(subplots), sharex=True, squeeze=False)
        mplSubplots = []
        for i, subplot in enumerate(subplots):
            axesSubplot = axes[i][0]
            if not subplot.isEmpty():
                mplSubplots.append(axesSubplot)
                subplot.plot(axesSubplot, dateTimes, postPlotFun=postPlotFun)
                axesSubplot.grid(True)
        return (fig, mplSubplots)
    def buildFigure(self, fromDateTime=None, toDateTime=None):
        """Deprecated since v0.18; use :meth:`buildFigureAndSubplots` instead."""
        # Deprecated in v0.18.
        warninghelpers.deprecation_warning("buildFigure will be deprecated in the next version. Use buildFigureAndSubplots.", stacklevel=2)
        fig, _ = self.buildFigureAndSubplots(fromDateTime, toDateTime)
        return fig
    def buildFigureAndSubplots(self, fromDateTime=None, toDateTime=None, postPlotFun=_post_plot_fun):
        """
        Build a matplotlib.figure.Figure with the subplots. Must be called after running the strategy.

        :param fromDateTime: An optional starting datetime.datetime. Everything before it won't get plotted.
        :type fromDateTime: datetime.datetime
        :param toDateTime: An optional ending datetime.datetime. Everything after it won't get plotted.
        :type toDateTime: datetime.datetime
        :rtype: A 2 element tuple with matplotlib.figure.Figure and subplots.
        """
        fig, mplSubplots = self.__buildFigureImpl(fromDateTime, toDateTime, postPlotFun=postPlotFun)
        # Rotate/align the shared x-axis date labels for readability.
        fig.autofmt_xdate()
        return fig, mplSubplots
def plot(self, fromDateTime=None, toDateTime=None, postPlotFun=_post_plot_fun):
"""
Plot | |
<filename>PlatformAgents/com/cognizant/devops/platformagents/agents/alm/jira/JiraAgent.py<gh_stars>0
#-------------------------------------------------------------------------------
# Copyright 2017 Cognizant Technology Solutions
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#-------------------------------------------------------------------------------
'''
Created on Jun 22, 2016
@author: 463188
'''
from datetime import datetime as dateTime2
import datetime
import copy
from dateutil import parser
from com.cognizant.devops.platformagents.core.BaseAgent import BaseAgent
class JiraAgent(BaseAgent):
    """Agent that polls a Jira server's REST API and publishes issue,
    changelog, sprint and backlog records via the BaseAgent publish hooks."""
def process(self):
self.userid = self.config.get("userid", '')
self.passwd = self.config.get("passwd", '')
baseUrl = self.config.get("baseUrl", '')
startFrom = self.config.get("startFrom", '')
lastUpdated = self.tracking.get("lastupdated", startFrom)
responseTemplate = self.getResponseTemplate()
fields = self.extractFields(responseTemplate)
jiraIssuesUrl = baseUrl+"?jql=updated>='"+lastUpdated+"' ORDER BY updated ASC&maxResults="+str(self.config.get("dataFetchCount", 1000))+'&fields='+fields
changeLog = self.config.get('changeLog', None)
if changeLog:
jiraIssuesUrl = jiraIssuesUrl + '&expand=changelog'
changeLogFields = changeLog['fields']
changeLogMetadata = changeLog['metadata']
changeLogResponseTemplate = changeLog['responseTemplate']
startFromDate = parser.parse(startFrom)
total = 1
maxResults = 0
startAt = 0
updatetimestamp = None
sprintField = self.config.get("sprintField", None)
while (startAt + maxResults) < total:
data = []
workLogData = []
#jiraIssuesUrl = self.buildJiraRestUrl(baseUrl, startFrom, fields) + '&startAt='+str(startAt + maxResults)
response = self.getResponse(jiraIssuesUrl+'&startAt='+str(startAt + maxResults), 'GET', self.userid, self.passwd, None)
jiraIssues = response["issues"]
for issue in jiraIssues:
parsedIssue = self.parseResponse(responseTemplate, issue)
if sprintField:
self.processSprintInformation(parsedIssue, issue, sprintField, self.tracking)
data += parsedIssue
if changeLog:
workLogData += self.processChangeLog(issue, changeLogFields, changeLogResponseTemplate, startFromDate)
maxResults = response['maxResults']
total = response['total']
startAt = response['startAt']
if len(jiraIssues) > 0:
updatetimestamp = jiraIssues[len(jiraIssues) - 1]["fields"]["updated"]
dt = parser.parse(updatetimestamp)
fromDateTime = dt + datetime.timedelta(minutes=01)
fromDateTime = fromDateTime.strftime('%Y-%m-%d %H:%M')
self.tracking["lastupdated"] = fromDateTime
jiraKeyMetadata = {"dataUpdateSupported" : True,"uniqueKey" : ["key"]}
self.publishToolsData(data, jiraKeyMetadata)
#self.publishToolsData(data)
if len(workLogData) > 0:
self.publishToolsData(workLogData, changeLogMetadata)
self.updateTrackingJson(self.tracking)
else:
break
def buildJiraRestUrl(self, baseUrl, startFrom, fields):
lastUpdatedDate = self.tracking.get("lastupdated", startFrom)
endDate = parser.parse(lastUpdatedDate) + datetime.timedelta(hours=24)
endDate = endDate.strftime('%Y-%m-%d %H:%M')
jiraIssuesUrl = baseUrl+"?jql=updated>='"+lastUpdatedDate+"' AND updated<'"+endDate+"' ORDER BY updated ASC&maxResults="+str(self.config.get("dataFetchCount", 1000))+'&fields='+fields
changeLog = self.config.get('changeLog', None)
if changeLog:
jiraIssuesUrl = jiraIssuesUrl + '&expand=changelog'
return jiraIssuesUrl
def processChangeLog(self, issue, workLogFields, responseTemplate, startFromDate):
changeLog = issue.get('changelog', None)
workLogData = []
injectData = {'issueKey' : issue['key'] }
if changeLog:
histories = changeLog.get('histories', [])
for change in histories:
data = self.parseResponse(responseTemplate, change, injectData)[0]
changeDate = parser.parse(data['changeDate'].split('.')[0]);
if changeDate > startFromDate:
items = change['items']
for item in items:
if item['field'] in workLogFields:
dataCopy = copy.deepcopy(data)
dataCopy['changedfield'] = item['field']
dataCopy['fromString'] = item['fromString']
dataCopy['toString'] = item['toString']
dataCopy['from'] = item['from']
dataCopy['to'] = item['to']
workLogData.append(dataCopy)
return workLogData
def scheduleExtensions(self):
extensions = self.config.get('dynamicTemplate', {}).get('extensions', None)
if extensions:
#backlog = extensions.get('backlog', None)
#if backlog:
# self.registerExtension('backlog', self.retrieveBacklogDetails, backlog.get('runSchedule'))
sprints = extensions.get('sprints', None)
if sprints:
self.registerExtension('sprints', self.retrieveSprintDetails, sprints.get('runSchedule'))
sprintReport = extensions.get('sprintReport', None)
if sprintReport:
self.registerExtension('sprintReport', self.retrieveSprintReports, sprintReport.get('runSchedule'))
releaseDetails = extensions.get('releaseDetails', None)
if releaseDetails:
self.registerExtension('releaseDetails', self.retrieveReleaseDetails, releaseDetails.get('runSchedule'))
def extractFields(self, responseTemplate):
fieldsJson = responseTemplate.get("fields", None)
fieldsParam = ''
if fieldsJson:
for field in fieldsJson:
fieldsParam += field + ','
fieldsParam = fieldsParam[:-1]
if self.config.get("sprintField", None):
fieldsParam += ','+ self.config.get("sprintField")
return fieldsParam
    def processSprintInformation(self, parsedIssue, issue, sprintField, tracking):
        """Extract board/sprint ids from the issue's raw sprint field and record
        them both on the parsed issue and in the persistent tracking dict.

        The custom sprint field appears to hold strings of the form
        "...[id=...,rapidViewId=...,...]"; the bracketed key=value pairs are
        parsed by hand below — TODO confirm against the configured field.
        """
        if sprintField:
            boardsTracking = tracking.get('boards', None)
            if boardsTracking is None:
                boardsTracking = {}
                tracking['boards'] = boardsTracking
            sprintDetails = issue.get("fields", {}).get(sprintField, None)
            if sprintDetails:
                sprints = []
                boards = []
                for sprint in sprintDetails:
                    sprintData = {}
                    # Keep only the text between '[' and the trailing ']'.
                    sprintDetail = sprint.split("[")[1][:-1]
                    sprintPropertieTokens = sprintDetail.split(",")
                    for propertyToken in sprintPropertieTokens:
                        propertyKeyValToken = propertyToken.split("=")
                        if len(propertyKeyValToken) > 1:
                            sprintData[propertyKeyValToken[0]] = propertyKeyValToken[1]
                    boardId = sprintData.get('rapidViewId')
                    sprintId = sprintData.get('id')
                    # Ensure tracking has boards[boardId]['sprints'][sprintId] = {}.
                    boardTracking = boardsTracking.get(boardId, None)
                    if boardTracking is None:
                        boardTracking = {}
                        boardsTracking[boardId] = boardTracking
                    sprintTracking = boardTracking.get('sprints', None)
                    if sprintTracking is None:
                        sprintTracking = {}
                        boardTracking['sprints'] = sprintTracking
                    if sprintTracking.get(sprintId, None) is None:
                        sprintTracking[sprintId] = {}
                    # De-duplicated lists attached to the parsed issue below.
                    if boardId not in boards:
                        boards.append(boardId)
                    if sprintId not in sprints:
                        sprints.append(sprintId)
                parsedIssue[0]['sprints'] = sprints
                parsedIssue[0]['boards'] = boards
                #if len(boards) > 1 :
                #    for board in boards:
                #        boardTracking = boardsTracking.get(board)
                #        sprintTracking = boardTracking.get('sprints')
                #        for sprint in sprints:
                #            if sprintTracking.get(sprint, None) is None:
                #                sprintTracking[sprint] = {}
    def retrieveSprintDetails(self):
        """Extension job: publish a summary record for every tracked sprint."""
        sprintDetails = self.config.get('dynamicTemplate', {}).get('extensions', {}).get('sprints', None)
        # NOTE(review): boardApiUrl is read before the None check below; a
        # missing 'sprints' config section would raise AttributeError here.
        boardApiUrl = sprintDetails.get('boardApiUrl')
        boards = self.tracking.get('boards', None)
        if sprintDetails and boards:
            responseTemplate = sprintDetails.get('sprintResponseTemplate', None)
            sprintMetadata = sprintDetails.get('sprintMetadata')
            for boardId in boards:
                data = []
                board = boards[boardId]
                boardRestUrl = boardApiUrl + '/' + str(boardId)
                try:
                    # Refresh the board's name/type; clear any stale error flag.
                    boardResponse = self.getResponse(boardRestUrl, 'GET', self.userid, self.passwd, None)
                    board['name'] = boardResponse.get('name')
                    board['type'] = boardResponse.get('type')
                    board.pop('error', None)
                except Exception as ex:
                    board['error'] = str(ex)
                #Get the individual sprint details.
                sprints = board.get('sprints')
                for sprint in sprints:
                    sprintApiUrl = sprintDetails.get('sprintApiUrl')+'/'+sprint
                    try:
                        sprintResponse = self.getResponse(sprintApiUrl, 'GET', self.userid, self.passwd, None)
                        data.append(self.parseResponse(responseTemplate, sprintResponse)[0])
                    except Exception:
                        # Best-effort: skip sprints that cannot be fetched.
                        pass;
                if len(data) > 0 :
                    self.publishToolsData(data, sprintMetadata)
                # NOTE(review): this 'continue' makes everything below in this
                # loop unreachable; the paginated /sprint listing appears to be
                # superseded by the per-sprint fetch above. Dead code kept as-is.
                continue
                sprintsUrl = boardRestUrl + '/sprint?startAt='
                startAt = 0
                isLast = False
                injectData = {'boardName' : board['name']}
                while not isLast:
                    try:
                        sprintsResponse = self.getResponse(sprintsUrl+str(startAt), 'GET', self.userid, self.passwd, None)
                    except Exception as ex3:
                        #board['error'] = str(ex3)
                        break
                    isLast = sprintsResponse['isLast']
                    startAt = startAt + sprintsResponse['maxResults']
                    sprintValues = sprintsResponse['values']
                    parsedSprints = self.parseResponse(responseTemplate, sprintValues, injectData)
                    for parsedSprint in parsedSprints:
                        if str(parsedSprint.get('boardId')) == str(boardId):
                            data.append(parsedSprint)
                if len(data) > 0 :
                    self.publishToolsData(data, sprintMetadata)
    def retrieveBacklogDetails(self):
        """Extension job: publish the backlog issue keys for every tracked board.

        Pages through each board's /backlog resource and emits one record per
        backlog issue (issue key, project key, board name and id).
        """
        backlogDetails = self.config.get('dynamicTemplate', {}).get('extensions', {}).get('backlog', None)
        # NOTE(review): boardApiUrl/backlogMetadata are read before the None
        # check; a missing 'backlog' config section would raise AttributeError.
        boardApiUrl = backlogDetails.get('boardApiUrl')
        boards = self.tracking.get('boards', None)
        backlogMetadata = backlogDetails.get('backlogMetadata')
        if backlogDetails and boards:
            for boardId in boards:
                data = []
                board = boards[boardId]
                boardRestUrl = boardApiUrl + '/' + str(boardId)
                try:
                    # Refresh the board's name/type; clear any stale error flag.
                    boardResponse = self.getResponse(boardRestUrl, 'GET', self.userid, self.passwd, None)
                    board['name'] = boardResponse.get('name')
                    board['type'] = boardResponse.get('type')
                    board.pop('error', None)
                    backlogUrl = boardRestUrl + '/backlog?fields=[]&startAt='
                    startAt = 0
                    isLast = False
                    while not isLast:
                        backlogResponse = self.getResponse(backlogUrl+str(startAt), 'GET', self.userid, self.passwd, None)
                        # NOTE(review): '>' (not '>=') against total, so a final
                        # exactly-full page may trigger one extra request.
                        isLast = (startAt + backlogResponse['maxResults']) > backlogResponse['total']
                        startAt = startAt + backlogResponse['maxResults']
                        backlogIssues = backlogResponse['issues']
                        for backlogIssue in backlogIssues:
                            issue = {}
                            issue['backlogIssueKey'] = backlogIssue.get('key')
                            # Jira keys look like "PROJ-123"; the prefix is the project.
                            issue['projectKey'] = backlogIssue.get('key').split('-')[0]
                            issue['boardName'] = board['name']
                            issue['boardId'] = boardId
                            data.append(issue)
                    if len(data) > 0 :
                        self.publishToolsData(data, backlogMetadata)
                except Exception as ex:
                    # Record the failure on the board so it surfaces in tracking.
                    board['error'] = str(ex)
        #Get the individual sprint details.
    def retrieveSprintReports(self):
        """Extension job: publish per-issue sprint-report records.

        For each tracked board/sprint not yet marked closed, fetch the sprint
        report and publish one record per issue bucket (completed, not
        completed, punted, completed in another sprint).
        """
        sprintDetails = self.config.get('dynamicTemplate', {}).get('extensions', {}).get('sprintReport', None)
        # NOTE(review): boardApiUrl is read before the None check; a missing
        # 'sprintReport' config section would raise AttributeError here.
        boardApiUrl = sprintDetails.get('boardApiUrl')
        boards = self.tracking.get('boards', None)
        if sprintDetails and boards:
            sprintReportUrl = sprintDetails.get('sprintReportUrl', None)
            responseTemplate = sprintDetails.get('sprintReportResponseTemplate', None)
            #sprintMetadata = sprintDetails.get('sprintMetadata')
            relationMetadata = sprintDetails.get('relationMetadata')
            for boardId in boards:
                board = boards[boardId]
                boardName = board.get('name', None)
                if boardName is None:
                    # Board name unknown yet: fetch it; on failure skip this board.
                    boardRestUrl = boardApiUrl + '/' + str(boardId)
                    try:
                        boardResponse = self.getResponse(boardRestUrl, 'GET', self.userid, self.passwd, None)
                        board['name'] = boardResponse.get('name')
                        board['type'] = boardResponse.get('type')
                        board.pop('error', None)
                    except Exception as ex:
                        board['error'] = str(ex)
                        continue
                sprints = board['sprints']
                for sprintId in sprints:
                    sprint = sprints[sprintId]
                    #For velocity, only the completed sprints are considered
                    #extract the project key from the sprint reports to allow the data tagging
                    sprintClosed = sprint.get('closed', False)
                    if not sprintClosed:
                        sprintReportRestUrl = sprintReportUrl + '?rapidViewId='+str(boardId)+'&sprintId='+str(sprintId)
                        sprintReportResponse = None
                        try:
                            sprintReportResponse = self.getResponse(sprintReportRestUrl, 'GET', self.userid, self.passwd, None)
                        except Exception as ex:
                            sprint['error'] = str(ex)
                        if sprintReportResponse:
                            content = sprintReportResponse.get('contents', None)
                            # Once Jira reports the sprint CLOSED, remember it so
                            # later runs stop re-fetching this sprint.
                            if sprintReportResponse.get('sprint', {}).get('state', 'OPEN') == 'CLOSED':
                                sprint['closed'] = True
                            injectData = { 'boardId' : int(boardId), 'sprintId' : int(sprintId) }
                            data = []
                            data += self.addSprintDetails(responseTemplate, content, 'completedIssues', injectData)
                            data += self.addSprintDetails(responseTemplate, content, 'issuesNotCompletedInCurrentSprint', injectData)
                            data += self.addSprintDetails(responseTemplate, content, 'puntedIssues', injectData)
                            data += self.addSprintDetails(responseTemplate, content, 'issuesCompletedInAnotherSprint', injectData)
                            if len(data) > 0:
                                #self.publishToolsData(self.getSprintInformation(sprintReportResponse, boardId, sprintId, board['name'], board['type']), sprintMetadata)
                                self.publishToolsData(data, relationMetadata)
            # Persist the (possibly updated) closed-sprint flags.
            self.updateTrackingJson(self.tracking)
def getSprintInformation(self, content, boardId, sprintId, boardName, boardType):
data = []
sprint = content.get('sprint')
sprint.pop('linkedPagesCount', None)
sprint.pop('remoteLinks', None)
sprint.pop('sequence', None)
sprint.pop('id', None)
sprint['boardId'] = boardId
sprint['sprintId'] = sprintId
sprint['boardName'] = boardName
sprint['boardType'] = boardType
sprint['sprintName'] = sprint.get('name')
sprint.pop('name', None)
timeStampFormat = '%d/%b/%y'
startDate = sprint.get('startDate', None)
if startDate and startDate != 'None':
sprint['startDateEpoch'] = self.getRemoteDateTime(dateTime2.strptime(startDate.split(' ')[0], timeStampFormat)).get('epochTime')
endDate = sprint.get('endDate', None)
if endDate and endDate != 'None':
sprint['endDateEpoch'] = self.getRemoteDateTime(dateTime2.strptime(endDate.split(' ')[0], timeStampFormat)).get('epochTime')
completeDate = sprint.get('completeDate', None)
if completeDate and completeDate != 'None':
sprint['completeDateEpoch'] = self.getRemoteDateTime(dateTime2.strptime(completeDate.split(' ')[0], timeStampFormat)).get('epochTime')
data.append(sprint)
return data
def addSprintDetails(self, responseTemplate, content, sprintIssueRegion, injectData):
issueKeysAddedDuringSprint = content.get('issueKeysAddedDuringSprint', {})
issues = content.get(sprintIssueRegion, None)
parsedIssues = []
if issues:
parsedIssues = self.parseResponse(responseTemplate, issues, injectData)
for issue in parsedIssues:
issueKey = issue['key']
issue['addedDuringSprint'] = issueKeysAddedDuringSprint.get(issueKey, False)
| |
<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division
import pytz
from calendar import monthrange
from datetime import datetime, timedelta
from decimal import Decimal
from functools import partial
from tzlocal import get_localzone
from django.db.models import Max, Min, Sum
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.db import connection
from django.shortcuts import redirect
from django.utils import timezone
from django.views.generic import TemplateView
from .piedmont import (
is_designated_holiday,
is_summer,
piedmont_bill,
piedmont_tariff,
)
from addons.efergy.models import MinuteData
from addons.efergy.management.commands.data_coverage import get_minute_coverage
from addons.wunderground.models import WeatherData
from .utils import safe_cache_key
from .tariffs import is_weekend
# Tariff-type keys used throughout this module (see categorize_days/get_hours).
WEEKEND = "weekend"
SUMMER = "summer"
WINTER = "winter"
# Decimal zero, reused as the default for kWh/temperature aggregates.
ZERO = Decimal(0.0)
def get_day_data(day, use_cache=True):
    """Given a single day, return summary data, but in a cached manner.

    Aggregates kWh consumed plus the day's high/low outside temperatures,
    memoized in the Django cache under a per-date key unless *use_cache*
    is False.
    """
    cache_key = safe_cache_key("joule:views:get_day_cache:" + str(day.date()))
    data = cache.get(cache_key, None) if use_cache else None
    if data:
        return data
    # One day expressed as an inclusive range: [day, day + 24h - 1s].
    day_span = (day, day + timedelta(days=1, seconds=-1))
    temps = WeatherData.objects.filter(
        timestamp__range=day_span
    ).aggregate(high=Max('outside_temp'), low=Min('outside_temp'))
    summary = MinuteData.objects.filter(
        timestamp__range=day_span
    ).aggregate(watts=Sum('watts'))
    # Watt-minutes -> kWh.
    kwhrs = Decimal(summary["watts"]) / Decimal(60000.0) if summary["watts"] else ZERO
    data = {
        "date": day,
        "kwhrs": kwhrs,
        "high": temps["high"],
        "low": temps["low"],
        # Percent-style scaling used by the templates; presumably assumes
        # temperatures top out around 120 — TODO confirm.
        "high_pct": (Decimal(temps["high"]) / Decimal(1.2) if temps["high"] else ZERO),
        "low_pct": Decimal(temps["low"]) / Decimal(1.2) if temps["low"] else ZERO,
    }
    if use_cache:
        cache.set(cache_key, data)
    return data
def get_daily_data(start_day, end_day=None, today=None):
    """Return per-day summary dicts for start_day..end_day (inclusive).

    Each entry comes from get_day_data(); days after *today* get zeroed
    placeholders, and every entry gains a "kwhrs_pct" key scaled against
    the range's maximum consumption.

    :param today: The date treated as "now" (required). Days past it are
                  projected as zeros; today itself is never read from cache.
    """
    if today is None:
        raise RuntimeError("get_daily_data requires 'today'")
    # BUG FIX: 'today' used to be unconditionally overwritten with
    # timezone.now().date(), silently ignoring the caller's value and making
    # the RuntimeError above pointless. Honor the argument instead.
    if isinstance(today, datetime):
        today = today.date()
    if end_day is None:
        end_day = start_day
    if type(start_day) is not datetime:
        start_day = datetime.combine(start_day, datetime.min.time())
    if type(end_day) is not datetime:
        end_day = datetime.combine(end_day, datetime.min.time())
    num_days = 0 if start_day > end_day else (end_day - start_day).days
    day_list = []
    max_kwhrs = Decimal(0.0)
    for day_no in range(0, num_days + 1):
        day = start_day + timedelta(days=day_no)
        if day.date() <= today:
            # Today's partial data must not be served from cache.
            data = get_day_data(day, use_cache=(day.date() < today))
        else:
            # Future day: zeroed placeholder.
            data = {
                "date": day, "kwhrs": ZERO, "high": ZERO, "low": ZERO,
                "high_pct": ZERO, "low_pct": ZERO,
            }
        if data["kwhrs"] > max_kwhrs:
            max_kwhrs = data["kwhrs"]
        day_list.append(data)
    for day in day_list:
        # BUG FIX: guard against ZeroDivisionError when no day in the range
        # has any recorded consumption.
        pct = Decimal(100.0) * day["kwhrs"] / max_kwhrs if max_kwhrs else ZERO
        day.update({"kwhrs_pct": pct})
    return day_list
def get_day_type(day):
    """
    Returns the type of "tariff" day for the given date/datetime.

    :param day: Can be of type «date» or «datetime»
    :return: One of constants: {WEEKEND, SUMMER, WINTER}
    """
    # Normalize a bare date to midnight so the helpers get a datetime.
    if type(day) is not datetime:
        day = datetime.combine(day, datetime.min.time())
    # Designated holidays are billed like weekend days.
    if is_weekend(day) or is_designated_holiday(day):
        return WEEKEND
    return SUMMER if is_summer(day) else WINTER
def categorize_days(start_day, end_day=None):
    """
    Separates the days provided in the range «start_day» to «end_day»
    (inclusive) into three lists keyed WEEKEND, SUMMER and WINTER.

    summer and winter follow Piedmont's schedule; weekend days are those on
    the weekend or Piedmont-designated holidays.

    :return: a dict containing 3 lists of dates.
    """
    if end_day is None:
        end_day = start_day
    # Normalize bare dates to midnight datetimes.
    if type(start_day) is not datetime:
        start_day = datetime.combine(start_day, datetime.min.time())
    if type(end_day) is not datetime:
        end_day = datetime.combine(end_day, datetime.min.time())
    span = 0 if start_day > end_day else (end_day - start_day).days
    buckets = {WEEKEND: [], SUMMER: [], WINTER: []}
    for offset in range(span + 1):
        day = start_day + timedelta(days=offset)
        buckets[get_day_type(day)].append(day)
    return buckets
def get_aggregate_minute_data(days):
    """
    Gets aggregate minute data for the days provided. Uses Memcache to
    cache results.

    Returns a 24-element list: per hour, the sum of the per-minute average
    watt readings (watt-minutes) across the given days.
    """
    empty_hours = [0] * 24
    if days:
        days_str = ",".join([day.strftime("\"%Y-%m-%d\"") for day in days])
        cache_key = safe_cache_key(
            "joule:views:get_aggregate_minute_data:" + days_str)
        today = datetime.combine(datetime.today(), datetime.min.time())
        if today in days:
            # Today's data is still changing; never serve it from cache.
            wm_hours = None
        else:
            wm_hours = cache.get(cache_key, None)
        if not wm_hours:
            # Initialize a list of 24 zeros
            wm_hours = empty_hours
            cursor = connection.cursor()
            # NOTE(review): days_str is interpolated directly into the SQL;
            # safe only because it is built from strftime output above. The
            # '-04:00' offset hardcodes Eastern daylight time — TODO confirm.
            query = """
                SELECT `minute`, AVG(`watts`) FROM `efergy_minutedata`
                WHERE DATE(CONVERT_TZ(`timestamp`, '+00:00', '-04:00')) in (%s)
                GROUP BY `minute`
                ORDER BY `minute`
            """ % (days_str, )
            cursor.execute(query)
            # Add the minute's average consumption (watt-minutes) to the
            # respective hour
            for minute_row in cursor.fetchall():
                hour = minute_row[0] // 60
                wm_hours[hour] = float(wm_hours[hour]) + float(minute_row[1])
            cache.set(cache_key, wm_hours)
        return wm_hours
    return empty_hours
def get_hours(days_dict, tariff, day_counts=None):
    """
    Given a dict of tariff-types and their list of days, return per-hour
    usage dicts: {hour, kwhrs, cost, ext_cost} for each hour [0..23].

    :param days_dict: a dict containing tariff-types and respective list of days.
    :param tariff: A Tariff object
    :param day_counts: A dict containing { tariff-type: num_past_days }, if
                       present is used instead of the number of days taken
                       from days_dict.
    :return: a dict mapping each tariff-type to its list of 24 hour dicts.
    """
    all_hours_dict = dict()
    for tariff_type, days in days_dict.items():
        wm_hours = get_aggregate_minute_data(days)
        # Extended cost multiplies the hourly cost over the day count.
        multiplier = day_counts[tariff_type] if day_counts else len(days)
        hours = list()
        for hour in range(0, 24):
            # Watt-minutes -> kWh.
            kwhrs = wm_hours[hour] / 60000
            if days:
                # Price the hour using the tariff in effect on the first day.
                cost = kwhrs * tariff.rate(days[0].replace(hour=hour))
            else:
                cost = 0.0
            hours.append({
                "hour": hour,
                "kwhrs": kwhrs,
                "cost": cost,
                "ext_cost": cost * multiplier,
            })
        all_hours_dict.update({tariff_type: hours})
    return all_hours_dict
def add_relative_sizes(hours, max_kwhrs, max_cost):
    """Annotate each hour dict with percent-of-max kWh and cost.

    Mutates the dicts in place and returns them collected in a new list.
    """
    annotated = []
    for hour_entry in hours:
        hour_entry.update({
            "pct_kwhrs": hour_entry["kwhrs"] / max_kwhrs * 100,
            "pct_cost": hour_entry["cost"] / max_cost * 100,
        })
        annotated.append(hour_entry)
    return annotated
def get_weighted_merge(a, b, a_weight=1, b_weight=1):
    """Merge two hour dicts into one, weighting their values.

    When either side of a value is falsy (zero/missing) the larger one wins
    outright; otherwise a weighted average is taken. ext_cost is the merged
    cost extended over the combined weight.
    """
    def weighted(x, y):
        if not x or not y:
            # A zero side contributes nothing: keep the larger value as-is.
            return max(x, y)
        return (x * a_weight + y * b_weight) / (a_weight + b_weight)

    merged_cost = weighted(a["cost"], b["cost"])
    return {
        "hour": a["hour"],
        "kwhrs": weighted(a["kwhrs"], b["kwhrs"]),
        "cost": merged_cost,
        "ext_cost": merged_cost * (a_weight + b_weight),
    }
def get_merged_hours(a, b, a_weight=1, b_weight=1):
    """
    Merges list of dicts: «a» and «b» into one respecting their weights:
    «a_weight» and «b_weight» (both dicts keyed by tariff type).
    """
    merged = {}
    for tariff_type in (WEEKEND, SUMMER, WINTER):
        merge = partial(get_weighted_merge,
                        a_weight=a_weight[tariff_type],
                        b_weight=b_weight[tariff_type])
        # NB: map() — a list on Python 2, a lazy iterator on Python 3.
        merged[tariff_type] = map(merge, a[tariff_type], b[tariff_type])
    return merged
class BillEstimateView(TemplateView):
    """Renders an estimated (or projected) electricity bill for one month."""
    template_name = 'joule/bill.html'
    year = None  # requested year (int), set in get()
    month = None  # requested month (int), set in get()
    active_month = None  # datetime for the 1st of the requested month
    num_trend_days = 0  # days of history used to project the rest of the month
    today = None  # date in the configured local timezone
    now = None  # aware datetime in the configured local timezone
    tz = None  # cached pytz timezone (see get_timezone)
    first_month = None  # cached earliest month with data (see get_first_month)
    current_month = False  # True when the requested month is the current one
    def get_first_month(self):
        """Return (and cache) the first month with minute data."""
        if not self.first_month:
            first_record = MinuteData.objects.order_by('timestamp').first()
            # Truncate the earliest record to the start of its local month.
            # NOTE(review): tzinfo=None strips the zone after converting to
            # local time, yielding a naive datetime — confirm callers expect that.
            self.first_month = first_record.timestamp.astimezone(
                get_localzone()).replace(day=1, hour=0, minute=0, second=0,
                                         microsecond=0, tzinfo=None)
        return self.first_month
    # @method_decorator(cache_page(60))
    def dispatch(self, request, *args, **kwargs):
        """Record the request start time before normal dispatch."""
        self.start_time = datetime.now()
        return super(BillEstimateView, self).dispatch(request, *args, **kwargs)
def get_timezone(self):
if self.tz is None:
self.tz = pytz.timezone(
getattr(settings, "JOULE_TIMEZONE", "America/New_York"))
return self.tz
    def get(self, request, *args, **kwargs):
        """Resolve the requested billing month and render the estimate.

        Redirects to the current month's URL when no year/month was given.
        """
        self.now = timezone.now().astimezone(self.get_timezone())
        self.today = self.now.date()
        if "year" in kwargs and "month" in kwargs:
            self.year = int(kwargs["year"])
            self.month = int(kwargs["month"])
        if not (self.year and self.month):
            # No explicit month requested: bounce to the current month's URL.
            url = reverse("bill_view", kwargs={
                "year": self.today.year, "month": self.today.month})
            return redirect(to=url)
        self.active_month = datetime(self.year, self.month, 1)
        if self.today.year == self.year and self.today.month == self.month:
            # OK, we're in the middle of this month, so, we'll require some
            # projections. To project the rest of the month, we'll assume a
            # usage profile similar to the previous 7 days. This is guaranteed
            # to contain weekend and non-weekend days.
            #
            # NOTE: During Months October and April, there may be some poor
            # results when the previous week was of one tariff type, and the
            # rest of the month is another.
            self.num_trend_days = 7
        return super(BillEstimateView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(BillEstimateView, self).get_context_data(**kwargs)
month_end = self.active_month.replace(
day=monthrange(self.active_month.year, self.active_month.month)[1])
active_month_end = self.active_month.replace(
day=monthrange(self.year, self.month)[1])
if self.today == month_end:
self.num_trend_days = 0
if self.num_trend_days:
# These are the days we've already measured this month
past_days = categorize_days(
self.active_month, self.today)
# These are the days we'll be projecting
future_days = categorize_days(
self.today + timedelta(days=1), month_end)
else:
past_days = categorize_days(self.active_month, active_month_end)
future_days = []
self.current_month = (
self.active_month.date() < self.today <= active_month_end.date())
context["current_month"] = self.current_month
context["today"] = self.today
today_type = get_day_type(self.today)
# How many days of each type?
# ----------------------------------------------------------------------
num_past_days = {
WEEKEND: len(past_days[WEEKEND]),
SUMMER: len(past_days[SUMMER]),
WINTER: len(past_days[WINTER]),
}
if future_days:
num_future_days = {
WEEKEND: len(future_days[WEEKEND]),
SUMMER: len(future_days[SUMMER]),
WINTER: len(future_days[WINTER]),
}
else:
num_future_days = {WEEKEND: 0, SUMMER: 0, WINTER: 0}
num_days = {
WEEKEND: num_past_days[WEEKEND] + num_future_days[WEEKEND],
SUMMER: num_past_days[SUMMER] + num_future_days[SUMMER],
WINTER: num_past_days[WINTER] + num_future_days[WINTER],
}
context["num_days"] = num_days
total_days = sum([num_days[WEEKEND], num_days[SUMMER], num_days[WINTER]])
past_hours = get_hours(past_days, piedmont_tariff)
# If we'll be using trend days to project the rest of the month, then
# prepare the trend hours and blend with the past hours.
# ----------------------------------------------------------------------
if self.num_trend_days:
trend_days = categorize_days(
self.today - timedelta(days=self.num_trend_days), self.today)
future_days_count = {
WEEKEND: len(future_days[WEEKEND]),
SUMMER: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.