text stringlengths 38 1.54M |
|---|
import keras
import time
from keras.datasets import cifar10
from keras.models import Sequential
from keras import optimizers
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
from pathlib import Path
from matplotlib import pyplot as plt
# Loading CIFAR-10 data sets
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalizing data set to 0-to-1 range
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# Setting Hyperparameters
batchSize = 64
epoc = 100
# Converting class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
# Defining Numerical Optimizers
#sgd = optimizers.SGD(learning_rate=0.01, decay=1e-6, momentum=0.0, nesterov=False)
#rmsp = optimizers.RMSprop(learning_rate=0.001, rho=0.9)
#adag = optimizers.Adagrad(learning_rate=0.01)
adam = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)
# Create a model and add layers
model_adam = Sequential()
model_adam.add(Conv2D(32, (3, 3), padding='same', input_shape=(32, 32, 3), activation="relu"))
model_adam.add(Conv2D(32, (3, 3), activation="relu"))
model_adam.add(MaxPooling2D(pool_size=(2, 2)))
model_adam.add(Dropout(0.25))
model_adam.add(Conv2D(64, (3, 3), padding='same', activation="relu"))
model_adam.add(Conv2D(64, (3, 3), activation="relu"))
model_adam.add(MaxPooling2D(pool_size=(2, 2)))
model_adam.add(Dropout(0.25))
model_adam.add(Flatten())
model_adam.add(Dense(512, activation="relu"))
model_adam.add(Dropout(0.5))
model_adam.add(Dense(10, activation="softmax"))
# Compile the model
model_adam.compile(
loss='categorical_crossentropy',
optimizer=adam,
metrics=['accuracy']
)
start_time = time.time()
# Train the model
history = model_adam.fit(
x_train,
y_train,
batch_size=batchSize,
epochs=epoc,
validation_data=(x_test, y_test),
shuffle=True
)
elapsed_time = time.time() - start_time
# Plot for model accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('adam_acc.png', dpi=600)
plt.show()
# Plot for model loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('adam_loss.png', dpi=600)
plt.show()
# Save total training time
print('Execution time: %.3f' % elapsed_time)
file1 = Path("adam_time.txt")
file1.write_text(str(elapsed_time))
# Save neural network structure
model_structure = model_adam.to_json()
f = Path("adam_model_structure.json")
f.write_text(model_structure)
# Save neural network's trained weights
model_adam.save_weights("adam_model_weight.h5")
|
# In the most cases it is enough to train ones with fixed sn and sf (b were assumed to be always fixed)
# and optional S, U depending on the data-set with about 50 to 100 iterations
owd = 'C:/Users/flo9fe/Desktop/GIT_IP/python_ip'
#owd = '/usr/local/home/GIT_IP/python_ip'
import os;
os.chdir(owd)
from LVMvSSGP_opt_SV1_IP import LVMvSSGP_opt
from scipy.optimize import minimize
import scipy.io as sio
import numpy as np
import random
np.set_printoptions(precision=2, suppress=True)
from time import gmtime, strftime, time
strftime("%Y-%m-%d %H:%M:%S", gmtime())
os.chdir('data')
dataset = 'data_usps'
run = input('Enter something: ')
mat = sio.loadmat(dataset + '.mat', squeeze_me=True) # specify filename to load
os.chdir(owd)
X = mat['X'] # output data, structured
lengthscale = mat['lengthscale'] # lengthscales l
lengthscale_p = mat['lengthscale_p'] # lengthscales p
sn = mat['sn'] # noise parameter
sf = mat['sf'] # power parameter
MU_S = mat['MU_S'] # variational spectral points
SIGMA_S = mat['SIGMA_S'] # variational spectral variance
MU = mat['MU'] # variational latent state
SIGMA = mat['SIGMA'] # variational latent state variance
U = np.array(mat['U'], dtype=np.float64) # pseudo input points
b = mat['b'] # phases
# eliminate bad matlab to python coversion
X = np.require(X,dtype=None,requirements='A')
lengthscale = np.require(lengthscale,dtype=None,requirements='A')
lengthscale_p = np.require(lengthscale_p,dtype=None,requirements='A')
sn = np.require(sn,dtype=None,requirements='A')
sf = np.require(sf,dtype=None,requirements='A')
MU_S = np.require(MU_S,dtype=None,requirements='A')
SIGMA_S = np.require(SIGMA_S,dtype=None,requirements='A')
MU = np.require(MU,dtype=None,requirements='A')
SIGMA = np.require(SIGMA,dtype=None,requirements='A')
U = np.require(U,dtype=None,requirements='A')
b = np.require(b,dtype=None,requirements='A')
Q = MU.shape[1] # input data dimension
(N,D) = X.shape # output data dimension
M = U.shape[0]
# S = np.random.normal(0, 1,(M,Q))
#U = np.zeros((M,Q))
# U = np.random.normal(0, 1,(M,Q))
rand_M = random.sample(range(1, 1000), 50)
U = MU[rand_M,:]
hyp = np.zeros((Q,2))
hyp[:,0] = lengthscale
hyp[:,1] = lengthscale_p
lower_bound_values = 1 # show lower bound value for every iteration, less fast
save_iter = 1 # save opt_params every iteration (outcome in \DRGP_VSS\python\...)
opt_params = {'sn': sn, 'sf': sf, 'hyp': hyp, 'U': U, 'MU_S': MU_S, 'SIGMA_S': SIGMA_S, 'MU': MU, 'SIGMA': SIGMA} # optimized parameters
fixed_params = {'b': b,} # other not optimized parameters
inputs = {'X': X} # output data
LVMvSSGP_opt1 = LVMvSSGP_opt(dataset, run, Q, D, N, M, lower_bound_values, save_iter, inputs, opt_params, fixed_params)
# LBFGS
x0 = np.concatenate([np.atleast_2d(opt_params[n]).flatten() for n in LVMvSSGP_opt1.opt_param_names])
LVMvSSGP_opt1.callback(x0)
startTime = time()
# bnds = np.transpose(np.squeeze(np.stack((-15*np.ones((x0.shape[0],1)),15*np.ones((x0.shape[0],1)))),axis=2))
res = minimize(LVMvSSGP_opt1.func, x0, method='L-BFGS-B', jac=LVMvSSGP_opt1.fprime,
options={'ftol': 0, 'disp': False, 'maxiter': 1000}, tol=0, callback=LVMvSSGP_opt1.callback)
opt_param_names = [n for n,_ in opt_params.items()]
opt_param_values = [np.atleast_2d(opt_params[n]) for n in opt_param_names]
shapes = [v.shape for v in opt_param_values]
sizes = [sum([np.prod(x) for x in shapes[:i]]) for i in range(len(shapes)+1)]
x_param_values = [res.x[sizes[i-1]:sizes[i]].reshape(shapes[i-1]) for i in range(1,len(shapes)+1)]
opt_params = {n:v for (n,v) in zip(opt_param_names, x_param_values)}
opt_params1 = opt_params
opt_params1.update(fixed_params)
sio.savemat('matlab_ip/data_optimized/' + 'SV1_IP_' + dataset + run, {'opt_params': opt_params1})
endTime = time()
print('Running Time: '+str(endTime-startTime))
|
import objc
import sys
from PyObjCTools.TestSupport import TestCase, skipUnless
NSObject = objc.lookUpClass("NSObject")
NSArray = objc.lookUpClass("NSArray")
class TestGenericClasses(TestCase):
@skipUnless(sys.version_info[:2] >= (3, 9), "Feature requires python 3.9")
def test_generic_classes(self):
with self.subTest("NSObject"):
int_object = NSObject[int]
value = int_object.new()
self.assertIsInstance(value, NSObject)
with self.assertRaises(TypeError):
isinstance(value, int_object)
self.assertEqual(int_object.__args__, (int,))
self.assertEqual(int_object.__origin__, NSObject)
with self.subTest("NSArray"):
int_array = NSArray[int]
value = int_array.arrayWithArray_([1, 2, 3])
self.assertIsInstance(value, NSArray)
self.assertIn(1, value)
self.assertNotIn(4, value)
with self.assertRaises(TypeError):
isinstance(value, int_array)
self.assertEqual(int_array.__args__, (int,))
self.assertEqual(int_array.__origin__, NSArray)
|
# code for image segmentation and detection
#by tamilselvan
#02,July 2018
#importing necessary libraries.
import cv2
import numpy as np
import os
import time
from PIL import Image
from matplotlib import pyplot as plt
import csv
#for writing into csv file.
f = open('Sample-Output.csv','w')
f.write('FileName,GCPLocation\n')
i=0
flag=0
count=1
def blurring(flag,file,chuck_gray,chuck):
chuck_bw = cv2.threshold(chuck_gray, 230, 255, cv2.THRESH_BINARY)[1]
chuck_bw, contours, hierarchy = cv2.findContours(chuck_bw, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
try:
cnt = contours[0]
contours=None
M = cv2.moments(cnt)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
hull = cv2.convexHull(cnt,returnPoints = False)
defects = cv2.convexityDefects(cnt,hull)
area = cv2.contourArea(cnt)
save(flag,file,chuck,cx,cy,cnt,defects)
except:
return 0
def save(flag,file,chuck,cx,cy,cnt,defects):
for i in range(defects.shape[0]):
s,e,f,d = defects[i,0]
sstart = tuple(cnt[s][0])
end = tuple(cnt[e][0])
far = tuple(cnt[f][0])
#when the centroid and the contours's edge are set apart by 3 pixels, the contour edge is calculated as the GCP
if np.abs(far[0]-cx)<= 3 and np.abs(far[1]-cy)<= 3:
cv2.line(chuck,sstart,end,[0,255,0],2)
cv2.circle(chuck,far,2,[0,0,255],-1)
flag=flag+1
if(flag>=1):
cv2.imwrite(path_to_save+str(pt)+file, chuck)
else:
#it will execute the same save operation but with the minimum distance between centroid and countour's edges as 4
extreme(flag,file,chuck,cx,cy,cnt,defects,chuck_gray)
def extreme(flag,file,chuck,cx,cy,cnt,defects,chuck_gray):
for i in range(defects.shape[0]):
s,e,f,d = defects[i,0]
sstart = tuple(cnt[s][0])
end = tuple(cnt[e][0])
far = tuple(cnt[f][0])
print(far,cx,cy,flag)
if np.abs(far[0]-cx)<= 4 and np.abs(far[1]-cy) <= 4:
cv2.line(chuck,sstart,end,[0,255,0],2)
cv2.circle(chuck,far,2,[0,0,255],-1)
#to save the chucks to a path
cv2.imwrite(path_to_save+str(pt)+file, chuck)
#declaring the paths
path = 'dataset/'
template_path= 'template/'
path_to_chucks = 'chucks/'
path_to_save= 'GCP_marked/'
files = os.listdir(path)
templates=os.listdir(template_path)
#looping across every images in the file.
for file in files:
img_rgb= cv2.imread(path+file)
height,width,_=img_rgb.shape
counter=0
pt_prev=(0,0)
c=0
f.write(file+',[')
#looping across the every possible templates
for temp in templates:
start = time.time()
template = cv2.imread(template_path+temp,0)
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
w, h = template.shape[::-1]
res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)
threshold = 0.8
loc = np.where( res >= threshold)
#loc will have the points which has matching templates
for pt in zip(*loc[::-1]):
#to remove away chucks that are close to each other and to avoid repeation
if(pt[0]-pt_prev[0])>=10 and (pt[1]-pt_prev[1])>=10:
chuck= img_rgb[pt[1]:(pt[1] + h), pt[0]:(pt[0] + w)]
# to use bilaterial filtering to smoothening the chuck
chuck= cv2.bilateralFilter(chuck,10,75,75)
chuck_gray =cv2.cvtColor(chuck, cv2.COLOR_BGR2GRAY)
chuck_bw = cv2.threshold(chuck_gray, 190, 255, cv2.THRESH_BINARY)[1]
#to remove complete black image and to remove very small chucks
if cv2.countNonZero(chuck_bw) == 0 or (chuck_bw.shape[0]<=26 and chuck_bw.shape[1]<=26):
continue
else:
flag=0
#to normalize black and white image and to smoothen image by gaussian filter
cv2.normalize(chuck_bw, chuck_bw, 0, 255, cv2.NORM_MINMAX)
blur = cv2.GaussianBlur(chuck_bw,(5,5),0)
smooth = cv2.addWeighted(blur,1.5,chuck_bw,-0.5,0)
#to find the contours in the chucks
chuck_bw, contours, hierarchy = cv2.findContours(smooth, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
contours=None
M = cv2.moments(cnt)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
hull = cv2.convexHull(cnt,returnPoints = False)
defects = cv2.convexityDefects(cnt,hull)
#to find the area of the contours so that it will remove false positives
area = cv2.contourArea(cnt)
#if the area of image is more than or less than the desired area it is set to further blurring operations
if int(area)<=100 or int(area)>=1000 :
flag=blurring(flag,file,chuck_gray,chuck)
else:
#if found that image is of desired area, it given into a function where GCP is calculated
flag=save(flag,file,chuck,cx,cy,cnt,defects)
#it marks the chucks on the raw image
cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
pt_prev=pt
f.write('['+str(pt[0])+','+str(pt[1])+'],')
c=c+1
counter=counter+1
finish=time.time()
#to print the execution time to the console
print(str(count)+'-'+str(file)+" -mask count "+str(counter)+" took = "+str(finish-start)+' sec')
if c==0:
f.write('NIL')
f.write(']\n')
cv2.imwrite(path_to_chucks+file, img_rgb)
count=count+1
|
# 二分搜索变形之:查找target在升序变形数组中的位置
def t003(nums,target):
def search_one(nums, target):
l = 0
r = len(nums) - 1
while l <= r:
mid = l +(r-l) // 2
if nums[mid] == target:
return mid
if nums[mid] >= nums[l]:
if nums[mid]>target>=nums[l]:
r = mid -1
else:
l = mid + 1
else:
if nums[mid]<target<=nums[r]:
l = mid + 1
else:
r = mid - 1
return -1
return search_one(nums,target)
if __name__ == '__main__':
nums = [9, 10, 12, 24, 34, 2, 3, 6]
print(t003(nums,24))
|
import time
import copy
import numpy as np
import pandas
class matrix():
def __init__(self,x=None,row_names=None,col_names=None):
self.col_names,self.row_names = col_names,row_names
self.x = x
if x is not None:
if row_names is not None:
assert len(row_names) == x.shape[0],\
'shape[0] != len(row_names) '+str(x.shape)+' '+str(len(row_names))
if col_names is not None:
assert len(col_names) == x.shape[1],\
'shape[1] != len(col_names) '+str(x.shape)+' '+str(len(col_names))
self.integer = np.int32
self.double = np.float64
self.char = np.uint8
@property
def shape(self):
if self.x is None:
return None
else:
return self.x.shape
def svd(self):
if self.x is None:
raise Exception('self.x not set')
#--build some name lists
left_names,right_names,sing_names = [],[],[]
for i in range(self.shape[0]):
right_names.append('right_singvec_'+str(i+1))
for j in range(self.shape[1]):
left_names.append('left_singvec_'+str(j+1))
for k in range(min(self.shape)):
sing_names.append('sing_val_'+str(k+1))
u,s,vt = np.linalg.svd(self.x)
u = matrix(u,self.row_names,right_names)
s = matrix(np.atleast_2d(s).transpose(),sing_names,['none'])
vt = matrix(vt,left_names,self.col_names)
return u,s,vt
def vector(self,k):
k = k.lower()
self.col_names = list(self.col_names)
self.row_names = list(self.row_names)
if k in self.col_names:
mat = matrix()
mat.x = np.atleast_2d(self.x[:,self.col_names.index(k)].copy()).transpose()
mat.row_names = copy.deepcopy(self.row_names)
mat.col_names= [k]
return mat
elif k in self.row_names:
mat = matrix()
mat.x = np.atleast_2d(self.x[self.row_names.index(k),:].copy()).transpose()
mat.row_names = copy.deepcopy(self.col_names)
mat.col_names= [k]
return mat
else:
raise Exception(str(k)+' not in col or row names')
def to_binary(self,out_filename=None):
if out_filename is None:
out_filename = self.filename
f = open(out_filename,'wb')
f.close()
def drop(self,names):
for name in names:
if name not in self.col_names and name not in self.row_names:
raise Exception('name not found: '+name)
if name in self.col_names:
idx = self.col_names.index(name)
self.x = np.delete(self.x,idx,1)
self.col_names.remove(name)
if name in self.row_names:
idx = self.row_names.index(name)
self.x = np.delete(self.x,idx,0)
self.row_names.remove(name)
def extract_cols(self,names):
'''extracts and returns
'''
idxs = [],[]
for name in names:
if name not in self.col_names:
raise Exception('name not found col names: '+name)
idx = self.col_names.index(name)
idxs.append(idx)
#self.x = np.delete(self.x,idx,1)
self.col_names.remove(name)
extract = self.x[:,idxs].copy()
self.x = np.delete(self.x,idxs,1)
return extract
def from_binary(self,filename):
f = open(filename,'rb')
#--the header datatype
header_dt = np.dtype([('itemp1',self.integer),('itemp2',self.integer),('icount',self.integer)])
itemp1,itemp2,icount = np.fromfile(f,header_dt,1)[0]
if itemp1 >= 0:
raise TypeError, 'Jco produced by deprecated version of PEST,'+\
'Use JCOTRANS to convert to new format'
if self.shape is not None:
if abs(itemp1) != self.ncol:
raise ValueError,'ncol value not equal to matrix dimensions'\
+str(ncol)+' '+str(abs(itemp1))
if self.shape is not None:
if abs(itemp2) != self.nrow:
raise ValueError,'nrow value not equal to matrix dimensions'\
+str(nrow)+' '+str(abs(itemp2))
ncol,nrow = abs(itemp1),abs(itemp2)
self.shape = (nrow,ncol)
self.x = np.zeros((nrow,ncol))
start = time.clock()
#--the record datatype
rec_dt = np.dtype([('j',self.integer),('dtemp',self.double)])
#--read all data records
#--using this a real memory hog
print "loading rec array"
data = np.fromfile(f,rec_dt,icount)
print "calc icols and irows"
icols = ((data['j'] - 1) / nrow) + 1
irows = data['j'] - ((icols -1) * nrow)
#--uncompress the data into x
#icount = 0
#for i in data:
#for irow,icol,dtemp in zip(irows,icols,data["dtemp"]):
#for ii in range(icount):
#i = np.fromfile(f,rec_dt,1)[0]
#j = i[0]
#icol = ((j-1) / nrow) + 1
#irow = j - ((icol - 1) * nrow)
#print i,ies,irow
#--zero-based indexing translation
#self.x[irow-1,icol-1] = dtemp
#icount += 1
#if icount % 5000 == 0:
# print icount,data.shape
#print time.clock() - start
print "filling..."
self.x[irows-1,icols-1] = data["dtemp"]
#--read parameter names
col_names = []
for i in range(ncol):
cn = np.fromfile(f,self.char, count=12).tostring().lower().strip()
col_names.append(cn)
#print 'par:',pn
#--read obs names
row_names = []
for i in range(nrow):
rn = np.fromfile(f,self.char, count=20).tostring().lower().strip()
row_names.append(rn)
#print 'obs:',on
self.col_names = col_names
self.row_names = row_names
def to_ascii(self,out_filename,icode=2):
#start = time.clock()
nrow,ncol = self.shape
f_out = open(out_filename,'w')
f_out.write(' {0:7.0f} {1:7.0f} {2:7.0f}\n'.format(nrow,ncol,icode))
#for i in range(nrow):
# for j in range(ncol):
# f_out.write(' {0:15.7e} '.format(self.x[i,j]))
# if (j+1) % 7 == 0:
# f_out.write('\n')
# f_out.write('\n')
np.savetxt(f_out,self.x,fmt='%15.7E')
if icode == 1:
f_out.write('* row and column names\n')
for r in self.row_names:
f_out.write(r+'\n')
else:
f_out.write('* row names\n')
for r in self.row_names:
f_out.write(r+'\n')
f_out.write('* column names\n')
for c in self.col_names:
f_out.write(c+'\n')
f_out.close()
#print time.clock() - start
def from_ascii(self,filename):
f = open(filename,'r')
raw = f.readline().strip().split()
nrow,ncol,icode = int(raw[0]),int(raw[1]),int(raw[2])
x,icount = [],0
while True:
line = f.readline().strip().split()
for l in line:
x.append(float(l))
icount += 1
if icount == nrow * ncol:
break
line = f.readline().strip().lower()
if not line.startswith('*'):
raise Exception('error loading ascii file,line should start with '*', not '+line)
if 'row' in line and 'column' in line:
assert nrow == ncol
names = []
for i in range(nrow):
line = f.readline().strip().lower()
names.append(line)
self.row_names = names
self.col_names = names
else:
names = []
for i in range(nrow):
line = f.readline().strip().lower()
names.append(line)
self.row_names = names
line = f.readline().strip().lower()
names = []
for j in range(ncol):
line = f.readline().strip().lower()
names.append(line)
self.col_names = names
f.close()
x = np.array(x,dtype=np.double)
x.resize(nrow,ncol)
self.x = x
class uncert(matrix):
def __init__(self,names):
self.names = list(names)
self.x = np.zeros((len(names),len(names)))
self.row_names = names
self.col_names = names
def to_uncfile(self,unc_file,covmat_file="cov.mat",var_mult=1.0):
f = open(unc_file,'w')
f.write("START COVARIANCE_MATRIX\n")
f.write(" file "+covmat_file+"\n")
f.write(" variance_multiplier {0:15.6E}\n".format(var_mult))
f.write("END COVARIANCE_MATRIX\n")
f.close()
self.to_ascii(covmat_file,icode=1)
def from_obsweights(self,pst_file):
if not pst_file.endswith(".pst"):
pst_file += ".pst"
import pst_handler as phand
pst = phand.pst(pst_file)
visited = [False] * len(self.names)
for i,row in pst.observation_data.iterrows():
if row["obsnme"] in self.names:
idx = self.names.index(row["obsnme"])
w = row["weight"]
if w == 0.0:
#raise Exception("zero weight observation: "+row["obsnme"])
print "resetting weight for",row["obsnme"],"from 0.0 to 1.0e-30"
w = 1.0e-30
self.x[idx,idx] = (1.0/w)**2
visited[idx] = True
if False in visited:
for name,visit in zip(self.names,visited):
if not visit:
print 'entry not found for name:',name
raise Exception('error loading uncertainty from observations weights')
def from_parbounds(self,pst_file):
if not pst_file.endswith(".pst"):
pst_file += ".pst"
import pst_handler as phand
pst = phand.pst(pst_file)
visited = [False] * len(self.names)
for i,row in pst.parameter_data.iterrows():
if row["parnme"] in self.names:
idx = self.names.index(row["parnme"])
t = row["partrans"]
if t in ["fixed","tied"]:
raise Exception("fixed or tied parameter: "+row["parnme"])
lb,ub = row["parlbnd"],row["parubnd"]
if t == "log":
var = ((np.log10(ub) - np.log10(lb))/4.0)**2
else:
var = ((ub - lb)/4.0)**2
self.x[idx,idx] = var
visited[idx] = True
if False in visited:
for name,visit in zip(self.names,visited):
if not visit:
print 'entry not found for name:',name
raise Exception('error loading uncertainty from observations weights')
def from_uncfile(self,filename):
visited = [False] * len(self.names)
f = open(filename,'r')
while True:
line = f.readline().lower()
if len(line) == 0:
break
line = line.strip()
if 'start' in line:
if 'standard_deviation' in line:
while True:
line2 = f.readline().strip().lower()
if 'end' in line2:
break
raw = line2.strip().split()
name,val = raw[0],float(raw[1])
if name in self.names:
idx = self.names.index(name)
self.x[idx,idx] = val**2
visited[idx] = True
elif 'covariance_matrix' in line:
var = 1.0
while True:
line2 = f.readline().strip().lower()
if 'end' in line2:
break
if line2.startswith('file'):
cov = matrix()
cov.from_ascii(line2.split()[1])
drop = []
for rname in cov.row_names:
if rname not in self.names:
drop.append(rname)
cov.drop(drop)
elif line2.startswith('variance_multiplier'):
var = float(line2.split()[1])
else:
raise Exception('unrecognized keyword in std block: '+line2)
if var != 1.0:
#for idx in range(cov.x.shape[0]):
# cov.x[idx,idx] *= var
cov.x *= var
for i,rname in enumerate(cov.row_names):
i_idx = self.names.index(rname)
visited[i_idx] = True
for j,cname in enumerate(cov.col_names):
j_idx = self.names.index(cname)
self.x[i_idx,j_idx] = cov.x[i,j]
else:
raise Exception('unrecognized block:'+str(line))
f.close()
if False in visited:
for name,visit in zip(self.names,visited):
if not visit:
print 'entry not found for name:',name
raise Exception('error loading uncertainty file')
|
N = 1010
n, m = map(int, input().split())
f = [[0] * N for _ in range(N)]
a = ' ' + input()
b = ' ' + input()
for i in range(1, n + 1):
for j in range(1, m + 1):
f[i][j] = max(f[i-1][j], f[i][j-1])
if a[i] == b[j]: f[i][j] = max(f[i][j], f[i-1][j-1] + 1)
print(f[n][m])
|
import sys
from collections import deque
while True:
balanced = True
line = sys.stdin.readline().rstrip()
if line==".":
exit()
stack = deque()
i = 0
while line[i]!=".":
if ((line[i]=="(") | (line[i]=="[")):
stack.append(line[i])
elif ((line[i]==")") | (line[i]=="]")):
if len(stack)==0:
balanced = False
break
if line[i]==")":
if (stack.pop()!="("):
balanced = False
break
else:
if (stack.pop()!="["):
balanced = False
break
i+=1
if ((balanced) & (len(stack)==0)):
print("yes")
else:
print("no")
//if stack : 이 구문은 stack이 비었으면 false이다. |
class BaseBatchfile:
def __init__(self, filename):
self.filename = filename
self.__dict = {"filename": filename}
def set(self, key, value):
self.__dict[key] = value
def get(self, key, default = None):
if key in self.__dict:
return self.__dict[key]
return default
def get_state(self):
return self.__dict
def set_jobid(self, jobid):
self.set("jobid", jobid)
def get_jobid(self):
return get("jobid")
def get_filename(self):
return self.filename
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 7 23:23:39 2020
@author: Avdhesh Kumar
"""
import os
import time
import requests
import sys
def retrieve_html():
for year in range(2013, 2018):
for month in range(1,13):
if(month < 10):
url='http://en.tutiempo.net/climate/0{}-{}/ws-421820.html'.format(month
,year)
else:
url='http://en.tutiempo.net/climate/{}-{}/ws-421820.html'.format(month
,year)
texts = requests.get(url)
text_utf = texts.text.encode('utf=8')
if not os.path.exists("Data/Html_Data/{}".format(year)):
os.makedirs("Data/Html_Data/{}".format(year))
with open("Data/Html_Data/{}/{}.html".format(year,month),"wb") as output:
output.write(text_utf)
sys.stdout.flush()
if __name__=="__main__":
start_time = time.time()
retrieve_html()
stop_time = time.time()
print("time taken {}".format( stop_time- start_time))
|
from google.appengine.api import background_thread
from threadtest import TestThreadIF
class TestBackgroundThread(TestThreadIF, background_thread.BackgroundThread):
def __init__(self, name):
background_thread.BackgroundThread.__init__(self, name=name)
self.name = name
if __name__ == '__main__':
TestBackgroundThread.test()
|
number=int(input("Please enter the number: "))
for i in range(1,number+1):
print(i, "X",number,"=",i*number) |
def weareintrouble(a_smiles,b_smiles):
if a_smiles ==True and b_smiles== True:
return True
elif a_smiles == False and b_smiles==False:
return True
else:
return False
def int_sum(a,b):
if a==b:
return (2*(a+b))
else:
return a+b
def hours(h):
hours_arr= []
for i in range(h+1,24,1):
hours_arr.append(i)
return hours_arr
def posi(a,b,neg):
if neg:
return (a<0) and (b<0)
else:
return(a>0 and b<0) or (a<0 and b>0)
def front_back(s):
if len(s)==1:
return s
else:
mid=s[1:-1]
l=s[-1]
f=s[0]
return (l + mid +f)
def front3Letters(s,a):
if len(s)<=3:
return str(s)*a
else:
mid=s[0:3]
return str(mid)*a
def extra_end(s,a):
if len(s) >= 2:
new_s = s[-2:]*a
return new_s
def zero_list(g_list):
new_list=[]
g_list_len=len(g_list)
new_list_len=g_list_len*2
last_el=g_list[-1]
for newel in range(new_list_len):
new_list.append(0)
new_list[-1]= last_el
return new_list
def list_count_9(my_list):
count=0
for i in my_list:
if i ==9:
count=1 + count
else:
count=0 + count
return count
def list_count_9_nw(g_list):
return g_list.count(9)
f= lambda a: a*a
if __name__ == '__main__':
val = weareintrouble(True, False)
print(val)
val_3= int_sum(3,3)
print(val_3)
val_4=hours(5)
print(val_4)
val_5 = posi(-1,-5,True)
print(val_5)
Val_6=front_back('gowrison')
print(Val_6)
Val_7=front3Letters('gowrison',5)
print(Val_7)
val_8=extra_end('gowrison',5)
print(val_8)
print(zero_list([1,2,6]))
print(list_count_9([1,9,9]))
print(list_count_9_nw([2,9,9,9]))
f = lambda a: a * a
print(f(12))
h = lambda x,y: str(x * x + y)+'Old'
print(h(3,1))
|
import nltk
import time # For estimate time complexity
import math # For score system
import csv # For output
import os.path # For storing data, which take much time at birth CONVENIENCE
import pickle # For storing data, which take much time at birth CONVENIENCE
import random # For diverse phrase
from nltk.corpus import brown as corpus # Chose brown corpus to ensure formal uses of words
from nltk.corpus import wordnet as wn
from nltk.stem.wordnet import WordNetLemmatizer
"""
It includes some development environment, code pieces for debugging or convenience
This is not released form.
I would left this to let the instructors know my intention well...!
"""
#### Debug functions
def print_def(word, token = ' '):
tab = ' '
print("Definitions of {}".format(word))
for synset in wn.synsets(word):
if token in synset.definition():
print(tab + "{} : {}".format(synset, synset.definition()))
def word_freq(word):
cnt = 0
for a, b in corpus.tagged_words():
if a == word:
cnt += 1
print("{} , {} times used".format(word, cnt))
return cnt
def valid_word(word):
pos_set = set()
for synset in wn.synsets(word):
pos_set.add(synset.pos())
if pos_set == {'n'}:
return False
if word.lower() in nltk.corpus.stopwords.words('english'):
return False
if word.isalpha() and word.islower():
return True
return False
def find_bigram(front, back):
for (a, b) in nltk.bigrams(corpus.tagged_words(tagset = 'universal')):
if a[0] == front and b[0] == back:
print (a, b)
def find_bigram_front(front):
for (a, b) in nltk.bigrams(corpus.tagged_words(tagset = 'universal')):
if a[0] == front:
print (a, b)
def find_bigram_back(back):
for (a, b) in nltk.bigrams(corpus.tagged_words(tagset = 'universal')):
if b[0] == back:
print (a, b)
#### Body functions
def word_def_contains(keyword):
'''
Extracts words that contains keyword in its definition (not noun def)
'''
result = []
words = [i for i in wn.all_synsets() if i.pos() != 'n' and keyword in i.definition()]
for i in words:
result += i.lemma_names()
return [i for i in list(set(result)) if i.isalpha()]
def filter_defs(adverbs):
'''
Filter the list of words containing 'degree' in their defs
1. Delete which has 'degrees' in its def rather than 'degree' since it has meaning of calibration
2. Only qualify which has 'to a/an ~ degree' or 'degree or extent' (vice versa)
return purified adverbs
'''
dels = []
for i in range(len(adverbs)):
for synset in wn.synsets(adverbs[i]):
if synset.pos() in ['a', 's', 'r', 'v'] and 'degree' in synset.definition(): # Among the raw chosen
if 'degrees' in synset.definition():
dels.append(adverbs[i])
elif not meet_condition(synset.definition()):
dels.append(adverbs[i])
return [adverb for adverb in adverbs if adverb not in dels]
def meet_condition(definition):
'''
return Bool whether it meets condition 2, in filter_defs()
Recall) 2. Only qualify which has 'to a/an ~ degree/extent (vice versa)
'''
defs = nltk.word_tokenize(definition)
for i in range(len(defs)-1):
if defs[i] == 'to' and defs[i+1] in ['a', 'an']:
if 'degree' in defs[i+1:] or 'extent' in defs[i+1:]:
return True
return False
def data_init(data, adverbs):
'''
Init dataset
We would use nested dictionary data structure for entire usage data
'''
for adverb in adverbs:
data[adverb] = {'freq': 0, 'used':{}}
def update_data(data, adverbs):
'''
Update dataset searching through the bigrams
Check which lexical items are used with the intensifiers, in a correct semantics
To enhance classification, lemmatize each lexical items
Update usage frequency for each phrases
'''
cnt = 0
corpus_tagged = corpus.tagged_words()
for (a, b) in nltk.bigrams(corpus_tagged):
if a[0] in adverbs and check_semantics(a[1], b[1]) and b[0].isalpha() and b[0].islower():
dict_temp = data[a[0]]
dict_temp['freq'] += 1
b = lemmatize(b)
if b[0] in dict_temp['used']:
dict_temp['used'][b[0]] += 1
else:
dict_temp['used'][b[0]] = 1
def lemmatize(b):
'''
Lemmatize lexical items
For NOUN : Change plural to singular, including some corner cases not handled with WordNetLemmatizer
For VERB : Change to original, no corner case
For ADJ : Change to original, no corner case
For ADV : Change to original, no corner case
'''
wnl = WordNetLemmatizer()
ADJ = ['JJR', 'JJS', 'JJT']
NOUN = ['NNS'] # Ignore 'NP' 'NPS' 'NR'
VERB = ['VBD', 'VBG', 'VBN', 'VBP', 'VBZ']
ADV = ['RBR', 'RBT', 'RN', 'RP']
if b[1] in NOUN:
lemmatized = wnl.lemmatize(b[0], 'n')
if lemmatized != b[0]:
return lemmatized, 'NN'
else:
return lemmatize_corner_case(b)
elif b[1] in VERB:
lemmatized = wnl.lemmatize(b[0], 'v')
return lemmatized, 'VB'
elif b[1] in ADJ:
lemmatized = wnl.lemmatize(b[0], 'a')
return lemmatized, 'JJ'
elif b[1] in ADV:
lemmatized = wnl.lemmatize(b[0], 'r')
return lemmatized, 'RB'
else:
return b
def lemmatize_corner_case(b):
'''
Here are some corner cases that WordNetLemmatizer cannot change plural to singular
coeds, people, men, beasties, headquarters, clothes
If the plural can be singular, we would change both item and tag
If the plural itself has meaning, we would preserve it and just change the tag
'''
if b[0] == 'coeds':
return 'coed', 'NN'
elif b[0] == 'people':
return 'person', 'NN'
elif b[0] == 'men':
return 'man', 'NN'
elif b[0] == 'beasties':
return 'beastie', 'NN'
else:
return b[0], 'NN'
def check_semantics(a, b):
'''
Check the tags and get only valid semantics
'NP' 'NPS' are not used
'NR' 'RP' is not appropriate tags for lexical items
'''
ADJ = ['JJ', 'JJR', 'JJS', 'JJT']
NOUN = ['NN', 'NNS'] # Ignore 'NP' 'NPS' 'NR'
VERB = ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']
ADV = ['RB', 'RBR', 'RBT', 'RN'] # Ignore 'RP'
return ((a in ADJ and b in NOUN)
or (a in ADV and b in VERB+ADJ+ADV)
or (a == 'QL' and b in VERB+ADJ+ADV) # qualifier
or (a == 'AP' and b in NOUN)) # post-determiner + NOUN
def get_commonity_score(adverb, data_dict):
'''
This scores out how common the intensifier is.
Criteria 1) How many definition it has
Criteria 2) Rank of definition as intensifier
Criteria 3) How many it is used with lexical item E
Calibration is
Cri3 * (Cri2)/Sum(1, ... , Cri1)
'''
first_cri = 0
second_cri = 0
third_cri = data_dict['freq']
for synset in wn.synsets(adverb):
if synset.pos() in ['a', 's', 'r', 'v']:
first_cri += 1
if 'intensifier' in synset.definition() or 'degree' in synset.definition():
second_cri = first_cri
entity = sum(range(1, first_cri+1))
part = float(second_cri)/float(entity)
return round(third_cri * part / 5, 3)
def find_common_score(common_score, word):
for i in common_score:
if i[0] == word:
return i[1]
def init_elements(elements, dataset):
'''
Initialize dictionary for lexical item E, elements
freq : How many it is used in corpus?
adjective : How many adjective defs?
used_common : Usage with common intensifiers
used_uncommon : Usage with uncommon intensifiers
'''
for i in dataset:
for word in dataset[i]['used']:
elements[word] = {'freq' : 0, 'adjective' : 0, 'used_common' : {}, 'used_uncommon' : {}}
def update_elements(elements, dataset, common_adverbs):
'''
Update elements
freq : See through corpus
adjective : See through synsets
used_common/uncommon : See through dataset
'''
# freq
for fileid in corpus.fileids():
for word in corpus.words(fileid):
if word in elements:
elements[word]['freq'] += 1
# adjective
for element in elements:
for synset in wn.synsets(element):
if synset.pos() in ['a', 's', 'r']:
elements[element]['adjective'] += 1
# used_common
for common in common_adverbs:
data_dict = dataset[common[0]]['used']
for element in data_dict:
num = data_dict[element]
elements[element]['used_common'][common[0]] = num
# used_uncommon
uncommon_adverbs = [word for word in dataset if word not in [common for common, score in common_adverbs]]
for uncommon in uncommon_adverbs:
data_dict = dataset[uncommon]['used']
for element in data_dict:
num = data_dict[element]
elements[element]['used_uncommon'][uncommon] = num
def accuracy_score(scoreboard, elements, common_score):
'''
Focused on how accurately the adverb is used to change intensity of following.
LOW SCORE, HIGH ACCURACY
1. Gather the entity of its usage, for lexical item E, sigma(commonity_of_intensifier * intensifier_freq)
For example, lexicla item E 'people', used 3 times with 'very' and 4 times with 'dead', freq is 12
no adjective definition
entity = commonity(very) * 3 + commonity(dead) * 4
for 'dead people', the accuracy score is
(commonity(dead) * 4 / entity) * freq / (adj+1)
adj+1 is for complementing the case adj == 0
adj gives an advantage for adjectives that it has potential to be used as adjective, which is scalable.
2. If it is used with common adverbs, give penalty of +10 score.
This is restriction score rather than accuracy score, but it was hard to change after first implementation.
In further step, restriction score would be directly added to accuracy score to generate total score.
'''
for e in elements:
freq = elements[e]['freq'] + 3 # Complementation for freq = 0 due to lemmatization
adj = elements[e]['adjective']
com = elements[e]['used_common']
uncom = elements[e]['used_uncommon']
entity = 0.0
for c in com:
entity += com[c] * find_common_score(common_score, c)
for u in uncom:
entity += uncom[u] * find_common_score(common_score, u)
if entity != 0:
for c in com:
score = freq * com[c] * find_common_score(common_score, c) / entity / (adj + 1)
score *= find_common_score(common_score, c) # Disadvantage for common adverbs
scoreboard.append((c, e, score))
for u in uncom:
score = freq * uncom[u] * find_common_score(common_score, u) / entity / (adj + 1)
scoreboard.append((u, e, score))
scoreboard.sort(key = lambda element: element[2])
def restrictive_score(scoreboard, accuracy_scoreboard, elements, dataset):
'''
1. Give penalty to general words for E
a. General words would be used more frquently with many sort of common intensifiers
b. However, difference in frequency and kinds can be cornered by
some words that do not use either common or uncommon.
c. Also, too sparse word can get small score. complement it by penalty.
2. Give penalty to words that used with only commons
a. It filters the phrase of common adverbs and sparse word.
3. Give advantage to the phrase used frequently
a. This means this phrase highly can be qualified to existing 'phrase'.
'''
cnt = 0
for e in elements:
cnt += 1
freq = elements[e]['freq']
common_freq = sum(elements[e]['used_common'].values())
uncommon_freq = sum(elements[e]['used_uncommon'].values())
common_kind = len(elements[e]['used_common'])
uncommon_kind = len(elements[e]['used_uncommon'])
# Words that can be scalable by commons, but also used restrictly by uncommons
diff_score = (abs(common_freq - uncommon_freq) + 1) * (abs(common_kind - uncommon_kind) + 1)
# Process it
# If it is frequent but got low score, enhance it
# If there is no usage with uncommons, deduct it
# If it is too less frequent, deduct it
if diff_score <= 10 and freq >= 100:
diff_score = diff_score / float(freq)
elif freq == 0 or uncommon_freq == 0:
diff_score += 10.0
elif freq <= 2:
diff_score += 5.0
for k in accuracy_scoreboard:
specific_freq = find_freq(k[0], k[1], dataset)
if k[1] == e:
scoreboard.append((k[0], k[1], (k[2]+diff_score)/specific_freq))
scoreboard.sort(key = lambda element: element[2])
def find_freq(D, E, dataset):
return dataset[D]['used'][E]
def main():
total_start_time = time.time()
debug = False
if debug:
return
visualize = False
# Step 1. Extract raw info
'''
Step 1-1) Extract intensifier
Method : Look up the all synsets in wordnet and picks synset that
word 'intensifier' or 'degree' is well included in definition.
And then append all lemmas of that synset.
'''
start_time = time.time()
print("Extracting intensifiers..")
intens = word_def_contains('intensifiers')
degrees = filter_defs(word_def_contains('degree'))
adverbs = list(set(intens + degrees))
print("Intensifiers extracted, RUNTIME : %.3f" % (time.time() - start_time))
print("Number of intensifiers : {}".format(len(adverbs)))
# For visualization
if visualize:
print(adverbs)
print()
'''
Step 1-2) See through bigrams and update adverbs information,
1. Frequency they are used as ADJ/ADV in corpus
2. Which lexical items are used with them in correct semantics(ADJ + NOUN, ADV + NOUN/ADV/VERB)
3. Frequencies they are used with each lexical items.
'''
start_time = time.time()
data = {}
print("Extracting Data..")
data_init(data, adverbs)
update_data(data, adverbs)
print("Data update completed, RUNTIME : %.3f" % (time.time() - start_time))
print()
'''
Step 1-3) Discard unused intensifiers
Investigate the data and delete unused intensifiers
'''
start_time = time.time()
print("Discarding unused intensifiers...")
bef = len(data)
dataset = {}
for i in data:
if data[i]['freq'] != 0:
dataset[i] = data[i]
print("Discarding Done, RUNTIME : %.3f" % (time.time()-start_time))
print("num of elements, from {} to {}".format(bef, len(dataset)))
if visualize:
for i in dataset:
print(i, '->', dataset[i])
print()
print()
# Step 2. Process bigrams
'''
Step 2-1) Extract common intensifiers
Based on three criteria
1. How many other ADJ/ADV definitions rather than sense of intensifiers?
2. How many times it is used as ADJ or ADV?
3. Rank of definition as intensifier
'''
start_time = time.time()
common_score = []
threshold = 1.0
for adverb in dataset:
score = get_commonity_score(adverb, dataset[adverb])
common_score.append((adverb, score))
common_score.sort(key = lambda element: element[1], reverse = True)
uncommons = [adverb for adverb in common_score if adverb[1] <= threshold]
commons = [adverb for adverb in common_score if adverb[1] > threshold]
print("Scoring for commonity completed, RUNTIME : %.3f" % (time.time()-start_time))
print("{} Common intensifiers".format(len(commons)))
if visualize:
print("Commons : {}".format(commons))
print("Uncommons : {}".format(uncommons))
print()
'''
Step 2-2) Build data for lexical items E
For item E, following data would be in
1. How many it is used in corpus?
2. How adjective it is? (How many adjective definitions?)
3. How many times it is used with common-intensifier?
4. How many sort of common-intensifiers are used with it?
5. How many times it is used with uncommon-intensifier?
'''
# Build dictionary for lexical items E
start_time = time.time()
elements = {}
init_elements(elements, dataset)
update_elements(elements, dataset, commons)
print("Element update completed, RUNTIME : %.3f" % (time.time()-start_time))
print("Size of Elements : {} entries".format(len(elements)))
# Visualization
if visualize:
for i in elements:
print(i, " -> ", elements[i])
print()
'''
Step 2-3) Restriction score for each pair
For each pairs of D and E, calculate restriction score
LOW SCORE, HIGH RESTRICTION
1st filter) Accuracy
We would determine whether E is used with D in the really intensity modifying sense.
+ give penalty to common adverbs
2nd filter) Restrictiveness
Determine how restrictively D and E used.
'''
start_time = time.time()
print("Restriction scoring ...")
accuracy_scoreboard = []
accuracy_score(accuracy_scoreboard, elements, common_score)
scoreboard = []
restrictive_score(scoreboard, accuracy_scoreboard, elements, dataset)
print("Restriction scoring complete, RUNTIME : %.3f" % (time.time()-start_time))
print()
if visualize:
cnt = 0
for i in scoreboard:
cnt += 1
print(cnt , i)
# Open the csv file, and make writer
print("Generating Output...")
f = open('CS372_HW2_output_20170490.csv', 'w', encoding='utf-8', newline='')
csvwriter = csv.writer(f)
for i in range(100):
row = scoreboard[i][:2]
csvwriter.writerow(row)
print("Output is generated")
# Write and Close the csv file
f.close()
print("Program terminated, RUNTIME : %.3f" % (time.time()-total_start_time))
main()
|
# Generated by Django 3.1.1 on 2020-09-29 14:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fichero_alumnos', '0010_auto_20200929_1115'),
]
operations = [
migrations.AlterField(
model_name='alumno',
name='deuda',
field=models.CharField(blank=True, default='', max_length=20, null=True),
),
]
|
import numpy as np
from easydict import EasyDict as edict
cfg = edict()
cfg.test_ratio=0.1
cfg.train_list = ["training.txt"]
cfg.test_list = "validation.txt"
cfg.data_dir='crop'
|
from urllib.parse import urlencode, quote_plus, unquote
from urllib.request import urlopen, Request
import dateutil.parser
import xmltodict
from rest_framework.response import Response
from rest_framework.views import APIView
from config.settings import config
from .models import Content
decode_key = unquote(config['API']['API_key'])
global_url = 'http://www.culture.go.kr/openapi/rest/publicperformancedisplays/'
# 상세 정보 저장 함수
def detail_get(seq):
url = global_url + 'd/'
queryParams = '?' + urlencode({quote_plus('ServiceKey'): decode_key,
quote_plus('seq'): seq})
url_query = Request(url + queryParams)
url_query.get_method = lambda: 'GET'
response_body = urlopen(url_query).read()
data = xmltodict.parse(response_body)
# 상세 정보 DB 저장
item_path = data['response']['msgBody']['perforInfo']
price = item_path['price']
content = item_path['contents1']
ticket_url = item_path['url']
phone = item_path['phone']
place_url = item_path['placeUrl']
place_addr = item_path['placeAddr']
place_seq = item_path['placeSeq']
Content.objects.filter(seq=seq).update(
ticket_url=ticket_url,
phone=phone,
price=price,
content=content,
place_url=place_url,
place_addr=place_addr,
place_seq=place_seq,
)
return Response(data)
# xml을 parser 후 db 저장
def xml_parser_db_save(request):
request.get_method = lambda: 'GET'
response_body = urlopen(request).read()
data = xmltodict.parse(response_body)
try:
item_path = data['response']['msgBody']['perforList']
for index, item in enumerate(item_path):
item_path_index = item_path[index]
seq = item_path_index['seq']
title = item_path_index['title']
place = item_path_index['place']
start_date = item_path_index['startDate']
start_date_parse = dateutil.parser.parse(start_date).date()
end_date = item_path_index['endDate']
end_date_parse = dateutil.parser.parse(end_date).date()
realm_name = item_path_index['realmName']
area = item_path_index['area']
thumbnail = item_path_index['thumbnail']
Content.objects.get_or_create(
seq=seq,
title=title,
place=place,
start_date=start_date_parse,
end_date=end_date_parse,
realm_name=realm_name,
area=area,
thumbnail=thumbnail,
)
detail_get(seq)
return data
except KeyError:
error_message = "유효하지 않은 파라미터 혹은 파라미터 값입니다"
return error_message
# 지역을 검색시 동작
class Area(APIView):
def get(self, request):
search = request.GET.get('search')
rows = request.GET.get('rows', default=10)
url = global_url + 'area'
queryParams = '?' + urlencode({quote_plus('ServiceKey'): decode_key,
quote_plus('sido'): search,
quote_plus('gugun'): '',
quote_plus('rows'): rows})
"""
sido = 지역 (서울, 대구, 등..)
gugun = 구/군 (Null)으로 하면 결과가 더 잘나옴.
"""
url_query = Request(url + queryParams)
data = xml_parser_db_save(url_query)
return Response(data)
# 분야별로 검색시 동작
class Genre(APIView):
def get(self, request):
code = request.GET.get('search')
print(code)
rows = request.GET.get('rows', default=10)
url = global_url + 'realm'
queryParams = '?' + urlencode({quote_plus('ServiceKey'): decode_key,
quote_plus('realmCode'): code,
quote_plus('rows'): rows})
"""
A = 연극
B = 음악 국악 콘서트
C = 무용
D = 미술
"""
url_query = Request(url + queryParams)
data = xml_parser_db_save(url_query)
return Response(data)
# 기간별 검색시 동작
# 검색 조건과 결과가 공공데이터에서 오는 값들도 불분명해서 일단 주석처리합니다
# class Period(APIView):
# def get(self, request):
# start = request.GET.get('start')
# end = request.GET.get('end')
# rows = request.GET.get('rows', default=10)
# url = global_url + 'period'
# queryParams = '?' + urlencode({quote_plus('ServiceKey'): decode_key,
# quote_plus('from'): start,
# quote_plus('to'): end,
# quote_plus('rows'): rows})
# """
# from = 공연 시작일
# to = 공연 종료일
# """
# url_query = Request(url + queryParams)
# data = xml_parser_db_save(url_query)
# return Response(data)
|
BASE_QUANTITY = 48
SUGAR_CUPS = 1.5
BUTTER_CUPS = 1
FLOUR_CUPS = 2.75
num_cookies = int(input("Enter the number of cookies you want to make: "))
sugar_needed = num_cookies * SUGAR_CUPS / BASE_QUANTITY
butter_needed = num_cookies * BUTTER_CUPS / BASE_QUANTITY
flour_needed = num_cookies * FLOUR_CUPS / BASE_QUANTITY
print("Cups of sugar needed:", format(sugar_needed, ".2f"))
print("Cups of butter needed:", format(butter_needed, ".2f"))
print("Cups of flour needed:", format(flour_needed, ".2f"))
|
import re
from .stringbuilder import StringBuilder
OPTION_REGEX = re.compile(' -[a-zA-Z]+')
FLAGS_REGEX = re.compile('-')
SPACE_REGEX = re.compile(' ')
EMPTY_REGEX = re.compile('')
NUMERIC_REGEX = re.compile('\d+(?:\.\d+)?')
#replace occurances of a substring from the back
def rreplace(s, old, new, occurrence):
li = s.rsplit(old, occurrence)
return new.join(li)
def isNaN(value):
return not NUMERIC_REGEX.fullmatch(value)
#efficiently build a string of variable length when multiple members are found following a query
def buildMultiMatchString(command_prefix,command,mem,member):
strBuilder = StringBuilder(f'Found {len(mem)} possible matches for "{member}":```')
for index,memMatch in enumerate(mem):
strBuilder.append(f'\n{index+1}. {memMatch}')
strBuilder.append(f'```')
if len(mem)==5:
strBuilder.append(f'\n(number of matches shown is capped at 5, there may or may not be more)')
strBuilder.append(f'\nTry using the {command_prefix}{command} command again with a more specific search term!')
return strBuilder.to_string()
#split arguments into seperate (-x) flags where x can be any letter or sequence of letters
#each flag will be seperated even if given as a single string and returned as a list
#non-flag arguments are returned in a list in the order they appear, seperated by flag occurances
#returns values in the form: [[arg0,arg1,arg2,...],[flag0,flag1,...]]
#positional flags and non-positional flags should not be mixed in the same syntax; only accept one per command
def splitArgs(input):
if(not re.search(OPTION_REGEX, input)):
return [input]
spaceSplit = re.split(SPACE_REGEX, input)
optionSplitArgs = OPTION_REGEX.split(input)
optionSplitArgs = [arg.strip() for arg in optionSplitArgs]
rawFlags = list(filter(FLAGS_REGEX.match, spaceSplit))
joinedFlags = ''.join(rawFlags)
rawFlags = re.split(FLAGS_REGEX, joinedFlags)
joinedFlags = ''.join(rawFlags)
flags = re.split(EMPTY_REGEX, joinedFlags)
flags.pop() # remove tailing empty string
flags.pop(0) # remove leading empty string
flags = [flag.lower() for flag in flags]
return [optionSplitArgs, flags] |
from django.contrib import admin
from enrol.models import Enrol,Pay
# Register your models here.
class EnrolAdmin(admin.ModelAdmin):
list_display = ('student','course','enroldate')
class PayAdmin(admin.ModelAdmin):
list_display = ('pnumber','paymethod')
admin.site.register(Enrol, EnrolAdmin)
admin.site.register(Pay, PayAdmin)
|
"""
Copyright (c) 2017 Columbia University.
Network Security Lab, Columbia University, New York, NY, USA
This file is part of HVLearn Project, https://github.com/HVLearn/.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pythondfa import PythonDFA as DFA
from sys import argv
from itertools import product
def parse_alphabet(filename):
"""
Parse an alphabet file
"""
with open(filename, 'r') as f:
return [x.rstrip() for x in f.readlines()]
def save_learnlib_dfa(dfa, filename, alphabet):
"""
"""
num_states = len(dfa.states)
alphabet_size = len(alphabet)
is_final_array = []
for state in dfa:
is_final_array.append(str(int(state.final)))
with open(filename, 'w') as f:
f.write('{} {}\n'.format(num_states, alphabet_size))
f.write(' '.join(is_final_array) + "\n")
for state in dfa:
state_map = { dfa.isyms.find(arc.ilabel) : arc.nextstate for arc in state.arcs }
target_state = []
for symbol in alphabet:
target_state.append(str(state_map[symbol]))
f.write(' '.join(target_state) + "\n")
def load_learnlib_dfa(filename, alphabet):
"""
Create a python DFA from a learnlib dfa file format. The format is as
follows:
[num of states] [alphabet size]
[0/1 array denoting if each state is accepting/rejecting]
[transitions for each state]
"""
dfa = DFA(alphabet)
with open(filename, 'r') as f:
for counter, line in enumerate(f.readlines()):
s = line.rstrip().split()
# read file
if counter == 0:
num_states = int(s[0])
alphabet_size = int(s[1])
elif counter == 1:
is_final_array = [bool(int(b)) for b in s]
else:
cur_state_id = counter - 2
for i, dst in enumerate(s):
dfa.add_arc(cur_state_id, int(dst), alphabet[i])
dfa[cur_state_id].final = is_final_array[cur_state_id]
return dfa
def main(argc, argv):
if argc < 4:
print 'Usage: {} [alphabet file] [dfa_1] ... [dfa_n]'.format(argv[0])
return
dfa_list = []
alphabet = parse_alphabet(argv[1])
for filename in argv[2:]:
dfa_list.append(load_learnlib_dfa(filename, alphabet))
dfa_inter = dfa_list[0]
for dfa in dfa_list[1:]:
dfa_inter.intersect(dfa)
dfa.minimize()
save_learnlib_dfa(dfa, 'dfa_spec.txt', alphabet)
if __name__ == '__main__':
main(len(argv), argv)
|
# -*- coding: utf-8 -*-
"""
pyB64Pic
~~~~~~~~
Powered by AnClark
A simple toolkit to deal with image used on the Web among files, byte streams, and Base64 strings.
Also, it can support convert image into a DATA URL. Data URL is popular since HTML5 was born.
:copyright: (c) 2017 by AnClark Liu @ Huazhong University of Science and Technology
:license: MIT License
Copyright (c) 2017 AnClark Liu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import base64, os
from sys import argv
SCRIPT_ROOT = os.path.split(os.path.realpath(__file__))[0]
"""
=============== PRIVATE METHODS ===============
"""
def _load_stream(filename):
return open(filename, 'rb').read()
def _save_stream(stream, saveto):
return open(saveto, 'wb').write(stream)
"""
================ RELEASED FEATURES =================
"""
def img_file_to_b64str(filename, urlsafe=False):
"""
Open an image file, and convert it into Base64 string.
This can help you store your image in a simple text blob of your SQL databases.
:param filename: The file you want to convert
:param urlsafe: Trigger using URL-Safe format. You'd better set it true if you want to post these string to an HTML page.
:return: Converted string.
"""
# Open image file as byte
file_byte = open(filename, 'rb').read()
if urlsafe:
file_base64 = base64.urlsafe_b64encode(file_byte)
else:
file_base64 = base64.standard_b64encode(file_byte)
file_base64_string = file_base64.decode('utf-8')
return file_base64_string
def b64str_to_img_file(src, saveto, urlsafe=False):
"""
Re-generate image file from any Base64 string generated by img_to_base64_str(), and save it to disk.
:param src: The string you want to decode.
:param saveto: Specify the path of generated file to save to.
:param urlsafe: Trigger using URL-Safe format. It must be consistent with your choice when first generating this string.
:return: An integer of the size of the picture
"""
file_base64 = src.encode('utf-8')
if urlsafe:
file_byte = base64.urlsafe_b64decode(file_base64)
else:
file_byte = base64.standard_b64encode(file_base64)
OUT_FILE = open(saveto, 'wb')
return OUT_FILE.write(file_byte)
def img_stream_to_b64str(stream, urlsafe=False):
"""
Convert a byte stream of image file to Base64 encoded string.
:param stream: The byte stream of the file.
:param urlsafe: Trigger using URL-Safe format.
:return: Encoded Base64 string.
"""
if urlsafe:
stream_base64 = base64.urlsafe_b64encode(stream)
else:
stream_base64 = base64.standard_b64encode(stream)
stream_base64_string = stream_base64.decode('utf-8')
return stream_base64_string
def b64str_to_img_stream(src, urlsafe=False):
"""
Decode a Base64 string of image to the image file's byte stream.
:param src: The Base64 string you want to decode.
:param urlsafe: Trigger using URL-Safe format. Must be consistent with what you generate with.
:return: Decoded byte stream of image.
"""
stream_base64 = src.encode('utf-8')
if urlsafe:
stream = base64.urlsafe_b64decode(stream_base64)
else:
stream = base64.standard_b64decode(stream_base64)
return stream
def getWebImgType_file(filename):
"""
Get image file type of an image stored in disk. Can be used to verify if an image is available for web.
:param filename: The file you want to verify.
:return: Image file type. If image is invalid, returns False.
"""
data = open(filename,'rb').read(11)
# JPG
if data[:4] == b'\xff\xd8\xff\xe0' and data[6:11] == b'JFIF\x00':
return 'JPEG'
# PNG
if data[:6] == b'\x89PNG\r\n':
return 'PNG'
# GIF
if data[:3] == b'\x47\x49\x46\x38':
return 'GIF'
# Format not recognised
return False
def getWebImgType_stream(stream):
"""
Get image file type of an image stream. Can be used to verify if an image is available for web.
:param stream: The file you want to verify.
:return: Image file type. If image is invalid, returns False.
"""
data = stream[:11]
# JPG
if data[:4] == b'\xff\xd8\xff\xe0' and data[6:11] == b'JFIF\x00':
return 'jpeg'
# PNG
if data[:6] == b'\x89PNG\r\n':
return 'png'
# GIF
if data[:3] == b'\x47\x49\x46\x38':
return 'gif'
# Format not recognised
return False
def img_stream_to_b64_dataurl(stream):
"""
Convert a byte stream of an image file to DATA URL, which can be directly used in HTML and CSS.
This is most recommended in my module.
:param stream: The image file's byte stream.
:return: A fresh standard Data URL.
"""
# Check image file type
filetype = getWebImgType_stream(stream)
if not filetype:
return False
# Start convert
b64str = img_stream_to_b64str(stream, urlsafe=False)
b64url = "data:image/%s;base64,%s" % (filetype, b64str)
return b64url
if __name__ == "__main__":
ss = _load_stream(argv[1])
u = img_stream_to_b64_dataurl(ss)
print(u)
|
from django.db import models
from django.contrib import admin
from apps.tenants.models import *
from apps.users.models import User
class Anken_Karte_DispConfig(TenantBaseModel):
"""
案件カルテ表示設定のテーブル
[更新説明]
・案件カルテ画面で、各カードの位置情報を変更した際に更新する。
・user_idでDeleteした後、Insertを行う。
"""
class Meta:
db_table = 'ds_anken_karte_dispconfig'
verbose_name_plural = '案件カルテ表示設定(ds_anken_karte_dispconfig)'
unique_together = ('tenant', 'user', 'dispno')
# フィールド定義(制御用)
id = models.AutoField(verbose_name='ID', primary_key=True)
created_at = models.DateTimeField(verbose_name='作成日時', auto_now_add=True)
updated_at = models.DateTimeField(verbose_name='更新日時', auto_now=True)
lock_id = models.IntegerField(verbose_name='LOCK ID', default=0)
# フィールド定義(データ格納用)
user = models.ForeignKey(User, verbose_name='ユーザー管理ID', null=True, on_delete=models.CASCADE)
user_name = models.CharField(verbose_name='ユーザー名', max_length=100, blank=True, null=True)
dispno = models.SmallIntegerField(verbose_name='表示No')
item_id = models.IntegerField(verbose_name='項目ID')
item_name = models.CharField(verbose_name='項目名称', max_length=200, blank=True, null=True)
def __str__(self):
return 'ID:{0},USER_NAME:{1},DISPNO:{2},ITEM_ID:{3},ITEM_NAME:{3}' \
.format(self.id, self.user_name, self.dispno, self.item_id, self.item_name)
# 管理者用画面での編集内容
class Anken_Karte_DispConfig_Admin(admin.ModelAdmin):
list_display = ('id', 'tenant_id', 'user_name', 'dispno', 'item_id', 'item_name', 'updated_at')
ordering = ('tenant_id', 'id')
readonly_fields = ('id', 'created_at', 'updated_at')
list_filter = ('tenant_id',)
search_fields = ('user_name', 'item_name')
# 管理者画面に表示
admin.site.register(Anken_Karte_DispConfig, Anken_Karte_DispConfig_Admin)
|
from src.cryptography import encrypt, decrypt, address, messages
from src.check import operations, connect_reply, command_reply, download_reply, encryption_reply, OK
from hashlib import sha256
import time
import sqlite3 as sql
import ConfigParser
import getpass
import socket
import os
import requests
import os.path
def tree_ssh(sender,receiver,timestamp,additional1,additional2,additional3,data,tx_hash,signature):
try:
con = sql.connect("info.db")
con.row_factory = sql.Row
con.text_factory = str
cur = con.cursor()
except:
pass
try:
if additional1 == "CONNECT":
if additional2 != "None":
return
cur.execute('SELECT * FROM users WHERE identifier=?', (sender,))
result = cur.fetchall()
if len(result) == 1:
EncryptionKey = result[0]["EncryptionKey"]
else:
return
data = decrypt.decryptWithRSAKey(EncryptionKey,data)
if data == False:
return
if data != "None":
return
try:
config = ConfigParser.RawConfigParser()
config.read("treesshc")
allowed_users_setting = config.get('Configuration', 'AllowedUsers')
allowed_users = allowed_users_setting.split(",")
except:
allowed_users = []
if sender not in allowed_users:
return
cur.execute('SELECT * FROM connected_to_us')
result = cur.fetchall()
if len(result) == 0:
time_connected = str(int(time.time()))
cur.execute('INSERT INTO connected_to_us (user_connected,time_connected) VALUES (?,?)', (sender,time_connected))
con.commit()
user = getpass.getuser()
hostname = socket.gethostname()
command_line = user + "@" + hostname + ":~$"
connect_reply.send_reply(sender,command_line)
return True
else:
OK.send_OK(sender)
elif additional1 == "CONNECT-REPLY":
cur.execute('SELECT * FROM now_connected WHERE connected_to=?', (sender,))
result = cur.fetchall()
if len(result) == 0:
return
cur.execute('SELECT * FROM users WHERE identifier=?', (sender,))
result = cur.fetchall()
if len(result) == 1:
EncryptionKey = result[0]["EncryptionKey"]
else:
return
data = decrypt.decryptWithRSAKey(EncryptionKey,data)
if data == False:
return
cur.execute('UPDATE now_connected SET command_line=? WHERE connected_to=?', (data,sender))
con.commit()
elif additional1 == "DISCONNECT":
if additional2 != "None":
return
cur.execute('SELECT * FROM users WHERE identifier=?', (sender,))
result = cur.fetchall()
if len(result) == 1:
EncryptionKey = result[0]["EncryptionKey"]
else:
return
data = decrypt.decryptWithRSAKey(EncryptionKey,data)
if data == False:
return
if data != "None":
return
cur.execute('SELECT * FROM connected_to_us WHERE user_connected=?', (sender,))
result = cur.fetchall()
if len(result) == 1:
cur.execute('DELETE FROM connected_to_us')
con.commit()
return True
cur.execute('SELECT * FROM now_connected WHERE connected_to=?', (sender,))
result = cur.fetchall()
if len(result) == 1:
cur.execute('DELETE FROM now_connected')
con.commit()
return True
elif additional1 == "COMMAND":
if len(additional2) != 64:
return
cur.execute('SELECT * FROM connected_to_us WHERE user_connected=?', (sender,))
result = cur.fetchall()
if len(result) == 0:
return
cur.execute('SELECT * FROM users WHERE identifier=?', (sender,))
result = cur.fetchall()
if len(result) == 1:
EncryptionKey = result[0]["EncryptionKey"]
else:
return
data = decrypt.decryptWithRSAKey(EncryptionKey,data)
if data == False:
return
while data[0] == " ":
data = data[1:]
return_data = requests.get("http://127.0.0.1:10001/active_directory/"+sender)
path = return_data.content
return_data = requests.get("http://127.0.0.1:10001/active_directory")
starting_folder = return_data.content
try:
if data == "cd":
current_path = "/"
while path != current_path:
current_path = path
path = os.path.abspath(os.path.join(path, os.pardir))
requests.post("http://127.0.0.1:10001/active_directory/"+sender+"/change", data=path)
result = "Directory changed."
elif data == "cd .." or data == "cd..":
path = os.path.abspath(os.path.join(path, os.pardir))
requests.post("http://127.0.0.1:10001/active_directory/"+sender+"/change", data=path)
result = "Directory changed."
elif "cd" in data:
details = data.split(" ")
directory = details[1]
if os.path.isdir(os.path.join(path,"",directory)) == True:
path = os.path.join(path,"",directory)
requests.post("http://127.0.0.1:10001/active_directory/"+sender+"/change", data=path)
result = "Directory changed."
else:
result = "Directory doesn't exist."
elif data == "ls" or data == "dir":
if path != starting_folder:
result = os.popen(data + " " + path).read()
else:
result = os.popen(data).read()
result = result[0:-1]
else:
result = os.popen(data).read()
result = result[0:-1]
except:
result = "Command not found."
command_reply.send_reply(sender,additional2,result)
return True
elif additional1 == "COMMAND-REPLY":
if len(additional2) != 64:
return
cur.execute('SELECT * FROM commands WHERE sender=? AND unique_id=? AND response=?', (sender,additional2,"None"))
result = cur.fetchall()
if len(result) == 0:
return
cur.execute('SELECT * FROM users WHERE identifier=?', (sender,))
result = cur.fetchall()
if len(result) == 1:
EncryptionKey = result[0]["EncryptionKey"]
else:
return
data = decrypt.decryptWithRSAKey(EncryptionKey,data)
if data == False:
return
cur.execute('UPDATE commands SET response=? WHERE sender=? AND unique_id=?', (data,sender,additional2))
con.commit()
OK.send_OK(sender)
return True
elif additional1 == "UPLOAD":
cur.execute('SELECT * FROM connected_to_us WHERE user_connected=?', (sender,))
result = cur.fetchall()
if len(result) == 0:
return
cur.execute('SELECT * FROM users WHERE identifier=?', (sender,))
result = cur.fetchall()
if len(result) == 1:
EncryptionKey = result[0]["EncryptionKey"]
else:
return
try:
details = additional2.split("|")
filename = details[0]
original_filename = filename
filename_details = details[1]
except:
return
filename = decrypt.decryptWithRSAKey(EncryptionKey,filename)
original_filename = filename
if filename == False:
return
filename_details = decrypt.decryptWithRSAKey(EncryptionKey,filename_details)
if filename_details == False:
return
try:
filename_details_details = filename_details.split("/")
current_part = filename_details_details[0]
total_parts = filename_details_details[1]
except:
return
try:
current_part = str(int(current_part))
total_parts = str(int(total_parts))
except:
return
if int(current_part) <= 0 or int(current_part) > int(total_parts) or int(total_parts) <= 0:
return
data = decrypt.decryptWithRSAKey(EncryptionKey,data)
if data == False:
return
return_data = requests.get("http://127.0.0.1:10001/active_directory/"+sender)
path = return_data.content
if os.path.isfile(os.path.join(path,"",filename + "_" + current_part)) == True:
os.remove(os.path.join(path,"",filename + "_" + current_part))
with open(os.path.join(path,"",filename + "_" + current_part), "wb") as dest:
dest.write(data)
completed_file = True
for i in range(1,int(total_parts)+1):
if os.path.isfile(os.path.join(path,"",filename + "_" + str(i))) == False:
completed_file = False
break
if completed_file == True:
file_already_exists = False
if os.path.isfile(os.path.join(path,"",filename)) == True:
file_already_exists = True
if file_already_exists == True:
starting = 1
found = False
while found == False:
os.path.join(path,"","(" + str(starting) + ")" + filename)
if os.path.isfile("(" + str(starting) + ")" + filename) == False:
filename = "(" + str(starting) + ")" + filename
found = True
starting += 1
final_file = open(os.path.join(path,"",filename), "wb")
for i in range(1,int(total_parts)+1):
with open(os.path.join(path,"",original_filename + "_" + str(i)), 'r') as current_file:
content = current_file.read()
final_file.write(content)
os.remove(os.path.join(path,"",original_filename + "_" + str(i)))
final_file.close()
OK.send_OK(sender)
elif additional1 == "DOWNLOAD":
if len(additional2) != 64:
return
cur.execute('SELECT * FROM connected_to_us WHERE user_connected=?', (sender,))
result = cur.fetchall()
if len(result) == 0:
return
cur.execute('SELECT * FROM users WHERE identifier=?', (sender,))
result = cur.fetchall()
if len(result) == 1:
EncryptionKey = result[0]["EncryptionKey"]
else:
return
filename = decrypt.decryptWithRSAKey(EncryptionKey,data)
if filename == False:
return
download_reply.send_file(sender,additional2,filename)
elif additional1 == "DOWNLOAD-REPLY":
cur.execute('SELECT * FROM users WHERE identifier=?', (sender,))
result = cur.fetchall()
if len(result) == 1:
EncryptionKey = result[0]["EncryptionKey"]
else:
return
additional2_details = additional2.split("|")
unique_id = additional2_details[0]
cur.execute('SELECT * FROM downloads WHERE sender=? AND unique_id=?', (sender,unique_id))
result = cur.fetchall()
if len(result) == 0:
return
filename = additional2_details[1]
filename_details = additional2_details[2]
filename = decrypt.decryptWithRSAKey(EncryptionKey,filename)
if filename == False:
return
original_filename = filename
cur.execute('SELECT * FROM downloads WHERE sender=? AND unique_id=? AND filename=?', (sender,unique_id,filename))
result = cur.fetchall()
if len(result) == 0:
return
filename_details = decrypt.decryptWithRSAKey(EncryptionKey,filename_details)
if filename_details == False:
return
try:
filename_details_details = filename_details.split("/")
current_part = filename_details_details[0]
total_parts = filename_details_details[1]
except:
return
try:
current_part = str(int(current_part))
total_parts = str(int(total_parts))
except:
return
if int(current_part) <= 0 or int(current_part) > int(total_parts) or int(total_parts) <= 0:
return
data = decrypt.decryptWithRSAKey(EncryptionKey,data)
if data == False:
return
if os.path.isfile(os.path.join("downloads","",filename + "_" + current_part)) == True:
os.remove(os.path.join("downloads","",filename + "_" + current_part))
with open(os.path.join("downloads","",filename + "_" + current_part), "wb") as dest:
dest.write(data)
completed_file = True
for i in range(1,int(total_parts)+1):
if os.path.isfile(os.path.join("downloads","",filename + "_" + str(i))) == False:
completed_file = False
break
if completed_file == True:
file_already_exists = False
if os.path.isfile(os.path.join("downloads","",filename)) == True:
file_already_exists = True
if file_already_exists == True:
starting = 1
found = False
while found == False:
if os.path.isfile(os.path.join("downloads","","(" + str(starting) + ")" + filename)) == False:
filename = "(" + str(starting) + ")" + filename
found = True
starting += 1
final_file = open(os.path.join("downloads","",filename), "wb")
for i in range(1,int(total_parts)+1):
with open(os.path.join("downloads","",filename + "_" + str(i)), 'r') as current_file:
content = current_file.read()
final_file.write(content)
os.remove(os.path.join("downloads","",filename + "_" + str(i)))
final_file.close()
cur.execute('DELETE FROM downloads WHERE sender=? AND unique_id=? AND filename=?', (sender,unique_id,filename))
con.commit()
OK.send_OK(sender)
elif additional1 == "ENCRYPT":
try:
config = ConfigParser.RawConfigParser()
config.read("treesshc")
allowed_users_setting = config.get('Configuration', 'AllowedUsers')
allowed_users = allowed_users_setting.split(",")
except:
allowed_users = []
if sender not in allowed_users:
return
if additional2 != "None":
return
cur.execute('SELECT * FROM users WHERE identifier=?', (sender,))
result = cur.fetchall()
if len(result) == 0:
EncryptionKey = decrypt.decryptfromPubKey(data)
if EncryptionKey == False:
return
try:
testEncryptionKey = EncryptionKey.decode("hex")
except:
return
result = encrypt.encryptWithRSAKey(EncryptionKey,"test")
if result == False:
return
test_result = decrypt.decryptWithRSAKey(EncryptionKey,result)
if test_result == False:
return
if test_result != "test":
return
time_created = str(int(time.time()))
cur.execute('INSERT INTO users (identifier,EncryptionKey,NewEncryptionKey,time_generated,encryption) VALUES (?,?,?,?,?)', (sender,EncryptionKey,EncryptionKey,time_created,"INCOMING"))
con.commit()
result = encryption_reply.send_reply(sender,EncryptionKey)
if result == True:
return True
elif len(result) == 1:
time_generated = result[0]["time_generated"]
encryption_type = result[0]["encryption"]
if encryption_type == "INCOMING":
if time.time() - float(time_generated) > 600:
EncryptionKey = decrypt.decryptfromPubKey(data)
if EncryptionKey == False:
return
try:
testEncryptionKey = EncryptionKey.decode("hex")
except:
return
Result = encrypt.encryptWithRSAKey(EncryptionKey,"test")
if Result == False:
return
test_result = decrypt.decryptWithRSAKey(EncryptionKey,Result)
if test_result == False:
return
if test_result != "test":
return
oldEncryptionKey = result[0]["EncryptionKey"]
time_created = str(int(time.time()))
cur.execute('UPDATE users SET EncryptionKey=?,NewEncryptionKey=?,time_generated=? WHERE identifier=?', (EncryptionKey,oldEncryptionKey,time_created,sender))
con.commit()
result = encryption_reply.send_reply(sender,EncryptionKey)
if result == True:
return True
else:
return
elif additional1 == "ENCRYPT-REPLY":
if additional2 != "None":
return
cur.execute('SELECT * FROM users WHERE identifier=?', (sender,))
result = cur.fetchall()
if len(result) == 1:
EncryptionKey = result[0]["NewEncryptionKey"]
encryption = result[0]["encryption"]
else:
return
if encryption != "OUTGOING":
return
data = decrypt.decryptWithRSAKey(EncryptionKey,data)
if data == False:
return
if data == EncryptionKey:
cur.execute('UPDATE users SET EncryptionKey=? WHERE identifier=?', (data,sender))
con.commit()
OK.send_OK(sender)
elif additional1 == "OK":
requests.get("http://127.0.0.1:10001/received/"+sender+"/OK")
return True
else:
return
except:
return
finally:
try:
con.close()
except:
pass
def constructor(payload):
details = payload.split(",")
operation = details[0]
sender = details[1]
receiver = details[2]
timestamp = details[3]
additional1 = details[4]
additional2 = details[5]
additional3 = details[6]
data = details[7]
tx_hash = details[8]
signature = details[9]
result = tree_ssh(sender,receiver,timestamp,additional1,additional2,additional3,data,tx_hash,signature)
return result
|
import os
import json
from click.testing import CliRunner
from flask_jsondash import model_factories
from flask_jsondash.settings import CHARTS_CONFIG
from conftest import read
_db = model_factories.adapter
def test_get_random_group():
conf_vals = CHARTS_CONFIG.values()
data = model_factories.get_random_group()
assert isinstance(data, dict)
assert 'charts' in data
assert data in conf_vals
def test_get_random_chart():
chart = model_factories.get_random_group()
data = model_factories.get_random_chart(chart)
assert isinstance(data, tuple)
def test_make_fake_dashboard():
fdash = model_factories.make_fake_dashboard(name='Foo', max_charts=4)
assert isinstance(fdash, dict)
assert fdash.get('name') == 'Foo'
def test_make_fake_chart_data():
chartdata = model_factories.make_fake_chart_data(name='Foo')
chartconfig = json.loads(chartdata[1])
assert isinstance(chartdata, tuple)
assert isinstance(chartconfig, dict)
assert chartconfig.get('name') == 'Foo'
def test_insert_dashboards(monkeypatch):
records = []
runner = CliRunner()
args = ['--max-charts', 5, '--records', 5]
monkeypatch.setattr(_db, 'create', lambda *a, **kw: records.append(a))
result = runner.invoke(model_factories.insert_dashboards, args)
assert result.exit_code == 0
assert len(records) == 5
def test_delete_all(monkeypatch):
monkeypatch.setattr(_db, 'delete_all', lambda *a, **kw: [])
assert model_factories.delete_all() is None
def test_load_fixtures(monkeypatch):
records = []
runner = CliRunner()
args = ['--fixtures', 'example_app/examples/config']
monkeypatch.setattr(_db, 'create', lambda *a, **kw: records.append(a))
result = runner.invoke(model_factories.insert_dashboards, args)
assert result.exit_code == 0
assert len(records) == 19 # Changed as new examples are added.
def test_dump_fixtures_empty(monkeypatch, tmpdir):
records = []
monkeypatch.setattr(_db, 'read', lambda *args, **kwargs: records)
runner = CliRunner()
tmp = tmpdir.mkdir('dumped_fixtures_test')
args = ['--dump', tmp.strpath]
result = runner.invoke(model_factories.insert_dashboards, args)
assert 'Nothing to dump.' in result.output
assert result.exit_code == 0
assert len(os.listdir(tmp.strpath)) == len(records)
def test_dump_fixtures(monkeypatch, tmpdir):
records = [
model_factories.make_fake_dashboard(name=i, max_charts=1)
for i in range(10)]
# Also ensure _id is popped off.
for r in records:
r.update(_id='foo')
monkeypatch.setattr(_db, 'read', lambda *args, **kwargs: records)
runner = CliRunner()
tmp = tmpdir.mkdir('dumped_fixtures_test')
args = ['--dump', tmp.strpath]
result = runner.invoke(model_factories.insert_dashboards, args)
assert 'Saving db as fixtures to:' in result.output
assert result.exit_code == 0
assert len(os.listdir(tmp.strpath)) == len(records)
def test_dump_fixtures_delete(monkeypatch, tmpdir):
records = [
model_factories.make_fake_dashboard(name=i, max_charts=1)
for i in range(10)]
def delete_all():
global records
records = []
monkeypatch.setattr(_db, 'read', lambda *args, **kwargs: records)
monkeypatch.setattr(_db, 'delete_all', lambda *a, **kw: [])
runner = CliRunner()
tmp = tmpdir.mkdir('dumped_fixtures_test')
args = ['--dump', tmp.strpath, '--delete']
result = runner.invoke(model_factories.insert_dashboards, args)
assert 'Saving db as fixtures to:' in result.output
assert result.exit_code == 0
assert len(os.listdir(tmp.strpath)) == 10
assert len(read()) == 0
def test_dump_fixtures_delete_bad_path_show_errors_no_exception(monkeypatch):
records = [
model_factories.make_fake_dashboard(name=i, max_charts=1)
for i in range(1)]
def delete_all():
global records
records = []
monkeypatch.setattr(_db, 'read', lambda *args, **kwargs: records)
monkeypatch.setattr(_db, 'delete_all', lambda *a, **kw: [])
runner = CliRunner()
args = ['--dump', '/fakepath/', '--delete']
result = runner.invoke(model_factories.insert_dashboards, args)
assert 'Saving db as fixtures to:' in result.output
assert result.exit_code == 0
assert len(read()) == 0
err_msg = "The following records could not be dumped: ['//fakepath/"
assert err_msg in result.output
def test_delete_all_cli(monkeypatch):
runner = CliRunner()
args = ['--delete']
monkeypatch.setattr(_db, 'delete_all', lambda *a, **kw: [])
assert model_factories.delete_all() is None
result = runner.invoke(model_factories.insert_dashboards, args)
assert 'Deleting all records!' in result.output
assert result.exit_code == 0
|
import string
import random
menu = ("1: Print List\n"
"2: Add To Head\n"
"3: Add To Tail\n"
"4: Remove From Head\n"
"5: Remove From Tail\n"
"6: Find Index of Value\n"
"7: Remove Node of Value\n"
"0: Exit\n")
def switch_menu(argument, listHead):
switcher = {
0: "break",
1: listHead.addToHead(random.randint(0,10000)),
2: print("test")
}
return switcher.get(argument, "nothing")
class LinkedList:
def __init__(self):
self.head = None
self.tail = None
def addToHead(self, value=None):
newNode = Node(value, self.head)
if self.head is not None:
self.head.prev = newNode
else:
self.tail = newNode
self.head = newNode
print(f"New node of value {value} added to the head of the list\n")
def addToTail(self, value=None):
newNode = Node(value, None, self.tail)
if self.tail is not None:
self.tail.next = newNode
else:
self.head = newNode
self.tail = newNode
print(f"New node of value {value} added to the tail of the list\n")
def removeHead(self):
if self.head is None:
return None
head = self.head
self.head = head.next
if self.head is not None:
self.head.prev = None
else:
self.tail = None
return head.value
def removeTail(self):
if self.tail is None:
return None
tail = self.tail
self.tail = tail.prev
if self.tail is not None:
self.tail.next = None
else:
self.head = None
return tail.value
def searchValue(self, value=None):
if self.head is not None:
node = self.head
else:
return None
while node is not None:
if node.value == value:
return node
node = node.next
return None
def removeNode(self, node=None):
if node is not None:
if node is self.head:
return self.removeHead
elif node is self.tail:
return self.removeTail
else:
node.prev.next = node.next
node.next.prev = node.prev
return node.value
return None
def indexOf(self, value=None):
if self.head is not None:
node = self.head
else:
return None
index = 0
while node is not None:
if node.value == value:
return index
node = node.next
index += 1
return None
def printList(self):
node = self.head
while node is not None:
print(node.value)
node = node.next
class Node:
def __init__(self, value=None, next=None, prev=None):
self.value = value
self.next = next
self.prev = prev
def start():
myList = LinkedList()
while True:
selection = input(f"\n{menu}\n\nPlease select an option: ")
if selection == "0":
break
elif selection == "1":
myList.printList()
elif selection == "2":
myList.addToHead(random.randint(0,10000))
elif selection == "3":
myList.addToTail(random.randint(0,10000))
elif selection == "4":
myList.removeHead()
elif selection == "5":
myList.removeTail()
elif selection == "6":
value = int(input("Please enter the value you would like to find an index for: "))
index = myList.indexOf(value)
if index is not None:
print(f'The index of {value} is {index}')
else:
print(f"The value {value} does not exist in the list")
elif selection == "7":
value = int(input("Please enter the value you would like to remove from the list: "))
myList.removeNode(myList.searchValue(value))
start() |
# -*- coding:utf-8 -*-
# 如何读物excel文件
# 使用pip安装。pip install xlrd xlwt
# 使用第三方库xlrd和xlwt,这两个库分别用于excel的读和写
import xlrd
book = xlrd.open_workbook('demo.xlsx')
book.sheets()
sheet = book.sheet_by_index(0) # 第一个sheet
print sheet.nrows # 行数
print sheet.ncols # 列数
cell = sheet.cell(0, 0)
print cell.ctype #类型
print cell.value #值
print sheet.row() #获取某一行
print sheet.row(1) #返回是一个列表
print sheet.row_values(1) #类似切片操作
print sheet.row_values(1,1) #类似切片操作,跳过第一个
print sheet.col_values()
print sheet.col()
#添加一个单元格
# sheet.put_cell(rowx, colx, ctype, value, xf_index)
|
#
# Copyright (c) 2010-2017 Fabric Software Inc. All rights reserved.
#
class Visibility(object):
public = 0
protected = 1
private = 2
|
import tkinter as tk
import mysql.connector
#Connects to the database
mydb = mysql.connector.connect(
host = "localhost",
user = "root",
passwd = "sD6G7Bx@f8cve$i3",
database = "forum"
)
#Allows editing of the database
mycursor = mydb.cursor()
#Displays the change password page UI
def displayUI(window, username):
window.refresh() #Clears the Window
window.displayNavbar(username)
#Adds the frame for the main section of the frame
mainPage = tk.Frame(window.frame)
mainPage.place(relwidth=1, relheight=0.925, relx=0, rely=0.075)
#Input fields and labels
changeLabel = tk.Label(mainPage, text="Change Password")
changeLabel.place(relwidth=0.9, relheight=0.04, relx=0.05, rely=0.05)
oldPasswordLabel = tk.Label(mainPage, text="Old Password:", anchor="nw", justify="left")
oldPasswordLabel.place(relwidth=0.3, relheight=0.04, relx=0.05, rely=0.15)
oldPassword = tk.Entry(mainPage)
oldPassword.place(relwidth=0.9, relheight=0.05, relx=0.05, rely=0.2)
newPassword1Label = tk.Label(mainPage, text="New Password:", anchor="nw", justify="left")
newPassword1Label.place(relwidth=0.3, relheight=0.04, relx=0.05, rely=0.35)
newPassword1 = tk.Entry(mainPage)
newPassword1.place(relwidth=0.9, relheight=0.05, relx=0.05, rely=0.4)
newPassword2Label = tk.Label(mainPage, text="Repeat New Password:", anchor="nw", justify="left")
newPassword2Label.place(relwidth=0.3, relheight=0.04, relx=0.05, rely=0.55)
newPassword2 = tk.Entry(mainPage)
newPassword2.place(relwidth=0.9, relheight=0.05, relx=0.05, rely=0.6)
#Adds the confirm button
#Lambda means that the function is run when the button is clicked not when the button is made
confirmBtn = tk.Button(mainPage,
text="Confirm",
command=lambda: changePassword(username, oldPassword.get(), newPassword1.get(), newPassword2.get(), window)
)
confirmBtn.place(relwidth=0.3, relheight=0.1, relx=0.35, rely=0.75)
def changePassword(username, p1, p2, p3, window):
# Here p1 refers to the current password of the user
# p2 and p3 refers to the new password
#Ensures all the fields have been completed
if p1 == "" or p2 == "" or p3 == "":
print("Complete all fields")
else:
#Fetches the password for the specified user
mycursor.execute("SELECT password FROM users WHERE username = %s", (username, ))
#Stores the password from the database
dbPassword = mycursor.fetchall()[0][0]
#Ensures it is the correct user that is changing the password
if dbPassword != p1:
print("Incorrect password")
else:
#Ensures the user does not make a typo while entering the new password
if p2 != p3:
print("Passwords do not match")
else:
#Updates the users password
mycursor.execute("UPDATE users SET password = %s WHERE username = %s",
(p2, username))
mydb.commit()
print("Success")
#Refreshes the window
displayUI(window, username)
#
|
from django import forms
from . models import Customer
from django.forms.widgets import PasswordInput
class SignupForm(forms.Form):
user_name = forms.CharField()
email = forms.EmailField()
password = forms.CharField(widget=forms.PasswordInput)
class SignupModelForm(forms.ModelForm):
class Meta:
model = Customer
fields = '__all__'
widgets = {'password':PasswordInput}
|
#문제: 두 정수 A와 B를 입력받은 다음, A+B를 출력하는 프로그램을 작성하시오.
#입력: 입력은 여러 개의 테스트 케이스로 이루어져 있다.각 테스트 케이스는 한 줄로 이루어져 있으며, 각 줄에 A와 B가 주어진다. (0 < A, B < 10)
#출력:각 테스트 케이스마다 A+B를 출력한다.
A,B=1,1
while A>0 and B<10:
try:
A,B=map(int,input().split())
print(A+B)
except:
break
#try는 밑에거를 바로 실행하고 뒤에 예외가 발생했을경우 except 실행 그렇기 때문에 무한루프 도는곳에서 오류가 나는것을 방지
|
# vague
import time
from requests_html import HTMLSession
session = HTMLSession()
r = session.get('http://monstersvault.com/')
rand_page=list(r.html.find('#random-article')[0].links)[0]
print(rand_page)
r = session.get(rand_page)
# TODO Find a way to check if the element exists without using try/except and use it for each individual field
while True:
print('==================================================================')
print(rand_page)
# some pages are inconsistent
page_title = 'n/a'
name = 'n/a'
author = 'n/a'
origin = 'n/a'
#rating_design = 'n/a'
#rating_originality = 'n/a'
#rating_survivability = 'n/a'
#rating_fearfactor = 'n/a'
#rating_vault = 'n/a'
#rating_prefer_nightmares = 'n/a'
#rating_burn_it = 'n/a'
#rating_keep_one = 'n/a'
#rating_love = 'n/a'
#rating_what = 'n/a'
rating_design = r.html.find('.number')[0].text
rating_originality = r.html.find('.number')[1].text
rating_survivability = r.html.find('.number')[2].text
rating_fearfactor = r.html.find('.number')[3].text
rating_vault = r.html.find('.number')[4].text
rating_prefer_nightmares = r.html.find('.number')[5].text
rating_burn_it = r.html.find('.number')[6].text
rating_keep_one = r.html.find('.number')[7].text
rating_love = r.html.find('.number')[8].text
rating_what = r.html.find('.number')[9].text
try:
name=r.html.find('.detail-item')[0].text
author=r.html.find('.detail-item')[1].text
origin=r.html.find('.detail-item')[2].text
page_title = r.html.find('.main-title')[0].text
except:
print(r.html.find('.detail-item'))
pass
print(page_title)
print(name)
print(author)
print(origin)
print(rating_design)
print(rating_originality)
print(rating_survivability)
print(rating_fearfactor)
print(rating_vault)
print(rating_prefer_nightmares)
print(rating_burn_it)
print(rating_keep_one)
print(rating_love)
print(rating_what)
rand_page=list(r.html.find('#random-article')[0].links)[0]
r = session.get(rand_page)
time.sleep(1)
|
#coding: utf-8
import select
import socket
import os
import Queue
# 回显服务器:
# 服务器:python ./server.py
# 客户端:telnet 127.0.0.1 8888
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
server.bind(("0.0.0.0", 8888))
server.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
server.listen(100)
# 连接进来的客户端
connections = {}
# 等待处理的action
wait_for_input = [server]
wait_for_output = []
wait_for_exception = []
# 消息队列,类似于管道
message_queues = {}
while True:
# 某个连接断开后,select 表明可读,但是读取到的字节数为0。
readable, wirteable, exception = select.select(wait_for_input, wait_for_output, wait_for_exception, 10)
# 读队列
for file_io in readable:
if file_io == server:
client, client_address = server.accept()
connections[client] = client_address
print(">>> new connection:", client_address)
wait_for_input.append(client)
wait_for_exception.append(client)
else:
data = file_io.recv(1024)
if data != "":
if file_io not in message_queues.keys():
message_queues[file_io] = Queue.Queue()
message_queues[file_io].put(data)
wait_for_output.append(file_io)
else:
print(">>> connection closed:", connections[file_io])
del connections[file_io]
if file_io in wait_for_input:
wait_for_input.remove(file_io)
if file_io in wait_for_output:
wait_for_output.remove(file_io)
if file_io in wait_for_exception:
wait_for_exception.remove(file_io)
if file_io in message_queues.keys():
del message_queues[file_io]
file_io.close()
# 写队列
for file_io in wirteable:
data = ""
if file_io not in message_queues:
pass
else:
data = message_queues[file_io].get_nowait()
file_io.send("server return: " + data)
wait_for_output.remove(file_io)
# exception队列
pass
# 处理老旧的连接
for file_io in connections:
pass
|
# Leetcode 740. Delete and Earn
# Time Complexity : O(n) where n is the largest number of the array
# Space Complexity : O(n)
# Did this code successfully run on Leetcode : Yes
# Any problem you faced while coding this : No
# Approach: Create a value array that holds the values of the nums array in a sorted order and has 0
# in other places. Populate the no case using the maximum value of previous row and yes case using the
# sum of current value and the no case of the previous row. Return the max of last row.
# Your code here along with comments explaining your approach
class Solution:
def deleteAndEarn(self, nums: List[int]) -> int:
if not nums:
return 0
val=[0]*(max(nums)+1)
for num in nums:
val[num] += num
dp = [[0,0] for i in range(len(val))]
for i in range(len(dp)):
dp[i][0]=max(dp[i-1][0],dp[i-1][1])
dp[i][1]= val[i]+dp[i-1][0]
return max(dp[len(dp)-1][0],dp[len(dp)-1][1]) |
# -*- coding: utf-8 -*-
#script containing all pertinent tasks to prepare for software termination.
#successful completion of this process at last runtime, will skip extra validation
#steps on next run
def shutdown(config,interinput,interoutput):
#check for lingering runtime errors
#finishing writing log queues to file
#if none: write clean.start file in config directory
pass
#perhaps add garbage collector control here?
|
# coding: utf-8
import numpy as np
from scipy.signal import get_window
from scipy.signal import fftconvolve
import audioproc as ap
# generate swept-sine signal
def gentsp(n=18):
N = 2 ** n
m = N // 4
SS = np.r_[
np.exp(-1.j * np.pi / N * np.arange(0, N // 2 + 1) ** 2),
np.exp(1.j * np.pi / N * (N - np.arange(N // 2 + 1, N)) ** 2)
]
upward = np.roll(np.fft.ifft(SS).real, m)
upward /= np.max(np.abs(upward))
downward = upward[::-1].copy()
return upward, downward
# Exponent of next higher power of 2
def nextpow2(n):
return int(np.ceil(np.log2(n)))
# The maximum absolute value
def absmax(x, axis=None):
return np.max(np.abs(x), axis=axis)
# The argument of the maximum absolute value
def argabsmax(x, axis=None):
return np.argmax(np.abs(x), axis=axis)
# The minimum absolute value
def absmin(x, axis=None):
return np.min(np.abs(x), axis=axis)
# The argument of the minimum absolute value
def argabsmin(x, axis=None):
return np.argmin(np.abs(x), axis=axis)
# Cross-correlation function
def fftxcorr(x, y):
# Rxy = E[x(n) y(n + m)]
fftpoint = 2 ** nextpow2(len(x) + len(y) - 1)
X = np.fft.rfft(x, n=fftpoint)
Y = np.fft.rfft(y, n=fftpoint)
c = np.fft.irfft(np.conj(X) * Y)
return c
# Auto-correlation function
def fftacorr(x):
fftpoint = 2 ** nextpow2(2 * len(x) - 1)
X = np.fft.rfft(x, n=fftpoint)
c = np.fft.irfft(np.conj(X) * X)
return c
# Cross-correlation function (returned with lag time)
def xcorr(x, y):
fftpoint = 2 ** nextpow2(len(x) + len(y) - 1)
X = np.fft.rfft(x[::-1], n=fftpoint)
Y = np.fft.rfft(y, n=fftpoint)
cf = np.fft.irfft(X * Y)[:len(x) + len(y) - 1]
n_axis = np.arange(len(x) + len(y) - 1) - len(x) + 1
return n_axis, cf
# L times upsample for fixed sample rate signal
def upsample(x, K, N, window='hamming'):
'''
Parameters
----------
K: int
The multiple of upsampling.
N: int
The number of taps of interpolation function.
The longer this number, the higher the accuracy,
but the higher the calculation load.
window : string or tuple of string and parameter values
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
'''
if type(K) != int:
print('Only integer multiples please.')
# upsample
x_upsamp = np.zeros((x.shape[0] - 1) * K + 1)
x_upsamp[::K] = x[:]
# LPF
n = np.arange(N) - (N - 1) / 2
w = get_window(window, N)
LPF = w * np.sinc(n / K)
y = fftconvolve(x_upsamp, LPF)
return y
# amplitude limiter
def limiter(signal, threshold, deepcopy=True):
if deepcopy:
s = np.copy(signal)
else:
s = signal
index = np.where(np.abs(s) > np.abs(threshold))
s[index] = np.sign(s[index]) * np.abs(threshold)
return s
# convolution using overlap-save method
# with low memory consumption for long input
def conv_lessmemory(longinput, fir, fftpoint, verbose=False):
len_input = longinput.shape[-1]
M = fir.shape[-1]
N = fftpoint
L = N - (M - 1)
if longinput.ndim != 2:
raise Exception('longinput must be 2 dim')
if fir.ndim != 3:
raise Exception('fir must be 3 dim')
if longinput.shape[0] != fir.shape[1]:
raise Exception('fir shape does not match input')
n_input = longinput.shape[0]
n_output = fir.shape[0]
if L < 1:
raise Exception('fftpoint must be more than %d' % M)
fir_f = np.fft.rfft(fir, n=N)
del fir
block_in = np.empty([n_input, 1, N])
block_in[:, 0, L:] = 0.
point_read = 0
len_out = len_input + M - 1
out = np.empty([n_output, len_out])
out_cnt = 0
nblocks = int(np.ceil(len_out / L))
if verbose:
print('fftpoint:%d, ' % N, end='')
print('blocksize:%d, ' % L, end='')
print('nblocks:%d' % nblocks)
pg = ap.ProgressBar2(nblocks, slug='=', space=' ')
for l in range(nblocks):
# overlap
block_in[:, 0, :-L] = block_in[:, 0, L:]
# read input
if (len_input - point_read) >= L:
block_in[:, 0, -L:] = longinput[:, point_read:point_read+L]
point_read += L
else:
ll = len_input - point_read
block_in[:, 0, -L:-L+ll] = longinput[:, point_read:]
block_in[:, 0, -L+ll:] = 0
point_read += ll
# convolution
block_in_f = np.fft.rfft(block_in, n=N)
block_out_f = np.matmul(
fir_f.transpose(2, 0, 1), block_in_f.transpose(2, 0, 1)
).transpose(1, 2, 0)
block_out = np.fft.irfft(block_out_f)[:, 0, -L:]
# write output
if (len_out - out_cnt) >= L:
out[:, out_cnt:out_cnt+L] = block_out
out_cnt += L
else:
out[:, out_cnt:] = block_out[:, :len_out - out_cnt]
out_cnt = len_out
if verbose:
pg.bar()
return out
# convolution using overlap-save method
# with low memory consumption for long input (Frequency-domain FIR version)
#
# longinput: time-domain
# rfft_fir : freq-domain (by np.fft.rfft that fftpoint have to be even number)
# ntaps_fir: length of FIR in time-domain
#
def conv_lessmemory_fdomfir(longinput, rfft_fir, ntaps_fir, verbose=False):
len_input = longinput.shape[-1]
M = ntaps_fir
N = 2 * rfft_fir.shape[-1] - 2 # original N is an even number
L = N - (M - 1)
if longinput.ndim != 2:
raise Exception('longinput must be 2 dim')
if rfft_fir.ndim != 3:
raise Exception('fir must be 3 dim')
if longinput.shape[0] != rfft_fir.shape[1]:
raise Exception('fir shape does not match input')
n_input = longinput.shape[0]
n_output = rfft_fir.shape[0]
if L < 1:
raise Exception('fftpoint must be more than %d' % M)
fir_f = rfft_fir
block_in = np.empty([n_input, 1, N])
block_in[:, 0, L:] = 0.
point_read = 0
len_out = len_input + M - 1
out = np.empty([n_output, len_out])
out_cnt = 0
nblocks = int(np.ceil(len_out / L))
if verbose:
print('fftpoint:%d, ' % N, end='')
print('blocksize:%d, ' % L, end='')
print('nblocks:%d' % nblocks)
pg = ap.ProgressBar2(nblocks, slug='=', space=' ')
for l in range(nblocks):
# overlap
block_in[:, 0, :-L] = block_in[:, 0, L:]
# read input
if (len_input - point_read) >= L:
block_in[:, 0, -L:] = longinput[:, point_read:point_read+L]
point_read += L
else:
ll = len_input - point_read
block_in[:, 0, -L:-L+ll] = longinput[:, point_read:]
block_in[:, 0, -L+ll:] = 0
point_read += ll
# convolution
block_in_f = np.fft.rfft(block_in, n=N)
block_out_f = np.matmul(
fir_f.transpose(2, 0, 1), block_in_f.transpose(2, 0, 1)
).transpose(1, 2, 0)
block_out = np.fft.irfft(block_out_f)[:, 0, -L:]
# write output
if (len_out - out_cnt) >= L:
out[:, out_cnt:out_cnt+L] = block_out
out_cnt += L
else:
out[:, out_cnt:] = block_out[:, :len_out - out_cnt]
out_cnt = len_out
if verbose:
pg.bar()
return out
# combine two MIMO FIR
# shape of fir1 and fir2 -> (out-ch, in-ch, taps)
# return fir2 @ fir1
def combine_fir(fir1, fir2):
len_new = fir1.shape[-1] + fir2.shape[-1] - 1
fftpt = 2 ** ap.nextpow2(len_new)
fir1_f = np.fft.rfft(fir1, n=fftpt)
fir2_f = np.fft.rfft(fir2, n=fftpt)
new_f = np.matmul(
fir2_f.transpose(2, 0, 1), fir1_f.transpose(2, 0, 1)
).transpose(1, 2, 0)
new = np.fft.irfft(new_f)[:, :, :len_new]
return new
|
import autoarray as aa
import numpy as np
class TestDataVectorFromData:
def test__simple_blurred_mapping_matrix__correct_data_vector(self):
blurred_mapping_matrix = np.array(
[
[1.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
]
)
image = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
noise_map = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
data_vector = aa.util.inversion.data_vector_from_blurred_mapping_matrix_and_data(
blurred_mapping_matrix=blurred_mapping_matrix,
image=image,
noise_map=noise_map,
)
assert (data_vector == np.array([2.0, 3.0, 1.0])).all()
def test__simple_blurred_mapping_matrix__change_image_values__correct_data_vector(
self
):
blurred_mapping_matrix = np.array(
[
[1.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
]
)
image = np.array([3.0, 1.0, 1.0, 10.0, 1.0, 1.0])
noise_map = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
data_vector = aa.util.inversion.data_vector_from_blurred_mapping_matrix_and_data(
blurred_mapping_matrix=blurred_mapping_matrix,
image=image,
noise_map=noise_map,
)
assert (data_vector == np.array([4.0, 14.0, 10.0])).all()
def test__simple_blurred_mapping_matrix__change_noise_values__correct_data_vector(
self
):
blurred_mapping_matrix = np.array(
[
[1.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
]
)
image = np.array([4.0, 1.0, 1.0, 16.0, 1.0, 1.0])
noise_map = np.array([2.0, 1.0, 1.0, 4.0, 1.0, 1.0])
data_vector = aa.util.inversion.data_vector_from_blurred_mapping_matrix_and_data(
blurred_mapping_matrix=blurred_mapping_matrix,
image=image,
noise_map=noise_map,
)
assert (data_vector == np.array([2.0, 3.0, 1.0])).all()
def test__data_vector_via_transformer_mapping_matrix_method_same_as_blurred_method(
self
):
mapping_matrix = np.array(
[
[1.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
]
)
data = np.array([4.0, 1.0, 1.0, 16.0, 1.0, 1.0])
noise_map = np.array([2.0, 1.0, 1.0, 4.0, 1.0, 1.0])
data_vector_via_blurred = aa.util.inversion.data_vector_from_blurred_mapping_matrix_and_data(
blurred_mapping_matrix=mapping_matrix, image=data, noise_map=noise_map
)
data_vector_via_transformed = aa.util.inversion.data_vector_from_transformed_mapping_matrix_and_data(
transformed_mapping_matrix=mapping_matrix,
visibilities=data,
noise_map=noise_map,
)
assert (data_vector_via_blurred == data_vector_via_transformed).all()
class TestCurvatureMatrixFromBlurred:
def test__simple_blurred_mapping_matrix(self):
blurred_mapping_matrix = np.array(
[
[1.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
]
)
noise_map = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
curvature_matrix = aa.util.inversion.curvature_matrix_from_blurred_mapping_matrix(
blurred_mapping_matrix=blurred_mapping_matrix, noise_map=noise_map
)
assert (
curvature_matrix
== np.array([[2.0, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 1.0]])
).all()
def test__simple_blurred_mapping_matrix__change_noise_values(self):
blurred_mapping_matrix = np.array(
[
[1.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
]
)
noise_map = np.array([2.0, 1.0, 1.0, 1.0, 1.0, 1.0])
curvature_matrix = aa.util.inversion.curvature_matrix_from_blurred_mapping_matrix(
blurred_mapping_matrix=blurred_mapping_matrix, noise_map=noise_map
)
assert (
curvature_matrix
== np.array([[1.25, 0.25, 0.0], [0.25, 2.25, 1.0], [0.0, 1.0, 1.0]])
).all()
def test__curvature_matrix_via_transformer_mapping_matrix_method_same_as_blurred_method(
self
):
mapping_matrix = np.array(
[
[1.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
]
)
noise_map = np.array([2.0, 1.0, 1.0, 4.0, 1.0, 1.0])
curvature_matrix_via_blurred = aa.util.inversion.curvature_matrix_from_blurred_mapping_matrix(
blurred_mapping_matrix=mapping_matrix, noise_map=noise_map
)
curvature_matrix_via_transformed = aa.util.inversion.curvature_matrix_from_transformed_mapping_matrix(
transformed_mapping_matrix=mapping_matrix, noise_map=noise_map
)
assert (curvature_matrix_via_blurred == curvature_matrix_via_transformed).all()
class TestPixelizationResiduals:
def test__pixelization_perfectly_reconstructed_data__quantities_like_residuals_all_zeros(
self
):
pixelization_values = np.ones(3)
reconstructed_data_1d = np.ones(9)
mask_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
all_sub_mask_1d_indexes_for_pixelization_1d_index = [
[0, 0, 0],
[1, 1, 1],
[2, 2, 2],
]
pixelization_residuals = aa.util.inversion.inversion_residual_map_from_pixelization_values_and_data(
pixelization_values=pixelization_values,
data=reconstructed_data_1d,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
all_sub_mask_1d_indexes_for_pixelization_1d_index=all_sub_mask_1d_indexes_for_pixelization_1d_index,
)
assert (pixelization_residuals == np.zeros(3)).all()
def test__pixelization_not_perfect_fit__quantities_like_residuals_non_zero(self):
pixelization_values = np.ones(3)
reconstructed_data_1d = 2.0 * np.ones(9)
mask_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
all_sub_mask_1d_indexes_for_pixelization_1d_index = [
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
]
pixelization_residuals = aa.util.inversion.inversion_residual_map_from_pixelization_values_and_data(
pixelization_values=pixelization_values,
data=reconstructed_data_1d,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
all_sub_mask_1d_indexes_for_pixelization_1d_index=all_sub_mask_1d_indexes_for_pixelization_1d_index,
)
assert (pixelization_residuals == 1.0 * np.ones(3)).all()
pixelization_values = np.ones(3)
reconstructed_data_1d = np.array([1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0])
mask_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
all_sub_mask_1d_indexes_for_pixelization_1d_index = [
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
]
pixelization_residuals = aa.util.inversion.inversion_residual_map_from_pixelization_values_and_data(
pixelization_values=pixelization_values,
data=reconstructed_data_1d,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
all_sub_mask_1d_indexes_for_pixelization_1d_index=all_sub_mask_1d_indexes_for_pixelization_1d_index,
)
assert (pixelization_residuals == np.array([0.0, 1.0, 2.0])).all()
class TestPixelizationNormalizedResiduals:
def test__pixelization_perfectly_reconstructed_data__quantities_like_residuals_all_zeros(
self
):
pixelization_values = np.ones(3)
reconstructed_data_1d = np.ones(9)
noise_map_1d = np.ones(9)
mask_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
all_sub_mask_1d_indexes_for_pixelization_1d_index = [
[0, 0, 0],
[1, 1, 1],
[2, 2, 2],
]
pixelization_normalized_residuals = aa.util.inversion.inversion_normalized_residual_map_from_pixelization_values_and_reconstructed_data_1d(
pixelization_values=pixelization_values,
data=reconstructed_data_1d,
noise_map_1d=noise_map_1d,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
all_sub_mask_1d_indexes_for_pixelization_1d_index=all_sub_mask_1d_indexes_for_pixelization_1d_index,
)
assert (pixelization_normalized_residuals == np.zeros(3)).all()
def test__pixelization_not_perfect_fit__quantities_like_residuals_non_zero(self):
pixelization_values = np.ones(3)
reconstructed_data_1d = 2.0 * np.ones(9)
noise_map_1d = 2.0 * np.ones(9)
mask_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
all_sub_mask_1d_indexes_for_pixelization_1d_index = [
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
]
pixelization_normalized_residuals = aa.util.inversion.inversion_normalized_residual_map_from_pixelization_values_and_reconstructed_data_1d(
pixelization_values=pixelization_values,
data=reconstructed_data_1d,
noise_map_1d=noise_map_1d,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
all_sub_mask_1d_indexes_for_pixelization_1d_index=all_sub_mask_1d_indexes_for_pixelization_1d_index,
)
assert (pixelization_normalized_residuals == 0.5 * np.ones(3)).all()
pixelization_values = np.ones(3)
reconstructed_data_1d = np.array([1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0])
noise_map_1d = np.array([0.5, 0.5, 0.5, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0])
mask_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
all_sub_mask_1d_indexes_for_pixelization_1d_index = [
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
]
pixelization_normalized_residuals = aa.util.inversion.inversion_normalized_residual_map_from_pixelization_values_and_reconstructed_data_1d(
pixelization_values=pixelization_values,
data=reconstructed_data_1d,
noise_map_1d=noise_map_1d,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
all_sub_mask_1d_indexes_for_pixelization_1d_index=all_sub_mask_1d_indexes_for_pixelization_1d_index,
)
assert (pixelization_normalized_residuals == np.array([0.0, 1.0, 1.0])).all()
class TestPixelizationChiSquareds:
def test__pixelization_perfectly_reconstructed_data__quantities_like_residuals_all_zeros(
self
):
pixelization_values = np.ones(3)
reconstructed_data_1d = np.ones(9)
noise_map_1d = np.ones(9)
mask_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
all_sub_mask_1d_indexes_for_pixelization_1d_index = [
[0, 0, 0],
[1, 1, 1],
[2, 2, 2],
]
pixelization_chi_squareds = aa.util.inversion.inversion_chi_squared_map_from_pixelization_values_and_reconstructed_data_1d(
pixelization_values=pixelization_values,
data=reconstructed_data_1d,
noise_map_1d=noise_map_1d,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
all_sub_mask_1d_indexes_for_pixelization_1d_index=all_sub_mask_1d_indexes_for_pixelization_1d_index,
)
assert (pixelization_chi_squareds == np.zeros(3)).all()
def test__pixelization_not_perfect_fit__quantities_like_residuals_non_zero(self):
pixelization_values = np.ones(3)
reconstructed_data_1d = 2.0 * np.ones(9)
noise_map_1d = 2.0 * np.ones(9)
mask_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
all_sub_mask_1d_indexes_for_pixelization_1d_index = [
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
]
pixelization_chi_squareds = aa.util.inversion.inversion_chi_squared_map_from_pixelization_values_and_reconstructed_data_1d(
pixelization_values=pixelization_values,
data=reconstructed_data_1d,
noise_map_1d=noise_map_1d,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
all_sub_mask_1d_indexes_for_pixelization_1d_index=all_sub_mask_1d_indexes_for_pixelization_1d_index,
)
assert (pixelization_chi_squareds == 0.25 * np.ones(3)).all()
pixelization_values = np.ones(3)
reconstructed_data_1d = np.array([1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0])
noise_map_1d = np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 4.0, 4.0, 4.0])
mask_1d_index_for_sub_mask_1d_index = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
all_sub_mask_1d_indexes_for_pixelization_1d_index = [
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
]
pixelization_chi_squareds = aa.util.inversion.inversion_chi_squared_map_from_pixelization_values_and_reconstructed_data_1d(
pixelization_values=pixelization_values,
data=reconstructed_data_1d,
noise_map_1d=noise_map_1d,
mask_1d_index_for_sub_mask_1d_index=mask_1d_index_for_sub_mask_1d_index,
all_sub_mask_1d_indexes_for_pixelization_1d_index=all_sub_mask_1d_indexes_for_pixelization_1d_index,
)
assert (pixelization_chi_squareds == np.array([0.0, 4.0, 0.25])).all()
|
from django.shortcuts import render, redirect, reverse, HttpResponse
from django.contrib import messages
from products.models import Product
from Profile.models import Profile
# Create your views here.
def shopping_bag(request):
"""A view to show the shopping bag"""
user = Profile.objects.filter(user=request.user)
context = {
'user': user
}
return render(request, 'bag.html', context)
def add_to_bag(request, item_id):
""" Add a quantity of the specified product to the shopping bag """
product = Product.objects.get(pk=item_id)
quantity = int(request.POST.get('quantity'))
redirect_url = request.POST.get('redirect_url')
size = None
if 'product_size' in request.POST:
size = request.POST['product_size']
bag = request.session.get('bag', {})
if size:
if item_id in list(bag.keys()):
if size in bag[item_id]['items_by_size'].keys():
bag[item_id]['items_by_size'][size] += quantity
messages.success(request, f'Added {product.name} to your bag')
else:
bag[item_id]['items_by_size'][size] = quantity
messages.success(request, f'Added size {size.upper()} {product.name} to your bag')
else:
bag[item_id] = {'items_by_size': {size: quantity}}
messages.success(request, f'Added size {size.upper()} {product.name} to your bag')
else:
if item_id in list(bag.keys()):
bag[item_id] += quantity
messages.success(request, f'Added {product.name} to your bag')
else:
bag[item_id] = quantity
messages.success(request, f'Added {product.name} to your bag')
request.session['bag'] = bag
return redirect(redirect_url)
def remove_from_bag(request, item_id):
"""Remove the item from the shopping bag"""
try:
size = None
if 'product_size' in request.POST:
size = request.POST['product_size']
bag = request.session.get('bag', {})
if size:
del bag[item_id]['items_by_size'][size]
if not bag[item_id]['items_by_size']:
bag.pop(item_id)
else:
bag.pop(item_id)
request.session['bag'] = bag
return HttpResponse(status=200)
except Exception as e:
return HttpResponse(status=500) |
import datetime
import smtplib
from email.mime.text import MIMEText
import pandas as pd
import pymysql
import requests
print("核心数据自动发送邮箱")
class getFromDataBase():
def __init__(self):
self.con_mall = pymysql.connect(host='106.75.233.242', port=28306, user='leizhen',
passwd='b00c482fb32781964a1e',
db='mall', charset='utf8')
def getNewResourseNum(self):
sql = "select QUARTER(t.firsttime) 'q', count(*) '新用户数' " \
"from (select user_id as uid, min(create_time) as firsttime " \
"from `order` where platform_id in (2,3,4,5,6,7,8,9,11) and status = 2 group by user_id) t " \
"where t.firsttime >= '2021' " \
"group by QUARTER(t.firsttime)"
df_NewResourseNum = pd.read_sql(sql=sql, con=self.con_mall).iat[1, 1]
return df_NewResourseNum
class getFromSensors():
def __init__(self):
self.url = "https://sa.xinpianchang.com/api/sql/query?" \
"token=ee33d9f5a116111566847814e7a5acc9f37402245c76c3bf528cf3879a01e725&project=production"
def get30dMeanDAU(self):
date1 = (datetime.date.today() - datetime.timedelta(days=30))
date2 = (datetime.date.today() - datetime.timedelta(days=1))
print(date1, date2)
sql1 = "select avg(tb1.num) as avg1 from(select date,count(distinct user_id) as num from events " \
"where event = '$AppStart' " \
f"and date between '{date1}' and '{date2}' " \
"group by date " \
"order by date) tb1"
sql2 = "select avg(tb1.num) as avg2 from(select date,count(distinct user_id) as num " \
"from events where event = '$pageview' " \
"and $url regexp 'www.xinpianchang.com' " \
"and $os in ('Mac','Windows') " \
f"and date between '{date1}' and '{date2}' " \
"group by date " \
"order by date) tb1"
params1 = {
"q": sql1,
"format": "json"
}
params2 = {
"q": sql2,
"format": "json"
}
response1 = int(requests.get(url=self.url, params=params1).json()['avg1'])
response2 = int(requests.get(url=self.url, params=params2).json()['avg2'])
return response1+response2
def send_email(title, content):
mail_host = 'smtp.163.com'
# 163用户名
mail_user = 'pylitton@163.com'
# 密码(部分邮箱为授权码)
mail_pass = 'XPJSVKOLCCVMOSPX'
# 邮件发送方邮箱地址
sender = 'pylitton@163.com'
# 邮件接受方邮箱地址,注意需要[]包裹,这意味着你可以写多个邮件地址群发
receivers = ['leizhen@xinpianchang.com']
# 设置email信息
# 邮件内容设置
message = MIMEText(_text=content, _subtype='html', _charset='utf-8')
# 邮件主题
message['Subject'] = title
# 发送方信息
message['From'] = sender
# 接受方信息
message['To'] = receivers[0]
# 登录并发送邮件
try:
smtpObj = smtplib.SMTP()
# 连接到服务器
smtpObj.connect(mail_host, 25)
# 登录到服务器
smtpObj.login(mail_user, mail_pass)
# 发送
smtpObj.sendmail(
sender, receivers, message.as_string())
# 退出
smtpObj.quit()
print('success')
except smtplib.SMTPException as e:
print('error', e) # 打印错误
def control():
title = "目标表数据提示邮件_{}".format(datetime.date.today())
getDataM = getFromDataBase()
data_NewResourseNum = getDataM.getNewResourseNum()
getDataS = getFromSensors()
data_30DAU = getDataS.get30dMeanDAU()
content = """
<html>
<head></head>
<body>
<p>Q2季度全球精选新增用户数:{}</p>
<p>最近30天社区日活(app启动+web全站):{}</p>
</body>
</html>
""".format(data_NewResourseNum,data_30DAU)
print(data_NewResourseNum,data_30DAU)
send_email(title=title, content=content)
control() |
from django.urls import path
from . import views
urlpatterns =[
path('', views.my_cart, name='shopcart-index'),
path('add_to_cart/<int:id>', views.add_to_cart, name='add_to_cart'),
path('delete_from_cart/<int:id>', views.delete_from_cart, name='delete_from_cart')
] |
#!/usr/bin/python3
from Crypto.Cipher import AES
import itertools
import random
from util import get_random_bytes, chunk, pkcs7_pad, pkcs7_unpad, slurp_base64_file, hexquote_chars
from s1 import xor_buf
from base64 import b64decode
from random import randrange
def main():
c16()
def c16():
block_size = 16
random_key = get_random_bytes(block_size)
random_iv = get_random_bytes(block_size)
payload = bytearray(b";admin=true;")
# Hide the special chars by flipping a bit in them
payload[0] ^= 0x01
payload[6] ^= 0x01
payload[11] ^= 0x01
# Assuming we don't know the prefix, we will try at each offset
for offset in range(0, block_size):
chosen_plain_text = b'A' * offset
# Prepend a sacrificial block, in which we can flip bits
chosen_plain_text += b'A' * block_size
chosen_plain_text += payload
cipher_text = bytearray(c16_encryptor(block_size, random_key, random_iv, chosen_plain_text))
# We don't know which block to flip. Let's try 'em all
for block_index in range(0, (len(cipher_text) // block_size) - 1):
# Flip the corresponding bits in the sacrificial block
cipher_text[(block_index * block_size) + offset + 0] ^= 0x01
cipher_text[(block_index * block_size) + offset + 6] ^= 0x01
cipher_text[(block_index * block_size) + offset + 11] ^= 0x01
try:
if (c16_decryptor(block_size, random_key, random_iv, bytes(cipher_text))):
print("S2C16 got admin")
return
except Exception:
# pkcs 7 fail?
pass
print("S2C16 fail :-(")
def c16_encryptor(block_size: int, key, iv, plain_text):
buf = hexquote_chars(b";=", plain_text)
buf = b"comment1=cooking%20MCs;userdata=" + buf + b";comment2=%20like%20a%20pound%20of%20bacon"
padded_buf = pkcs7_pad(buf, block_size)
return aes128_cbc_encode(key, iv, padded_buf)
def c16_decryptor(block_size, key, iv, cipher_text) -> bool:
padded_plain_text = aes128_cbc_decode(key, iv, cipher_text)
plain_text = pkcs7_unpad(padded_plain_text, block_size)
return b";admin=true;" in plain_text
def c14():
unknown_key = get_random_bytes(16)
# oracle = lambda pt: c14_encryption_oracle(unknown_key, pt)
def oracle(pt):
return c14_encryption_oracle(unknown_key, pt)
block_size = 16
pad_char = b'A'
recovered_plain_text = bytearray()
chosen_plain_text = bytearray()
while True:
# We construct a (block_size - 1) piece plain text. Which
# ends in the our recovered plain text and is prepended with enough
# pad_char to make the size
chosen_plain_text[:] = recovered_plain_text
if len(chosen_plain_text) > block_size - 1:
chosen_plain_text = chosen_plain_text[-(block_size - 1):]
added_pad = max(0, (block_size - 1) - len(chosen_plain_text))
chosen_plain_text = bytearray(pad_char * added_pad) + chosen_plain_text
assert len(chosen_plain_text) == block_size - 1, "Using correct size chosen_plain_text block"
# By prepending with enough pad_chars and appending with bytes 0->255,
# and repeating until we get block_size different
# answers, we find 'block_size' candidate cipher blocks for each possible end byte
dictionary = c14_dictionary_for_block(oracle, block_size, chosen_plain_text)
next_byte = None
for num_attempts in range(0, 10*block_size):
pad = pad_char * added_pad
cipher_text = oracle(pad)
for c in chunk(cipher_text, block_size):
try:
next_byte = dictionary[c]
break
except KeyError:
pass
if next_byte is None:
raise RuntimeError("Failed to find next byte in {} iterations", num_attempts)
recovered_plain_text.append(next_byte)
print("{}".format(recovered_plain_text.decode('ascii')))
print("S2C14 msg is {}", recovered_plain_text)
# def c14():
#
# unknown_key = get_random_bytes(16)
# oracle = lambda pt: c14_encryption_oracle(unknown_key, pt)
#
# # Shim is number of bytes to fill a block
# block_size = c14_discover_block_size(oracle)
# print("S2C14 - found block size {}".format(block_size))
#
# is_ecb = c12_detect_ecb(oracle, block_size)
# print("S2C14 - is ECB?: {}".format(is_ecb))
#
# known_bytes = bytearray()
# for index in range(0, 10 * block_size):
# print("JB - index {}".format(index))
# block_index = index // block_size
# chunk_index = index % block_size
#
# needed_pad_len = (block_size - 1) - chunk_index
# needed_pad = bytes(needed_pad_len)
#
# trick_block = bytearray(block_size) + known_bytes
# trick_block = trick_block[-(block_size-1):]
#
# block_dictionary = c14_make_block_dictionary(oracle, block_size, trick_block)
# cipher_text = oracle(needed_pad)
#
# cipher_chunks = chunk(cipher_text, block_size)
# interesting_chunk = cipher_chunks[index // block_size]
# try:
# plain_text_byte = block_dictionary[interesting_chunk]
# except KeyError:
# break
#
# known_bytes.append(plain_text_byte)
#
# print("S2C14 - got msg len: {}".format(len(known_bytes)))
# plain_text = pkcs7_unpad(known_bytes, block_size)
# print("S2C14 - got msg: {}".format(plain_text.decode('ascii')))
def c14_discover_block_size(oracle):
lengths = set()
for shim_size in range(1, 1000):
ct = oracle(bytes(shim_size))
lengths.add(len(ct))
min_diff = 1000
last_length = 0
for length in sorted(lengths):
if last_length == 0:
last_length = length
continue
diff = length - last_length
if diff < min_diff:
min_diff = diff
return min_diff
def c14_dictionary_for_block(oracle, block_size, chosen_plain_text):
assert len(chosen_plain_text) == block_size - 1, "Using correct size chosen_plain_text block"
dictionary = dict()
duplicates = set()
enough_padding_for_duplicates = b'_' * ((3 * block_size) - 1)
for end_byte in range(0, 256):
plain_text = bytearray(enough_padding_for_duplicates)
plain_text += chosen_plain_text
plain_text.append(end_byte)
candidates = set()
# Keep trying so we get different offsets
while len(candidates) < block_size:
cipher_text = oracle(plain_text)
candidate = find_block_after_duplicates(cipher_text, block_size)
candidates.add(candidate)
for candidate in candidates:
if candidate in duplicates:
continue
if candidate in dictionary:
duplicates.add(candidate)
del(dictionary[candidate])
continue
dictionary[candidate] = end_byte
return dictionary
def find_block_after_duplicates(buf, block_size):
next_is_target = False
last_chunk = b''
chunks = chunk(buf, block_size)
for c in chunks:
# Continue while we keep seeing duplicates
if c == last_chunk:
next_is_target = True
continue
if next_is_target:
return c
last_chunk = c
raise RuntimeError("Didn't find block after duplicates")
def c14_encryption_oracle(key, chosen_plain_text):
block_size = 16
secret_suffix = b64decode("""Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg
aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq
dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg
YnkK""")
prefix_size = randrange(20, 40)
random_prefix = get_random_bytes(prefix_size)
msg = random_prefix + chosen_plain_text + secret_suffix
# chunk_index = 0
# chunks = chunk(msg, 16)
# for c in chunks:
# chunk_index+= 1
# print("JB - oracle pt {}/{}: [{}]".format(chunk_index, len(chunks), c))
msg = pkcs7_pad(msg, block_size)
return aes128_ecb_encode(key, msg)
def c13():
block_size = 16
secret_key = get_random_bytes(block_size)
def encryptor(email_address):
return aes128_ecb_encode(secret_key, pkcs7_pad(c13_profile_for(email_address), block_size))
def decryptor(cipher_text):
return c13_parse_kv(pkcs7_unpad(aes128_ecb_decode(secret_key, cipher_text), block_size))
# The minimum amount of prefix padding to cause a duplicated block
# will give us the target block in the next block
for repeat_pad_size in range(2*block_size - 1, 3 * block_size):
repeat_pad = b"A" * repeat_pad_size
trick_email_address = repeat_pad + pkcs7_pad(b"admin", block_size) + b"@example.com"
cipher_text = encryptor(trick_email_address)
chunks = chunk(cipher_text, block_size)
# If we have a repeat, the block after repeat is target
next_is_target = False
target_cipher_block = b''
last_chunk = b''
for c in chunks:
if next_is_target:
target_cipher_block = c
break
next_is_target = (c == last_chunk)
last_chunk = c
if target_cipher_block != b'':
break
if target_cipher_block == b'':
raise RuntimeError("Didn't find target cipher block")
# At some padding between 0..block_size the end block should
# be 'user<pkcspadding>'. If so, replacing it with our
# target cipher block should give us something which will decode
# to our desired plaintext
for padding_size in range(0, block_size):
padded_email_address = (b"A" * padding_size) + b"@example.com"
cipher_text = encryptor(padded_email_address)
# Splice in target block
cipher_text = bytearray(cipher_text)
cipher_text[-block_size:] = target_cipher_block
cipher_text = bytes(cipher_text)
try:
profile = decryptor(cipher_text)
if profile[b"role"] == b"admin":
print("S2C13 - did it! got an admin role")
return
except (KeyError, ValueError):
pass
print("S2C13 fail. Bad coder, no biscuit")
def c13_profile_for(email_address):
email_address = email_address.replace(b'&', b'')
email_address = email_address.replace(b'=', b'')
profile = {
b"email": email_address,
b"uid": b"10",
b"role": b"user",
}
return b'&'.join(map(lambda k: k + b'=' + profile[k], profile))
def c13_parse_kv(buf):
kvs = buf.split(b'&')
return dict(map(lambda buf: buf.split(b'='), kvs))
def c12():
unknown_key = get_random_bytes(16)
def oracle(pt):
return c12_encryption_oracle(unknown_key, pt)
# Shim is number of bytes to fill a block
(block_size, shim_size) = c12_discover_block_and_shim_sizes(oracle)
print("S2C12 - found block size {}".format(block_size))
is_ecb = c12_detect_ecb(oracle, block_size)
print("S2C12 - is ECB?: {}".format(is_ecb))
known_bytes = bytearray()
for index in range(0, 10 * block_size):
# block_index = index // block_size
chunk_index = index % block_size
# print("block_index {} chunk_index {}".format(block_index, chunk_index))
needed_pad_len = (block_size - 1) - chunk_index
needed_pad = bytes(needed_pad_len)
trick_block = bytearray(block_size) + known_bytes
trick_block = trick_block[-(block_size-1):]
block_dictionary = c12_make_block_dictionary(oracle, block_size, trick_block)
cipher_text = oracle(needed_pad)
cipher_chunks = chunk(cipher_text, block_size)
interesting_chunk = cipher_chunks[index // block_size]
# print("C0: {}".format(interesting_chunk))
try:
plain_text_byte = block_dictionary[interesting_chunk]
except KeyError:
break
known_bytes.append(plain_text_byte)
# print("Got byte: {}".format(plain_text_byte))
# print("Got known bytes: {}".format(known_bytes))
plain_text = pkcs7_unpad(known_bytes, block_size)
print("S2C12 - got msg: {}", plain_text.decode('ascii'))
def c12_make_block_dictionary(oracle, block_size, prefix):
if len(prefix) != block_size - 1:
raise RuntimeError("sanity violation: {} != {}".format(block_size-1, len(prefix)))
d = {}
for b in range(0, 256):
msg = bytearray(prefix)
msg.append(b)
cipher_text = oracle(msg)
cipher_chunks = chunk(cipher_text, block_size)
d[cipher_chunks[0]] = b
return d
def c12_detect_ecb(oracle, block_size):
repeated_blocks = bytes(block_size * 4)
cipher_text = oracle(repeated_blocks)
chunks = chunk(cipher_text, block_size)
distinct_chunks = set(chunks)
return len(chunks) != len(distinct_chunks)
def c12_discover_block_and_shim_sizes(oracle):
max_block_size = 1000
zero_len = len(oracle(b''))
for shim_size in range(1, max_block_size):
ct = oracle(bytes(shim_size))
if len(ct) != zero_len:
return (len(ct) - zero_len, shim_size - 1)
raise RuntimeError("Failed to find block size up to {}".format(max_block_size))
def c12_encryption_oracle(key, chosen_plain_text):
block_size = 16
secret_suffix = b64decode("""Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg
aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq
dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg
YnkK""")
msg = pkcs7_pad(chosen_plain_text + secret_suffix, block_size)
return aes128_ecb_encode(key, msg)
def c11():
block_size = 16 # we're doing AES128
for i in range(10):
plain_text = bytes(block_size * 10) # A lot of repetition, which repeats under ECB
cipher_text = c11_encrypt_ecb_or_cbc_oracle(plain_text)
chunks = chunk(cipher_text, block_size)
distinct_chunks = set(chunks)
if len(chunks) != len(distinct_chunks):
print("S2C11 - guess ECB!")
else:
print("S2C11 - guess CBC!")
def c11_encrypt_ecb_or_cbc_oracle(plain_text):
block_size = 16
key = get_random_bytes(block_size)
prefix = get_random_bytes(10)
suffix = get_random_bytes(10)
msg = pkcs7_pad(prefix + plain_text + suffix, block_size)
if random.random() >= 0.5:
print("S2C11 - doing CBC")
iv = get_random_bytes(16)
return aes128_cbc_encode(key, iv, msg)
else:
print("S2C11 - doing ECB")
return aes128_ecb_encode(key, msg)
def c10():
cipher_text = slurp_base64_file("10.txt")
iv = bytes(itertools.repeat(0, 16))
key = b"YELLOW SUBMARINE"
plain_text = aes128_cbc_decode(key, iv, cipher_text)
print("S1C10 msg is {}".format(plain_text.decode('ascii')))
recipher_text = aes128_cbc_encode(key, iv, plain_text)
print("Re-encode matches? : {}".format(recipher_text == cipher_text))
def aes128_ecb_encode(key, plain_text):
plain_text_modlen = len(plain_text) % 16
if plain_text_modlen != 0:
raise RuntimeError("AES128 ECB input must be 16 byte, not {}".format(plain_text_modlen))
ecb_cipher = AES.new(key, AES.MODE_ECB)
return ecb_cipher.encrypt(plain_text)
def aes128_ecb_decode(key, plain_text):
ecb_cipher = AES.new(key, AES.MODE_ECB)
return ecb_cipher.decrypt(plain_text)
def aes128_cbc_encode(key, iv, plain_text):
ecb_cipher = AES.new(key, AES.MODE_ECB)
block_size = ecb_cipher.block_size
if len(plain_text) % block_size != 0:
raise RuntimeError("CBC requires padding to block size")
if len(iv) != block_size:
raise RuntimeError("IV must be one block")
plain_chunks = chunk(plain_text, block_size)
last_cipher_chunk = iv
cipher_chunks = []
for pc in plain_chunks:
next_cipher_chunk = ecb_cipher.encrypt(xor_buf(pc, last_cipher_chunk))
cipher_chunks.append(next_cipher_chunk)
last_cipher_chunk = next_cipher_chunk
return b''.join(cipher_chunks)
def aes128_cbc_decode(key, iv, cipher_text):
cipher_text = bytes(cipher_text)
ecb_cipher = AES.new(key, AES.MODE_ECB)
block_size = ecb_cipher.block_size
if len(cipher_text) % block_size != 0:
raise RuntimeError("CBC requires padding to block size")
if len(iv) != block_size:
raise RuntimeError("IV must be one block")
cipher_chunks = chunk(cipher_text, block_size)
last_cipher_chunk = iv
plain_chunks = []
for cc in cipher_chunks:
next_plain_chunk = xor_buf(last_cipher_chunk, ecb_cipher.decrypt(cc))
plain_chunks.append(next_plain_chunk)
last_cipher_chunk = cc
return b''.join(plain_chunks)
def c9():
block_size = 20
msg = b"YELLOW SUBMARINE"
padded_message = pkcs7_pad(msg, block_size)
expected_padded_message = b"YELLOW SUBMARINE\x04\x04\x04\x04"
print("S2C9 padded message correct: {}", padded_message == expected_padded_message)
if __name__ == "__main__":
main()
|
from django.shortcuts import render, redirect
from django.utils import timezone
from .models import Rating, Album
from .forms import SearchForm, SearchResult, RatingForm
import requests
import json
# Create your views here.
def ratings_list(request):
ratings = Rating.objects.all().order_by('-updated')
return render(request, 'myApp/ratings.html', {'ratings':ratings})
def get_json(query):
getTokenData={
"client_id":"d3fb423d78d6479a9ab409baccd10cc2",
"client_secret":"691f84aaf05a41528a916f96c33f4416",
"grant_type":"client_credentials"
}
r = requests.post(url = "https://accounts.spotify.com/api/token", data=getTokenData)
token_json = r.text
parsed_json = json.loads(token_json)
token = parsed_json["access_token"]
stem = "https://api.spotify.com/v1/search?q="
print("QUERYING SPOTIFY WITH " + stem+query)
headers = {"Authorization": "Bearer " + token}
q = requests.get(url = stem+query, headers=headers)
response = q.text
return json.loads(response)
def search_form(request):
print(request)
if request.method == "POST":
#DO stuff
form = SearchForm(request.POST)
if form.is_valid():
artist = request.POST['artist']
name = request.POST['name']
query = ""
if name is not "":
query += "album%3A" + name
if artist is not "":
query += "%20artist%3A" + artist
query += "&type=album"
print("Got NAME="+name+" and ARTIST="+artist)
parsedAlbums=get_json(query)
results = []
for album in parsedAlbums['albums']['items']:
result = {'name':album['name'],'artist':album['artists'][0]['name'], 'id':album['id'], 'img':album['images'][0]['url']}
results.append(result)
return render(request, 'myapp/search_result.html', {'results':results, 'og_data':parsedAlbums['albums']['items']})
# data = json.dumps(parsedAlbums['albums']['items'])
# return render(request, 'myapp/search_result.html', {'data':data})
else:
form = SearchForm()
return render(request, 'myapp/search_form.html', {'form':form})
def rating_detail(request, ratingID):
rating = Rating.objects.get(id=ratingID)
return render(request, 'myApp/rating_detail.html', {'rating':rating})
# V2 Notes
# When create/edit a rating
# INPUT: Spotify URI
# IF album exists
# IF rating exists
# go straight to rating ID
# IF rating doesnt exist
# create rating
# refresh, sending new rating ID
# IF album doesnt exist
# create album
# refresh, sending album URI
# INPUT: Rating ID
# go straight to rating ID
def rating_edit(request, ratingID=None, uri=None):
# If uri is none, ratingID is not none
# This means we have a ratingID
# This means we have a rating
# View an existing rating
if uri is None:
rating = Rating.objects.get(id=ratingID)
if(request.user == rating.user):
if request.method == "POST":
print("Got a post request, rating is: " + str(rating))
form = RatingForm(request.POST, instance=rating)
if form.is_valid():
rating = form.save(commit=False)
rating.updated = timezone.now()
rating.save()
return redirect('rating_detail', ratingID=rating.pk)
print("You created this rating!")
form = RatingForm({'user':rating.user, 'album':rating.album, 'value':rating.value, 'comment':rating.comment, 'updated':rating.updated})
return render(request, 'myApp/rating_edit.html', {'form':form, 'rating':rating})
else:
print("You didn't create this rating!")
return redirect('rating_detail', ratingID=ratingID)
elif ratingID is None:
album = Album.objects.get(spotify_id=uri)
rating, created = Rating.objects.get_or_create(album=album, user=request.user)
return redirect(rating_edit, ratingID=rating.id)
# def rating_edit(request, uri):
# album = Album.objects.get(spotify_id=uri)
# rating, created = Rating.objects.get_or_create(album=album, user=request.user)
# return redirect(rating_edit, ratingID=rating.id)
# WE SHOULD NOT CREATE AN ALBUM RECORD ON VIEW!
# Need to replace the {'album': album}
# Instead of passing model to template
# V2 should pass dict to template
def album_detail(request, uri):
album, created = Album.objects.get_or_create(spotify_id=uri)
if created is True:
print("Creating a new album!")
album.name = request.GET['name']
album.artist = request.GET['artist']
album.art_url = request.GET['img']
album.save()
#Display detail view
return redirect('/album/'+uri)
#return redirect('album_detail', uri=uri)
elif created is False and len(request.GET)!=0:
print("Still getting get parameters. Need to redirect you")
return redirect("/album/"+uri+"/")
ratings = Rating.objects.filter(album=album.spotify_id)
return render(request, 'myApp/album_detail.html', {'album':album, 'ratings':ratings})
|
import vlc
import time
addr = '172.14.1.194'
url = f"rtsp://{addr}/live"
# The cameras appear to restart WiFi if they don't receive an RTCP Receiver Report regularly.
# FFMpeg and anything that depends on this don't appear to send these enough?
#Basic Recording
# cvlc rtsp://172.14.1.194/live --sout file/ts:stream.mpg
#
instance = vlc.Instance()
media = instance.media_new(url)
media.add_option("sout=file/ts:stream.mpg")
player = instance.media_player_new()
player.set_media(media)
player.play()
while 1:
continue
|
# -*- coding: utf-8 -*-
# @Author: Li Qin
# @Date: 2020-02-24 09:06:23
# @Last Modified by: Li Qin
# @Last Modified time: 2020-02-24 10:14:19
import compileall
import glob
import re
import timeit
def show(title):
print(title)
for filename in glob.glob('example/**', recursive=True):
print(f' {filename}')
print()
def compile_():
show('BEFORE')
compileall.compile_dir(
'example',
maxlevels=0)
compileall.compile_file('example/subfolder2/c.py')
show('AFTER')
compile_() |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import numpy as np
from ._segmentation import _ve_step, _interaction_energy
TINY = 1e-300
NITERS = 20
BETA = 0.2
VERBOSE = True
def print_(s):
if VERBOSE:
print(s)
def gauss_dist(x, mu, sigma):
return np.exp(-.5 * ((x - float(mu)) / float(sigma)) ** 2) / float(sigma)
def laplace_dist(x, mu, sigma):
return np.exp(-np.abs((x - float(mu)) / float(sigma))) / float(sigma)
def vm_step_gauss(ppm, data_masked, mask):
"""
ppm: ndarray (4d)
data_masked: ndarray (1d, masked data)
mask: 3-element tuple of 1d ndarrays (X,Y,Z)
"""
nclasses = ppm.shape[-1]
mu = np.zeros(nclasses)
sigma = np.zeros(nclasses)
prop = np.zeros(nclasses)
for i in range(nclasses):
P = ppm[..., i][mask]
Z = P.sum()
tmp = data_masked * P
mu[i] = tmp.sum() / Z
sigma[i] = np.sqrt(np.sum(tmp * data_masked) / Z - mu[i] ** 2)
prop[i] = Z / float(data_masked.size)
return mu, sigma, prop
def weighted_median(x, w, ind):
F = np.cumsum(w[ind])
f = .5 * (w.sum() + 1)
i = np.searchsorted(F, f)
if i == 0:
return x[ind[0]]
wr = (f - F[i - 1]) / (F[i] - F[i - 1])
jr = ind[i]
jl = ind[i - 1]
return wr * x[jr] + (1 - wr) * x[jl]
def vm_step_laplace(ppm, data_masked, mask):
"""
ppm: ndarray (4d)
data_masked: ndarray (1d, masked data)
mask: 3-element tuple of 1d ndarrays (X,Y,Z)
"""
nclasses = ppm.shape[-1]
mu = np.zeros(nclasses)
sigma = np.zeros(nclasses)
prop = np.zeros(nclasses)
sorted_indices = np.argsort(data_masked) # data_masked[ind] increasing
for i in range(nclasses):
P = ppm[..., i][mask]
mu[i] = weighted_median(data_masked, P, sorted_indices)
sigma[i] = np.sum(np.abs(P * (data_masked - mu[i]))) / P.sum()
prop[i] = P.sum() / float(data_masked.size)
return mu, sigma, prop
message_passing = {'mf': 0, 'icm': 1, 'bp': 2}
noise_model = {'gauss': (gauss_dist, vm_step_gauss),
'laplace': (laplace_dist, vm_step_laplace)}
class VEM(object):
"""
Classification via VEM algorithm.
"""
def __init__(self, data, labels, mask=None, noise='gauss',
ppm=None, synchronous=False, scheme='mf'):
"""
A class to represent a variational EM algorithm for tissue
classification.
Parameters
----------
data: array
Image data (n-dimensional)
labels: int or sequence
Desired number of classes, or sequence of strings
mask: sequence
Sequence of one-dimensional coordinate arrays
"""
# Labels
if hasattr(labels, '__iter__'):
self.nclasses = len(labels)
self.labels = labels
else:
self.nclasses = int(labels)
self.labels = [str(l) for l in range(self.nclasses)]
# Make default mask (required by MRF regularization) This will
# be passed to the _ve_step C-routine, which assumes a
# contiguous int array and raise an error otherwise. Voxels
# on the image borders are further rejected to avoid
# segmentation faults.
if mask == None:
XYZ = np.mgrid[[slice(1, s - 1) for s in data.shape]]
XYZ = np.reshape(XYZ, (XYZ.shape[0], np.prod(XYZ.shape[1::]))).T
XYZ = np.asarray(XYZ, dtype='int', order='C')
else:
submask = (mask[0] > 0) * (mask[0] < data.shape[0] - 1)
for i in range(1, len(mask)):
submask *= (mask[i] > 0) * (mask[i] < data.shape[i] - 1)
mask = [mask[i][submask] for i in range(len(mask))]
XYZ = np.zeros((len(mask[0]), len(mask)), dtype='int')
for i in range(len(mask)):
XYZ[:, i] = mask[i]
self._XYZ = XYZ
self.mask = tuple(XYZ.T)
# If a ppm is provided, interpret it as a prior, otherwise
# create ppm from scratch and assume flat prior.
if ppm == None:
self.ppm = np.zeros(list(data.shape) + [self.nclasses])
self.ppm[self.mask] = 1. / self.nclasses
else:
self.ppm = ppm
self.data_masked = data[self.mask]
self.prior_ext_field = self.ppm[self.mask]
self.posterior_ext_field = np.zeros([self.data_masked.size,
self.nclasses])
# Inference scheme parameters
self.synchronous = synchronous
if scheme in message_passing:
self.scheme = message_passing[scheme]
else:
raise ValueError('Unknown message passing scheme')
if noise in noise_model:
self.dist, self._vm_step = noise_model[noise]
else:
raise ValueError('Unknown noise model')
# Cache beta parameter
self._beta = BETA
# VM-step: estimate parameters
def vm_step(self):
"""
Return (mu, sigma)
"""
return self._vm_step(self.ppm, self.data_masked, self.mask)
def sort_labels(self, mu):
"""
Sort the array labels to match mean tissue intensities ``mu``.
"""
K = len(mu)
tmp = np.asarray(self.labels)
labels = np.zeros(K, dtype=tmp.dtype)
labels[np.argsort(mu)] = tmp
return list(labels)
# VE-step: update tissue probability map
def ve_step(self, mu, sigma, prop=None, beta=BETA):
"""
VE-step
"""
# Cache beta parameter
self._beta = beta
# Compute complete-data likelihood maps, replacing very small
# values for numerical stability
for i in range(self.nclasses):
self.posterior_ext_field[:, i] = self.prior_ext_field[:, i] * \
self.dist(self.data_masked, mu[i], sigma[i])
if not prop == None:
self.posterior_ext_field[:, i] *= prop[i]
self.posterior_ext_field[:] = np.maximum(self.posterior_ext_field,
TINY)
# Normalize reference probability map
if beta == 0.0 or self._XYZ.shape[1] != 3:
self.ppm[self.mask] = (self.posterior_ext_field.T /
self.posterior_ext_field.sum(1)).T
# Update and normalize reference probabibility map using
# neighborhood information (mean-field theory)
else:
print_(' ... MRF regularization')
self.ppm = _ve_step(self.ppm, self.posterior_ext_field, self._XYZ,
beta, self.synchronous, self.scheme)
def run(self, mu=None, sigma=None, prop=None, beta=BETA,
niters=NITERS, freeze_prop=True):
do_vm_step = (mu == None)
def check(x, default=0.0):
if x == None:
return default * np.ones(self.nclasses, dtype='double')
else:
return np.asarray(x, dtype='double')
mu = check(mu)
sigma = check(sigma)
prop = check(prop, default=1. / self.nclasses)
prop0 = prop
for i in range(niters):
print_('VEM iter %d/%d' % (i + 1, niters))
print_(' VM-step...')
if do_vm_step:
mu, sigma, prop = self.vm_step()
if freeze_prop: # account for label switching
prop = prop0[np.argsort(mu)]
print_(' VE-step...')
self.ve_step(mu, sigma, prop, beta=beta)
do_vm_step = True
return mu, sigma, prop
def free_energy(self):
"""
Compute the free energy defined as:
F(q, theta) = int q(x) log q(x)/p(x,y/theta) dx
associated with input parameters mu,
sigma and beta (up to an ignored constant).
"""
q = self.ppm[self.mask]
# Entropy term
f = np.sum(q * np.log(np.maximum(q / self.posterior_ext_field, TINY)))
# Interaction term
if self._beta > 0.0:
fc = _interaction_energy(self.ppm, self._XYZ)
f -= .5 * self._beta * fc
return f
|
from flask import Flask, request
from flask_cors import CORS
import json
app = Flask(__name__)
CORS(app)
explanations = [
{
'title': 'What does it mean?',
'body': 'Green vines attached to the trunk of the tree had wound themselves toward the top of the canopy. Ants used the vine as their private highway, avoiding all the creases and crags of the bark, to freely move at top speed from top to bottom or bottom to top depending on their current chore. At least this was the way it was supposed to be. Something had damaged the vine overnight halfway up the tree leaving a gap in the once pristine ant highway.',
}, {
'title': 'When to get tested?',
'body': 'He ordered his regular breakfast. Two eggs sunnyside up, hash browns, and two strips of bacon. He continued to look at the menu wondering if this would be the day he added something new. This was also part of the routine. A few seconds of hesitation to see if something else would be added to the order before demuring and saying that would be all. It was the same exact meal that he had ordered every day for the past two years.',
}
]
dds = [
{'id': '1234', 'code': '1234:icd9'},
{'id': '2345', 'code': '2345:icd9'},
{'id': '3456', 'code': '3456:icd10'},
]
indications = [
{'id': '1234', 'code': '1234:icd9', 'timeout': 1, 'deviations': 0},
{'id': '2345', 'code': '2345:icd9', 'timeout': 6, 'deviations': 1},
{'id': '3456', 'code': '3456:icd10', 'timeout': 120, 'deviations': 2},
]
numeric_data_set = {
'ranges': [
{'id': '1234', 'units': 'mg/dl', 'male_low': 50, 'male_high': 150, 'female_low': 70, 'female_high': 170},
{'id': '2345', 'units': '%', 'male_low': 5, 'male_high': 15, 'female_low': 7, 'female_high': 17},
],
'high_dds': dds,
'low_dds': dds,
'high_indications': indications,
'low_indications': indications,
}
binary_data_set = {
'is_negative_good': 'True',
'is_positive_good': 'Not Relevant',
'positive_dds': dds,
'negative_dds': dds,
'positive_indications': indications,
'negative_indications': indications,
}
lab_1234 = {
'lab_test_name': 'Trombotzitim',
'lab_test_id': '1234',
'lab_test_panel': 'Micro Major Space Cows',
'supported_panels': ['Micro Major Space Cows', 'Huge Spaceship Rocket'],
'numeric_data_set': numeric_data_set,
'binary_data_set': binary_data_set,
'explanations': explanations,
}
@app.route('/labtest/<lab_test_id>/<dataset_type>/<condition_type>/<abnormality_type>/<condition_id>', methods=['POST'])
def update_condition(
lab_test_id: str,
dataset_type: str,
condition_type: str,
abnormality_type: str,
condition_id: str,
) -> str:
"""
Examples:
/labtest/1234/binary/indication/positive/1234
/labtest/1234/numeric/differential_diagnosis/low/1234
"""
if lab_test_id != '1234':
raise NotImplementedError()
get_conditions_list = {
('binary', 'differential_diagnosis', 'positive'): lambda: binary_data_set['positive_dds'],
('binary', 'differential_diagnosis', 'negative'): lambda: binary_data_set['negative_dds'],
('binary', 'indications', 'positive'): lambda: binary_data_set['positive_indications'],
('binary', 'indications', 'negative'): lambda: binary_data_set['negative_indications'],
('numeric', 'differential_diagnosis', 'positive'): lambda: numeric_data_set['high_dds'],
('numeric', 'differential_diagnosis', 'negative'): lambda: numeric_data_set['low_dds'],
('numeric', 'indications', 'positive'): lambda: numeric_data_set['high_indications'],
('numeric', 'indications', 'negative'): lambda: numeric_data_set['low_indications'],
}
params = (dataset_type, condition_type, abnormality_type)
conditions = get_conditions_list.get(params)
if conditions is None:
raise ValueError(f"couldn't find the combination {params}")
for cond in conditions:
if cond['id'] == condition_id:
cond.update(request.form)
return
raise KeyError("couldn't find condition with id {condition_id} in combination {params}")
@app.route('/labtest/<lab_test_id>/numeric/range', methods=['POST'])
def add_range(lab_test_id: str, range_id: str) -> str:
numeric_data_set['ranges'].append({
'id': str(uuid.uuid4()),
**request.form,
})
return ''
@app.route('/labtest/<lab_test_id>/numeric/range/<range_id>', methods=['PUT'])
def update_range(lab_test_id: str, range_id: str) -> str:
if lab_test_id != '1234':
raise NotImplementedError()
for range_ in numeric_data_set['ranges']:
if range_['id'] == range_id:
range_.update(request.form)
return
raise KeyError("couldn't find range with id {range_id} in combination {params}")
@app.route('/labtest/<lab_test_id>')
def lab_test_page(lab_test_id: str) -> str:
if lab_test_id == '1234':
return json.dumps(lab_1234)
raise NotImplementedError()
if __name__ == '__main__':
app.run(port=8000, debug=True)
|
#!/usr/bin/python
# Args are:
# change id
# number
# workname
import sys
import utils
utils.recheck(sys.argv[1], sys.argv[2], workname=sys.argv[3])
|
# importing pandas
import pandas as pd
# making dataframes from lists using pandas
names = ['United States', 'Australia', 'Japan', 'India', 'Russia', 'Morocco', 'Egypt']
dr = [True, False, False, False, True, True, True]
cpc = [809, 731, 588, 18, 200, 70, 45]
# making dictionary from above lists
my_dict= {'country': names, 'drives_right': dr, 'cars_per_cap': cpc}
row_labels = ['US', 'AUS', 'JPN', 'IN', 'RU', 'MOR', 'EG']
# making dataframe using pandas
cars = pd.DataFrame(my_dict)
cars.index= row_labels
print(cars) |
from django.shortcuts import render, get_object_or_404
from django.views import generic
from django.views.generic import FormView
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponseRedirect
from .models import Post, Comment, Category
from .forms import CommentForm
class IndexView(generic.ListView):
template_name = 'blogapp/index.html'
model = Post
def get_queryset(self):
#in order to only show the most recent 5 posts:
return Post.objects.order_by('-pub_date')[:10]
class CategoryListView(generic.ListView):
template_name = 'blogapp/category.html'
model = Category
class PostDetailView(generic.DetailView):
model = Post
success_url = reverse_lazy('blogapp:commentsuccess')
def get_context_data(self, *args, **kwargs):
context = super(PostDetailView, self).get_context_data(**kwargs)
context["form"] = CommentForm
return context
def form_valid(self, form):
#add a comment
comment = Comment()
comment.person = form.cleaned_data['name']
comment.comment_text = form.cleaned_data['comment']
comment.post = form.cleaned_data['post']
comment.save()
return super(PostDetailView, self).form_valid(form)
class CategoryDetailView(generic.DetailView):
model = Category
template_name = 'blogapp/categorydetail.html'
class CommentFormView(FormView):
form_class = CommentForm
template_name = 'blogapp/commentform.html'
success_url = reverse_lazy('blogapp:commentsuccess')
#def get_context_data(self):
#context = super(PostDetailView,self).get_context_data()
#context.update({"comment_list": self.get_object().comment_set.all()})
#context.update({"comment_list": Comment.objects.filter(post__pk = self.kwargs.get('pk'))})
#return context
|
from collections import Counter
from matplotlib import pyplot as plt
import numpy as np
import cv2
class matcher:
"""
Class for finding candidate map for each image
Obtain homography matrix between an image pair based on candidate map
"""
def __init__(self, img_pts_dict, m_candidate, lowe_ratio, max_threshold):
# config setting
self.m_candidate = m_candidate
self.lowe_ratio = lowe_ratio
self.max_threshold = max_threshold
# used for extract features and feature matching
index_params = dict(algorithm=0, trees=5)
search_params = dict(checks=50)
self.surf = cv2.xfeatures2d.SURF_create()
self.flann = cv2.FlannBasedMatcher(index_params, search_params)
# compute candidate map
self.candidate_map = self.candidate_match(img_pts_dict)
def find_match(self, img_left, img_right):
# obtain all possible matches between images
matches, img_data = self.get_all_possible_match(img_left, img_right)
# obtain valid matches from possible matches
valid_matches = self.get_all_valid_match(matches)
if len(valid_matches) > 4:
pts_left = img_data['left']['kp']
pts_right = img_data['right']['kp']
match_pts_left = np.float32([pts_left[i].pt for (i, __) in valid_matches])
match_pts_right = np.float32([pts_right[i].pt for (__, i) in valid_matches])
# show feature match before and after lower concept
# homography matrix from right to left
H, s = self.compute_homography(match_pts_right, match_pts_left)
return H
return None
def get_features(self, img):
# make sure image is in format UINT-8
img = np.uint8(img)
kp, des = self.surf.detectAndCompute(img, None)
return {'des':des, 'kp':kp}
def get_all_possible_match(self, img_left, img_right):
# obtain features
data_left = self.get_features(img_left)
data_right = self.get_features(img_right)
# compute matches given two feature sets
all_matches = self.flann.knnMatch(data_right['des'], data_left['des'], k=2)
return all_matches, {'left':data_left, 'right':data_right}
def get_all_valid_match(self, all_matches):
valid_matches = []
for val in all_matches:
# get valid matche using lowe concept
if len(val) == 2 and val[0].distance < val[1].distance * self.lowe_ratio:
valid_matches.append((val[0].trainIdx, val[0].queryIdx))
return valid_matches
def compute_homography(self, pointsA, pointsB):
# compute homography matrix givin two point sets
H, status = cv2.findHomography(pointsA, pointsB, cv2.RANSAC, self.max_threshold)
return (H, status)
def candidate_match(self, img_pts_dict):
candidate_map = {}
for i in range(len(img_pts_dict)):
allimg = []
# candidate all image id
for each in img_pts_dict[i]:
allimg += each.knn_list
# mapping from image id to count
result = dict(Counter(allimg))
# sorted reversely
sorted_res = sorted(result.items(), key=lambda item: item[1], reverse=True)
# m candidate matching images
candidates = [each[0] for each in sorted_res[:self.m_candidate]]
candidate_map[i] = candidates
return candidate_map
def match_verification(self, inliers, matches):
a = 8
b = 0.3
if inliers > a + b * matches:
return True
else:
return False
|
# -*- coding: utf-8 -*-
import maya.cmds as cmds
import sys
import os
import json
import codecs
from functools import partial
#######################################
import os.path
import shutil
import datetime
import xgenm as xg
#######################################
class Tt_FileBackUp:
ExFrameLayout = ""
Tt_windowName = "Tt_FileBackUp"
In = cmds.playbackOptions(q=True,min=True)
Out = cmds.playbackOptions(q=True,max=True)
FilesPath = ""
OutDirValue = ""
OutDirButton = ""
InDirValue = ""
@classmethod
def main(self,*args):
if cmds.window(self.Tt_windowName ,exists=True):
cmds.deleteUI(self.Tt_windowName ,window=True)
MainWindow = cmds.window(self.Tt_windowName,t=self.Tt_windowName,w=450,resizeToFitChildren=True)
cmds.columnLayout()
self.ExFrameLayout = cmds.frameLayout(l="Export",backgroundColor=[0.8,0.2,0.3],w=450)
cmds.rowLayout(nc=1)
cmds.text(l=u" ■書き出し")
cmds.setParent("..")
cmds.rowLayout(nc=3)
self.OutDirValue = cmds.textField(w=410)
self.Tt_Path = cmds.workspace(q=True,rootDirectory=True)
cmds.textField( self.OutDirValue, edit=True,text=self.Tt_Path,enable=False)
self.OutDirButton = cmds.button(l="...",w=30,enable=False)
cmds.setParent("..")
cmds.rowLayout(nc=3)
self.CheckScene = cmds.checkBox(l=u"Sceneファイルも書き出す",v=True)
self.CheckMemo = cmds.checkBox(l=u"メモファイルも書き出す",v=True,cc=self.ChangeBox)
self.CheckXGen = cmds.checkBox(l=u"XGenファイルも書き出す",v=True)
cmds.setParent("..")
cmds.rowLayout(nc=1)
cmds.text(l=u" ■メモ (こちらに記載して下さい。)")
cmds.setParent("..")
cmds.rowLayout(nc=1)
self.TectBox = cmds.scrollField( editable=True, wordWrap=True, text=u'',h=90,w=440 )
cmds.setParent("..")
cmds.rowLayout(nc=1)
cmds.button(l="Export!!",w=440,h=40,backgroundColor=[0.8,0.2,0.3],c=self.BackUPPPPPP)
cmds.setParent("..")
cmds.showWindow(MainWindow)
@classmethod
def ChangeBox(self,*args):
Valie = cmds.checkBox(self.CheckMemo,q=True,v=True)
if Valie:
cmds.scrollField(self.TectBox, e=True,enable=True)
else:
cmds.scrollField(self.TectBox, e=True,enable=False)
@classmethod
def BackUPPPPPP(self,*args):
Flag = cmds.checkBox(self.CheckScene,q=True,v=True)
FlagXgen = cmds.checkBox(self.CheckXGen,q=True,v=True)
day = datetime.datetime.now()
Time = day.strftime("%m-%d_%Hh%Mm%Ss")
Scene_Name = cmds.file( query=True, sn=True).rpartition( "/" )[0]
Scene_Name_Only = cmds.file( query=True, sn=True , shn=True).partition( "." )[0]
Scene_Exten = cmds.file( query=True, sn=True , shn=True).partition( "." )[-1]
Path = Scene_Name + "/versionFolder/"
if not os.path.exists(Path):
os.makedirs(Path)
Path2 = Path + "/" + Time + "/"
if not os.path.exists(Path2):
os.makedirs(Path2)
if Flag:
cmds.file(save=True, force=True)
Rename = str(Path2)+str(Scene_Name_Only)+"_"+str(Time)+"."+Scene_Exten
Scene_Dir = cmds.file( query=True, sn=True)
shutil.copyfile(Scene_Dir, Rename)
if FlagXgen:
Scene_Name = cmds.file( query=True, sn=True).rpartition( "/" )[0]
XGenPath = Scene_Name.rpartition( "/" )[0] + "/xgen/"
if not os.path.exists(XGenPath):
os.makedirs(XGenPath)
Sel = cmds.ls(sl=True)[0]
try:
xg.exportPalette(str(Sel), str(XGenPath) + "Collection.xgen")
except:
print "NG"
RenameXgen = str(Path2)+str(Scene_Name_Only)+"_"+str(Time)+".xgen"
shutil.copyfile(str(XGenPath) + "Collection.xgen", RenameXgen)
FlagMemo = cmds.checkBox(self.CheckMemo,q=True,v=True)
if FlagMemo:
f = open(Path2 + Scene_Name_Only + "_" + Time +".txt",'w')
textVal = cmds.scrollField(self.TectBox,q=True,text=True)
WriteText = textVal.splitlines()
for i in WriteText:
f.write(i)
f.write("\r\n")
f.close()
F_BackUp = Tt_FileBackUp
F_BackUp.main() |
from copy import deepcopy
from typing import List
from typing import Dict
def shipment(items: List[Dict], drone: Dict, trip: int = 1, initial: bool = True) -> None:
if initial:
# sort items by weight from heavier to lighter
# do this only for the first function call (initial=True)
items.sort(key=lambda x: x.get('weight'), reverse=True)
print(drone.get('name'))
capacity = drone.get('capacity')
loading_weight = 0
selected_items = []
def drone_loaded() -> None:
"""
Print selected items
Check if we still have items in list and initiate next trip
:return:
"""
print(f'location {", ".join(selected_items)}')
if len(items):
shipment(items, drone, trip=trip + 1, initial=False)
print(f'trip #{trip}')
# copy items to keep original collection without changes
item_iterator = iter(deepcopy(items))
while True:
try:
item = next(item_iterator)
item_weight = item.get('weight')
# check if it's possible to add next item
if item_weight + loading_weight <= capacity:
loading_weight += item_weight
# add item to selected items
selected_items.append(item.get('name'))
# remove item from available items list
items.remove(item)
# check if item weight less then drone capacity
elif item_weight > capacity:
# if item weight > capacity remove it from the list
items.remove(item)
if loading_weight == capacity:
raise StopIteration()
except StopIteration as e:
# print current trip and initiate next
drone_loaded()
break
test_items = [{'name': 'test1', 'weight': 23},
{'name': 'test2', 'weight': 15},
{'name': 'test3', 'weight': 17}]
test_drone = {'name': 'drone 1', 'capacity': 40}
shipment(test_items, test_drone)
|
import cv2
import numpy as np
import math
import json
from skimage.feature import hog
#### Cropping functions ####
BOUNDING_BOX_FACTOR = 4/3
RESIZE_IMG_SIZE = (96, 96)
NUMBER_OF_BINS = 9
CELL_SIZE = (16, 16)
BLOCK_SIZE = (2, 2)
count = 0
def crop_image(img, x1, y1, x2, y2):
# (x1, y1) is top-left corner of where image should be cropped, relative to (0, 0). (x2, y2) is right-bottom corner.
# if rectangle is out of bounds of normal image, we add edge-padding
if (x1 < 0 or x2 > img.shape[1] or y1 < 0 or y2 > img.shape[0]):
# pad_size is size of largest distance crop box is out of bounds by
left_diff = abs(0 - x1) if x1 < 0 else 0
top_diff = abs(0 - y1) if y1 < 0 else 0
right_diff = abs(x2 - img.shape[1]) if x2 > img.shape[1] else 0
bottom_diff = abs(y2 - img.shape[0]) if y2 > img.shape[0] else 0
pad_size = max(left_diff, top_diff, right_diff, bottom_diff)
edge_padded = cv2.copyMakeBorder(img, pad_size, pad_size, pad_size, pad_size, cv2.BORDER_REPLICATE)
# top-left corner is moved
x1 = x1 + left_diff
y1 = y1 + top_diff
return edge_padded[y1:y2, x1:x2]
return img[y1:y2, x1:x2] # image subset
def bounding_box(center_x, center_y, radius_x, radius_y, with_factor=True):
bounding_factor = BOUNDING_BOX_FACTOR if with_factor else 1
x1, y1 = math.floor(center_x - (bounding_factor * radius_x)), math.floor(center_y - (bounding_factor * radius_y))
x2, y2 = math.floor(center_x + (bounding_factor * radius_x)), math.floor(center_y + (bounding_factor * radius_y))
return ((x1, y1), (x2, y2))
def resize_img_with_values(img, center_x, center_y, radius_x, radius_y):
resized_img = cv2.resize(img, RESIZE_IMG_SIZE)
x_ratio = RESIZE_IMG_SIZE[0] / img.shape[1]
y_ratio = RESIZE_IMG_SIZE[1] / img.shape[0]
resized_center_x = math.floor(x_ratio * center_x)
resized_center_y = math.floor(y_ratio * center_y)
resized_radius_x = math.floor(x_ratio * radius_x)
resized_radius_y = math.floor(y_ratio * radius_y)
return (resized_img, resized_center_x, resized_center_y, resized_radius_x, resized_radius_y)
def HOG_vector(img):
fd, _ = hog(img, orientations=NUMBER_OF_BINS, pixels_per_cell=CELL_SIZE,
cells_per_block=BLOCK_SIZE, visualize=True, multichannel=True)
return fd
def gen_postive_feature(img, center_x, center_y, radius_x, radius_y):
(x1, y1), (x2, y2) = bounding_box(center_x, center_y, radius_x, radius_y)
cropped_img = crop_image(img, x1, y1, x2, y2)
resized_img = cv2.resize(cropped_img, RESIZE_IMG_SIZE)
return HOG_vector(resized_img)
# slides 8 64x64 images around a presumably 96x96 positive sample. Scales back up to 96.
def sliding_boxes_feature_64(img):
sliding_size = (64, 64)
x_bound = img.shape[1] - sliding_size[1]
y_bound = img.shape[0] - sliding_size[0]
x_step = math.ceil(x_bound / 3)
y_step = math.ceil(y_bound / 3)
sliding_boxes = []
box_number = 0
for r in range(0, y_bound, y_step):
for c in range(0, x_bound, x_step):
if (box_number != 4):
cropped_img = img[r:r + sliding_size[1], c: c + sliding_size[0]]
resized_img = cv2.resize(cropped_img, RESIZE_IMG_SIZE)
feature_vector = HOG_vector(resized_img)
sliding_boxes.append(feature_vector)
box_number += 1
return sliding_boxes
def gen_negative_features(img, center_x, center_y, radius_x, radius_y):
global count
count += 8
return sliding_boxes_feature_64(img)
def gen_data_with_labels(face_data_arr, gen_negatives=False):
X_data = []
Y_data = []
for face_data in face_data_arr:
X_data.append(gen_postive_feature(*face_data))
Y_data.append(1)
if (gen_negatives):
X_data += gen_negative_features(*face_data)
Y_data += [0] * 8
return (X_data, Y_data)
#### File I/O functions ####
IMAGE_PATH = 'originalPics/'
FOLDER_PATH = 'FDDB-folds/'
# given ellipse text file name, location of images, returns [face data] where face data = (img, center_x, center_y, radius_x, radius_y)
def read_ellipse_text(text_file_name, image_path):
face_data_arr = []
text_file = open(text_file_name, "r")
lines = text_file.read().split('\n')[0:-1]
i = 0
img_name = ""
while (i < len(lines)):
if ("img" in lines[i]):
img_name = lines[i].strip('\n')
img_matrix = cv2.imread(image_path + img_name + ".jpg")
num_faces = int(lines[i + 1].strip('\n'))
i += 2
for j in range(i, i + num_faces):
face_data = lines[j].split(" ") # x_rad, y_rad, radians, x, y, 1
radius_y = float(face_data[0])
radius_x = float(face_data[1])
center_x = float(face_data[3])
center_y = float(face_data[4])
face_data_arr.append((img_matrix, center_x, center_y, radius_x, radius_y))
i += num_faces
return face_data_arr
# given location of text files, images, and range, returns [face data] in range where face data = (img, center_x, center_y, radius_x, radius_y)
def read_ellipse_text_in_range(interval, folder_path, image_path):
face_data_arr = []
for i in range(*interval):
text_file_name = folder_path + "FDDB-fold-0%d-ellipseList.txt" % i
if i > 9:
text_file_name = folder_path + "FDDB-fold-%d-ellipseList.txt" % i
face_data_arr += read_ellipse_text(text_file_name, image_path)
return face_data_arr
#### Create dataset using cropping + File I/O functions ####
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
# Arbitrary dataset where first 4 folders (pos + neg), first 8 train, last 2 test, last 1 (pos + neg)
def create_data():
X_train, Y_train, X_test, Y_test = [], [], [], []
face_data_arr_1 = read_ellipse_text_in_range((1, 5), FOLDER_PATH, IMAGE_PATH) # positives
face_data_arr_2 = read_ellipse_text_in_range((5, 9), FOLDER_PATH, IMAGE_PATH) # positives & negatives
face_data_arr_3 = read_ellipse_text_in_range((9, 10), FOLDER_PATH, IMAGE_PATH) # positives
face_data_arr_4 = read_ellipse_text_in_range((10, 11), FOLDER_PATH, IMAGE_PATH) # positives & negatives
# face_data_arr_1 = read_ellipse_text_in_range((1, 2), FOLDER_PATH, IMAGE_PATH) # positives
# face_data_arr_2 = read_ellipse_text_in_range((5, 6), FOLDER_PATH, IMAGE_PATH) # positives & negatives
# face_data_arr_3 = read_ellipse_text_in_range((9, 10), FOLDER_PATH, IMAGE_PATH) # positives
# face_data_arr_4 = read_ellipse_text_in_range((10, 11), FOLDER_PATH, IMAGE_PATH) # positives & negatives
# TODO: Adding two tuples (l1, l2) + (l3, l4) -> (l1 + l3, l2 + l4) more concise?
X_train, Y_train = gen_data_with_labels(face_data_arr_1)
X_train_add, Y_train_add = gen_data_with_labels(face_data_arr_2, True)
X_train += X_train_add
Y_train += Y_train_add
X_test, Y_test = gen_data_with_labels(face_data_arr_3)
X_test_add, Y_test_add = gen_data_with_labels(face_data_arr_4, True)
X_test += X_test_add
Y_test += Y_test_add
return (X_train, Y_train, X_test, Y_test)
# JSON save & load
FILE_NAME = "data_full.json"
def save_dataset(filename=FILE_NAME):
X_train, Y_train, X_test, Y_test = create_data()
data_obj = {
"X_train": X_train,
"Y_train": Y_train,
"X_test": X_test,
"Y_test": Y_test
}
with open(filename, 'w') as f: # writing JSON object
json.dump(data_obj, f, cls=NumpyEncoder)
def load_data(filename=FILE_NAME):
X_train, Y_train, X_test, Y_test = [], [], [], []
with open(filename) as f:
data_dict = json.load(f)
X_train_raw = data_dict["X_train"]
Y_train_raw = data_dict["Y_train"]
X_test_raw = data_dict["X_test"]
Y_test_raw = data_dict["Y_test"]
X_train = np.array(X_train_raw)
Y_train = np.array(Y_train_raw).reshape(len(Y_train_raw), 1)
X_test = np.array(X_test_raw)
Y_test = np.array(Y_test_raw).reshape(len(Y_test_raw), 1)
return (X_train, Y_train, X_test, Y_test)
# face_data_arr_1 = read_ellipse_text_in_range((1, 5), FOLDER_PATH, IMAGE_PATH) # positives
# face_data_arr_2 = read_ellipse_text_in_range((5, 9), FOLDER_PATH, IMAGE_PATH)
# _, Y_train = gen_data_with_labels(face_data_arr_1)
# _, Y_train_add = gen_data_with_labels(face_data_arr_2, True)
# Y_train += Y_train_add
# face_data_arr_3 = read_ellipse_text_in_range((9, 10), FOLDER_PATH, IMAGE_PATH) # positives
# face_data_arr_4 = read_ellipse_text_in_range((10, 11), FOLDER_PATH, IMAGE_PATH) # positives & negatives
# _, Y_test = gen_data_with_labels(face_data_arr_3)
# _, Y_test_add = gen_data_with_labels(face_data_arr_4, True)
# Y_test += Y_test_add
# print(len(Y_test))
# print(count) |
# Create your views here.
from django.contrib.auth import login, authenticate, logout
from django.views.generic.base import View
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
class login(View):
def get(self,request):
return render(this, "login.html")
def post(self,request):
if request.user.is_authenticated:
logout()
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(username,password)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect("/")
res = HttpResponse("401 Unauthorized")
res.status_code = 401
return res
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 18 03:21:31 2018
@author: yahia
"""
def pancakeSort(arr, n):
Current_Stack = n
while Current_Stack > 1:
mi = arr[:Current_Stack].index(max(arr[:Current_Stack]))
if mi != Current_Stack-1:
arr[:mi+1] = arr[:mi+1][::-1]
arr[:Current_Stack] = arr[:Current_Stack][::-1]
Current_Stack -= 1
List1 = [23,432,52,25,12,0,1]
print(List1)
pancakeSort(List1,len(List1))
print(List1) |
# encoding: utf-8
from __future__ import unicode_literals, print_function
__all__ = ['gen']
from string import Template
from src.parse import parse
from src.vm import instructions, pExpr
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
prologue = Template('''#include <stdio.h>
static int memory[1024 * 1024];
static int stack[$maxstack];
static size_t sp = 0;
#define PUSH(x) stack[sp++] = (x)
#define POP() stack[--sp]
#define READ() PUSH(getchar())
#define WRITE() putchar(POP())
#define SAVE() x = POP(); memory[x] = POP()
#define LOAD() x = POP(); PUSH(memory[x])
#define ADD() y = POP(); x = POP(); PUSH(x + y)
#define SUB() y = POP(); x = POP(); PUSH(x - y)
#define EQL() y = POP(); x = POP(); PUSH(x == y)
#ifdef VERBOSE_EXECUTION
void PEXPR(char *expr) {
fprintf(stderr, "%s\\n", expr);
}
void PSTACK() {
size_t i;
for(i = 0; i < sp; ++i) {
fprintf(stderr, "%d ", stack[i]);
}
fprintf(stderr, "\\n");
}
void PNUM(int num) {
fprintf(stderr, "%4d | ", num);
PSTACK();
}
void PSYM(char *sym) {
fprintf(stderr, "%4s | ", sym);
PSTACK();
}
void PTOS() {
fprintf(stderr, " | %d\\n", stack[sp - 1]);
}
#else
#define PEXPR(x)
#define PSTACK(x)
#define PNUM(x)
#define PSYM(x)
#define PTOS(x)
#endif
int main() {
register int x, y, label = 0;
while(1) {
switch(label) {
''')
epilogue = '''default:
goto halt;
}
PTOS();
label = POP();
}
halt:
return 0;
}
'''
class Writer(object):
'''Wrap a StringIO object for easy code generation'''
def __init__(self, content=None, level=0, indentstr=' '):
self.io = StringIO()
self.level = level
self.indentstr = indentstr
if content is not None:
self.io.write(content)
def write(self, value):
'''Append indented value to output stream'''
self.io.write('{}{}'.format(self.indentstr * self.level, value))
return self
def writeln(self, line):
'''Append indented line to output stream'''
return self.write(line + '\n')
def indent(self, levels=1):
'''Increase indent level'''
self.level += levels
return self
def dedent(self, levels=1):
'''Decrease indent level'''
self.level = max(0, self.level - levels)
return self
def value(self):
'''Return complete string content'''
return self.io.getvalue()
def gen(maxstack, source):
'''Generate C code from source'''
out = Writer(prologue.substitute(maxstack=maxstack))
out.indent(3)
# Each label and expression will be made into a case
# statement in the C program's main switch statement.
for label, expression in sorted(source.items()):
out.writeln('case {}:'.format(label)).indent()
out.writeln('PEXPR("{}");'.format(pExpr(expression)))
for instruction in expression:
out.writeln(instruction.ccode())
out.writeln("break;").dedent()
return out.write(epilogue).value()
|
me0 = "test_etaCas"
import numpy as np
import scipy as sp
from scipy.optimize import curve_fit
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator, NullLocator
import os, optparse, glob, time
from LE_Utils import filename_par, fs, set_mplrc
from LE_CSim import force_mlin
set_mplrc(fs)
##=================================================================================================
def main():
"""
Plot pdf of eta if given a file. pdf split into regions.
Adapted from test_etaPDF.py
Can be called as standalone, or plot_peta imported.
"""
me = me0+".main: "
parser = optparse.OptionParser(conflict_handler="resolve")
parser.add_option('-s','--show',
dest="showfig", default=False, action="store_true")
parser.add_option('-a','--plotall',
dest="plotall", default=False, action="store_true")
parser.add_option('--str',
dest="searchstr", default="", type="str")
parser.add_option('--nosave',
dest="nosave", default=False, action="store_true")
parser.add_option('-v','--verbose',
dest="verbose", default=False, action="store_true")
opt, args = parser.parse_args()
showfig = opt.showfig
plotall = opt.plotall
searchstr = opt.searchstr
nosave = opt.nosave
vb = opt.verbose
histfile = args[0]
fig, ax = plt.subplots(1,1)
plot_peta_CL(histfile, fig, ax, nosave)
if showfig:
plt.show()
return
##=================================================================================================
def plot_peta_CL(histfile, fig, ax, nosave=True):
"""
"""
me = me0+".plot_peta_CL: "
assert ("_CL_" in histfile) or ("_ML_" in histfile), me+"Designed for Casmir geometry."
a = filename_par(histfile, "_a")
R = filename_par(histfile, "_R")
S = filename_par(histfile, "_S")
T = filename_par(histfile, "_T")
## Space (for axes)
bins = np.load(os.path.dirname(histfile)+"/BHISBIN"+os.path.basename(histfile)[4:-4]+".npz")
xbins = bins["xbins"]
xmax = xbins[-1]
x = 0.5*(xbins[1:]+xbins[:-1])
exbins = bins["exbins"]
ex = 0.5*(exbins[1:]+exbins[:-1])
## Wall indices
Rind = np.abs(x-R).argmin()
Sind = np.abs(x-S).argmin()
Tind = np.abs(x-T).argmin()
cuspind = np.abs(x-0.5*(S+T)).argmin()
## Load histogram; normalise
H = np.load(histfile)
try: H = H.sum(axis=2)
except ValueError: pass
H /= np.trapz(np.trapz(H,ex,axis=1),x,axis=0)
## Distribution on either side of the wall: inner, outer
xin = x[:cuspind]
Hin = H[:cuspind,:]
xout = x[cuspind:]
Hout = H[cuspind:,:]
## q is probability density in eta. r is no longer relevant.
##
if "_CL_" in histfile:
qin = H[Tind]
qout = np.trapz(H[Sind:Rind+1], x[Sind:Rind+1], axis=0)
labels = ["Interior","Bulk"]
colour = ["g","b"]
elif "_ML_" in histfile:
## in is now right region and out is left region
qin = np.trapz(H[Sind:Rind+1], x[Sind:Rind+1], axis=0) if Sind!=Rind else H[Sind]
qout = np.trapz(H[x.size-Rind:Tind], x[x.size-Rind:Tind], axis=0)
labels = ["Small bulk","Large bulk"]
colour = ["b","g"]
## Normalise each individually so we can see just distrbution
qin /= np.trapz(qin, ex)
qout /= np.trapz(qout, ex)
##---------------------------------------------------------------
## PDF PLOT
ax.plot(ex, qout, colour[0]+"-", label=labels[1])
ax.fill_between(ex,0,qout,facecolor=colour[0],alpha=0.1)
ax.plot(ex, qin, colour[1]+"-", label=labels[0])
ax.fill_between(ex,0,qin,facecolor=colour[1],alpha=0.1)
## ##---------------------------------------------------------------
## ## Entire in/out region
## qIN = np.trapz(H[0:cuspind], x[0:cuspind], axis=0)
## qOUT = np.trapz(H[cuspind:], x[cuspind:], axis=0)
## ## Normalise pdf
### qIN /= np.trapz(qIN, ex)
### qOUT /= np.trapz(qOUT, ex)
## ## Normalise by size of region
### qIN /= x[cuspind]-x[0]
### qOUT /= x[-1]-x[cuspind]
## ## Plot
### ax.plot(ex, qIN, "b-", label=labels[0])
### ax.fill_between(ex,0,qIN,facecolor="blue",alpha=0.1)
### ax.plot(ex, qOUT, "g-", label=labels[1])
### ax.fill_between(ex,0,qOUT,facecolor="green",alpha=0.1)
# ## Lots of intermediate
# colours = ["r","k","b","k","grey","orange","grey","k","b"]
# linesty = ["-"]*6+["--"]*3
# for i,idx in enumerate([0,cuspind/2,cuspind,3*cuspind/2,Sind,(Sind+Rind)/2,Rind,Rind+cuspind/2,Rind+cuspind]):
# ax.plot(ex, H[idx], c=colours[i], ls=linesty[i], label="%.2f"%(x[idx]))
# ax.set_ylim(0,1.5*H[Sind].max())
## ##
## ##---------------------------------------------------------------
## Accoutrements
ax.yaxis.set_major_locator(MaxNLocator(7))
ax.set_xlabel(r"$\eta$")
ax.set_ylabel(r"$p(\eta)$")
ax.grid()
## Make legend if standalone
if not nosave:
ax.legend()
##---------------------------------------------------------------
## Plot inset
if not nosave:
if "_ML_" in histfile:
## Plot potential as inset
x = np.linspace(-R-1.5,+R+1.5,x.size)
fx = force_mlin([x,0],R,S,T)[0]
U = -sp.integrate.cumtrapz(fx, x, initial=0.0); U -= U.min()
left, bottom, width, height = [0.18, 0.63, 0.25, 0.25]
axin = fig.add_axes([left, bottom, width, height])
axin.plot(x, U, "k-")
# axin.axvspan(x[0],x[cuspind], color=lL[0].get_color(),alpha=0.2)
# axin.axvspan(x[cuspind],x[-1], color=lR[0].get_color(),alpha=0.2)
axin.set_xlim(-R-1.5, R+1.5)
# axin.set_ylim(top=2*U[cuspind])
axin.xaxis.set_major_locator(NullLocator())
axin.yaxis.set_major_locator(NullLocator())
axin.set_xlabel(r"$x$", fontsize = fs["fsa"]-5)
axin.set_ylabel(r"$U$", fontsize = fs["fsa"]-5)
##---------------------------------------------------------------
# fig.suptitle(r"PDF of $\eta$, divided into regions. $\alpha="+str(a)+"$, $R="+str(R)+"$, $S="+str(S)+"$")
if not nosave:
plotfile = os.path.dirname(histfile)+"/PDFeta"+os.path.basename(histfile)[4:-4]+".pdf"
fig.savefig(plotfile)
print me+"Figure saved to",plotfile
return
##=================================================================================================
##=================================================================================================
if __name__=="__main__":
main()
|
#!/usr/bin/python3
import random
class pageInfo:
def __init__(self, k):
self.hist = [0] * k
self.last = 0
self.CRP = 0;
def getHist(self):
return self.hist
def getLast(self):
return self.last
def getCRP(self):
return self.CRP
def updateCRP(self, value):
self.CRP = value
def updateHist(self, value, pos):
self.hist[pos] = value
def updateLast(self, value):
self.last = value
class LRUKRP:
#initialize the object
#bufferSize: number of pages in memory
#k: LRU-K algorithm parameter
#CRP:correlated reference period
#RIP:retained information period
def __init__(self, bufferSize, k, CRP, RIP):
self.buffer = [0] * bufferSize
self.pageinfo = {}
self.k = k
self.CRP = CRP
self.RIP = RIP
self.t = 0
self.hits = 0
self.requests = 0
def clearStats(self):
self.hits = 0
self.requests = 0
def requestPage(self, p):
self.t += 1
self.requests += 1
page = str(p)
#if this disk page is referenced for the first time, we create a history for the disk page
if page not in self.pageinfo:
self.pageinfo[page] = pageInfo(self.k)
if p in self.buffer:
self.hits += 1
if self.pageinfo[page].getLast() != 0 and self.t - self.pageinfo[page].getLast() > self.CRP:
if self.pageinfo[page].getHist()[0] == 0:
self.pageinfo[page].updateHist(self.t, 0)
else:
self.pageinfo[page].updateCRP(self.pageinfo[page].getLast() - self.pageinfo[page].getHist()[0])
pageObj = self.pageinfo[page]
for i in range(1, self.k):
pageObj.updateHist(pageObj.getHist()[self.k-i-1]+ pageObj.getCRP(), self.k-i)
pageObj.updateHist(self.t, 0)
self.pageinfo[page].updateLast(self.t)
#p is not in buffer and buffer is full
else:
#if there is space in buffer, put it into free space, else select a victim
if 0 in self.buffer:
self.buffer[self.buffer.index(0)] = p
self.requests -= 1
else:
min = self.t
victim = self.buffer[0]
for q in self.buffer:
page = self.pageinfo[str(q)]
if self.t - page.getLast() > self.CRP and page.getHist()[self.k-1] < min:
victim = q
min = page.getHist()[self.k-1]
self.buffer[self.buffer.index(victim)] = p
#now fetch p into the buffer frame previously held by victim
pageObj = self.pageinfo[str(p)]
if pageObj.getHist()[0] == 0:
pass
else:
for i in range(1, self.k):
pageObj.updateHist(pageObj.getHist()[self.k-i-1], self.k-i)
pageObj.updateHist(self.t, 0)
pageObj.updateLast(self.t)
def getHitRatio(self):
return float(self.hits) / self.requests
def main():
#generate a reference string
R = []
random.seed(20)
for i in range(100000):
m = random.randint(1,100)
if m < 90:
R.append(random.randint(1,100))
else:
R.append(random.randint(101, 1000))
bufManager = LRUKRP(50, 2, 30, 10000)
#print(bufManager.buffer)
for p in R:
bufManager.requestPage(p)
#bufManager.buffer.sort()
#print(bufManager.buffer)
print(bufManager.buffer)
print("number of hits: ", bufManager.hits)
print("number of requests: ", bufManager.requests)
print("hit ratio: ", bufManager.hits/bufManager.requests)
if __name__ == "__main__":main()
|
def swap(brickor, i, d):
temp = inverted([brickor.pop(i) for _ in range(2)])
return brickor + temp if d else temp + brickor
def unswap(brickor, i, d):
for b in reversed(inverted([brickor.pop(p if d else 0) for p in range(-2, 0)])):
brickor.insert(i, b)
return brickor
def inverted(r):
return ['S' if b == 'V' else 'V' for b in r]
def solve(brickor, depth, history):
if brickor in history['state']:
i = history['state'].index(brickor)
if depth >= history['depth'][i]:
return brickor
history['depth'][i] = depth
else:
history['state'].append(brickor.copy())
history['depth'].append(depth)
for a, (i, j) in enumerate(zip(brickor[:-1], brickor[1:])):
if 'S' in (i, j):
for direction in (0, 1):
brickor = swap(brickor, a, direction)
depth += 1
brickor = solve(brickor, depth, history)
depth -= 1
brickor = unswap(brickor, a, direction)
return brickor
brickor = list(input('Brickor ? '))
history = {'state': [], 'depth': []}
solve(brickor, 0, history)
print('Minsta antal drag:', history['depth'][history['state'].index(sorted(history['state'])[-1])]) |
#!/usr/bin/python3
"""
This module defines save_to_json_file function.
"""
import json
def save_to_json_file(my_obj, filename):
"""
writes an Object to a text file, using a JSON representation
"""
with open(filename, "w") as myfile:
json.dump(my_obj, myfile)
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
def line_seg(path,filename):
# Image sectioning
image = cv2.imread(path+'/'+filename)
height, width = image.shape[:2]
image.shape
# Let's get the starting pixel coordiantes (top left of cropped bottom)
start_row, start_col = int(height * .3), int(0)
# Let's get the ending pixel coordinates (bottom right of cropped bottom)
end_row, end_col = int(height * .7), int(width)
cropped_img = image[start_row:end_row , start_col:end_col]
cv2.imwrite(path+"/sectioned_image.png", cropped_img)
# Preproces the sectioned image
src_img = cv2.imread(path+"/sectioned_image.png")
copy = src_img.copy()
#(1009, 2559, 3) = (742,1320)
#(360, 1200, 3)
height = src_img.shape[0]
width = src_img.shape[1]
print("\n Resizing Image........")
src_img = cv2.resize(copy, dsize =(1320, 742), interpolation = cv2.INTER_AREA)
#src_img = cv2.resize(copy, dsize =(1320, 742), interpolation = cv2.INTER_AREA)
#src_img = cv2.resize(copy, dsize =(700, 650), interpolation = cv2.INTER_AREA)
src_img.shape
height = src_img.shape[0]
width = src_img.shape[1]
print("#---------Image Info:--------#")
print("\tHeight =",height,"\n\tWidth =",width)
print("#----------------------------#")
grey_img = cv2.cvtColor(src_img, cv2.COLOR_BGR2GRAY)
print("Applying Adaptive Threshold with kernel :- 21 X 21")
bin_img = cv2.adaptiveThreshold(grey_img,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,21,20)
bin_img1 = bin_img.copy()
bin_img2 = bin_img.copy()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
kernel1 = np.array([[1,0,1],[0,1,0],[1,0,1]], dtype = np.uint8)
print("Noise Removal From Image.........")
final_thr = cv2.morphologyEx(bin_img, cv2.MORPH_CLOSE, kernel)
contr_retrival = final_thr.copy()
print("Beginning Character Semenation..............")
count_x = np.zeros(shape= (height))
for y in range(height):
for x in range(width):
if bin_img[y][x] == 255 :
count_x[y] = count_x[y]+1
print("Beginning Line Segmenation..............")
# Find the empty white lines and text lines
text_lines = []
empty_lines = []
for i,x in enumerate(count_x):
if x > 0:
text_lines.append(i)
elif x == 0:
empty_lines.append(i)
# Find the start position of the each line
upperlines = []
for i, val in enumerate(text_lines):
if val != text_lines[i-1]+1:
upperlines.append(val)
# Find the end position of the each line
lowerlines = []
for idx, v in enumerate(empty_lines):
if v != empty_lines[idx-1]+1:
lowerlines.append(v)
# Delete any end position value greater than start position for minute pixel ranges
if upperlines[0] > lowerlines[0]:
del lowerlines[0]
for i, j in zip(upperlines, lowerlines):
if j-i <= 10:
upperlines.remove(i)
lowerlines.remove(j)
print(" Start and end position of each lines : ", upperlines, lowerlines)
# print(upperlines, lowerlines)
if len(upperlines)==len(lowerlines):
lines = []
for y in upperlines:
final_thr[y][:] = 255
for y in lowerlines:
final_thr[y][:] = 255
for y in range(len(upperlines)):
lines.append((upperlines[y], lowerlines[y]))
else:
print("Too much noise in image, unable to process.\nPlease try with another image. Ctrl-C to exit:- ")
exit()
lines = np.array(lines)
no_of_lines = len(lines)
print("\nGiven Text has # ",no_of_lines, " # no. of lines")
lines_img = []
for i in range(no_of_lines):
lines_img.append(bin_img2[lines[i][0]:lines[i][1], :])
#-------------Letter Width Calculation--------#
contours, hierarchy = cv2.findContours(contr_retrival,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
final_contr = np.zeros((final_thr.shape[0],final_thr.shape[1],3), dtype = np.uint8)
cv2.drawContours(src_img, contours, -1, (0,255,0), 1)
def letter_width(contours):
letter_width_sum = 0
count = 0
for cnt in contours:
if cv2.contourArea(cnt) > 20:
x,y,w,h = cv2.boundingRect(cnt)
letter_width_sum += w
count += 1
return letter_width_sum/count
mean_lttr_width = letter_width(contours)
print("\nAverage Width of Each Letter:- ", mean_lttr_width)
# Save the lines
for idx, val in enumerate(lines_img):
cv2.imwrite(path+f'/lines{idx}.png',255-lines_img[idx]) |
from tests.integration.api_test_suite import APITestSuite
class TestITRegistrationAPI(APITestSuite):
async def test_register_a_new_user(self):
response = await self.register_user()
self.assertEqual(201, response.status)
self.assertEqual(self.JSON, response.content_type)
body = await response.json()
self.assertRegex(body.get("id"), self.UUID_PATTERN)
self.assertEqual("Lucy", body.get("username"))
self.assertEqual("About Lucy", body.get("about"))
async def test_register_a_duplicate_user(self):
await self.register_user()
response = await self.register_user()
self.assertEqual(400, response.status)
self.assertEqual("Username already in use.", await response.text())
async def register_user(self):
response = await self.client.post("/users", json=dict(
username="Lucy",
password="alki342d",
about="About Lucy"))
return response
|
# def test(N):
# if N == 0:
# return -1
# result = [_ for _ in xrange(10)]
# for i in xrange(1, 10001):
# k = N * i
# m = str(k)
# for s in m:
# temp = int(s)
# if temp in result:
# result.remove(int(s))
# if len(result) == 0:
# return k
# return -1
# print test(1)
# print test(5)
# print test(15)
# print test(19)
# def test(str_):
# l = len(str_)
# d = [0] * l
# e = [0] * l
# for i in xrange(l):
# if i == 0:
# if str_[i] == '+':
# d[i] = 0
# e[i] = 1
# else:
# d[i] = 1
# e[i] = 0
# else:
# if str_[i] == '+':
# d[i] = min(d[i - 1], e[i - 1] + 1)
# e[i] = min(e[i - 1] + 2, d[i - 1] + 1)
# else:
# d[i] = min(e[i - 1] + 1, d[i - 1] + 2)
# e[i] = min(e[i - 1], d[i - 1] + 1)
# return d[l - 1]
import math
def is_prime(n):
if n % 2 == 0 and n > 2:
return 2
for i in range(3, int(math.sqrt(n)) + 1, 2):
if n % i == 0:
return i
return 1
def convert_to_decmial(str_, base):
r = 0
for i in xrange(len(str_) - 1, -1, -1):
if str_[i] == '1':
r += math.pow(base, len(str_) - 1 - i)
return r
def test(N, J):
count = 0
for num in xrange(2 ** (N - 1) + 1, 2 ** N, 2):
if count >= J:
break
if num > 3:
result = ""
str_ = bin(num)[2:].zfill(N)
result += str_ + " "
# print str_
b = True
for base in xrange(2, 11):
k = convert_to_decmial(str_, base)
# print k
d = is_prime(k)
if d > 1:
result += str(d) + " "
else:
b = False
continue
if b:
print result
count += 1
# test(6, 3)
# print test("--+-") # 3
# print test("+++") # 0
# print test("+-") # 2
# print test("+") # 0
# print test("-") # 1
if __name__ == "__main__":
testcases = input()
for caseNr in xrange(1, testcases + 1):
N, J = raw_input().split()
# print("Case #%i: %i" % (caseNr, k))
print("Case #1:")
k = test(int(N), int(J))
|
#!usr/bin/env python
# -*- coding : utf-8 -*-
# 本课概要
# • 浏览器伪装技术原理
# • 浏览器伪装技术实战
# 浏览器伪装技术原理:在 header中加入user-agent伪装成为浏览器
# 我们可以试试爬取csdn博客,我们发现会返回403,因为对方服务器会
# 对爬虫进行屏蔽。此时,我们需要伪装成浏览器才能爬取。
# 浏览器伪装我们一般通过报头进行,接下来我们通过实战分析一下
# 浏览器伪装技术实战
# 由于urlopen()对于一些HTTP的高级功能不支持,所以,我们如果要修
# 改报头,可以使用urllib.request.build_opener()进行,当然,也可以
# 使用urllib.request.Request()下的add_header()实现浏览器的模拟。
# 我们重点讲前者方法,后者方法是否掌握无所谓,有兴趣并有时间的同
# 学可以自行研究第2种方法,接下来通过实战讲解
import urllib.request
url="https://blog.csdn.net"
#headers后面
headers=("user-agent","Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36")
#添加一个报头信息
opener=urllib.request.build_opener()
opener.addheaders =[headers]
#将oppner设为去全局
urllib.request.install_opener(opener)
data=urllib.request.urlopen(url).read()
print(data)
file=open("F:/伪装浏览器.html","w+")
file.write(str(data))
file.close()
import urllib.request
url="https://blog.csdn.net"
#headers后面
headers=("user-agent","Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36")
#添加一个报头信息
opener=urllib.request.build_opener()
opener.addheaders =[headers]
data=opener.open(url).read()
print(data)
file=open("F:/伪装浏览器oppner.html","w+")
file.write(str(data))
file.close()
# #导入urllib.request模块
# import urllib.request
# url="https://blog.csdn.net"
# #设置请求头
# headers=("User-Agent","Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0")
# #创建一个opener
# opener=urllib.request.build_opener()
# #将headers添加到opener中
# opener.addheaders=[headers]
# #将opener安装为全局
# urllib.request.install_opener(opener)
# #用urlopen打开网页
# data=urllib.request.urlopen(url).read().decode('gbk','ignore')
# print(data)
#
# file=open("F:/liulanqi.html","w+")
# file.write(str(data))
# file.close()
|
import json
import io
from Ironscales import fetch_incidents
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
def test_ironscales_fetch_incident(mocker):
incidents_mocked_data = util_load_json('test_data/test_get_open_incidents.json')
last_run = {"data": [0]}
mocked_client = mocker.Mock()
mocked_client.get_open_incidents.return_value = {"incident_ids": [0, 1]}
mocked_client.get_incident.return_value = incidents_mocked_data
result = fetch_incidents(mocked_client, last_run)
result_to_compare = [
{
"name": "Ironscales incident: IS-1",
"occurred": "2021-07-06T06:34:00.070Z",
"rawJSON": json.dumps(incidents_mocked_data),
}
]
assert result == ([0, 1], result_to_compare)
|
# make sure pickle is imported
import pickle
class Dataset(object):
"""An abstract class representing a Dataset.
All other datasets should subclass it. All subclasses should override
``__len__``, that provides the size of the dataset, and ``__getitem__``,
supporting integer indexing in range from 0 to len(self) exclusive.
"""
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def __add__(self, other):
return ConcatDataset([self, other])
class Dataset_sino(Dataset):
def __init__(self, file_path_x, file_path_y, transform=None):
print('Loading Data....')
with open(file_path_x, 'rb') as x:
X = pickle.load(x)
print('X loaded.')
with open(file_path_y, 'rb') as y:
Y = pickle.load(y)
print('Y loaded.\n')
self.data_x = X
self.data_y = Y
self.transform = transform
def __len__(self):
return len(self.data_x)
def __getitem__(self, index):
# load image as ndarray type (Height * Width * Channels)
# be carefull for converting dtype to np.uint8 [Unsigned integer (0 to 255)]
# in this example, i don't use ToTensor() method of torchvision.transforms
# so you can convert numpy ndarray shape to tensor in PyTorch (H, W, C) --> (C, H, W)
train_image = self.data_x[index, :, : ,:]
test_image = self.data_y[index, :, :, :]
if self.transform is not None:
train_image = self.transform(train_image)
test_image = self.transform(test_image)
return train_image, test_image |
class Solution(object):
def majorityElement(self, nums):
"""
给定一个大小为 n 的数组,找到其中的众数。众数是指在数组中出现次数大于 ⌊ n/2 ⌋ 的元素。
你可以假设数组是非空的,并且给定的数组总是存在众数
---
:type nums: List[int]
:rtype: int
"""
dict = {}
n = len(nums)
flg = n//2
i = 0
while i < n:
if nums[i] in dict:
dict[nums[i]] += 1
if dict[nums[i]] > flg:
return nums[i]
else:
dict[nums[i]] = 1
i += 1
return nums[0]
a = Solution()
print(a.majorityElement([1]))
|
# coding: utf-8
from django.conf import settings
from django.db import models
from django.db.models import signals
from django import dispatch
from pg_fts.fields import TSVectorField
from market.core import models as core_models
class VendorSearch(models.Model):
"""Model pointing to the original searched model."""
link = models.OneToOneField('market.Vendor', on_delete=models.CASCADE)
name = models.CharField(max_length=100)
motto = models.CharField(max_length=255)
description = models.TextField(null=True)
fts_name = TSVectorField(('name', ), dictionary=settings.PG_FT_LANGUAGE)
fts = TSVectorField(
(('name', 'A'), ('motto', 'B'), ('description', 'C')),
dictionary=settings.PG_FT_LANGUAGE
)
class Meta:
"""Define mandatory app_label."""
app_label = 'search'
required_db_vendor = 'postgresql'
required_db_features = ['tsearch2']
@dispatch.receiver(signals.post_save, sender=core_models.Vendor)
def update_vendor(sender, instance, raw, created, using, update_fields, **kwargs):
"""Sync search fields with original model."""
pass
class ProductSearch(models.Model):
"""Model pointing to the original searched model."""
link = models.OneToOneField('market.Product', on_delete=models.CASCADE)
name = models.CharField(max_length=255)
description = models.TextField()
extra = models.TextField(null=True)
fts_name = TSVectorField(('name', ), dictionary=settings.PG_FT_LANGUAGE)
fts = TSVectorField(
(('name', 'A'), ('description', 'B'), ('extra', 'D')),
dictionary=settings.PG_FT_LANGUAGE
)
class Meta:
"""Define mandatory app_label."""
app_label = 'search'
required_db_vendor = 'postgresql'
required_db_features = ['tsearch2']
class ManufacturerSearch(models.Model):
"""Model pointing to the original searched model."""
link = models.OneToOneField('market.Manufacturer', on_delete=models.CASCADE)
name = models.CharField(max_length=100)
description = models.TextField(null=True, blank=True)
fts_name = TSVectorField(('name', ), dictionary=settings.PG_FT_LANGUAGE)
fts = TSVectorField(
(('name', 'A'), ('description', 'B')),
dictionary=settings.PG_FT_LANGUAGE
)
class Meta:
"""Define mandatory app_label."""
app_label = 'search'
required_db_vendor = 'postgresql'
required_db_features = ['tsearch2']
|
import os
import tensorflow as tf
import numpy as np
class TextFiles(tf.data.Dataset):
def __init__(self, directory, glob, shuffle=True):
self._directory = os.path.realpath(directory)
self._glob = glob
dataset = tf.data.Dataset.list_files(os.path.join(self._directory, self._glob),
shuffle=shuffle)
dataset = dataset.map(self.load_content)
self._dataset = dataset
@property
def output_classes(self):
return self._dataset.output_classes
@property
def output_shapes(self):
return self._dataset.output_shapes
@property
def output_types(self):
return self._dataset.output_types
def _inputs(self):
return []
def _as_variant_tensor(self):
return self._dataset._as_variant_tensor()
def load_content(self, txt_path):
content = tf.read_file(txt_path)
feature = {
'path': txt_path,
'content': content
}
return feature
def text_files(directory, **kwargs):
return TextFiles(directory, **kwargs)
|
import sys
number1 = int(input("Enter the first number: "))
number2 = int(input("Enter the second number: "))
try:
print(number1/number2)
except ZeroDivisionError:
print("You can not divide a by zero!!!!")
except OverflowError:
print("You can not divide by a number so small")
except ValueError:
print("The entries are wrong!!!!!!!")
def getInt(prompt):
while True:
try:
number = int(input(prompt))
return number
except ValueError:
print("Invalid number entered, please try again")
except EOFError:
sys.exit(0)
finally:
print("The finally clause always execute")
first_number = getInt("Please enter first number")
second_number = getInt("Please enter second number")
try:
print("{} divided by {} is {}".format(first_number, second_number, first_number / second_number))
except ZeroDivisionError:
print("You can not divide by zero") |
from django.conf import settings
# Mailing
from django.contrib import messages
from django.core.mail import send_mail, EmailMessage
# Vistas
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render, redirect, HttpResponseRedirect, get_object_or_404, get_list_or_404
from django.views.generic import ListView, DetailView, View
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.decorators.http import require_GET, require_POST
# Transbank
#from django.shortcuts import render
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import action
import os
from django.utils import timezone
from datetime import datetime
import random
import tbk
# Proyecto
from .forms import CheckoutForm, CouponForm, RefundForm, PaymentForm, PerfilForm, UserProfileForm, TokenForm
from .models import Item, OrderItem, Order, Payment, Coupon, Used_coupon, Refund, UserProfile, Category, Order_buy, Perfil, Token_tbk, Favorite
from .filters import ItemFilter
from django.db.models import Count
# Paginator
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
#Stripe
#import random
import string
import stripe
stripe.api_key = settings.STRIPE_SECRET_KEY
# Create your views here.
def create_ref_code():
return ''.join(random.choices(string.ascii_lowercase + string.digits, k=20))
def is_valid_form(values):
valid = True
for field in values:
if field =='':
valid = False
return valid
def Preciominmax(request):
if request.method == "POST":
preciominimo = request.POST.get('preciominimo')
preciomaximo = request.POST.get('preciomaximo')
resultado = Item.objects.raw('select id_item, title, price from core_item where price between "'+preciominimo+'" and "'+preciomaximo+'" ')
return render(request, 'shop.html', {'data':resultado})
else:
itemss = Item.objects.all()
return render(request, 'preciominmax.html', {'data':itemss})
class HomeView(ListView):
template_name = "home.html"
queryset = Item.objects.filter(is_active=True)
context_object_name = 'items'
def get(self, *args, **kwargs):
category = Category.objects.filter(is_active=True, in_home=True).order_by('orden')[:9]
item = Item.objects.filter( is_active=True)
context = {
'object_list': item,
'category_list': category,
}
return render(self.request, "home.html", context)
class ShopView(ListView):
model = Item
queryset = Item.objects.filter(is_active=True)
template_name = "shop.html"
paginate_by = 15
filterset_class = ItemFilter
def get_paginate_by(self, queryset):
return self.request.GET.get('paginate_by', self.paginate_by)
def get_context_data(self, **kwargs):
object_list = self.get_queryset()
user_filter = ItemFilter(self.request.GET, queryset=object_list)
context = super().get_context_data(object_list=user_filter.qs, user_filter=user_filter, **kwargs)
context['filter'] = ItemFilter(self.request.GET, queryset=self.get_queryset())
context['result'] = self.get_queryset().count()
context['all_items'] = str(Item.objects.filter(is_active=True).count())
context["itemcount"] = self.request.GET.get('paginate_by', self.paginate_by)
return context
class ItemDetailView(DetailView):
model = Item
template_name = "product.html"
def get_context_data(self, *args, **kwargs):
context = super(ItemDetailView, self).get_context_data(*args, **kwargs)
context['object_list'] = Item.objects.all()[:8]
context['orderitem'] = OrderItem.objects.filter()
context['object_list_slug'] = Category.objects.all()
context['order'] = Order.objects.filter()
return context
class ItemListView(ListView):
model = Item
template_name = "product_related.html"
class OrderSummaryView(LoginRequiredMixin, View):
def get(self, *args, **kwargs):
try:
order = Order.objects.get(user=self.request.user, ordered=False)
item = Item.objects.filter( is_active=True)
context = {
'object': order,
'item': item,
'couponform': CouponForm(),
}
return render(self.request, 'order_summary.html', context)
except ObjectDoesNotExist:
messages.warning(self.request, 'No tienes un pedido activo.')
return redirect("/")
class CategoryView(ListView):
model = Item
category = Category.objects.all()
queryset = Item.objects.filter(category=category, is_active=True)
template_name = "category.html"
paginate_by = 12
filterset_class = ItemFilter
def get_paginate_by(self, queryset):
return self.request.GET.get('paginate_by', self.paginate_by)
def get_context_data(self, **kwargs):
category = Category.objects.get(slug=self.kwargs['slug'])
object_list = Item.objects.filter(category=category, is_active=True)
user_filter = ItemFilter(self.request.GET, queryset=object_list)
context = super().get_context_data(object_list=user_filter.qs, user_filter=user_filter, **kwargs)
context['filter'] = ItemFilter(self.request.GET, queryset=self.get_queryset())
context['all_items'] = str(Item.objects.filter(category=category, is_active=True).count())
context["itemcount"] = self.request.GET.get('paginate_by', self.paginate_by)
context["category_title"] = category = Category.objects.get(slug=self.kwargs['slug'])
return context
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
@method_decorator(login_required, name='dispatch')
class CheckoutView(View):
def get(self, *args, **kwargs):
try:
order = Order.objects.get(user=self.request.user, ordered=False)
order_item = OrderItem.objects.filter(user=self.request.user, ordered=False)
perfil_account = Perfil.objects.filter(user=self.request.user).last()
profile = PerfilForm()
context = {
'profile': PerfilForm(),
'couponform': CouponForm(),
'order': order,
'order_item':order_item,
'DISPLAY_COUPON_FORM': True,
'perfil_account' : perfil_account,
}
return render(self.request, "checkout.html", context)
except ObjectDoesNotExist:
messages.info(self.request, "No tiene una orden activa.")
return redirect('/')
def post(self, *args, **kwargs):
profile = PerfilForm(self.request.POST or None)
if profile.is_valid():
profile = profile.save(commit=False)
profile.user = self.request.user
profile.save()
profile = PerfilForm()
return redirect('/integracion_tbk/normal_index/')
messages.warning(self.request, "Formulario incompleto")
return redirect('core:checkout')
context = {
'profile':profile,
}
return render(self.request,'checkout.html',context)
def init(request):
template_name = 'integracion_tbk/base.html'
return render(request,template_name)
class PaymentView(View):
def get(self, *args, **kwargs):
order = Order.objects.get(user=self.request.user, ordered=False)
if order.billing_address:
context = {
'order': order,
'DISPLAY_COUPON_FORM': False
}
userprofile = self.request.user.userprofile
if userprofile.one_click_purchasing:
# fetch the users card list
cards = stripe.Customer.list_sources(
userprofile.stripe_customer_id,
limit=3,
object='card'
)
card_list = cards['data']
if len(card_list) > 0:
# update the context with the default card
context.update({
'card': card_list[0]
})
return render(self.request, "payment.html", context)
else:
messages.warning(
self.request, "No tienes una dirección de PAGO.")
return redirect("core:checkout")
def post(self, *args, **kwargs):
order = Order.objects.get(user=self.request.user, ordered=False)
form = PaymentForm(self.request.POST)
userprofile = UserProfile.objects.get(user=self.request.user)
if form.is_valid():
token = form.cleaned_data.get('stripeToken')
save = form.cleaned_data.get('save')
use_default = form.cleaned_data.get('use_default')
if save:
if userprofile.stripe_customer_id != '' and userprofile.stripe_customer_id is not None:
customer = stripe.Customer.retrieve(
userprofile.stripe_customer_id)
customer.sources.create(source=token)
else:
customer = stripe.Customer.create(
email=self.request.user.email,
)
customer.sources.create(source=token)
userprofile.stripe_customer_id = customer['id']
userprofile.one_click_purchasing = True
userprofile.save()
amount = int(order.get_total() * 100)
try:
if use_default or save:
# charge the customer because we cannot charge the token more than once
charge = stripe.Charge.create(
amount=amount, # cents
currency="usd",
customer=userprofile.stripe_customer_id
)
else:
# charge once off on the token
charge = stripe.Charge.create(
amount=amount, # cents
currency="usd",
source=token
)
# create the payment
payment = Payment()
payment.stripe_charge_id = charge['id']
payment.user = self.request.user
payment.amount = order.get_total()
payment.save()
# assign the payment to the order
order_items = order.items.all()
order_items.update(ordered=True)
for item in order_items:
item.save()
order.ordered = True
order.payment = payment
order.buy_order = create_ref_code()
order.save()
'''
msg = "Usuario:{} Descuento:{} Total:{} Code:{}".format(order.user, order.get_discount_order_total(), order.get_total(), order.ref_code)
email = EmailMessage('Hello',
msg,
settings.DEFAULT_FROM_EMAIL,
[self.request.user.email],
)
email.send(fail_silently=True)
'''
messages.success(self.request, "Su orden fue exitosa!.")
return redirect("/")
except stripe.error.CardError as e:
body = e.json_body
err = body.get('error', {})
messages.warning(self.request, f"{err.get('message')}")
return redirect("/")
except stripe.error.RateLimitError as e:
# Too many requests made to the API too quickly
messages.warning(self.request, "Rate limit error")
return redirect("/")
except stripe.error.InvalidRequestError as e:
# Invalid parameters were supplied to Stripe's API
print(e)
messages.warning(self.request, "Invalid parameters")
return redirect("/")
except stripe.error.AuthenticationError as e:
# Authentication with Stripe's API failed
# (maybe you changed API keys recently)
messages.warning(self.request, "Not authenticated")
return redirect("/")
except stripe.error.APIConnectionError as e:
# Network communication with Stripe failed
messages.warning(self.request, "Network error")
return redirect("/")
except stripe.error.StripeError as e:
# Display a very generic error to the user, and maybe send
# yourself an email
messages.warning(
self.request, "Something went wrong. You were not charged. Please try again.")
return redirect("/")
except Exception as e:
# send an email to ourselves
messages.warning(
self.request, "A serious error occurred. We have been notifed.")
return redirect("/")
messages.warning(self.request, "Invalid data received")
return redirect("/payment/stripe/")
@login_required
def add_to_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_item, created = OrderItem.objects.get_or_create(
item=item,
user=request.user,
ordered=False
)
order_qs = Order.objects.filter(user=request.user, ordered=False)
if order_qs.exists():
order = order_qs[0]
# check if the order item is in the order
if order.items.filter(item__slug=item.slug).exists():
order_item.quantity += 1
order_item.save()
messages.success(request,"Ítem: " + item.title + " fue actualizado (+1)")
return redirect("core:order_summary")
else:
order.items.add(order_item)
messages.success(request, "Ítem: " + item.title + " fue agregado a su carrito")
return redirect("core:order_summary")
else:
ordered_date = timezone.now()
order = Order.objects.create(
user=request.user, ordered_date=ordered_date)
order.items.add(order_item)
messages.success(request,"Ítem: " + item.title + " fue actualizado (+1)")
return redirect("core:order_summary")
@login_required
def remove_from_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_qs = Order.objects.filter(
user=request.user,
ordered=False
)
if order_qs.exists():
order = order_qs[0]
# check if the order item is in the order
if order.items.filter(item__slug=item.slug).exists():
order_item = OrderItem.objects.filter(
item=item,
user=request.user,
ordered=False
)[0]
order_item.quantity -= 1
order_item.save()
order.items.remove(order_item)
order_item.delete()
messages.info(request,"Ítem: " + item.title + " fue eliminado (x)")
return redirect("core:order_summary")
else:
messages.info(request, "Este artículo no estaba en tu carrito.")
return redirect("core:product", slug=slug)
else:
messages.info(request, "No tienes un pedido activo.")
return redirect("core:product", slug=slug)
@login_required
def remove_single_item_from_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_qs = Order.objects.filter(
user=request.user,
ordered=False
)
if order_qs.exists():
order = order_qs[0]
# check if the order item is in the order
if order.items.filter(item__slug=item.slug).exists():
order_item = OrderItem.objects.filter(
item=item,
user=request.user,
ordered=False
)[0]
if order_item.quantity > 1:
order_item.quantity -= 1
order_item.save()
else:
order.items.remove(order_item)
messages.info(request,"Ítem: " + item.title + " fue actualizado (-1)")
return redirect('core:order_summary')
else:
messages.info(request, "Este artículo no estaba en tu carrito.")
return redirect('core:product', slug=slug)
else:
messages.info(request, "No tienes un pedido activo.")
return redirect('core:product', slug=slug)
'''
@login_required(login_url='login')
def get_coupon(request, code):
now = timezone.now()
coupon = Coupon.objects.get(code=code,
valid_from__lte=now,
valid_to__gte=now,
coupon_active=True)
add_used = Used_coupon(user=request.user,code=code)
add_used.save()
return coupon
'''
@login_required
def get_coupon(request, code):
now = timezone.now()
coupon = Coupon.objects.get(code=code,
valid_from__lte=now,
valid_to__gte=now,
coupon_active=True)
add_used = Used_coupon(user=request.user,code=code)
add_used.save()
return coupon
class AddCouponView(View):
def post(self, *args, **kwargs):
form = CouponForm(self.request.POST or None)
if form.is_valid():
code = form.cleaned_data.get('code')
try:
order = Order.objects.get(user=self.request.user, ordered=False)
coupon = Coupon.objects.get(code=code)
if Coupon.objects.all().filter(code=code).exists():
coupon = Coupon.objects.get(code=code)
if coupon.coupon_active :
if Used_coupon.objects.all().filter(user=self.request.user,code=code).exists():
messages.info(self.request,"Error: El cupón "+ coupon.code +" ya ha sido utilizado")
return redirect('core:checkout')
else:
order.coupon = get_coupon(self.request, code)
order.save()
messages.success(self.request, 'Su cupón ' +coupon.code +' ha sido aplicado exitosamente!')
return redirect('core:checkout')
else:
messages.error(self.request,"Error: El cupón "+ coupon.code +" expiró ")
return redirect('core:checkout')
else:
messages.error(self.request," Este cupón no existe ")
return redirect('core:checkout')
except ObjectDoesNotExist:
messages.info(self.request, 'Este cupón no existe')
return redirect('core:checkout')
class RequestRefundView(View):
def get(self, *args, **kwargs):
form = RefundForm()
context = {
'form': form
}
return render(self.request, "request_refund.html", context)
def post(self, *args, **kwargs):
form = RefundForm(self.request.POST)
if form.is_valid():
buy_order = form.cleaned_data.get('buy_order')
message = form.cleaned_data.get('message')
email = form.cleaned_data.get('email')
try:
order = Order.objects.get(buy_order=buy_order)
order.refund_requested = True
order.save()
refund = Refund()
refund.order = order
refund.reason = message
refund.email = email
refund.save()
messages.info(self.request, "Tu solicitud ha sido recibida")
return redirect('core:request_refund')
except ObjectDoesNotExist:
messages.info(self.request, "Esta orden no existe.")
return redirect('core:request_refund')
@login_required
def dashboard(request):
user_orders = Order.objects.all().filter(user=request.user).order_by('-start_date')
userprofile = UserProfile.objects.get(user=request.user)
order_buy = Order_buy.objects.filter()
context = {
'orders' : user_orders,
'userprofile' : userprofile,
'order_buy': order_buy,
}
return render(request,'accounts/dashboard.html',context)
@login_required
def details(request,buy_order):
orders = get_list_or_404(Order, user=request.user, buy_order=buy_order)
order = Order.objects.all().filter(user=request.user)
payments = get_list_or_404(Payment, user=request.user)
payment = Order.objects.all().filter(user=request.user)
items = get_list_or_404(Item)
item = Item.objects.all()
order_items = get_list_or_404(OrderItem,user=request.user)
order_item = OrderItem.objects.filter(user=request.user, buy_order=buy_order)
order_buy = Order_buy.objects.filter(user=request.user, buy_order=buy_order)
context = {
'orders' : orders ,
'buy_order' : buy_order ,
'payment': payment,
'payment':payment,
'items':items,
'item':item,
'order_item': order_item,
'order_items': order_items,
'order_buy': order_buy,
}
return render(request,'accounts/bought.html',context)
@login_required
def profiles(request):
try:
perfil_account = Perfil.objects.filter(user=request.user).last()
order_buy = Order_buy.objects.filter(user=request.user).count
order_item = OrderItem.objects.filter(user=request.user, ordered=True).count
coupon_used = Used_coupon.objects.filter(user=request.user).count
favorites = Favorite.objects.all().filter(user_id=request.user.id).count
context = {
'perfil_account' : perfil_account,
'order_buy': order_buy,
'order_item': order_item,
'coupon_used':coupon_used,
'favorites':favorites,
}
return render(request,'accounts/profile.html',context)
except ObjectDoesNotExist:
messages.warning(request, 'no tienes un pedido activo.')
return redirect("accounts/profile.html")
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from django.contrib.auth.models import User
from django.template import Context
@login_required
def thankyou(request):
try:
perfil_account = Perfil.objects.all().last()
order = Order.objects.get(user=request.user, ordered=False)
orderitem = OrderItem.objects.filter(user=request.user, ordered=False)
orderbuy = Order_buy.objects.all()
orderbuy = Order_buy()
orderbuy.user = request.user
orderbuy.buy_order = create_ref_code()
orderbuy.save()
plaintext = get_template('send_email_voucher/email_voucher_detail.txt')
htmly = get_template('send_email_voucher/email_voucher_detail.html')
context = {
'username': request.user.username,
'perfil_account' : perfil_account,
'order': order,
'orderitem':orderitem,
'orderbuy':orderbuy,
}
to_mail = request.user.email
subject, from_email, to = ''+perfil_account.first_name+', gracias por tu compra en La Mascada!', 'contacto@lamascada.cl', to_mail,
text_content = plaintext.render(context)
html_template = htmly.render(context)
msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
msg.attach_alternative(html_template, "text/html")
msg.send()
amount = int(order.get_total() * 100)
# create the payment
payment = Payment()
payment.user = request.user
payment.amount = order.get_total()
payment.stripe_charge_id = orderbuy.buy_order
payment.save()
order_items = order.items.all()
order_items.update(ordered=True)
order_items.update(buy_order=orderbuy.buy_order)
for item in order_items:
item.save()
order.ordered = True
order.payment = payment
order.user = request.user
order.buy_order = orderbuy.buy_order
order.save()
orders = Order.objects.filter(user=request.user, ordered=True).last()
context = {
'orders':orders,
}
return render(request, 'thankyou.html', context)
except ObjectDoesNotExist:
return redirect("/")
def token_tbk(request):
form = TokenForm()
if request.method == 'POST':
form = TokenForm(request.POST or NONE)
if form.is_valid():
form.save()
form = TokenForm()
messages.success(request, "Tu solicitud ha sido recibida")
return redirect('/contacto')
context = {'form':form}
return render(request,'integracion_tbk/final_normal.html',context)
@login_required
def addtofavorite(request):
if request.method == 'POST':
user_id = request.user.id
product_id = request.POST['product_id']
product_slug = request.POST['product_slug']
product_name = request.POST['product_name']
product_photo = request.POST['product_photo']
product_price = request.POST['product_price']
if Favorite.objects.all().filter(user_id=user_id ,product_id=product_id).exists():
messages.warning(request ,"Ítem: " + product_name + " ha sido agregado antes")
return redirect('/favorites')
else :
favorite = Favorite(user_id=user_id , product_id=product_id, product_slug=product_slug, product_name=product_name , product_photo=product_photo , product_price=product_price)
favorite.save()
messages.success(request ,"Ítem: " + product_name + " ha sido agregado a favoritos")
return redirect('/favorites')
@login_required
def favorite_inf(request):
favorites = Favorite.objects.all().filter(user_id=request.user.id)
context = {
'favorites':favorites,
}
return render(request,'accounts/favorite.html',context)
@login_required
def removefavorite(request):
if request.method == 'POST' :
user_id = request.user.id
product_id = request.POST['product_id']
product_slug = request.POST['product_slug']
product_name = request.POST['product_name']
favorites_id = request.POST['favorites_id']
fav = Favorite.objects.all().filter(id=favorites_id, product_slug=product_slug, product_name=product_name,product_id=product_id)
fav.delete()
messages.info(request,"Ítem: " + product_name + " ha sido eliminado de favoritos" )
return redirect('/favorites')
|
from django.db import models
from datetime import datetime
from django.contrib.auth.models import AbstractUser
# class Category(models.Model):
# name = models.CharField(max_length=100)
# date_created = models.DateField(auto_now_add=True)
# slug = models.SlugField()
# def __str__(self):
# return self.name
# def get_absolute_url(self):
class Blog(models.Model):
title = models.CharField(max_length=100, null=False)
content = models.TextField(null=False)
image = models.ImageField(null=True)
updated_date = models.DateTimeField(auto_now_add=True, null=True)
# category = models.ForeignKey('Category', null=True, blank=True, on_delete=models.CASCADE)
CATEGORY_CHOICES = (
("BUSINESS", "Business"),
("TECHNOLOGY", "Technology"),
("SPORT", "Sport"),
("POLITICS", "Politics"),
("OTHER", "other"),
)
category = models.CharField(max_length=30, choices=CATEGORY_CHOICES, default= "BUSINESS")
class User(AbstractUser):
firstname = models.CharField(max_length=100, blank=True)
lastname = models.CharField(max_length=100, blank=True)
bio = models.TextField(max_length=500, blank=True)
location = models.CharField(max_length=30, blank=True)
birth_date = models.DateField(null=True, blank=True)
class Contact(models.Model):
fullname = models.CharField(max_length=100, blank=False)
address = models.CharField(max_length=100, blank=False)
email = models.EmailField(max_length=100, blank=False)
message = models.TextField(max_length=500, blank=False)
|
class NoTransformation:
"""
Implements basic parameter transformation strategy -
prameter is left unmodified.
"""
def eval(self, value):
return value
class FormatTransformation:
"""
Implements basic interpolation transformation startegy.
Parameter value is transformed through .format method
using named placeholders and values supplied from the
context passed at the time of initialization.
"""
def __init__(self, **kwargs):
self._context = kwargs
def eval(self, value):
return value.format(**self._context)
def set_parameter_transformation_service(context, service=NoTransformation()):
"""
Installs parameter transformaton service into context.
"""
setattr(context, 'parameter_transformation', service)
def transform_parameter(context, value):
"""
Performs parameter transformation using service resolved from the context.
If transformation service is not defined on the context (expected member
attribute name is parameter_transformation) the it installs default
parameter transformation service (NoTransformation).
"""
t = getattr(context, 'parameter_transformation', NoTransformation())
return t.eval(value) |
import smtplib
from email.mime.text import MIMEText
from email.header import Header
import pandas as pd
ini = pd.read_excel('3ini.xlsx')
il = ini[0].values.tolist()
gmail_user = il[0]
gmail_password = il[1]
subject = il[2]
body = il[3]
df = pd.read_excel('2send.xlsx').iloc[:,0:1]
tl = df['Mail'].values.tolist()
msg = MIMEText(body, 'plain', 'utf-8')
msg['From'] = gmail_user
msg['Subject'] = Header(subject, 'utf-8')
for to in tl:
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.ehlo()
server.login(gmail_user, gmail_password)
server.sendmail(gmail_user, to, msg.as_string())
server.close()
print('Email sent!') |
try:
from pyscreenshot import grab
except ImportError as e:
print str(e)
def screenshot():
try:
im=grab()
im.save('scr.png')
except Exception:
pass
#screenshot()
|
import discord
from discord.ext.commands import Bot
from discord.ext import commands
import asyncio
import time
import random
from discord import Game
Client = discord.client
client = commands.Bot(command_prefix = '!')
Clientdiscord = discord.Client()
@client.event
async def on_member_join(member):
print('Recognised that a member called ' + member.name + ' joined')
await client.send_message(member, 'Welome to Team Usless! Please wait for a Tryout or request fan.')
print('Sent message to ' + member.name)
async def on_member_join(member):
print('Recognised that a member called ' + member.name + ' joined')
await client.send_message(member, 'Welome to Team Usless! Please wait for a Tryout or request fan.')
print('Sent message to ' + member.name)
async def on_member_join(member):
print('Recognised that a member called ' + member.name + ' joined')
await client.send_message(member, 'Welome to Team Usless! Please wait for a Tryout or request fan.')
print('Sent message to ' + member.name)
async def on_member_join(member):
print('Recognised that a member called ' + member.name + ' joined')
await client.send_message(member, 'Welome to Team Usless! Please wait for a Tryout or request fan.')
print('Sent message to ' + member.name)
async def on_ready():
await client.change_presence(game=Game(name='with Nobody'))
print('Ready, Freddy')
@client.event
async def on_message(message):
if message.content == '!logo':
em = discord.Embed(description='Team useless Logo')
em.set_image(url='https://cdn.discordapp.com/attachments/554832401445027841/554845218101264427/Free_Sample_By_Wix_1.jpg')
await client.send_message(message.channel, embed=em)
if ('Nigga') in message.content:
await client.delete_message(message)
if message.content.startswith('!random'):
randomlist = ["1","2","3"]
await client.send_message(message.channel,(random.choice(randomlist)))
if message.content.startswith('!getpinged'):
await client.send_message(message.channel,'Here you go <@%s>' %(message.author.id))
if message.content.startswith('!getpinged'):
await client.send_message(message.channel,'Here you go <@%s>' %(message.author.id))
client.run('NTU0ODM2NjQyODg0NjE2MTky.D2ih8Q.qdsuY9MMQ9F3fWaRzJAESp9E8YM')
|
import sys
import numpy as np
from sklearn.metrics import average_precision_score
def mean_average_precision(sort_data):
#to do
count_1 = 0
sum_precision = 0
for index in range(len(sort_data)):
if sort_data[index][1] == 1:
count_1 += 1
sum_precision += 1.0 * count_1 / (index+1)
return sum_precision / count_1
def mean_reciprocal_rank(sort_data):
sort_lable = [s_d[1] for s_d in sort_data]
assert 1 in sort_lable
return 1.0 / (1 + sort_lable.index(1))
def precision_at_position_1(sort_data):
if sort_data[0][1] == 1:
return 1
else:
return 0
def recall_at_position_k_in_10(sort_data, k):
sort_lable = [s_d[1] for s_d in sort_data]
select_lable = sort_lable[:k]
return 1.0 * select_lable.count(1) / sort_lable.count(1)
def evaluation_one_session(data):
sort_data = sorted(data, key=lambda x: x[0], reverse=True)
m_a_p = mean_average_precision(sort_data)
m_r_r = mean_reciprocal_rank(sort_data)
p_1 = precision_at_position_1(sort_data)
r_1 = recall_at_position_k_in_10(sort_data, 1)
r_2 = recall_at_position_k_in_10(sort_data, 2)
r_5 = recall_at_position_k_in_10(sort_data, 5)
return m_a_p, m_r_r, p_1, r_1, r_2, r_5
def evaluate(file_path):
sum_m_a_p = 0
sum_m_r_r = 0
sum_p_1 = 0
sum_r_1 = 0
sum_r_2 = 0
sum_r_5 = 0
i = 0
total_num = 0
with open(file_path, 'r') as infile:
for line in infile:
if i % 10 == 0:
data = []
tokens = line.strip().split('\t')
data.append((float(tokens[0]), int(tokens[1])))
if i % 10 == 9:
total_num += 1
m_a_p, m_r_r, p_1, r_1, r_2, r_5 = evaluation_one_session(data)
sum_m_a_p += m_a_p
sum_m_r_r += m_r_r
sum_p_1 += p_1
sum_r_1 += r_1
sum_r_2 += r_2
sum_r_5 += r_5
i += 1
print('total num: %s' %total_num)
print('MAP: %s' %(1.0*sum_m_a_p/total_num))
print('MRR: %s' %(1.0*sum_m_r_r/total_num))
print('P@1: %s' %(1.0*sum_p_1/total_num))
return (1.0*sum_m_a_p/total_num, 1.0*sum_m_r_r/total_num, 1.0*sum_p_1/total_num,
1.0*sum_r_1/total_num, 1.0*sum_r_2/total_num, 1.0*sum_r_5/total_num)
if __name__ == '__main__':
result = evaluate(sys.argv[1])
for r in result:
print(r)
|
# Generated by Django 3.0.5 on 2020-05-07 16:09
from django.db import migrations
import wagtail.contrib.table_block.blocks
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('base_pages', '0004_auto_20200506_1628'),
]
operations = [
migrations.AlterField(
model_name='blogpage',
name='body',
field=wagtail.core.fields.StreamField([('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('small_table', wagtail.contrib.table_block.blocks.TableBlock(table_options={'contextMenu': ['row_above', 'row_below', '---------', 'col_left', 'col_right', '---------', 'remove_row', 'remove_col', '---------', 'undo', 'redo', '---------', 'copy', 'cut---------', 'alignment'], 'startCols': 2, 'startRows': 5})), ('large_table', wagtail.contrib.table_block.blocks.TableBlock(table_options={'contextMenu': ['row_above', 'row_below', '---------', 'col_left', 'col_right', '---------', 'remove_row', 'remove_col', '---------', 'undo', 'redo', '---------', 'copy', 'cut---------', 'alignment'], 'startCols': 3, 'startRows': 8}))]),
),
]
|
#!/usr/local/bin/python3.8
# polyFitter.py
# 6/21/2021
# Aidan Gray
# aidan.gray@idg.jhu.edu
#
# This script reads in a csv file containing calibration data and
# fits a Polynomial Series to it.
from matplotlib import pyplot as plt
from polyFit import polyFit
import numpy as np
import sys
import csv
import os
### How to use ################################################################
# Run this script with the following command options:
# deg=# Where # is the degree of fit. If not given, default is 5.
# test=on This allows the user to enter test points one at a time and
# displays the calibrated result. enter 'q' to exit
# plot=on This plots the data, the fit, and test points if given.
# file=FILEPATH This is the path to the calibration data file.
#
# The calibration data file should be a CSV in the following format:
#
# RAW_VALUE_1,TEMPERATURE_1
# RAW_VALUE_2,TEMPERATURE_2
# ...
# RAW_VALUE_N,TEMPERATURE_N
#
# The script will output the polynomial coefficients in order of low -> high.
###############################################################################
def create_2d_plot(dataList1, dataList2, dataList3=None, fit_x=None, fit_y=None, poly=None):
"""
Uses the supplied data to create a 2D plot.
Input:
- dataList List of coordinates (1, 2, or 3)
- fit_x polynomial fit x values
- fit_y polynomial fit y values
Output:
"""
xData1 = []
yData1 = []
for point in dataList1:
xData1.append(point[0])
yData1.append(point[1])
plt.plot(xData1, yData1, '-o', color='blue')
xData2 = []
yData2 = []
for point in dataList2:
xData2.append(point[0])
yData2.append(point[1])
plt.plot(xData2, yData2, '-+', color='red')
if dataList3 != None:
xData3 = []
yData3 = []
for point in dataList3:
xData3.append(point[0])
yData3.append(point[1])
plt.plot(xData3, yData3, '-^', color='green')
plt.legend(['calibration data', 'test fit', 'user test points'])
else:
plt.legend(['calibration data', 'test fit'])
plt.title('Temperature vs Sensor Units')
plt.ylabel('Temperature')
plt.xlabel('Sensor Units')
return plt
def get_data(fileName):
"""
Reads in a CSV file containing temperatures and sensor units
CSV should be of format:
float,float
Input:
- fileName Filename of the CSV file, ending in .csv
Output:
- data List of calibration data
"""
# parse the CSV file
with open(fileName, 'rt', encoding='utf-8-sig') as csvfile:
data = [(float(temp), float(sens))
for temp, sens in csv.reader(csvfile, delimiter= ',')]
return data
if __name__ == "__main__":
filePath = None
degree = 10
test = 'off'
plot = 'off'
for n in range(len(sys.argv)):
try:
if 'file=' in sys.argv[n]:
filePath = sys.argv[n].split('=')[1]
elif 'deg=' in sys.argv[n]:
degree = int(sys.argv[n].split('=')[1])
elif 'plot=' in sys.argv[n]:
plot = sys.argv[n].split('=')[1]
elif 'test=' in sys.argv[n]:
test = sys.argv[n].split('=')[1]
except Exception as e:
sys.exit(f'ERROR: {e}')
if filePath == None:
sys.exit('ERROR: No calibration data supplied.')
if filePath[0] == '~':
filePath = os.path.expanduser('~')+filePath[1:]
if filePath[len(filePath)-4:] == '.csv':
data = get_data(filePath)
polyFit = polyFit(data, degree)
print(f'coefficients(low->high)={polyFit.polyFit[0]}')
# coeffs = [-3.37351458e+02, 1.79897580e+04, -1.57983258e+05, 7.31552361e+05,
# -2.03985715e+06, 3.60952880e+06, -4.13891300e+06, 3.05966910e+06,
# -1.40503743e+06, 3.64151783e+05, -4.06729712e+04]
# polyFit = polyFit(coeffs=coeffs)
testData = get_data(filePath)
testResults = []
## Test each point by calculating temp with the Polynomial Fit
for n in range(len(testData)):
temp = polyFit.calib_t(testData[n][0])
testResults.append([testData[n][0], temp])
if test == 'on':
testResults2 = []
inTmp = ''
while inTmp != 'q':
inTmp = input(':')
try:
tmp = float(inTmp)
calTmp = polyFit.calib_t(tmp)
print(f'={calTmp}')
testResults2.append([tmp, calTmp])
except:
pass
if plot == 'on':
if test == 'on':
plt = create_2d_plot(data, testResults, testResults2)
plt.show()
else:
plt = create_2d_plot(data, testResults)
plt.show() |
# Python 3
import sys
class Rectangle:
def __init__(self, x1, y1, x2, y2):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
def pointWithin(self, x, y):
if x >= self.x1 and x <= self.x2:
if y >= self.y2 and y <= self.y1:
return True
return False
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
coord = test.strip().split(',')
for i in range(len(coord)):
coord[i] = int(coord[i])
minXVal = min(coord[0], coord[2], coord[4], coord[6])
minYVal = min(coord[1], coord[3], coord[5], coord[7])
#print("Minimum x and y: ", minXVal, minYVal)
if minXVal < 0:
minXVal = minXVal * -1
if minYVal < 0:
minYVal = minYVal * -1
coord[0] = coord[0] + minXVal
coord[2] = coord[2] + minXVal
coord[4] = coord[4] + minXVal
coord[6] = coord[6] + minXVal
coord[1] = coord[1] + minYVal
coord[3] = coord[3] + minYVal
coord[5] = coord[5] + minYVal
coord[7] = coord[7] + minYVal
#for i in coord:
# print(i)
r1 = Rectangle(coord[0], coord[1], coord[2], coord[3])
r2 = Rectangle(coord[4], coord[5], coord[6], coord[7])
print(r1.pointWithin(r2.x1, r2.y1) or r1.pointWithin(r2.x2, r2.y2) or r1.pointWithin(r2.x2, r2.y1) or r1.pointWithin(r2.x1, r2.y2))
test_cases.close() |
from django.urls import path, re_path
from . import views
app_name = "encyclopedia"
urlpatterns = [
path("", views.index, name="index"),
re_path(r"^wiki/(?P<title>\w*)/$", views.wiki, name="wiki"),
path("search", views.search, name="search"),
path("new", views.new, name="new"),
path("edit/<str:title>", views.edit, name="edit")
]
|
class NiceFormatter(object):
def print(self, triples):
previous_page_url = None
previous_link_type = None
for page_url, link_type, link_url in triples:
if page_url != previous_page_url:
print(page_url)
previous_page_url = page_url
if link_type != previous_link_type:
print(" Link type '{}':".format(link_type))
previous_link_type = link_type
print(" {}".format(link_url))
|
from fastapi.encoders import jsonable_encoder
from fastapi.exception_handlers import request_validation_exception_handler
from fastapi.exceptions import RequestValidationError
from starlette.requests import Request
from starlette.responses import JSONResponse
from .responses import fhir_rest_response
from fhirpath.utils import lookup_fhir_class
HTTP_400_FHIR_VALIDATION = 400
FHIR = False
async def fhir_request_validation_exception_handler(
request: Request, exc: RequestValidationError
) -> JSONResponse:
# do your custom
# identify as FHIR service
# create FHIR outcome
# response
if "FHIR_REQUEST_ID" not in request.scope:
# no dealings with no FHIR service
return await request_validation_exception_handler(request, exc)
# create operation outcome
outcome = make_outcome(request, exc)
return fhir_rest_response(
request,
outcome,
status_code=request.scope.get("http_error_code", HTTP_400_FHIR_VALIDATION),
)
def make_outcome(request: Request, exc: RequestValidationError):
"""
https://terminology.hl7.org/2.0.0/CodeSystem-operation-outcome.html
:param exc:
:param status_code:
:return:
"""
klass = lookup_fhir_class(
"OperationOutcome", fhir_release=request.scope["FHIR_VERSION"]
)
issues = list()
for error in exc.errors():
issue = {
"severity": "error",
"code": exc.code,
"details": {
"coding": [
{
"system": "http://terminology.hl7.org/CodeSystem/operation-outcome",
"code": exc.system_code,
"display": exc.body,
}
]
},
"diagnostics": f"loc: {error['loc']}, message: {error['msg']}",
}
issues.append(issue)
outcome = klass(**{"id": str(request.scope["FHIR_REQUEST_ID"]), "issue": issues})
return outcome
|
from unittest import mock
import json
from django.http import JsonResponse
from django.template.response import TemplateResponse
from django.test import Client, modify_settings
def test_get_request_graphiql():
client = Client()
response = client.get(
'/graphql',
)
assert isinstance(response, TemplateResponse)
assert response.status_code == 200
assert response.templates[0].name == 'django_graph_api/graphiql.html'
assert 'csrftoken' in response.cookies
@mock.patch('test_project.urls.schema.execute')
def test_post_request_executed(execute):
execute.return_value = {}
query = 'this is totally a query'
client = Client()
response = client.post(
'/graphql',
json.dumps({
'query': query,
}),
content_type='application/json',
HTTP_ACCEPT='application/json',
)
assert isinstance(response, JsonResponse)
assert response.status_code == 200
assert response.content == b'{}'
execute.assert_called_once_with(query, None)
@mock.patch('test_project.urls.schema.execute')
def test_variables_sent_in_post(execute):
execute.return_value = {}
query = 'this is totally a query'
client = Client()
response = client.post(
'/graphql',
json.dumps({
'query': query,
'variables': {
'level': 9001
}
}),
content_type='application/json',
HTTP_ACCEPT='application/json',
)
assert isinstance(response, JsonResponse)
assert response.status_code == 200
assert response.content == b'{}'
execute.assert_called_once_with(query, {'level': 9001})
def test_post_request_with_error():
client = Client()
response = client.post(
'/graphql',
'',
content_type='application/json',
HTTP_ACCEPT='application/json',
)
assert isinstance(response, JsonResponse)
assert response.status_code == 200
# actual error changes depending on Python version
assert 'error' in response.content.decode(response.charset)
@modify_settings(MIDDLEWARE={'remove': 'django.middleware.csrf.CsrfViewMiddleware'})
@mock.patch('test_project.urls.schema.execute')
def test_post__csrf_required(execute):
execute.return_value = {}
query = 'this is totally a query'
client = Client(enforce_csrf_checks=True)
response = client.post(
'/graphql',
json.dumps({
'query': query,
}),
content_type='application/json',
HTTP_ACCEPT='application/json',
)
assert response.status_code == 403
execute.assert_not_called()
|
from csv import DictReader
from collections import defaultdict
from sys import maxsize
class Purchase:
def __init__(
self, city, zipcode, state, beds,
baths, sq__ft, home_type, sale_date, price,
latitude, longitude):
self.longitude = longitude
self.latitude = latitude
self.price = price
self.sale_date = sale_date
self.type = home_type
self.sq__ft = sq__ft
self.baths = baths
self.beds = beds
self.state = state
self.zip = zipcode
self.city = city
@staticmethod
def create_from_dict(lookup):
return Purchase(
lookup['city'],
lookup['zip'],
lookup['state'],
int(lookup['beds']),
int(lookup['baths']),
int(lookup['sq__ft']),
lookup['type'],
lookup['sale_date'],
float(lookup['price']),
float(lookup['latitude']),
float(lookup['longitude']))
<<<<<<< HEAD
=======
class Averager:
def __init__(self):
self.current_value = 0
self.point_counter = 0
self.max = None
self.min = None
def count(self):
return self.point_counter
def sum(self):
return self.current_value * self.point_counter
def __add__(self, other):
current_sum = self.current_value * self.point_counter + float(other)
self.point_counter += 1
self.current_value = current_sum / self.point_counter
self.max = self.max if self.max and self.max > other else other
self.min = self.min if self.min and self.min < other else other
return self
def __str__(self):
return str(self.current_value)
def __float__(self):
return float(self.current_value)
>>>>>>> abad0c2981ab58bb7e47a62f0d33ba8f114210c9
class Analyzer:
def __init__(self, csv_path):
self.path = csv_path
self.max = 0
self.max_record = None
self.min = maxsize
self.min_record = None
self.averages = {
'price': Averager(),
'beds': Averager(),
'baths': Averager(),
}
self.two_bed_averages = {
'price': Averager(),
'beds': Averager(),
'baths': Averager(),
}
self.load()
self.max = 0
self.max_record = None
self.min = maxsize
self.min_record = None
self.averages = defaultdict(int)
self.two_bed_averages = defaultdict(int)
def load(self):
# do something more elegant for average calculations
with open(self.path, 'r', encoding='utf-8') as fin:
reader = DictReader(fin)
for row in reader:
p = Purchase.create_from_dict(row)
self.averages['price'] += p.price
self.averages['beds'] += p.beds
self.averages['baths'] += p.baths
if self.averages['price'].max == p.price:
self.max_record = row
if self.averages['price'].min == p.price:
self.min_record = row
if p.beds == 2:
self.two_bed_averages['price'] += p.price
self.two_bed_averages['beds'] += p.beds
self.two_bed_averages['baths'] += p.baths
def print_headers(self):
print('Header: ' + ', '.join(self.max_record.keys()))
def print_most_expensive(self):
print('Most expensive: {}-beds, {}-baths, {} for ${:,} in {}'.format(self.max_record['beds'], self.max_record['baths'], self.max_record['type'], int(self.max_record['price']), self.max_record['city']))
def print_least_expensive(self):
print('Least expensive: {}-beds, {}-baths, {} for ${:,} in {}'.format(self.min_record['beds'], self.min_record['baths'], self.min_record['type'], int(self.min_record['price']), self.min_record['city']))
def print_most_average(self):
print('Average: ${:,.2f}, {:,.1f} bed, {:,.1f} bath'.format(float(self.averages['price']), float(self.averages['beds']), float(self.averages['baths'])))
def print_most_average_two_bedroom(self):
if len(self.two_bed_averages):
print('Average 2-bedroom: ${:,.2f}, {:,.1f} bed, {:.1f} bath'.format(round(float(self.two_bed_averages['price']), 2), float(self.two_bed_averages['beds']), float(self.two_bed_averages['baths'])))
|
# Elaborar um programa que efetue a leitura de um número inteiro e apresentar uma mensagem informando
# se o número é par ou ímpar.
print('Este app identifica se o numero é par ou impar')
a=int(input('Digite um número: '))
if a%2==0:
print ('Este numero e PAR')
else:
print('Este numero e IMPAR') |
# Generated by Django 3.0.8 on 2020-07-15 02:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('foods', '0005_auto_20200715_0154'),
]
operations = [
migrations.AddField(
model_name='food',
name='cover',
field=models.ImageField(blank=True, null=True, upload_to='foods/'),
),
]
|
import json
import numpy as np
import time
import imageio
import uuid
from AliSdkApp import AliSdkApp
class HandKeypointResult:
__slots__ = ['kps_conf', 'box_conf', 'kps', 'box', 'applied_wh', 'im_wh']
def __init__(self, d: dict=None):
self.kps_conf = None
self.box_conf = None
self.kps = {}
self.box = None
self.applied_wh = None
self.im_wh = None
if d is not None:
self.from_dict(d)
def to_dict(self):
d = dict()
for k in self.__slots__:
d[k] = self.__getattribute__(k)
return d
def from_dict(self, d: dict):
for k in self.__slots__:
self.__setattr__(k, d[k])
class HandKeypointDet:
def __init__(self):
self.app = AliSdkApp(BucketName="bio-totem")
self.retry_times = 5
def det(self, im):
im_name = '{}.jpg'.format(str(uuid.uuid4()))
is_success = False
cur_try = 0
while cur_try < self.retry_times:
try:
response = self.app.HandPostureDetection(im_name, im).decode('utf8')
is_success = True
break
except BaseException as e:
print('catch throw error. ignore.', str(e))
cur_try += 1
time.sleep(0.5)
if is_success:
hands = self._decode_hand_posture_msg(response)
else:
hands = []
return is_success, hands
def _decode_hand_posture_msg(self, json_txt, apply_wh=True):
d = json.loads(json_txt)
# is_success = bool(d['success'])
# if not is_success:
# return None
hands = []
# data_tree = d['data']['data']['Data']
data_tree = d['Data']
imwh = [data_tree['MetaObject']['Width'], data_tree['MetaObject']['Height']]
hands_tree = data_tree['Outputs'][0]['Results']
for hand_tree in hands_tree:
hand = HandKeypointResult()
hand.box_conf = float(hand_tree['Box']['Confident'])
pos_tree = hand_tree['Box']['Positions']
hand.box = np.array([*pos_tree[0]['Points'], *pos_tree[2]['Points']], np.float32).tolist()
hand.kps_conf = float(hand_tree['Hands']['Confident'])
labels = []
kps = []
for c in hand_tree['Hands']['KeyPoints']:
label = int(c['Label'])
labels.append(label)
a = c['Positions'][0]['Points']
kps.append(list(a))
# kps = np.asarray(kps, np.float32)
for k, v in zip(labels, kps):
hand.kps[k] = v
hand.applied_wh = apply_wh
hand.im_wh = imwh
if apply_wh:
hand.box[0] = hand.box[0] * imwh[0]
hand.box[1] = hand.box[1] * imwh[1]
hand.box[2] = hand.box[2] * imwh[0]
hand.box[3] = hand.box[3] * imwh[1]
for i in hand.kps:
hand.kps[i][0] = hand.kps[i][0] * imwh[0]
hand.kps[i][1] = hand.kps[i][1] * imwh[1]
hands.append(hand)
return hands
if __name__ == '__main__':
det = HandKeypointDet()
ppp = 19
im = imageio.imread(f'{ppp}.jpg')
ret, hands = det.det(im)
for h in hands:
print(h.to_dict())
|
import pandas as pd
import numpy as np
from typing import Dict, List
import csv
import torch
def get_glove_vectors(path):
df = pd.read_csv(path, sep=' ', header=None, engine='python',
quoting=csv.QUOTE_ALL, error_bad_lines=False)
token2id = {t: i for i, t in enumerate(df.iloc[:, 0])}
return df.iloc[:, 1:].values, token2id
def formatting_data(df: pd.DataFrame, token2id: Dict[str, int], section2label: Dict[str, int]):
# ToDo: completion of out-of-vocabulary
gr = df.groupby('paper_id')
documents = gr['sentense'].apply(lambda sentenses: [[token2id[token] for token in x.split() if token in token2id.keys()] for x in sentenses])
labels = gr['section'].apply(lambda section_names: [section2label[name] for name in section_names])
return documents, labels
def calculate_transition_matrix(df: pd.DataFrame, section2label: Dict[str, int]):
labels_all_paper = df.groupby('paper_id')['section'].apply(lambda x: [section2label[s] for s in x]).tolist()
n_labels = pd.Series(list(section2label.values())).nunique()
str2labels = {str([l1, l2]): [l1, l2] for l1 in range(n_labels) for l2 in range(n_labels)}
label_pairs = [str([labels[i-1], labels[i]]) for labels in labels_all_paper for i in range(1, len(labels))]
label_pairs = pd.Series(label_pairs).value_counts().to_dict()
transition = np.zeros([n_labels, n_labels])
for label, value in label_pairs.items():
l1, l2 = str2labels[label]
transition[l1, l2] = value
transition = np.log(transition / np.sum(transition))
return transition.T
def data_to_LongTensor(documents: List[List[List[int]]], labels: List[List[int]]):
documents = [[torch.LongTensor(token_list).cuda() for token_list in sentenses] for sentenses in documents]
labels = [torch.LongTensor(label).cuda() for label in labels]
return documents, labels
|
import pygame
import sys
screen_width = 1000
screen_height = 1000
block_size = 50
minigameID = 5
class Player:
def __init__(self,player_nmbr,network):
self.net = network
self.body = []
self.moved = True
self.apple = (-50, -50)
self.player_nmbr = player_nmbr
if(player_nmbr == 0):
self.direction = pygame.K_RIGHT
self.body.append((0, 0))
self.head = [0, 0]
else:
self.direction = pygame.K_LEFT
self.body.append((screen_width-block_size, screen_height-block_size))
self.head = [screen_width-block_size, screen_height-block_size]
def change_direction(self,new_direction):
#Check if can change from current direction
if (self.moved and
((self.direction in [pygame.K_UP,pygame.K_DOWN] and new_direction in [pygame.K_LEFT,pygame.K_RIGHT]) or
(self.direction in [pygame.K_LEFT,pygame.K_RIGHT] and new_direction in [pygame.K_UP,pygame.K_DOWN]))):
self.direction = new_direction
self.moved = False
def move(self):
self.moved = True
lost = False
#If apple was eaten, "add" new block by not deleting oldest
if not(self.apple[0] == self.head[0] and self.apple[1] == self.head[1]):
self.body.pop(0)
else:
self.net.send("ate")
#Check for hitting walls
if(self.direction == pygame.K_UP):
self.head[1] -= block_size
if self.head[1] < 0:
lost = True
elif (self.direction == pygame.K_DOWN):
self.head[1] += block_size
if self.head[1] >= screen_height:
lost = True
elif (self.direction == pygame.K_LEFT):
self.head[0] -= block_size
if self.head[0] < 0:
lost = True
else:
self.head[0] += block_size
if self.head[0] >= screen_width:
lost = True
if lost:
self.net.game_won_by((self.player_nmbr + 1) % 2)
return False
else:
self.body.append(tuple(self.head))
return True
class Snakes:
def __init__(self,player_nmbr,network):
self.player_nmbr = player_nmbr
self.net = network
self.screen = pygame.display.set_mode((screen_width, screen_height))
self.player = Player(player_nmbr, network)
self.enemy_body = []
if(player_nmbr == 1):
self.enemy_body.append((0, 0))
self.enemy_head=[0, 0]
else:
self.enemy_body.append((screen_width - block_size, screen_height - block_size))
self.enemy_head = [screen_width - block_size, screen_height - block_size]
def draw(self):
self.screen.fill((110, 110, 110))
#Draw apple
pygame.draw.rect(self.screen, (255, 0, 0), [self.player.apple[0], self.player.apple[1], block_size, block_size])
#Draw player
for body_part in self.player.body[:-1]:
pygame.draw.rect(self.screen, (0, 128, 0), [body_part[0], body_part[1], block_size, block_size])
#Draw enemy
for body_part in self.enemy_body[:-1]:
pygame.draw.rect(self.screen, (0, 0, 128), [body_part[0], body_part[1], block_size, block_size])
#Draw heads in diffrent color
pygame.draw.rect(self.screen, (0, 255, 0), [self.player.head[0], self.player.head[1], block_size, block_size])
pygame.draw.rect(self.screen, (0, 0, 255), [self.enemy_head[0], self.enemy_head[1], block_size, block_size])
pygame.display.flip()
def run (self):
while self.net.current_minigame() == minigameID:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return False
if event.type == pygame.KEYDOWN:
self.player.change_direction(event.key)
if not(self.player.move()):
break
self.net.send((minigameID, self.player.body, self.player.head))
data = self.net.get_data()
if(data[0] != 5):
break
else:
self.enemy_body, self.enemy_head, self.player.apple = data[1][0],data[1][1],data[2]
#If colided with enemy, lose
if(tuple(self.player.head) in self.enemy_body+self.player.body[:-1]):
self.net.game_won_by((self.player_nmbr + 1) % 2)
self.draw()
pygame.time.Clock().tick(5)
return True |
"""
Created on 14:04, June. 4th, 2021
Author: fassial
Filename: __init__.py
"""
# numba_backend model
from . import numba_backend
# tensor_backend model
from . import tensor_backend
from .tensor_backend import AlphaSyn
from .tensor_backend import ExpSyn
from .tensor_backend import GapJunction
from .tensor_backend import GapJunction_LIF
from .tensor_backend import TwoExpSyn
from .tensor_backend import VoltageJump
# def set_backend func
def set_backend(backend):
global AlphaSyn
global ExpSyn
global GapJunction
global GapJunction_LIF
global TwoExpSyn
global VoltageJump
if backend in ['tensor', 'numpy', 'pytorch', 'tensorflow', 'jax']:
AlphaSyn = tensor_backend.AlphaSyn
ExpSyn = tensor_backend.ExpSyn
GapJunction = tensor_backend.GapJunction
GapJunction_LIF = tensor_backend.GapJunction_LIF
TwoExpSyn = tensor_backend.TwoExpSyn
VoltageJump = tensor_backend.VoltageJump
elif backend in ['numba', 'numba-parallel', 'numba-cuda']:
AlphaSyn = numba_backend.AlphaSyn
ExpSyn = numba_backend.ExpSyn
GapJunction = numba_backend.GapJunction
GapJunction_LIF = numba_backend.GapJunction_LIF
TwoExpSyn = numba_backend.TwoExpSyn
VoltageJump = numba_backend.VoltageJump
else:
raise ValueError(f'Unknown backend "{backend}".')
|
# Dos vectores son ortogonales cuando son perpendiculares entre sí. Para determinarlo
# basta calcular su producto escalar y verificar si es igual a 0. Ejemplo:
# A = (2,3) y B = (-3,2) => 2 * (-3) + 3 * 2 = -6 + 6 = 0 => Son ortogonales
# Escribir una función que reciba dos vectores en forma de tuplas y devuelva un valor
# de verdad indicando si son ortogonales o no. Desarrollar también un programa
# que permita verificar el comportamiento de la función.
#FUNCIONES:
def es_ortogonal(vector1,vector2):
bol = False
if len(vector1) == len(vector2):
i = 0
suma = 0
while i < len(vector1):
suma += vector1[i] * vector2[i]
i += 1
if suma == 0:
bol = True
else:
raise ValueError("Los vectores son de distinto tamaño")
return bol
#PROGRAMA PRINCIPAL:
def main():
try:
vector1 = (int(input("Ingrese un numero del vector1: ")),int(input("Ingrese otro numero del vector1: ")))
vector2 = (int(input("Ingrese un numero del vector2: ")),int(input("Ingrese otro numero del vector2: ")))
if es_ortogonal(vector1,vector2):
print(f"Los vectores {vector1} y {vector2} son ortogonales ")
else:
print(f"Los vectores {vector1} y {vector2} no son ortogonales ")
except ValueError as mensaje:
print(mensaje)
main() |
import numpy as np
import matplotlib.pyplot as plt
X = []
Y = []
for line in open('data_1d.csv'):
x, y = line.split(',')
X.append(float(x))
Y.append(float(y))
X = np.array(X)
Y = np.array(Y)
denom = X.dot(X) - X.mean() * X.sum()
a = (X.dot(Y) - Y.mean() * X.sum()) / denom
b = (Y.mean() * X.dot(X) - X.mean() * X.dot(Y)) / denom
Y_hat = a * X + b
# Calculation R^2:
d1 = Y - Y_hat
d2 = Y - Y.mean()
R2 = 1 - (d1.dot(d1) / d2.dot(d2))
print(R2)
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os, sys, logging, datetime, json
from sklearn import preprocessing
starttime = datetime.now()
log_filename = datetime.now().strftime("log\%Y%m%d-%H%M%S.log")
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M:%S',
filename=log_filename)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
filename = "user_desc_time.json"
try:
with open(filename, "r") as f:
jf = json.loads(f.read())
except IOError:
print("Could not read file: ",filename)
sys.exit()
endtime = datetime.now()
print("Running time is :",(endtime-starttime).seconds," sec")
with open("running_time_usertailor_log.txt","a") as f:
sen="Running time of ["+filename+"] to [usertailor_video.json] is "+str((endtime-starttime).seconds)+" sec\n"
f.write(sen)
f.close()
|
# CODE BY: Luna Jiménez Fernández
###########
# IMPORTS #
###########
from agents.old.dql_agent_old import DQLAgentOld
# General imports
from collections import deque
import numpy as np
import random
import csv
from os import mkdir
from os.path import exists, join
# Keras related imports
from keras.layers import Dense, Flatten
from keras.models import Sequential
from keras.optimizers import Adam
from keras.initializers import glorot_uniform
class WeightedAgentOld(DQLAgentOld):
"""
This class is a variation of the standard DQL Agent used by the original approach (action = player input), but
with specific weights when acting randomly (in the Act method)
The new weights are as following:
* 25% chance of moving left
* 25% chance of moving right
* 40% chance of rotating the piece
* 10% chance of dropping the piece
The idea is to artificially force the agent to perform more rotations and less instant drops (the main problem of
the original agent), to try to have it experience a wider pool of states.
"""
def act(self, state):
"""
For the current state, return the optimal action to take or a random action randomly
Note that, as specified above, the random action chances are weighted instead of uniform, following
these weights:
* 25% chance of moving left
* 25% chance of moving right
* 40% chance of rotating the piece
* 10% chance of dropping the piece
:param state: The current state provided by the game
:return: The action taken (as a string) and, if applicable, the set of Q-Values for each action
"""
# Count the action
self.actions_performed += 1
# Prepare the state for the neural network
state = np.expand_dims(state, axis=0)
# Predict the q-values for the state (will be needed anyways to keep track of the values)
q_values = self.q_network.predict(state)
# Generate a random number
random_chance = np.random.rand()
# Check if the value is smaller (random action) or greater (optimal action) than epsilon
if random_chance < self.epsilon:
# Take a random action from the actions dictionary
# Weighting is applied, to force more rotations and less drops
# Reminder that the dictionary structure of actions is as follows (in this order):
# * 0: right
# * 1: left
# * 2: rotate
# * 3: hard_drop
action = np.random.choice(list(self.actions.keys()), p=[0.25, 0.25, 0.4, 0.1])
# Add the appropriate value to the value counter and return the action
# (no Q-Values will be returned in this case)
self.q_values += q_values[0][action]
return self.actions[action], None
else:
# Choose the best action and add the value to the value counter
action = np.argmax(q_values[0])
self.q_values += q_values[0][action]
return self.actions[action], q_values
|
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE','curdproject1.settings')
import django
django.setup()
from curdapp.models import *
from faker import Faker
from random import *
faker=Faker()
def populate(n):
for i in range(n):
fno=randint(1001,9999)
fname=faker.name()
fsal=randint(10000,60000)
faddrs=faker.city()
emp_records=Employee.objects.get_or_create(no=fno,name=fname,salary=fsal,address=faddrs)
populate(20)
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r"^$", views.IndexView.as_view(), name="index"),
url(r"^commit/(?P<pk>[0-9]+)/$", views.CommitView.as_view(), name="commit"), # TODO better name in URL than "commit/"?
url(r"^result/(?P<pk>[0-9]+)/$", views.ResultSetView.as_view(), name="resultset"), # TODO make it include the Commit's ID/URL (e.g., commit/x/result/y/)?
] |
# using python3.6.10
# need to install numpy and pandas
# HC Data H_k:
"""
(1, 'p1_distance') Empirical error: 0.9713846153846143 True error: 0.5543076923076925
(1, 'p2_distance') Empirical error: 0.9712307692307681 True error: 0.5515384615384619
(1, 'p_inf_distance') Empirical error: 0.9713846153846143 True error: 0.5513846153846156
(3, 'p1_distance') Empirical error: 0.7772307692307691 True error: 0.571538461538462
(3, 'p2_distance') Empirical error: 0.7710769230769227 True error: 0.5693846153846157
(3, 'p_inf_distance') Empirical error: 0.7666153846153844 True error: 0.5643076923076926
(5, 'p1_distance') Empirical error: 0.7221538461538458 True error: 0.5530769230769235
(5, 'p2_distance') Empirical error: 0.7176923076923074 True error: 0.5530769230769235
(5, 'p_inf_distance') Empirical error: 0.7092307692307692 True error: 0.5543076923076928
(7, 'p1_distance') Empirical error: 0.6896923076923075 True error: 0.5480000000000004
(7, 'p2_distance') Empirical error: 0.6895384615384614 True error: 0.5484615384615386
(7, 'p_inf_distance') Empirical error: 0.6801538461538459 True error: 0.5480000000000004
(9, 'p1_distance') Empirical error: 0.6638461538461539 True error: 0.5509230769230773
(9, 'p2_distance') Empirical error: 0.6633846153846154 True error: 0.5492307692307695
(9, 'p_inf_distance') Empirical error: 0.6598461538461539 True error: 0.5449230769230771
"""
##############################
from KNN import *
import readData
import numpy as np
import multiprocessing
from joblib import Parallel, delayed
import itertools
def calc_error(rules, data):
err = 0
for i in range(1, len(data.index)):
w_vote = 0
for rule in rules:
w_vote += rule[1] * rule[0].predict([data["c1"][i], data["c2"][i]])
if np.sign(w_vote) != data["label"][i]:
err += 1
return err
def run_single_experiment(data,labels,seed):
"""
Perform a single iteration of 15 runs.
"""
idx = np.random.RandomState(seed=seed).permutation(data.index)
data= data.reindex(idx)
labels =labels.reindex(idx)
(train_data, test_data) = np.vsplit(data, 2)
train_labels = labels.head(65)
test_labels = labels.tail(65)
test_data = test_data.reset_index(drop=True)
train_data = train_data.reset_index(drop=True)
train_labels = train_labels.reset_index(drop=True)
test_labels = test_labels.reset_index(drop=True)
Ks = [1,3,5,7,9]
Ps = [p1_distance,p2_distance,p_inf_distance]
emp_error = []
true_error = []
for k in Ks:
for p in Ps:
#true error
result_df = test_data.apply(lambda row: classify(row, train_data, train_labels,p, k), axis=1)
error_df = result_df == test_labels
# print( "true: k = " + str(k) , "p= " + p.__name__ , "acc= ", error_df.value_counts(normalize=True).iloc[0])
true_error.append(error_df.value_counts(normalize=True).iloc[0])
#empirical error
result_df = train_data.apply(lambda row: classify(row, train_data, train_labels,p, k), axis=1)
error_df = result_df == train_labels
emp_error.append(error_df.value_counts(normalize=True).iloc[0])
# print( "emp: k = " + str(k) , "p= " + p.__name__ , "acc= ", error_df.value_counts(normalize=True).iloc[0])
return np.asarray(emp_error), np.asarray(true_error)
def run_experiment(data,labels, iterations=100):
"""
Run KNN on data where every iteration 15 runs are performed.
k=[1,3,5,7,9] p=[1,2,inf] runs=kXp
iterations default value is 100.
Note that this function runs using multiple cores, adjust below according to your specs
"""
emp_error = np.zeros(15)
true_error = np.zeros(15)
num_cores = multiprocessing.cpu_count()
processed_list = Parallel(n_jobs=num_cores)(
delayed(run_single_experiment)(data,labels,i) for i in range(iterations)
)
for res in processed_list:
emp_error = np.add(emp_error, res[0])
true_error = np.add(true_error, res[1])
emp_error /= iterations
true_error /= iterations
return emp_error, true_error
def main():
hbt , labels = readData.hbt_data("HC_Body_Temperature.txt")
emp_err, true_err = run_experiment(hbt,labels, 100)
Ks = [1,3,5,7,9]
Ps = ["p1_distance","p2_distance","p_inf_distance"]
for emp, tru , desc in zip(emp_err, true_err , itertools.product(Ks,Ps)):
print(desc,
"Empirical error:",
emp,
"True error:",
tru,
)
if __name__ == "__main__":
main()
|
[actor] @dbtype:mem,fs
"""
Operator actions for handling interventsions on a computenode
"""
method:setStatus
"""
Set the computenode status, options are 'ENABLED(creation and actions on machines is possible)','DISABLED(Only existing machines are started)', 'HALTED(Machine is not available'
"""
var:id str,, id of the computenode
var:locationId str,, the grid this computenode belongs to
var:status str,, status (ENABLED, MAINTENANCE, DECOMMISSIONED).
result: str
method:btrfs_rebalance
"""
Rebalances the btrfs filesystem
"""
var:name str,, name of the computenode
var:locationId str,, the grid this computenode belongs to
var:mountpoint str,,the mountpoint of the btrfs
var:uuid str,,if no mountpoint given, uuid is mandatory
result: bool
method:enable
"""
Enable a stack
"""
var:id str,,id of the computenode
var:message str,,message. Must be less than 30 characters
result: str
method:enableStacks
"""
Enable stacks
"""
var:ids list(str),,ids of stacks to enable
result:bool
method:list
"""
List stacks
"""
var:locationId str,,filter on gid @optional
result:list
method:sync
"""
Sync stacks
"""
var:locationId str,,the grid id to sync
result:bool
method:maintenance
"""
Migrates or stop all vms
Set the status to 'MAINTENANCE'
"""
var:id str,, id of the computenode
var:vmaction str,, what to do with running vms move or stop
var:force bool,, force to Stop VM if Life migration fails
var:message str,,message. Must be less than 30 characters
result: str
method:decommission
"""
Migrates all machines to different computes
Set the status to 'DECOMMISSIONED'
"""
var:id str,, id of the computenode
var:locationId str,, the grid this computenode belongs to
var:message str,,message. Must be less than 30 characters
result: str
method:upgrade
"""
upgrade node to new version
Set the status to 'ENABLED'
"""
var:id str,, id of the computenode
var:message str,,message. Must be less than 30 characters
var:force bool,,force. force upgrade
result: str
|
import re
camel_pat = re.compile(r'([A-Z])')
under_pat = re.compile(r'_([a-z])')
def camel_to_underscore(name):
return camel_pat.sub(lambda x: '_' + x.group(1).lower(), name)
def underscore_to_camel(name):
return under_pat.sub(lambda x: x.group(1).upper(), name)
def dict_keys_underscore_to_camel(d):
d_new = {}
for k, _ in d.items():
d_new[underscore_to_camel(k)] = d[k]
return d_new
def dict_keys_camel_to_underscore(d):
d_new = {}
for k, _ in d.items():
d_new[camel_to_underscore(k)] = d[k]
return d_new
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.