text stringlengths 38 1.54M |
|---|
import os
import numpy as np
import pylab as plt
from PIL import Image,ImageDraw
from copy import deepcopy
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import requests
from io import BytesIO
import matplotlib.pyplot as pyplt
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.utils.miscellaneous import mkdir
from predictor import HistoDemo
import argparse
def test_net_on_nr(config_file, ckpt, iteration):
    """Run the ring-cell detector over negative (tumor-free) images and count false positives.

    Since every image in the negative set contains no ring cells, every
    detection is a false positive.

    :param config_file: path to the maskrcnn-benchmark yaml config
    :param ckpt: path to the model checkpoint (.pth) to evaluate
    :param iteration: training iteration of the checkpoint (used in output file names)
    :return: dict with 'nrfp' (false positives per image at the demo's
             confidence threshold) and 'fps' (100 - nrfp, clipped at 0)
    """
    path_image = '/mnt/DATA_OTHER/digestPath/Signet_ring_cell_dataset/sig-train-neg/original/'
    img_list = os.listdir(path_image)
    # config_file = '/home/ys309/Dropbox/coding/maskrcnn-benchmark/configs/ring_cell/rc_faster_rcnn_R_50_FPN_1x_rpn_pair.yaml'
    # update the config options with the config file
    cfg.merge_from_file(config_file)
    # manual override some options
    cfg.merge_from_list(["MODEL.DEVICE", "cuda:0"])
    # cfg.MODEL.WEIGHT = '/mnt/DATA_OTHER/digestPath/results/rc_faster_rcnn_R_50_FPN_1x_rpn_pair/model_0005000.pth'
    cfg.MODEL.WEIGHT = ckpt
    output_dir = cfg.OUTPUT_DIR
    # print(output_dir)
    output_fold = cfg.OUTPUT_FOLDER
    save_fold = os.path.join(output_dir, output_fold, 'inference', 'normal_region')
    mkdir(save_fold)
    histo_demo = HistoDemo(
        cfg,
        min_image_size=600,
        confidence_threshold=0.5,
    )
    # NOTE(review): model_size/overlap are unused in this function —
    # presumably HistoDemo's sliding window has its own equivalents; confirm.
    model_size = 600
    overlap = 200
    l_scores = []
    for i, fimg in enumerate(img_list):
        print('%d/%d' % (i + 1, len(img_list)))
        # NOTE(review): the loop variable fimg (a filename) is immediately
        # overwritten with the opened image object
        fimg = Image.open(path_image + '/' + img_list[i])
        boxlist = histo_demo.sliding_window_wsi(fimg)
        if boxlist is not None:
            print('there are some FP')
            l_scores.append(boxlist.get_field('scores'))
        else:
            print('boxlist is None')
    if l_scores:
        scores = torch.cat(l_scores, 0)
        np.save(os.path.join(save_fold, 'scores_%07d.npy' % iteration), scores.cpu().numpy())
    # NOTE(review): if l_scores was empty, `scores` is undefined here
    score_l = scores.cpu().numpy()
    n_imgs = len(img_list)
    # false positives per image at the demo's confidence threshold
    nrfp_conf_thres = (score_l > histo_demo.confidence_threshold).sum() // n_imgs
    fps_conf_thres = np.maximum(100 - nrfp_conf_thres, 0)
    # save fps for froc calculation
    nrfp = []
    fps = []
    # range same as the one for recs
    for conf_t in np.arange(0.0, 1.0, 0.001):
        nrfp.append((score_l > conf_t).sum() // n_imgs)
        fps.append(np.maximum(100 - (score_l > conf_t).sum() // n_imgs, 0))
    np.save(os.path.join(save_fold, 'nrfp_%07d.npy' % iteration), np.array(nrfp))
    np.save(os.path.join(save_fold, 'fps_%07d.npy' % iteration), np.array(fps))
    return {"nrfp": nrfp_conf_thres, "fps": fps_conf_thres}
def run_test_net(model_id):
    """Evaluate one ring-cell model (selected by index) across its checkpoints.

    For the config whose index equals int(model_id), runs test_net_on_nr on
    checkpoints 2500..20000 (step 2500) and rewrites the tab-separated FP
    results to results/ringcell/<model>_nr.txt after each checkpoint.

    :param model_id: index (as str or int) into the sorted yaml config list
    """
    # collect the ring-cell yaml configs and the matching model names
    cfgs = []
    models = []
    for fname in os.listdir('configs/ring_cell/'):
        if fname.split('.')[-1] == 'yaml':
            cfgs.append(fname)
            models.append(fname.split('.')[0])
    result_str = ''
    # BUG FIX: the loop variable was named `cfg`, shadowing the imported
    # maskrcnn-benchmark config object used elsewhere in this file.
    for j, cfg_name in enumerate(cfgs):
        if j != int(model_id):
            continue
        for iteration in range(2500, 20001, 2500):
            print('model (%d/%d)' % (j + 1, len(models)) + ' iter%d' % iteration)
            config_file = '/home/ys309/Dropbox/coding/maskrcnn-benchmark/configs/ring_cell/%s' % cfg_name
            ckpt = '/mnt/DATA_OTHER/digestPath/results/' + models[j] + '/model_%07d.pth' % iteration
            print(config_file + '\n' + ckpt)
            result = test_net_on_nr(config_file, ckpt, iteration)
            result_str += "{:.4f}\t".format(result["fps"])
            # rewrite the cumulative results after every checkpoint so a
            # crash mid-run still leaves partial output on disk
            with open('results/ringcell/' + models[j] + "_nr.txt", "w") as fid:
                fid.write(result_str)
if __name__ == "__main__":
    # CLI entry point: pick which ring-cell config (by index) to evaluate
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Inference")
    parser.add_argument(
        "--model_id",
        default=None,
        help="[0,17]",  # index into the sorted list of yaml configs
    )
    # parser.add_argument(
    #     "--ckpt",
    #     help="The path to the checkpoint for test, default is the latest checkpoint.",
    #     default=None,
    # )
    args = parser.parse_args()
    # test_net_on_wsi(args.config_file, args.ckpt)
    run_test_net(args.model_id)
|
"""Welcome to the API functionality."""
from flask_restful import Resource
# pylint:disable=no-self-use
class WelcomeResource(Resource):
    """Displays welcome message and any other introductory information."""

    def get(self):
        """Return the static welcome payload with HTTP status 200."""
        payload = {'message': 'Welcome to Real Estate Manager.'}
        return {'status': 'success', 'data': payload}, 200
|
import numpy as np
import os
import pickle
import matplotlib.pyplot as plt
from copy import deepcopy
from tqdm import tqdm
import qeval
import qanim
#TODO: after 100g create a new file for saving
#TODO: fusion and fision
class EvolAlg():
    """Evolutionary algorithm over agent genotypes.

    Parameters follow M. Quinn (2001) and Shibuya et al.  Constructing the
    object immediately loads/creates a population, saves it, and starts the
    evolutionary loop (see evolve()).
    """
    def __init__(self, time=300, popsize=180, load_pop=True):
        # internal (as defined by M. Quinn (2001) and Shibuya et al.)
        self.popsize = popsize    # target number of genotypes per generation
        self.time = time          # simulation time per evaluation
        self.n_gens = 2000        # total generations to run
        self.cgen = 0             # current generation (advanced when resuming a run)
        self.n_parents = 33       # parents kept after fitness sorting
        # quinn (2001):
        # self.dmax=25
        # self.crossover_rate=0.6
        # self.micromuts=3.5
        self.addgx_prob = 0.01    # prob. of adding a gene (macro-mutation)
        self.delgx_prob = 0.02    # prob. of deleting a gene
        self.addcx_prob = 0.025   # prob. of adding a connection, per gene
        self.delcx_prob = 0.05    # prob. of deleting a connection, per gene
        self.ids = 0              # running counter for unique gene ids
        # shibuya et al:
        self.cmax = 10
        self.dmax = 40
        self.elite = 2            # best genotypes copied unchanged each generation
        self.crossover_rate = 0.2 # fraction of population produced by crossover
        self.crossover_prob = 0.5
        self.mut_rate = 0.05
        # for saving the history of all genotypes
        self.genotypes = []
        # init population (from clonal types)
        self.population = []
        self.init_population(load_pop)
        # saving
        self.max_ft = 15          # fitness threshold above which animations are saved
        self.filename = ""
        self.define_filename()
        self.save_step = 1        # save every N generations
        self.save_data()
        # run ea
        self.evolve()
def evolve(self):
    """Main EA loop: evaluate, sort by fitness, save, then breed each generation."""
    # for each generation
    for i_gen in range(self.cgen, self.n_gens):
        print("\ngeneration: {}\n".format(i_gen))
        # evaluate every genotype in the current population
        parents = []
        for gt in range(len(self.population)):
            genotype = self.population[gt]
            eval = qeval.Evaluation(genotype, self.time)
            # save an animation every time there is a new max fitness
            print("gt{}: ft={}".format(gt, round(eval.av_ft, 2)))
            if eval.av_ft > self.max_ft:
                name = "{}_gen={}_ft={}_gt{}={}".format(self.index, i_gen, round(eval.av_ft, 2), gt, [gx[0] for gx in genotype])
                print("new max: {}".format(name))
                qanim.sim_animation(eval, show=False, save=True, fname=name)
                # self.save_eval(eval)
                self.max_ft = eval.av_ft
            # add genotype's results to list
            parents.append([deepcopy(eval.genotype), eval.av_ft])
        # sort by fitness (descending), keep the best n_parents, print results
        parents = sorted(parents, key=lambda x: x[1], reverse=True)[:self.n_parents]
        print("\ngeneration {} sorted agents:\n".format(i_gen))
        for enum, px in enumerate(parents):
            print("{} - ft = {}, genotype: {}".format(enum, round(px[1], 2), [gene[0] for gene in px[0]]))
        # save this generation's genotypes to pickle
        gen_gts = deepcopy(parents)
        self.genotypes.append(gen_gts)
        if len(self.genotypes) % self.save_step == 0:
            self.save_data()
        # next generation
        self.next_gen(parents)
# from Shibuya et al. (Quinn, 2001 dont explain this)
def next_gen(self, parents):
# normalization of ft values (max=120, min=-40)
for px in parents:
norm_ft = 0 if px[1] <= 0 else px[1]
px.append(norm_ft)
# elitism (replicate best 2)
self.population = []
print("\nelitism:")
for enum,px in enumerate(parents[:self.elite]):
print("gt{}: ft={}, norm_ft={}, gt={}".format(enum,round(px[1],2),round(px[2],2),[gx[0] for gx in px[0]]))
self.population.append(deepcopy(px[0]))
# 20% from crossover by roulette strategy
print("\nroulette:")
# for normalized probabilities
ftsum = sum(px[1] for px in parents)
print("ftsum={}".format(ftsum))
for px in parents:
prob = px[2]/ftsum
px.append(prob)
psum = sum(px[3] for px in parents)
# roulette selection
for gi in range(int(self.popsize*self.crossover_rate/2)):
r1 = np.random.uniform(0,psum)
p1 = 0
for px in parents:
p1 += px[3]
if r1 <= p1:
pgt1 = px
break
r2 = np.random.uniform(0,psum)
p2 = 0
for px in parents:
p2 += px[3]
if r2 <= p2:
pgt2 = px
break
# px = [0:gt, 1:ft, 2:norm_ft, 3:prob([0:1])]
print("gt1_ft={}, gt1_prob={}, gt1={}".format(round(pgt1[1],2), round(pgt1[3],2), [gx[0] for gx in pgt1[0]]))
print("gt2_ft={}, gt2_prob={}, gt2={}".format(round(pgt2[1],2), round(pgt2[3],2), [gx[0] for gx in pgt2[0]]))
ngt1,ngt2 = self.crossover(deepcopy(pgt1[0]),deepcopy(pgt2[0]))
print("gt{}: gtx1 => ngt1={}".format(len(self.population),[gx[0] for gx in ngt1]))
print("gt{}: gtx2 => ngt2={}".format(len(self.population)+1,[gx[0] for gx in ngt2]))
self.population.extend([ngt1,ngt2])
# the reamining are mutations (it isn't explicit from where)
print("\nmutations:")
while len(self.population) < self.popsize:
r = np.randon.uniform(0,psum)
p = 0
for px in parents:
if r<=p:
ngt = self.mutate(deepcopy(parents[cgt][0]))
print("gt{}: => genotype: {}".format(len(self.population),[gx[0] for gx in ngt]))
self.population.append(ngt)
break
# mutator operator (from Quinn's thesis)
def mutate(self, gt
    , xr=200, yr=250, sr=50, mr=30, ur=5):
    """Mutation operator (from Quinn's thesis); mutates gt in place and returns it.

    gt: genotype = list of genes [id, x, y, T, ga, gb, in_cxs, out_cxs].
    xr/yr: neural-space extent; sr/mr: sensor/motor margins; ur: weight/threshold range.
    """
    # bookkeeping for the summary print at the end
    zaddgx = None
    zdelgx = None
    zaddcx_in = 0
    zaddcx_out = 0
    zdelcx_in = 0
    zdelcx_out = 0
    # macro-mutations: addition and deletion of genes
    if np.random.uniform(0, 1) <= self.addgx_prob:
        gene = self.create_gene()
        zaddgx = gene[0]
        gt.append(gene)
    # del gen (never empties the genotype)
    if np.random.uniform(0, 1) <= self.delgx_prob:
        if len(gt) > 1:
            dg = np.random.randint(0, len(gt))
            zdelgx = gt[dg][0]
            del(gt[dg])
    # inside gene mutations
    for gene in gt:
        # macro-mutations addition/deletion of connections
        if np.random.uniform(0, 1) <= self.addcx_prob:
            io = np.random.choice([True, False])
            zx = np.random.uniform(0, xr)
            zw = np.random.uniform(-ur, ur)
            if io:
                # new input connection
                zy = np.random.uniform(0, yr - mr)
                gene[6].append([zx, zy, zw])
                zaddcx_in += 1
            else:
                # new output connection
                zy = np.random.uniform(sr, yr)
                gene[7].append([zx, zy, zw])
                zaddcx_out += 1
        # deletion (of a random connection, input or output)
        if np.random.uniform(0, 1) <= self.delcx_prob:
            io = np.random.choice([True, False])
            if io:
                if len(gene[6]) > 0:
                    dg = np.random.randint(len(gene[6]))
                    del(gene[6][dg])
                    zdelcx_in += 1
            else:
                if len(gene[7]) > 0:
                    dg = np.random.randint(len(gene[7]))
                    del(gene[7][dg])
                    zdelcx_out += 1
        # micro-mutations (as in Shibuya et al, mixed with Quinn's)
        # neuron cartesian coordinates (bounded to the neural space:
        # out-of-range values are redrawn between the old value and the bound)
        dx = np.random.normal(0, xr * 0.5)
        x = gene[1] + dx
        if x < 0:
            x = np.random.uniform(0, gene[1])
        elif x > xr:
            x = np.random.uniform(gene[1], xr)
        gene[1] = x
        dy = np.random.normal(0, (yr - sr - mr) * 0.5)
        y = gene[2] + dy
        if y < sr:
            y = np.random.uniform(sr, gene[2])
        elif y > yr - mr:
            y = np.random.uniform(gene[2], (yr - mr))
        gene[2] = y
        # thresholds (unbounded, initialized between [-5,5])
        dT = np.random.normal(0, ur)
        gene[3] += dT
        # decay parameters (bounded between [0:1])
        dga = np.random.normal(0, 0.5)
        ga = gene[4] + dga
        if ga < 0:
            ga = np.random.uniform(0, gene[4])
        elif ga > 1:
            ga = np.random.uniform(gene[4], 1)
        gene[4] = ga
        dgb = np.random.normal(0, 0.5)
        gb = gene[5] + dgb
        if gb < 0:
            gb = np.random.uniform(0, gene[5])
        elif gb > 1:
            gb = np.random.uniform(gene[5], 1)
        gene[5] = gb
        # weights (unbounded, initialized between [-5:5])
        for wi in range(len(gene[6])):
            dw = np.random.normal(0, ur)
            gene[6][wi][2] += dw
        for wo in range(len(gene[7])):
            dw = np.random.normal(0, ur)
            gene[7][wo][2] += dw
    print("+gen:{}, -gen:{}, +cx_in={},+cx_out={}, -cx_in={},-cx_out={}".format(zaddgx, zdelgx, zaddcx_in, zaddcx_out, zdelcx_in, zdelcx_out))
    return gt
# recombination operator (from Quinn's thesis)
def crossover(self, gt1, gt2):
# crossover
new_gt1 = []
new_gt2 = []
# pair genes with the same id
par_gt1 = sorted([gx1 for gx1 in gt1 if gx1[0] in [gx2[0] for gx2 in gt2]], key=lambda x:x[0])
par_gt2 = sorted([gx2 for gx2 in gt2 if gx2[0] in [gx1[0] for gx1 in gt1]], key=lambda x:x[0])
# unpaired genes
unp_gt1 = [gx1 for gx1 in gt1 if gx1 not in par_gt1]
unp_gt2 = [gx2 for gx2 in gt2 if gx2 not in par_gt2]
np.random.shuffle(unp_gt1)
np.random.shuffle(unp_gt2)
# crossover (50% chances) (paired must be of the same length)
for pgx in zip(par_gt1,par_gt2):
i1 = np.random.choice([0,1])
i2 = 1 if i1 == 0 else 0
new_gt1.append(pgx[i1])
new_gt2.append(pgx[i2])
# unpaired genes crossover (same length)
for ugx in zip(unp_gt1,unp_gt2):
i1 = np.random.choice([0,1])
i2 = 1 if i1 == 0 else 0
new_gt1.append(ugx[i1])
new_gt2.append(ugx[i2])
# remaining unpaired genes (if different length)
rem_gt = unp_gt1[len(unp_gt2):] if len(unp_gt1) > len(unp_gt2) else unp_gt2[len(unp_gt1):]
for rgx in rem_gt:
new_gt1.append(rgx) if np.random.choice([True,False]) else new_gt2.append(rgx)
# send back
return new_gt1, new_gt2
# initialization (clonal)
def init_population(self, load_pop):
# create (or complete) population
if load_pop == True:
wdir = os.path.join(os.getcwd(), "qobjs")
objs = sorted([i for i in os.listdir(wdir) if "obj" in i])
select_obj = True
select_gen = False
# select object
while select_obj == True:
print("\n")
for enum, obj_filename in enumerate(objs):
print("{} - {}".format(enum, obj_filename))
n_in = input("\nselect object: ")
try:
n_obj = int(n_in)
n_obj_filename = objs[n_obj]
obj_path = os.path.join(wdir, n_obj_filename)
with open(obj_path, "rb") as ea_exp:
generations = pickle.load(ea_exp)
select_gen = True
obj_popsize = len(generations[0])
except:
print("couldn't open object")
# select generation
while select_gen==True:
print("\n")
for enum,gen in enumerate(generations):
fts = [gt[1] for gt in generations[enum]]
avft = sum(fts)/len(fts)
print("gen {}, av_ft={}, fts: {} ...".format(enum, round(avft,2), np.around(fts[:5],2)))
print("\n")
print("\"p\" to plot fitness evolution")
print("\"s\" to change popsize, currently={} (loaded object popsize={})".format(self.popsize,obj_popsize))
print("\"b\" to go back")
g_in = input("\ngeneration?: ")
if g_in=="b":
select_gt=False
elif g_in=="s":
change_popsize = True
while change_popsize == True:
pop_in = input("popsize?: ")
try:
self.popsize = int(pop_in)
change_popsize = False
except:
print("invalid input")
elif g_in =="p" or g_in=="plot":
plt.plot(np.arange(0,len(generations)),np.asarray([sum([gt[1] for gt in gen])/len(gen) for gen in generations]))
plt.plot(np.arange(0,len(generations)),np.asarray([gen[0][1] for gen in generations]))
plt.xlabel("Generation")
plt.ylabel("Fitness")
plt.show()
else:
try:
n_gen = int(g_in)
self.genotypes = generations
self.population = [gt[0] for gt in generations[n_gen]]
self.cgen = n_gen+1
select_gen = False
select_obj = False
except:
print("\ninvalid input")
print("population loaded from {}:\ngeneration {}, popsize={}".format(obj_path,self.cgen,self.popsize))
# adjust to popsize (in case)
if len(self.population) > self.popsize:
self.population = self.population[:self.popsize]
# create genotypes
print("=> {} agents to adjust to popsize={}".format(self.popsize-len(self.population),self.popsize))
while len(self.population) < self.popsize:
gt = self.create_genotype()
self.population.append(gt)
# Shibuya et al version
# def create_genotype(self, n_genes=20, n_in=8, n_out=4, rg=5):
# genotype = []
# for gi in range(n_genes):
# T = np.random.uniform(-rg,rg)
# wx_in = [np.random.uniform(-rg,rg) for wi in range(n_in)]
# wx_out = [np.random.uniform(-rg,rg) for wo in range(n_out)]
# ga = np.random.uniform(0,1)
# gb = np.random.uniform(0,1)
# gene = [T]+wx_in+wx_out+[ga,gb]
# genotype.append(gene)
# QUINN version
def create_genotype(self, min_genes=6, max_genes=8
, max_in=8, min_out=1, max_out=8):
# predefined values from Quinn's thesis
genotype = []
for _ in range(np.random.randint(min_genes,max_genes+1)):
gene = self.create_gene(max_in=max_in,min_out=min_out,max_out=max_out)
genotype.append(gene)
return genotype
def create_gene(self, max_in=2, min_out=0, max_out=2
, xr=200, yr=250, sr=50, mr=30, ur=5):
x = np.random.uniform(0,xr)
y = np.random.uniform(sr, yr-mr)
th = np.random.uniform(-ur,ur)
ga = np.random.uniform(0,1)
gb = np.random.uniform(0,1)
l_in = []
l_out = []
for _ in range(np.random.randint(0,max_in+1)):
ix = np.random.uniform(0,xr)
iy = np.random.uniform(0,yr-mr)
iw = np.random.uniform(-ur,ur)
l_in.append([ix,iy,iw])
for _ in range(np.random.randint(min_out,max_out+1)):
ox = np.random.uniform(0,xr)
oy = np.random.uniform(sr,yr)
ow = np.random.uniform(-ur,ur)
l_out.append([ox,oy,ow])
gene = [self.ids,x,y,th,ga,gb,l_in,l_out]
self.ids += 1
return gene
# define filename for saving
def define_filename(self):
# dirpath
dirname = "qobjs"
self.dir_path = os.path.join(os.getcwd(),dirname)
if not os.path.isdir(self.dir_path):
os.mkdir(self.dir_path)
try:
index = "{:03}".format(int(sorted([i for i in os.listdir(self.dir_path) if ".qobj" in i])[-1].split("_")[1])+1)
except:
index = "000"
self.index = "qrun_{}".format(index)
# save
def save_data(self):
    """Pickle the full genotype history to a new .qobj file, then prune old saves.

    The file name encodes the run index, population size and number of
    generations saved so far, so each call writes a distinct file.
    """
    i_name = "{}_pop={}_gen={}.qobj".format(self.index, self.popsize, len(self.genotypes))
    i_path = os.path.join(self.dir_path, i_name)
    # look for previous saves of this same run (matched by run index)
    temps = sorted([i for i in os.listdir(self.dir_path) if self.index in i])
    # safety check: never overwrite an existing file
    if os.path.isfile(i_path):
        import time
        print("\nfile already exists")
        i_path += time.ctime()
    # save with pickle
    with open(i_path, "wb") as exp_file:
        pickle.dump(self.genotypes, exp_file)
    print("\nobject saved at: \n{}\n".format(i_path))
    # delete previous temps
    # NOTE(review): temps[:-1] keeps the newest previous save in addition to
    # the file just written — presumably a deliberate backup; confirm intent.
    if len(temps) >= 1:
        for tempfile in temps[:-1]:
            os.remove(os.path.join(self.dir_path, tempfile))
            print("removed temporal file: {}".format(tempfile))
    print("")
# Entry point: constructing EvolAlg immediately runs the evolutionary loop.
EvolAlg()
##
|
#! /usr/bin/env python
# coding: utf-8
#ヘアスタイル合成のメインコード
#基準座標型フィッテング手法
#スケール変更 → 位置合わせ → クロマキー合成
import cv
import cv2
import numpy as np
import pylab as plt
import synthesis #クロマキー合成
import hairTranceform2 #スケール変更 + 位置合わせ
#入力
# Input: which hairstyle sample number to composite (Python 2 script)
samplenum = raw_input('合成するヘアスタイル番号を指定してください : ')
sampleName = '../image2/sample' + samplenum + '_front.jpeg'
inputImageName = '../image/face/test_front.jpeg'  # input face image file name
out_inputImageName = '../image/face/inputNoHair.jpeg'
# Face coordinate information of the hair sample (hand-measured landmarks)
sampleFacePoint_h = [[160, 45], [160, 218]]
sampleFacePoint_w = [[96, 131], [225, 131]]
sampleFace_baseX = sampleFacePoint_w[0][0]  # reference point (x) of the sample face
sampleFace_baseY = sampleFacePoint_h[0][1]  # reference point (y) of the sample face
sFace_height = sampleFacePoint_h[1][1] - sampleFacePoint_h[0][1]  # sample face height
sFace_width = sampleFacePoint_w[1][0] - sampleFacePoint_w[0][0]  # sample face width
# Read the input face image and extract its face coordinates
inputImg = cv2.imread(inputImageName)
inputFace_h_point, inputFace_w_point = hairTranceform2.zahyou_get(inputImg)  # analyze the input image (get the required coordinates)
iFace_height = abs(inputFace_h_point[1][1] - inputFace_h_point[0][1])  # input face height
iFace_width = abs(inputFace_w_point[1][0] - inputFace_w_point[0][0])  # input face width
# Compute the reference coordinates of the input face
inputFace_baseX = min(inputFace_w_point[0][0], inputFace_w_point[1][0])
inputFace_baseY = min(inputFace_h_point[0][1], inputFace_h_point[1][1])
# Compute the center coordinates of the input face
iFace_centerPointX = (float(inputFace_w_point[0][0]) + float(inputFace_w_point[1][0])) / 2.0
iFace_centerPointX_int = int(round(iFace_centerPointX, 0))  # to integer
iFace_centerPointY = (float(inputFace_h_point[0][1]) + float(inputFace_h_point[1][1])) / 2.0
iFace_centerPointY_int = int(round(iFace_centerPointY, 0))  # to integer
# The shapes drawn during face analysis get in the way, so reload a clean copy
inputImg = cv2.imread(out_inputImageName)
# Load the hair image
sampleImg = cv2.imread(sampleName)
# Scale matching
sampleImg_transe, sampleFace_baseX_transe, sampleFace_baseY_transe \
    = hairTranceform2.scale(sFace_height, sFace_width, iFace_height, iFace_width, sampleImg, sampleFace_baseX, sampleFace_baseY)
# Position matching
# The two images received here are fed into chroma-key compositing
inputImg_triming, sampleImg_match = hairTranceform2.matchPoint(sampleImg_transe, inputImg, inputFace_baseX, inputFace_baseY, \
    sampleFace_baseX_transe, sampleFace_baseY_transe, iFace_centerPointX_int, iFace_centerPointY_int)
# Save once under a fixed file name (used as the chroma-key input)
cv2.imwrite('hair_front.jpeg', sampleImg_match)
# Chroma-key compositing
mask_f = synthesis.mask_front('hair_front.jpeg')  # build the mask image
# Allocate the output region for the composite (legacy cv API)
h = sampleImg_match.shape[0]
w = sampleImg_match.shape[1]
image_out_ipl = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 3)
image_out_mat = cv.GetMat(image_out_ipl)
image_out = np.asarray(image_out_mat)
# Compositing
synImage_f = synthesis.syn(sampleImg_match, inputImg_triming, image_out, mask_f)
cv2.imwrite('result/base/hairSyn' + samplenum + '_front.jpeg', synImage_f)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 5 14:59:30 2016
@author: alex
"""
from AlexRobotics.dynamic import Manipulator as M
from AlexRobotics.estimation import ManipulatorDisturbanceObserver as OBS
from scipy.interpolate import interp1d
import numpy as np
'''
###############################################################################
### Controllers for Manipulators-class robots
###############################################################################
'''
##########################################################################
class ComputedTorqueController:
    """Computed-torque feedback law for Manipulator-class robots."""
    ############################
    def __init__( self , R = M.TwoLinkManipulator() ):
        """R: robot model used by the controller (dynamics + kinematics)."""
        self.R = R  # Model of the robot used by the controller
        # Params of the second-order error dynamics
        self.w0 = 10      # natural frequency
        self.zeta = 0.7   # damping ratio
        # default is fixed goal at zero
        self.goal = np.zeros( R.n )
        self.traj_loaded = False
        self.ctl = self.fixed_goal_ctl  # active control law (rebound by load_trajectory)
        # Or load a solution: self.solution = [ x , u , t , dx ]
        #self.traj_ref_pts = 'closest'
        self.traj_ref_pts = 'interpol'  # how get_traj picks reference points
        # For manual acc control
        self.ddq_manual_setpoint = np.zeros( self.R.dof )
        # Integral action with dist observer (beta)
        self.dist_obs_active = False
        self.obs = OBS.DistObserver( R )
############################
def ctl( self , x , t = 0 ):
    """
    Place holder for feedback law.

    __init__ rebinds self.ctl to a concrete law (fixed_goal_ctl or
    traj_following_ctl), so instances never dispatch to this body.
    """
    pass
############################
def traj_following_ctl( self , x , t = 0 ):
    """
    Given desired loaded trajectory and actual state, compute torques.
    """
    ref_acc, ref_vel, ref_pos = self.get_traj(t)
    ddq_r = self.compute_ddq_r(ref_acc, ref_vel, ref_pos, x)
    return self.computed_torque(ddq_r, x, t)
############################
def fixed_goal_ctl( self , x , t = 0 ):
    """
    Given desired fixed goal state and actual state, compute torques.

    x: state vector; t: time (forwarded to computed_torque).
    Returns the generalized force vector F.
    """
    ddq_d = np.zeros( self.R.dof )  # fixed point: zero desired acceleration
    [ q_d , dq_d ] = self.R.x2q( self.goal ) # from state vector (x) to angle and speeds (q,dq)
    ddq_r = self.compute_ddq_r( ddq_d , dq_d , q_d , x )
    F = self.computed_torque( ddq_r , x , t )
    return F
############################
def manual_acc_ctl( self , x , t = 0 ):
    """
    Given desired acc (the manually-set setpoint), compute torques.
    """
    return self.computed_torque(self.ddq_manual_setpoint, x, t)
############################
def computed_torque( self , ddq_r , x , t ):
    """
    Given actual state, compute torque necessary for a given acceleration vector.

    Side effect: when the disturbance observer is active, its estimate is
    updated and written into the robot model (self.R.f_dist_steady).
    """
    [ q , dq ] = self.R.x2q( x ) # from state vector (x) to angle and speeds (q,dq)
    F = self.R.F( q , dq , ddq_r ) # Generalized force necessary
    # Dist obs:
    if self.dist_obs_active:
        self.obs.update_estimate( x , F , t )
        self.R.f_dist_steady = self.obs.f_ext_hat
    return F
############################
def compute_ddq_r( self , ddq_d , dq_d , q_d , x ):
    """
    Given desired trajectory and actual state, compute the reference
    acceleration ddq_r via a PD law on the tracking errors.
    """
    q, dq = self.R.x2q(x)           # state vector -> positions and velocities
    q_e = q - q_d                   # position error
    dq_e = dq - dq_d                # velocity error
    kd = 2 * self.zeta * self.w0    # derivative gain
    kp = self.w0 ** 2               # proportional gain
    ddq_r = ddq_d - kd * dq_e - kp * q_e
    # keep the errors around for debugging/plotting
    self.q_e = q_e
    self.dq_e = dq_e
    self.ddq_r = ddq_r
    return ddq_r
############################
def load_trajectory( self , solution ):
    """
    Load Open-Loop trajectory solution to use as reference trajectory.

    solution: [ x , u , t , dx ] with x stacking q over dq row-wise.
    Switches the active control law to traj_following_ctl and builds
    time-interpolation functions for q, dq and ddq.
    """
    self.solution = solution
    q = solution[0][ 0 : self.R.dof , : ]                # joint angles
    dq = solution[0][ self.R.dof : 2 * self.R.dof , : ]  # joint speeds
    ddq = solution[3][ self.R.dof : 2 * self.R.dof , : ] # joint accelerations (from dx)
    t = solution[2]
    self.traj = [ ddq , dq , q , t ]
    self.max_time = t.max()
    # assign new controller
    self.ctl = self.traj_following_ctl
    # Create interpol functions
    self.q = interp1d(t,q)
    self.dq = interp1d(t,dq)
    self.ddq = interp1d(t,ddq)
############################
def get_traj( self , t ):
    """
    Reference (ddq, dq, q) at time t from the loaded trajectory.

    Near/after the trajectory end (t >= max_time - 0.1) the fixed goal is
    used instead, holding the final posture.
    """
    if t < self.max_time - 0.1 :
        if self.traj_ref_pts == 'interpol':
            # Load trajectory by interpolation
            q = self.q( t )
            dq = self.dq( t )
            ddq = self.ddq( t )
        elif self.traj_ref_pts == 'closest':
            # Find closest index
            # NOTE(review): the +1 steps to the *next* sample; for t near the
            # array end this could index out of range — confirm intent.
            times = self.traj[3]
            i = (np.abs(times - t)).argmin() + 1
            # Load trajectory
            ddq = self.traj[0][:,i]
            dq = self.traj[1][:,i]
            q = self.traj[2][:,i]
    else:
        # Fixed goal
        ddq = np.zeros( self.R.dof )
        dq = self.goal[ self.R.dof : 2 * self.R.dof]
        q = self.goal[ 0 : self.R.dof ]
    return ddq , dq , q
'''
################################################################################
'''
class SlidingModeController( ComputedTorqueController ):
    """Sliding-mode feedback law; reuses the trajectory machinery of ComputedTorqueController."""
    ############################
    def __init__( self , R = M.TwoLinkManipulator() ):
        """R: robot model used by the controller."""
        ComputedTorqueController.__init__( self , R )
        # Params
        self.lam = 1    # Sliding surface slope
        self.D = 1      # Discontinuous gain
        self.nab = 0.1  # Min convergence rate
############################
def compute_sliding_variables( self , ddq_d , dq_d , q_d , x ):
    """
    Given desired trajectory and actual state, compute the sliding variable
    s and the reference velocity/acceleration used by the sliding law.
    """
    q, dq = self.R.x2q(x)         # state vector -> positions and velocities
    q_e = q - q_d                 # position error
    dq_e = dq - dq_d              # velocity error
    s = dq_e + self.lam * q_e     # sliding-surface variable
    dq_r = dq_d - self.lam * q_e
    ddq_r = ddq_d - self.lam * dq_e
    # keep the errors around for debugging/plotting
    self.q_e = q_e
    self.dq_e = dq_e
    self.ddq_r = ddq_r
    return [ s , dq_r , ddq_r ]
############################
def K( self , q , t ):
    """Discontinuous gain matrix: disturbance bound plus H(q)-scaled convergence rate."""
    eye = np.diag(np.ones(self.R.dof))
    return eye * self.D + np.dot(self.R.H_all(q), eye * self.nab)
############################
def sliding_torque( self , ddq_r , s , x , t ):
    """
    Given actual state, compute the torque guaranteeing convergence to the
    sliding surface: model-based term minus the discontinuous term.
    """
    q, dq = self.R.x2q(x)
    feedforward = self.R.F(q, dq, ddq_r)            # model-based torque
    switching = np.dot(self.K(q, t), np.sign(s))    # discontinuous robustness term
    return feedforward - switching
############################
def traj_following_ctl( self , x , t = 0 ):
    """
    Given desired loaded trajectory and actual state, compute torques
    using the sliding-mode law.
    """
    ddq_d , dq_d , q_d = self.get_traj( t )
    [ s , dq_r , ddq_r ] = self.compute_sliding_variables( ddq_d , dq_d , q_d , x )
    F = self.sliding_torque( ddq_r , s , x , t )
    return F
############################
def fixed_goal_ctl( self , x , t = 0 ):
    """
    Given desired fixed goal state and actual state, compute torques
    using the sliding-mode law.
    """
    ddq_d = np.zeros( self.R.dof )  # fixed point: zero desired acceleration
    [ q_d , dq_d ] = self.R.x2q( self.goal ) # from state vector (x) to angle and speeds (q,dq)
    [ s , dq_r , ddq_r ] = self.compute_sliding_variables( ddq_d , dq_d , q_d , x )
    F = self.sliding_torque( ddq_r , s , x , t )
    return F
|
import time
from datetime import datetime
from multiprocessing import Process, Value, Queue, Manager
from Algorithms import Algorithm
from Arduino import Arduino
# from Android import Android
from socket import error as SocketError
import errno
ANDROID_HEADER = 'AND'.encode()
ARDUINO_HEADER = 'ARD'.encode()
ALGORITHM_HEADER = 'ALG'.encode()
class MultiProcessCommunication:
    """Relays messages between the algorithm (PC), Arduino and Android modules.

    Arduino and Android handling is currently commented out; only the
    algorithm link and the generic writer process are active.
    """
    def __init__(self):
        # Connect to Arduino, Algo and Android
        # self.arduino = Arduino()
        self.algorithm = Algorithm()
        # self.android = Android()
        self.manager = Manager()
        # Messages from various modules are placed in this queue before being read
        self.message_queue = self.manager.Queue()
        # Messages to android are placed in this queue
        # NOTE(review): left commented out, but _read_algorithm still accesses
        # self.to_android_message_queue — that path raises AttributeError.
        # self.to_android_message_queue = self.manager.Queue()
        # self.read_arduino_process = Process(target=self._read_arduino)
        self.read_algorithm_process = Process(target = self._read_algorithm)
        # self.read_android_process = Process(target=self._read_android)
        self.write_process = Process(target=self._write_target)
        # self.write_android_process = Process(target=self._write_android)
        print('Multi Process initialized')
        # self.status = Status.IDLE
        # self.dropped_connection = Value('i',0)
def start(self):
    """Connect to the algorithm module and launch the reader/writer processes.

    Raises whatever the underlying connect()/start() calls raise.
    """
    # BUG FIX: the original wrapped everything in
    # `except Exception as err: raise err`, which only re-raised the same
    # exception while resetting where the traceback points; removed.
    # Connect to arduino, algo and android
    # self.arduino.connect()
    self.algorithm.connect()
    # self.android.connect()
    # Start the processes that listen and read from the connected modules
    # self.read_arduino_process.start()
    self.read_algorithm_process.start()
    # self.read_android_process.start()
    # Start the process that writes to algo and arduino
    self.write_process.start()
    # Start the process to write to android
    # self.write_android_process.start()
    print('Comms started. Reading from algo and android and arduino.')
def _format_for(self, target, message):
#Function to return a dictionary containing the target and the message
return {
'target': target,
'payload': message,
}
# def _read_arduino(self):
# '''
# Arduino only needs to send messages to Algo (PC)
# '''
# while True:
# try:
# rawmessage = self.arduino.read()
# if rawmessage == None:
# continue
# message_list = rawmessage.splitlines()
# for message in message_list:
# if len(message) <= 0:
# continue
# else:
# self.message_queue.put_nowait(self._format_for(ALGORITHM_HEADER, message))
# except Exception as err:
# print("_read_arduino failed - {}".format(str(err)))
# break
def _read_algorithm(self):
    '''
    Read messages from the algorithm module forever and dispatch them.
    To-dos: Layout messages to relay
    '''
    while True:
        try:
            raw_message = self.algorithm.read()
            if raw_message is None:
                continue
            message_list = raw_message.splitlines()
            for message in message_list:
                if len(message) <= 0:
                    continue
                else:
                    # messages are bytes: compare first byte against b'M'[0]
                    if(message[0] == 'M'.encode()[0]):
                        print('Sending to Android')
                        # NOTE(review): to_android_message_queue is commented
                        # out in __init__ — this line raises AttributeError
                        # until the Android path is re-enabled.
                        self.to_android_message_queue.put_nowait(message[1:])
                    else:
                        print(message)
        except Exception as err:
            raise err
# def algorithm_to_android(self, message):
# #Send message from Algo (PC) to Android
# '''
# Todos - Account for messages - E.g. Algo - Android : Turn right
# '''
# MESSAGE_SEPARATOR = ""
# messages_to_send = message.split(MESSAGE_SEPARATOR)
# for message_to_send in messages_to_send:
# if len(message_to_send) <= 0:
# continue
# else:
# self.to_android_message_queue.put_nowait(message_to_send)
# def _read_android(self):
# while True:
# try:
# rawmessage = self.android.read()
# if rawmessage == None:
# continue
# message_list = rawmessage.splitlines()
# for message in message_list:
# if len(message) <= 0:
# continue
# else:
# self.message_queue.put_nowait(self._format_for(ARDUINO_HEADER, message))
# # self.message_queue.put_nowait(self._format_for(ALGORITHM_HEADER,message))
# except Exception as err:
# print('_read_android error - {}'.format(str(err)))
# break
def _write_target(self):
    """Drain the shared message queue and forward each payload to its target module."""
    while True:
        target = None
        try:
            if not self.message_queue.empty():
                message = self.message_queue.get_nowait()
                target, payload = message['target'], message['payload']
                if target == ALGORITHM_HEADER:
                    self.algorithm.write(payload)
                elif target == ARDUINO_HEADER:
                    # NOTE(review): self.arduino is commented out in __init__ —
                    # an ARD-targeted message raises AttributeError here.
                    self.arduino.write(payload)
        except Exception as err:
            # any failure stops the writer process
            print('failed {}'.format(err))
            break
# def _write_android(self):
# while True:
# try:
# if not self.to_android_message_queue.empty():
# message = self.to_android_message_queue.get_nowait()
# self.android.write(message)
# except Exception as error:
# print('Process write_android failed: ' + str(error))
# break
# def testing_arduino(self):
# while True:
# message = input('Input message to send to arduino')
# self.arduino.write(message.encode())
# if message == 'q':
# break
def testing_algo(self):
    """Interactive loop: forward typed messages to the algorithm until 'q'."""
    while True:
        text = input("Input message to send to Algo:\n")
        self.algorithm.write(text.encode())
        if text == 'q':
            return
# def testing_android(self):
# while True:
# message = input("Input message to send to Android:\n")
# self.android.write(message.encode())
# if message == 'q':
# break
|
#!/usr/bin/python3
# Tutorial script: demonstrate Python comparison operators.
# BUG FIX: the last line carried a stray trailing "|" (dataset residue) that
# made the file a syntax error; stray semicolons removed as well.
a = 21
b = 10
c = 0

if a == b:
    print("1 - a 等于 b")
else:
    print("1 - a 不等于 b")

if a != b:
    print("2 - a 不等于 b")
else:
    print("2 - a 等于 b")

if a < b:
    print("3 - a 小于 b")
else:
    print("3 - a 大于等于 b")

if a > b:
    print("4 - a 大于 b")
else:
    print("4 - a 小于等于 b")

# change the values of a and b (original comment: 修改变量 a 和 b 的值)
a = 5
b = 20

if a <= b:
    print("5 - a 小于等于 b")
else:
    print("5 - a 大于 b")

if b >= a:
    print("6 - b 大于等于 a")
else:
    print("6 - b 小于 a")
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-04 05:48
from __future__ import unicode_literals
import core.utils
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration.

    Creates SiteConfiguration (site-wide contact/header data) and
    SocialLink (icon links ordered by `position`, FK'd to a config).
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='SiteConfiguration',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=1024, verbose_name='header title')),
                ('address', models.CharField(default='', max_length=1024, verbose_name='address')),
                ('address_comment', models.CharField(blank=True, default='', max_length=1024, verbose_name='address comment')),
                ('phone', models.CharField(max_length=512, verbose_name='phone')),
                ('email', models.EmailField(max_length=254, verbose_name='email')),
            ],
            options={
                'verbose_name': 'site config',
            },
        ),
        migrations.CreateModel(
            name='SocialLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=512, verbose_name='name')),
                ('icon', models.ImageField(upload_to=core.utils.upload_path_handler, verbose_name='icon')),
                ('icon_alt', models.CharField(blank=True, default='', max_length=1024, verbose_name='icon alt')),
                ('url', models.CharField(max_length=1024, verbose_name='http link')),
                ('position', models.PositiveIntegerField(default=0, verbose_name='position')),
                ('config', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='social_links', to='core.SiteConfiguration')),
            ],
            options={
                'ordering': ['position'],
                'verbose_name': 'social link',
                'verbose_name_plural': 'social links',
            },
        ),
    ]
|
"""firstproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# Routes the URLs of this Django project (maps each URL to its view).
from django.contrib import admin
from django.urls import path
import wordcounter.views
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',wordcounter.views.home, name="home"),
    path('about/',wordcounter.views.about, name="about"),
    path('count/',wordcounter.views.count, name="count"),
    path('select/',wordcounter.views.select, name="select"),
    # (number) marks each routing step below.
    # (3) When a request named "select" comes from an HTML page,
    # (1) the URL is routed to 127.0.0.1/select and
    # (2) the select() function in views.py handles it.
]
|
# 1. Напиши программу, которая выводит на экран сегодняшнюю дату в формате 2017-01-31 15:34:45
# 2. Напиши функцию, которая принимает радиус окружности, и возвращает площадь этой окружности.
# 3. Напиши функцию, которая принимает цифру n от 0 до 9(больше или меньше должны выводить на экран ошибку), и возвращает сумму n+nn+nnn. Т.е. если введено 7, то должна быть сумма 7+77+777 (861)
# 4. Напиши функцию, которая принимает список(list) строк, например: ['Я', 'большой', 'молодец'], и возвращает одну строку "Я большой молодец"
# 5. Напиши функцию, которая принимает число и тип времени, и переводит его в секунды. Если введено 4 и "час", то результат 4 х 60 х 60 = 14400. Возможные типы: минута, час, день, год.
# 6. Напиши функцию, которая принимает число, и выводит на экран пример его умножения на числа от 2 до 10, например:
# Ввод: 2
# Вывод:
# 2 X 2 = 4
# 2 X 3 = 6
# 2 X 4 = 8
# ....
# 2 X 10 = 20
# 7. Напиши функцию, которая принимает слово, и возвращает два других. Первое состоит только из чётных по порядку букв, второе из нечётных. 0 считается чётным. Например:
# Ввод: Август
# Вывод: Агс вут
from django.db import models
class CustomUser(models.Model):
    """Application user identified by a unique email address."""
    email = models.EmailField(unique=True, verbose_name='Email')
    name = models.CharField(blank=True, max_length=100, verbose_name='Name')
    # NOTE(review): password is a plain CharField, i.e. stored unhashed —
    # confirm whether Django's auth framework should be used instead.
    password = models.CharField(max_length=20, verbose_name='Password')
    def __str__(self):
        return self.email
class Company(models.Model):
    """A company offering an internship."""
    name = models.CharField(max_length=100, verbose_name='Company')
    field = models.CharField(max_length=100, verbose_name='Field of Internship')
    stipend = models.FloatField(verbose_name='Stipend')
    duration = models.CharField(max_length=50, verbose_name='Duration')
    location = models.CharField(max_length=100, verbose_name='Location')
    def __str__(self):
        return self.name
class Application(models.Model):
    """A user's application to a company with three free-text answers."""
    company = models.ForeignKey('Company', on_delete=models.CASCADE, verbose_name='Company')
    user = models.ForeignKey('CustomUser', on_delete=models.CASCADE, verbose_name='User')
    q1 = models.TextField(verbose_name='Question 1')
    q2 = models.TextField(verbose_name='Question 2')
    q3 = models.TextField(verbose_name='Question 3')
    def __str__(self):
        # e.g. "Acme-Jane" — note user.name is blank-able, so this can end with '-'.
        return self.company.name+'-'+self.user.name
class Resume(models.Model):
    """An uploaded resume file belonging to a user."""
    user = models.ForeignKey('CustomUser', on_delete=models.CASCADE, verbose_name='User')
    resume = models.FileField(upload_to='Resume', verbose_name='Resume')
    def __str__(self):
        return self.user.name
|
import os
from flask import Flask, request
app = Flask(__name__)
@app.route("/command", methods=["POST"])
def executeCommand():
    '''
    Receive a command from a POST request, validate it against an
    allow-list, and execute it without a shell.

    Fixes two defects in the original:
    - the route only accepted GET, so the `request.method == 'POST'`
      branch was dead and the view returned None (a 500 error);
    - `os.system(action)` ran raw request input through a shell —
      a textbook remote command-injection hole.
    Returns "Command executed" on success, HTTP 400 otherwise.
    '''
    import shlex
    import subprocess

    # Only executables on this allow-list may be run; extend deliberately.
    allowed = {"ls", "date", "uptime"}
    action = request.args.get('action', '')
    argv = shlex.split(action)
    if not argv or argv[0] not in allowed:
        return "Invalid command", 400
    # shell=False with an argv list: arguments are passed verbatim,
    # never interpreted by a shell.
    subprocess.run(argv, shell=False, check=False)
    return "Command executed"
|
from django import forms
# Форма регистрации
from .models import DiskUser
from django.contrib.auth import password_validation
from django.core.exceptions import ValidationError
# Регистрацция
class RegisterUserForm(forms.ModelForm):
    """Registration form asking for the password twice.

    Runs Django's configured password validators on the first entry and
    rejects submissions where the two entries differ.
    """
    # Declared in full so the email address becomes required.
    email = forms.EmailField(required=True,
                             label='Адрес электронной почты')
    password1 = forms.CharField(label='Пароль',
                                widget=forms.PasswordInput,
                                help_text=password_validation.password_validators_help_text_html())
    password2 = forms.CharField(label='Пароль (повторно)',
                                widget=forms.PasswordInput,
                                help_text='Введите тот же самый пароль еще раз для проверки')

    def clean_password1(self):
        """Apply the configured password validators to the first entry."""
        password1 = self.cleaned_data['password1']
        if password1:
            password_validation.validate_password(password1)
        return password1

    def clean(self):
        """Ensure both password entries match.

        Bug fix: uses .get() instead of indexing — a field that failed
        its own validation is absent from cleaned_data, so the original
        raised KeyError instead of showing the field errors.
        """
        cleaned_data = super().clean()
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 and password2 and password1 != password2:
            errors = {'password2': ValidationError(
                'Введенные пароли не совпадают',
                code='password_mismatch'
            )
            }
            raise ValidationError(errors)
        return cleaned_data

    def save(self, commit=True):
        """Hash the chosen password before saving the user."""
        user = super().save(commit=False)
        user.set_password(self.cleaned_data['password1'])
        if commit:
            user.save()
        return user

    class Meta:
        model = DiskUser
        fields = ('username', 'email', 'password1', 'password2',
                  'first_name', 'last_name')
class ChangeUserInfoForm(forms.ModelForm):
    """Profile-editing form for an existing DiskUser."""
    # The email field of the DiskUser model is declared in full
    # because it must be required here.
    email = forms.EmailField(required=True,
                             label='Адрес электронной почты')
    class Meta:
        model = DiskUser
        fields = ('username', 'email', 'first_name', 'last_name')
import requests
from datetime import datetime
import config
exercise_endpoint = "https://trackapi.nutritionix.com/v2/natural/exercise"
# exercise_text = input("What exercise did you do? ")
# Natural-language exercise query plus the user stats Nutritionix needs
# to estimate calories burned.
exercise_body = {
    "query": "ran 5 kilometers",
    "gender": "male",
    "weight_kg": 88,
    "height_cm": 183,
    "age": 33,
}
# Nutritionix credentials live in config.py (kept out of source control).
exercise_header = {
    "x-app-id": config.APP_ID,
    "x-app-key": config.API_KEY,
    # "Content-Type": "json"
}
response_from_exercise = requests.post(exercise_endpoint, json=exercise_body, headers=exercise_header)
response_from_exercise = response_from_exercise.json()
# Timestamps formatted for the sheet row: YYYY-MM-DD and HH:MM:SS.
today = datetime.now().strftime("%Y-%m-%d")
today_time = datetime.now().strftime("%X")
# NOTE(review): only the first exercise in the response is recorded and
# the "exercises" list is assumed non-empty — no error handling here.
exercise_name = response_from_exercise["exercises"][0]["name"]
exercise_calories = response_from_exercise["exercises"][0]["nf_calories"]
exercise_duration = response_from_exercise["exercises"][0]["duration_min"]
print(response_from_exercise)
print(today)
print(exercise_name)
print(exercise_calories)
# Append one workout row to the spreadsheet via the Sheety API.
excel_endpoint = "https://api.sheety.co/a16a777eeb0f532c8eda0e87a6dd72b4/myWorkouts/workouts"
excel_body = {
    "workout": {
        "date": today,
        "time": today_time,
        "exercise": exercise_name.title(),
        "duration": exercise_duration,
        "calories": exercise_calories,
    }
}
bearer_headers = {
    "Authorization": f"Bearer {config.SHEETY_TOKEN}"
}
response_from_excel = requests.post(excel_endpoint, json=excel_body, headers=bearer_headers)
|
from django.urls import path
from . import views
app_name = 'stores'  # URL namespace: reverse with 'stores:<name>'
# Standard CRUD routes for Store objects.
urlpatterns = [
    path('', views.store_list, name='store_list'),
    path('new/', views.store_create, name='store_create'),
    # NOTE(review): the detail route lacks a trailing slash, unlike the others.
    path('<int:pk>', views.store_detail, name='store_detail'),
    path('<int:pk>/update/', views.store_update, name='store_update'),
    path('<int:pk>/delete/', views.store_delete, name='store_delete'),
]
|
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
from matplotlib import pyplot as plt
import numpy as np
# Download/load MNIST with one-hot encoded labels (TF1-era API).
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
sess = tf.Session()
# NOTE(review): Xtr/Ytr are fetched but never used below.
Xtr, Ytr = mnist.train.next_batch(5000)
# distance = tf.reduce_sum(tf.abs(Xtr - x_test))
# 1000 training samples used as the 1-NN reference set.
x_tr, y_tr = mnist.train.next_batch(1000)
# y_tr = mnist.train.labels
# print mnist.train.images.shape
# print mnist.train.labels.shape
# # print mnist.train.images
# # print len(x_tr[0])
# 1000 test samples; only the first 20 are evaluated below.
x_test, y_test = mnist.test.next_batch(1000)
# y_test = mnist.test.labels
def cal_dist(x_tr, x_test):
    """Return a list of L1 distances from *x_test* to every row of *x_tr*.

    NOTE(review): performs one sess.run per training row — extremely
    slow; a single vectorized reduce over the whole batch would need
    only one run. Python 2 print statement below.
    """
    loops = len(x_tr)
    print 'length', loops
    dist_arr = []
    for i in range(loops):
        dist = tf.reduce_sum(tf.abs(x_tr[i] - x_test))
        dist_arr.append(sess.run(dist))
    return dist_arr
accuracy = 0.0
for i in range(20):
arr = cal_dist(x_tr, x_test[i])
closest_tr_idx = sess.run(tf.argmin(arr, 0))
y_closest_tr = y_tr[closest_tr_idx]
y_closest_te = y_test[i]
if (tf.argmax(y_closest_te) == tf.argmax(y_closest_tr)):
accuracy += 1./ 20.
# print '****', arr
# print closest_tr_idx
# print '----', y_closest_te
# print '****', y_closest_tr
print accuracy
# print '****', sess.run(compare)
|
import time
from threading import Thread
"""
Clase fondo
Atributos:
+ejeX:es el eje x tanta de las estrellas y de los planetas del fondo
+canvas: canvas donde se mueve el fondo
+imagen:imagenes de las estrellas y de los planetas
+velocidadX: velocidad de movimiento en el eje x
+jugando:boolean que determina si se esta jugando o no
Metodos:
- moveT():Crea un thread para el movimiento del fondo
- move():Funcion que hace el movimiento del fondo de la pantalla
- __del__():Elimina la imagen del canvas
"""
class Fondo:
    """Scrolling background element (star/planet image) on a Tk canvas."""

    def __init__(self, canvas, imagen, velocidad, clasejuego):
        # Current x coordinate of the canvas image.
        self.ejeX = canvas.coords(imagen)[0]
        self.canvas = canvas
        self.imagen = imagen
        # Negated: the background always scrolls to the left.
        self.velocidadX = -(velocidad)
        # Game object exposing returnJugando().
        self.jugando = clasejuego

    def moveT(self):
        """Run move() on a daemon-less background thread."""
        worker = Thread(target=self.move)
        worker.start()

    def move(self):
        """Scroll left every 0.1 s until off-screen or the game stops."""
        while self.ejeX > -50:
            self.canvas.move(self.imagen, self.velocidadX, 0)
            self.ejeX = self.canvas.coords(self.imagen)[0]
            time.sleep(0.1)
            if self.jugando.returnJugando() == False:
                self.canvas.delete(self.imagen)
                break

    def __del__(self):
        # Remove the image when this object is garbage-collected.
        self.canvas.delete(self.imagen)
|
# Note: Validation is done for 8-word input, we just need to check 3^8 = 6561 cases
# Validation for 16-word inputs requires 3**16 = 43'046'721 checks
ELEMENTS_COUNT = 8  # number of UTF-16 code units examined per chunk
ALL_MASK = (1 << ELEMENTS_COUNT) - 1   # every word position valid
ALL_BUT_ONE_MASK = (ALL_MASK >> 1)     # every position except the last
# 'V' - single-word character (always valid)
# 'L' - low surrogate (must be followed by the high surrogate)
# 'H' - high surrogate
def all_sequences(count=None):
    """Yield every sequence of *count* words drawn from {'V', 'L', 'H'}.

    *count* defaults to ELEMENTS_COUNT, giving 3**count sequences in the
    same order as the original hand-rolled odometer: all-'V' first, with
    position 0 cycling fastest. The parameter is new and backward
    compatible — existing no-argument callers are unchanged.
    """
    import itertools
    if count is None:
        count = ELEMENTS_COUNT
    for combo in itertools.product('VLH', repeat=count):
        # Reverse so index 0 varies fastest, matching the old increment().
        yield list(reversed(combo))
def find_error_in_words(words):
    """Return '' when *words* is a valid surrogate sequence, else a message.

    A chunk may not begin with a high surrogate (the matching low would
    have been consumed by the previous iteration of the vector algorithm),
    every 'L' must be immediately followed by 'H', and every 'H' must be
    immediately preceded by 'L'. A trailing unmatched 'L' is accepted —
    it gets re-examined in the next chunk.
    """
    if words[0] == 'H':
        return 'high surrogate must not start a chunk'
    previous = None
    for position, current in enumerate(words):
        follows_low = previous == 'L'
        if current == 'H':
            if not follows_low:
                return f'high surrogate {position} must be preceded by low surrogate'
        elif follows_low:
            # current is 'V' or 'L' right after an unpaired low surrogate
            return f'low surrogate {position - 1} must be followed by high surrogate'
        previous = current
    return ''
def bitmask(words, state):
    """Return an int whose bit i is set exactly when words[i] == state.

    Mirrors the movemask the SSE algorithm derives from comparing the
    high bytes of the input words.
    """
    return sum(1 << position
               for position, word in enumerate(words)
               if word == state)
def mask(words):
    """Return the validity bitmask for *words*.

    Bit i is set when word i belongs to a valid unit: a single-word
    character ('V') or a matched ('L','H') surrogate pair.
    Generalized: the all-ones mask is derived from len(words) instead of
    the fixed 8-word ALL_MASK, so any sequence length works; for
    len(words) == ELEMENTS_COUNT the result is identical to the original.
    """
    low = bitmask(words, 'L')
    high = bitmask(words, 'H')
    all_mask = (1 << len(words)) - 1
    # Positions holding plain single-word characters.
    single = (~(low | high)) & all_mask
    # A low surrogate is matched when the *next* position is a high one.
    matched_low = low & (high >> 1)
    matched_high = matched_low << 1
    return single | matched_low | matched_high
def dump():
    """Print every sequence with its validity flag and computed mask."""
    for seq in all_sequences():
        value = mask(seq)
        # 'T' marks sequences the reference checker accepts.
        flag = 'T' if find_error_in_words(seq) == '' else ' '
        row = "[ %s ]" % ' | '.join(seq)
        print(row, flag, '{:016b} {:04x}'.format(value, value))
def proof():
    """Exhaustively verify mask() against the reference checker.

    Shows that the two mask values the vector algorithm tests for are
    exactly the valid cases, over all 3**8 sequences.
    """
    case1_hit = False
    case2_hit = False
    for words in all_sequences():
        c = mask(words)
        if c == ALL_MASK:
            case1_hit = True
            # all 8 words are valid (either 'V' or pairs 'L', 'H')
            assert find_error_in_words(words) == '', (words, find_error_in_words(words))
        if c == ALL_BUT_ONE_MASK:
            case2_hit = True
            # the first 7 words are valid (either 'V' or pairs 'L', 'H');
            # the last word is either 'L' or 'H' (the word will be
            # re-examined in the next iteration of the algorithm)
            if words[-1] == 'H':
                assert find_error_in_words(words) == 'high surrogate 7 must be preceded by low surrogate'
            elif words[-1] == 'L':
                assert find_error_in_words(words) == ''
            else:
                assert False
    assert case1_hit
    assert case2_hit
    print("All OK")
def main():
    """Entry point: run the exhaustive proof.

    Flip run_dump to True to print the full table instead.
    """
    run_dump = False
    if run_dump:
        dump()
    else:
        proof()
if __name__ == '__main__':
    main()
|
import unittest
# get_adjacent_space_keys(area, k):
# get_moveable_space_keys(area, ls_k):
# get_moveable_adjacent_space_keys(area, k):
# get_move_spaces(area, k):
from model import Area, Unit
from combat import CombatRangeUtil
class CombatRangeUtilTest(unittest.TestCase):
    """Tests for CombatRangeUtil adjacency and movement-range helpers."""
    def setUp(self):
        # Fresh 5x5 board for every test.
        self.area = Area(5, 5)
    def test_get_adjacent_space_keys(self):
        # Interior cell has 4 orthogonal neighbours; a corner only 2.
        self.assertEqual({(0, 1), (1, 0), (1, 2), (2, 1)}, CombatRangeUtil.get_adjacent_space_keys(self.area, (1, 1)))
        self.assertEqual({(0, 1), (1, 0)}, CombatRangeUtil.get_adjacent_space_keys(self.area, (0, 0)))
    def test_get_moveable_space_keys(self):
        # Inaccessible spaces must be filtered out of the candidate set.
        self.area.spaces[(0, 1)].accessible = False
        self.assertEqual({(1, 0)}, CombatRangeUtil.get_moveable_space_keys(self.area, {(0, 1), (1, 0)}))
    def test_get_moveable_adjacent_space_keys(self):
        self.area.spaces[(0, 1)].accessible = False
        self.assertEqual({(1, 0)}, CombatRangeUtil.get_moveable_adjacent_space_keys(self.area, (0, 0)))
    def test_get_move_spaces(self):
        # NOTE(review): assumes Unit('', [], a, b) takes movement-related
        # values in its last two arguments — confirm against model.Unit.
        self.area.spaces[(2, 2)].unit = Unit('', [], 1, 0)
        expected1 = {(1, 2), (2, 1), (2, 3), (3, 2)}
        self.assertEqual(expected1, CombatRangeUtil.get_move_spaces(self.area, (2, 2)))
        self.area.spaces[(2, 2)].unit = Unit('', [], 2, 1)
        expected2 = {(1, 2), (2, 1), (2, 3), (3, 2)}
        self.assertEqual(expected2, CombatRangeUtil.get_move_spaces(self.area, (2, 2)))
        self.area.spaces[(2, 2)].unit = Unit('', [], 2, 0)
        expected3= {(2, 0), (1, 1), (2, 1), (3, 1), (0, 2), (1, 2), (3, 2), (4, 2), (1, 3), (2, 3), (3, 3), (2, 4)}
        self.assertEqual(expected3, CombatRangeUtil.get_move_spaces(self.area, (2, 2)))
if __name__ == '__main__':
unittest.main() |
"""
Ordered Dict
-> Garante que o dicionario ira ser impresso na ordem correta em uma iteraçao
"""
from collections import OrderedDict
# OrderedDict preserves insertion order, so iteration is deterministic.
dicionario = OrderedDict({'a':1, 'b':2, 'c':3, 'd':4, 'e':5, 'f':6})
for chave, valor in dicionario.items():
    print(chave, '=', valor)
print()
|
from collections import OrderedDict
from cloudshell.cli.command_template.command_template import CommandTemplate
ACTION_MAP = OrderedDict()  # no interactive prompts expected for these commands
ERROR_MAP = OrderedDict([(r'[Ee]rror:', 'Command error')])  # CLI error pattern -> raised message
# Read-only "show" commands for the switch CLI, parsed with ':' delimiters.
SWITCH_INFO = CommandTemplate('switch-info-show format model,chassis-serial', ACTION_MAP, ERROR_MAP)
SWITCH_SETUP = CommandTemplate('switch-setup-show format switch-name', ACTION_MAP, ERROR_MAP)
SOFTWARE_VERSION = CommandTemplate('software-show', ACTION_MAP, ERROR_MAP)
PORT_SHOW = CommandTemplate('port-config-show format port,speed,autoneg parsable-delim ":"', ACTION_MAP, ERROR_MAP)
PHYS_PORT_SHOW = CommandTemplate('bezel-portmap-show format port,bezel-intf parsable-delim ":"', ACTION_MAP, ERROR_MAP)
# NOTE(review): the trailing comma after "bidir," looks accidental —
# confirm the CLI tolerates it before changing the command string.
ASSOCIATIONS = CommandTemplate('port-association-show format master-ports,slave-ports,bidir, parsable-delim ":"',
                               ACTION_MAP, ERROR_MAP)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: LiSnB
# @Date: 2015-04-26 14:42:31
# @Last Modified by: LiSnB
# @Last Modified time: 2015-04-26 15:03:48
class Solution:
    """
    @param A: sorted integer array A which has m elements,
              but size of A is m+n
    @param B: sorted integer array B which has n elements
    @return: void
    """
    def mergeSortedArray(self, A, m, B, n):
        """Merge B into A in place, filling A from the back so no
        element of A is overwritten before it is read."""
        if n == 0:
            return
        if m == 0:
            A[:] = B
            return
        write = m + n - 1
        pa, pb = m - 1, n - 1
        while pa >= 0 and pb >= 0:
            if A[pa] > B[pb]:
                A[write] = A[pa]
                pa -= 1
            else:
                A[write] = B[pb]
                pb -= 1
            write -= 1
        # Any leftovers of B belong at the front of A.
        if pa < 0:
            A[:pb + 1] = B[:pb + 1]
if __name__ == '__main__':
    # Smoke test; NOTE(review): `print A` below is Python 2 syntax —
    # this script only runs under Python 2.
    s = Solution()
    A = [4,5,6, None, None]
    B = [1,2]
    s.mergeSortedArray(A, 3, B, 2)
    print A
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score
# plt.style.use('seaborn-whitegrid')
plt.style.use('dark_background')  # dark theme for every figure below
def print2(*args):
    """Print each argument, separating them with a blank line."""
    for value in args:
        print(value, end="\n\n")
# load pickle file
# NOTE(review): backslashes here only work because none form a valid
# escape sequence — prefer a raw string r"..." for Windows paths.
mydir = "D:\PythonDataScience\MachineLearning\SupervisedME\MarketingME"
features = pd.read_pickle(os.path.join(mydir, "features.pkl"))
target= pd.read_pickle(os.path.join(mydir, "target.pkl"))
print2(features.head())
# Impute missing values with the column means.
features.fillna(features.mean(),inplace=True)
target = target.loc[:, "Churn"]
# Stratify so train and test keep the same churn ratio.
X_train, X_test, Y_train, Y_test = train_test_split(features, target, test_size=0.2, stratify=target)
print2(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape)
# Initialize the model: L1 penalty drives some coefficients to zero.
logreg = LogisticRegression(penalty = "l1", C = 0.05, solver = "liblinear", random_state=1973)
model = logreg.fit(X_train, Y_train)
Y_predict_test = model.predict(X_test)
Y_predict_train = model.predict(X_train)
# Train vs. test metrics to gauge over/under-fitting at C = 0.05.
accuracyscoreTest = accuracy_score(Y_test, Y_predict_test)
accuracyscoreTrain = accuracy_score(Y_train, Y_predict_train)
precisionscoreTest = precision_score(Y_test, Y_predict_test)
precisionscoreTrain = precision_score(Y_train, Y_predict_train)
recallscoresTest = recall_score(Y_test, Y_predict_test)
recallscoreTrain = recall_score(Y_train, Y_predict_train)
print2("{} accuracy score : {:.2f}".format("Test", accuracyscoreTest),
       "{} accuracy score : {:.2f}".format("Train", accuracyscoreTrain),
       "{} precision score : {:.2f}".format("Test", precisionscoreTest),
       "{} precision score : {:.2f}".format("Train", precisionscoreTrain),
       "{} recall score : {:.2f}".format("Test", recallscoresTest),
       "{} recall score : {:.2f}".format("Train", recallscoreTrain))
# find the best C: sweep 40 values from 1 down to 0.0005 and record
# [C, nonzero coefficient count, accuracy, precision, recall] per value.
depth = np.linspace(1, 0.0005, 40).tolist()
# depth = np.arange(0.0025, 1, 0.005).tolist()
depthList = np.zeros((len(depth), 5))
depthList[:, 0] = depth
print2(depth, depthList)
for indx in range(len(depth)):
    logreg = LogisticRegression(penalty = "l1", C = depth[indx], solver = "liblinear", random_state=1973)
    model = logreg.fit(X_train, Y_train)
    yhat = logreg.predict(X_test)
    depthList[indx, 1] = np.count_nonzero(model.coef_)
    depthList[indx, 2] = accuracy_score(Y_test, yhat)
    depthList[indx, 3] = precision_score(Y_test, yhat)
    depthList[indx, 4] = recall_score(Y_test, yhat)
colname = ["C_value", "NonZero_count", "AccuracyScore", "PrecisionScore", "RecallScore"]
AssessTable = pd.DataFrame(depthList, columns=colname)
AssessTable["R/P Ratio"] = AssessTable.RecallScore / AssessTable.PrecisionScore
print(AssessTable.head())
# Plot all metrics against C to pick the regularization strength.
plt.plot(AssessTable["C_value"], AssessTable.loc[:, "AccuracyScore": "R/P Ratio"])
plt.xticks(AssessTable["C_value"], rotation=45)
plt.legend(labels=("NonZero_count", 'AccuracyScore', 'PrecisionScore','RecallScore' , 'R/P Ratio'), loc='upper right')
plt.grid()
plt.show()
'''
Title :make_predictions_1.py
Description :This script makes predictions using the 1st trained model and generates a submission file.
Author :Adil Moujahid
Date Created :20160623
Date Modified :20160625
version :0.2
usage :python make_predictions_1.py
python_version :2.7.11
'''
# modified by daniele.bagni@xilinx.com
# date 30 Aug 2018 2018
# ##################################################################################################
# USAGE
# python code/6_make_predictions.py -d ./models/alexnetBNnoLRN/m1/deploy_1_alexnetBNnoLRN.prototxt -w ./models/alexnetBNnoLRN/m1/snapshot_1_alexnetBNnoLRN__iter_12703.caffemodel
# it computes the prediction accuracy for the CNN trainined on CATS cvs DOGS by using 1000 JPEG 227x227x3 images in
# the test directory (not belonging to the trainining or validation LMDB datasets)
# ##################################################################################################
import os
import glob
import cv2
import caffe
import lmdb
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import numpy as np
from config import cats_vs_dogs_config as config
import argparse
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--description", required=True, help="description model")
ap.add_argument("-w", "--weights", required=True, help="weights caffemodel")
args = vars(ap.parse_args())
from caffe.proto import caffe_pb2
caffe.set_mode_gpu()
# ##################################################################################################
#Size of images
IMAGE_WIDTH = 227
IMAGE_HEIGHT = 227
# mean file for CATSvsDOGS training dataset
MEAN_FILE = config.MEAN_FILE # i.e. "/home/ML/cats-vs-dogs/input/mean.binaryproto"
# test dataset
TEST_DATASET = config.PROJ_JPG_DIR + "/test/*.jpg" # i.e. "/home/ML/cats-vs-dogs/input/jpg/test/*.jpg"
# ##################################################################################################
'''
#Image processing helper function
def transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT):
#Histogram Equalization
#img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
#img[:, :, 1] = cv2.equalizeHist(img[:, :, 1])
#img[:, :, 2] = cv2.equalizeHist(img[:, :, 2])
#Image Resizing
img = cv2.resize(img, (img_width, img_height), interpolation = cv2.INTER_CUBIC)
return img
'''
# ##################################################################################################
'''
#Read mean image
mean_blob = caffe_pb2.BlobProto()
with open(MEAN_FILE) as f:
mean_blob.ParseFromString(f.read())
mean_array = np.asarray(mean_blob.data, dtype=np.float32).reshape(
(mean_blob.channels, mean_blob.height, mean_blob.width))
'''
mean_array = np.zeros((3,IMAGE_WIDTH, IMAGE_HEIGHT))
ONE = np.ones((IMAGE_WIDTH, IMAGE_HEIGHT))
mean_array[0, :, :] = ONE*106
mean_array[1, :, :] = ONE*116
mean_array[2, :, :] = ONE*124
#Read model architecture and trained model's weights
caffe_description = args["description"]
caffe_model = args["weights"]
net = caffe.Net(caffe_description, caffe_model, caffe.TEST)
#Define image transformers
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape}) # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer.set_mean('data', mean_array)
'''
- The set_transpose transforms an image from (256,256,3) to (3,256,256).
- The set_channel_swap function will change the channel ordering. Caffe uses BGR image format, so we need to
change the image from RGB to BGR. If you are using OpenCV to load the image, then this step is not necessary
since OpenCV also uses the BGR format
- The set_raw_scale is needed only if you load images with caffe.io.load_image(). You do not need it if using OpenCV.
It means that the reference model operates on images in [0,255] range instead of [0,1].
'''
transformer.set_transpose('data', (2,0,1))
#transformer.set_raw_scale('data', 255) # use only with caffe.io.load_image()
#transformer.set_channel_swap('data', (2,1,0)) # do not need to use it with OpenCV
# reshape the blobs so that they match the image shape.
#net.blobs['data'].reshape(1,3,227,227)
# ##################################################################################################
'''
Making predictions
'''
#Reading image paths
test_img_paths = [img_path for img_path in glob.glob(TEST_DATASET)]
NUMEL = len(test_img_paths)
# test_ids holds the ground-truth label per image, preds the two class
# probabilities returned by the network.
test_ids = np.zeros(([NUMEL,1]))
preds = np.zeros(([NUMEL, 2]))
idx = 0
for img_path in test_img_paths:
    img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    #img = transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT) #DB: images do not need resizing
    #cv2.imshow('img_path', img)
    #cv2.waitKey(0)
    #img = caffe.io.load_image(img_path) # alternative way
    net.blobs['data'].data[...] = transformer.preprocess('data', img)
    out = net.forward()
    #best_n = net.blobs['prob'].data[0].flatten().argsort()[-1: -2:-1]
    #print("DBG INFO: ", best_n)
    pred_probas = out['prob'] # returns the probabilities of the 2 classes
    # take the last 2 elements [-2:] and reverse them [::-1]
    top5 = pred_probas.argsort()[-2:][::-1]
    filename = img_path.split("/jpg/test/")[1]
    '''
    if '/jpg/val/cat/' in img_path:
        filename = img_path.split("/jpg/val/cat/")[1]
    elif '/jpg/val/dog/' in img_path:
        filename = img_path.split("/jpg/val/dog/")[1]
    else: # other
        print 'ERROR: your path name does not contain "/jpg/val/" '
        sys.exit(0)
    '''
    # Ground-truth label is derived from the filename ('cat' -> 0, 'dog' -> 1).
    if 'cat' in filename:
        label = 0
    elif 'dog' in filename:
        label = 1
    else:
        label = -1 # non existing
    test_ids[idx] = label
    preds[idx] = pred_probas
    #print("DBG INFO ", pred_probas)
    print("IMAGE: " + img_path)
    print("PREDICTED: %d" % preds[idx].argmax())
    print("EXPECTED : %d" % test_ids[idx])
    print '-------'
    idx = idx+1
    #if idx==100 :
    # break
# ##################################################################################################
# SKLEARN REPORT
'''
precision = tp / (tp+fp) = ability of the classifier to not label as positive a sample that is negative
recall = tp / (tp+fn) = ability of the classifier to find all positive samples
F1-score = weighter harmonic mean of precision and recall. Best value approaches 1 and worst 0
support = number of occurrences
'''
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
lb = LabelBinarizer()
testY = lb.fit_transform(test_ids)
labelNames = ["cat", "dog"]
report=classification_report(testY, preds.argmax(axis=1), target_names=labelNames)
print(report)
from sklearn.metrics import accuracy_score
print('SKLEARN Accuracy = %.2f' % accuracy_score(testY, preds.argmax(axis=1)) )
# ##################################################################################################
# CHECK MANUALLY THE ACCURACY: false and positive predictions
# (recomputes the confusion counts by hand as a cross-check on sklearn;
#  Python 2 print statements below)
list_predictions = np.array(preds.argmax(axis=1)) # actual predictions
list_str_num = np.array(testY) # ground truth
tot_true = 0
tot_false = 0
cat_true = 0
cat_false = 0
dog_true = 0
dog_false = 0
for ii in range(0, NUMEL) :
    n1 = list_str_num[ii]
    n2 = list_predictions[ii]
    diff = n1 - n2
    if diff == 0 :
        tot_true = tot_true + 1
        if n1==0: #cat
            cat_true = cat_true + 1
        elif n1==1: #dog
            dog_true = dog_true + 1
    else:
        tot_false = tot_false+1
        if n1==0: #cat
            dog_false = dog_false + 1 #we predicted a "dog" but it was a "cat"
        elif n1==1: #dog
            cat_false = cat_false + 1 #we predicted a "cat" but it was a "dog"
print("\n")
print 'TOTAL NUMBER OF TRUE PREDICTIONS = ', tot_true
print 'TOTAL NUMBER OF FALSE PREDICTIONS = ', tot_false
print 'TOTAL NUMBER OF true dog PREDICTIONS = ', dog_true
print 'TOTAL NUMBER OF true cat PREDICTIONS = ', cat_true
print 'TOTAL NUMBER OF cat predicted as dog = ', dog_false
print 'TOTAL NUMBER OF dog predicted as cat = ', cat_false
if (tot_true+tot_false) != NUMEL :
    print 'ERROR: number of total false and positive is not equal to the number of processed images'
# NOTE(review): this is overall accuracy, not recall in the tp/(tp+fn) sense.
recall = float(tot_true)/(tot_true+tot_false)
print('MANUALLY COMPUTED RECALL = %.2f\n' % recall)
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from lmfit import minimize, Parameters, report_fit
import sys
sys.path.append("../common")
def get_hourly_aggregate(df, how='mean'):
    """Aggregate *df* (numeric columns, DatetimeIndex) by hour of day.

    Parameters
    ----------
    df : pandas.DataFrame with a DatetimeIndex.
    how : aggregation understood by GroupBy.agg, default 'mean'.
        Bug fix: the original ignored this parameter and always averaged;
        the default behavior is unchanged.

    Returns
    -------
    pandas.DataFrame indexed by hour (0-23).
    """
    df_c = df.copy()
    df_c["hour"] = df_c.index.hour
    return df_c.groupby("hour").agg(how)
# Weather data store
WEATHER_DATA_STORE = "../../data/hvac/weather_2013.h5"
# Weather and HVAC data store
WEATHER_HVAC_STORE = "../../data/hvac/weather_hvac_2013.h5"
assert os.path.isfile(WEATHER_HVAC_STORE), "File does not exist"
st = pd.HDFStore(WEATHER_HVAC_STORE)
building_num = st.keys()[14][1:-2]
energy = st[str(building_num) + "_Y"]
hour_usage_df = st[str(building_num) + "_X"]
header_known = hour_usage_df.columns.tolist()
header_unknown = ["t%d" % i for i in range(24)]
header_unknown.append("a1")
header_unknown.append("a2")
header_unknown.append("a3")
MIN_MINS = 0
def is_used_hvac(mins):
    """Return the HVAC usage weight for *mins* minutes of runtime.

    Currently the raw minutes are used as a continuous weight — behavior
    unchanged. The original body kept two unreachable alternatives after
    the first return (a 0/1 threshold on MIN_MINS); they are preserved
    below as comments instead of dead code.
    """
    # Disabled alternative weightings:
    #   return 1
    #   return (mins > MIN_MINS).astype('int')
    return mins
def fcn2min_time_fixed(params, x, data):
    """lmfit residual: HVAC energy model with four block-wise setpoints.

    Assumed layout of x (TODO confirm against the caller that builds X):
    x[0..23] hour-of-day indicator rows, x[24] outdoor temperature,
    x[25]/x[26] extra regressors, x[27] HVAC usage minutes.
    Hours 0-5 use t0 (gain a1), 6-11 use t1/t2 (a2), 12-17 use t2 (a3),
    18-23 use t3 (a4) — except hours 22-23 which reuse t0; NOTE(review):
    looks like a wrap past midnight, confirm it is intentional.
    Returns the element-wise squared error (no smoothness penalty).
    """
    v = params.valuesdict()
    model1 = v['a1'] * (
        ((x[24] - v['t0']) * x[0] * is_used_hvac(x[27])) +
        ((x[24] - v['t0']) * x[1] * is_used_hvac(x[27])) +
        ((x[24] - v['t0']) * x[2] * is_used_hvac(x[27])) +
        ((x[24] - v['t0']) * x[3] * is_used_hvac(x[27])) +
        ((x[24] - v['t0']) * x[4] * is_used_hvac(x[27])) +
        ((x[24] - v['t0']) * x[5] * is_used_hvac(x[27]))) + \
        v['a2']*(
        ((x[24] - v['t1']) * x[6] * is_used_hvac(x[27])) +
        ((x[24] - v['t1']) * x[7] * is_used_hvac(x[27])) +
        ((x[24] - v['t1']) * x[8] * is_used_hvac(x[27])) +
        ((x[24] - v['t1']) * x[9] * is_used_hvac(x[27])) +
        ((x[24] - v['t2']) * x[10] * is_used_hvac(x[27])) +
        ((x[24] - v['t2']) * x[11] * is_used_hvac(x[27]))) +\
        v['a3']*(
        ((x[24] - v['t2']) * x[12] * is_used_hvac(x[27])) +
        ((x[24] - v['t2']) * x[13] * is_used_hvac(x[27])) +
        ((x[24] - v['t2']) * x[14] * is_used_hvac(x[27])) +
        ((x[24] - v['t2']) * x[15] * is_used_hvac(x[27])) +
        ((x[24] - v['t2']) * x[16] * is_used_hvac(x[27])) +
        ((x[24] - v['t2']) * x[17] * is_used_hvac(x[27]))) +\
        v['a4'] * (
        ((x[24] - v['t3']) * x[18] * is_used_hvac(x[27])) +
        ((x[24] - v['t3']) * x[19] * is_used_hvac(x[27])) +
        ((x[24] - v['t3']) * x[20] * is_used_hvac(x[27])) +
        ((x[24] - v['t3']) * x[21] * is_used_hvac(x[27])) +
        ((x[24] - v['t0']) * x[22] * is_used_hvac(x[27])) +
        ((x[24] - v['t0']) * x[23] * is_used_hvac(x[27]))
    )
    model2 = v['a5'] * x[25]
    model3 = v['a6'] * x[26]
    # NOTE(review): `setpoints` is computed but never used in this variant.
    setpoints = np.array([v['t'+str(i)] for i in range(24)])
    return np.square(model1 + model2 + model3 - data)
def fcn2min_time(params, x, data):
    """lmfit residual with an independent setpoint per hour (t0..t23).

    Same assumed x layout as fcn2min_time_fixed (x[0..23] hour
    indicators, x[24] temperature, x[25]/x[26] extra regressors, x[27]
    HVAC minutes — TODO confirm). Adds a smoothness penalty:
    0.005 * std of t8..t14; the std term over t16..t20 is weighted 0.0,
    i.e. currently disabled.
    """
    v = params.valuesdict()
    model1 = v['a1'] * (
        ((x[24] - v['t0']) * x[0] * is_used_hvac(x[27])) +
        ((x[24] - v['t1']) * x[1] * is_used_hvac(x[27])) +
        ((x[24] - v['t2']) * x[2] * is_used_hvac(x[27])) +
        ((x[24] - v['t3']) * x[3] * is_used_hvac(x[27])) +
        ((x[24] - v['t4']) * x[4] * is_used_hvac(x[27])) +
        ((x[24] - v['t5']) * x[5] * is_used_hvac(x[27])) +
        ((x[24] - v['t6']) * x[6] * is_used_hvac(x[27])) +
        ((x[24] - v['t7']) * x[7] * is_used_hvac(x[27])) +
        ((x[24] - v['t8']) * x[8] * is_used_hvac(x[27])) +
        ((x[24] - v['t9']) * x[9] * is_used_hvac(x[27])) +
        ((x[24] - v['t10']) * x[10] * is_used_hvac(x[27])) +
        ((x[24] - v['t11']) * x[11] * is_used_hvac(x[27])) +
        ((x[24] - v['t12']) * x[12] * is_used_hvac(x[27])) +
        ((x[24] - v['t13']) * x[13] * is_used_hvac(x[27])) +
        ((x[24] - v['t14']) * x[14] * is_used_hvac(x[27])) +
        ((x[24] - v['t15']) * x[15] * is_used_hvac(x[27])) +
        ((x[24] - v['t16']) * x[16] * is_used_hvac(x[27])) +
        ((x[24] - v['t17']) * x[17] * is_used_hvac(x[27])) +
        ((x[24] - v['t18']) * x[18] * is_used_hvac(x[27])) +
        ((x[24] - v['t19']) * x[19] * is_used_hvac(x[27])) +
        ((x[24] - v['t20']) * x[20] * is_used_hvac(x[27])) +
        ((x[24] - v['t21']) * x[21] * is_used_hvac(x[27])) +
        ((x[24] - v['t22']) * x[22] * is_used_hvac(x[27])) +
        ((x[24] - v['t23']) * x[23] * is_used_hvac(x[27]))
    )
    model2 = v['a2'] * x[25]
    model3 = v['a3'] * x[26]
    setpoints = np.array([v['t'+str(i)] for i in range(24)])
    return np.square(model1 + model2 + model3 - data) + \
        0.005 * np.std(setpoints[8:15]) + \
        0.0 * np.std(setpoints[16:21])
def fcn2min_penalty(params, x, data):
    """Residual for the lmfit least-squares HVAC fit (no usage gating).

    Assumes x[0:24] are per-hour usage columns, x[24] the temperature and
    x[25]/x[26] extra regressors — TODO confirm against caller.
    """
    v = params.valuesdict()
    # Accumulate the per-hour (temperature - setpoint) * usage terms.
    contribution = 0.0
    for hour in range(24):
        contribution = contribution + (x[24] - v['t%d' % hour]) * x[hour]
    model1 = v['a1'] * contribution
    model2 = v['a2'] * x[25]
    model3 = v['a3'] * x[26]
    setpoints = np.array([v['t%d' % i] for i in range(24)])
    # The std() penalty is weighted 0.0, i.e. disabled but kept as a hook.
    return np.square(model1 + model2 + model3 - data) + 0.0 * np.std(setpoints)
def fcn2min(params, x, data):
    """Basic residual for the lmfit least-squares HVAC fit.

    Same model as fcn2min_penalty; the std() penalty term is weighted 0.0.
    Assumes x[0:24] are per-hour usage columns and x[24] the temperature —
    TODO confirm against caller.
    """
    v = params.valuesdict()
    contribution = 0.0
    for hour in range(24):
        contribution = contribution + (x[24] - v['t%d' % hour]) * x[hour]
    model1 = v['a1'] * contribution
    model2 = v['a2'] * x[25]
    model3 = v['a3'] * x[26]
    setpoints = np.array([v['t%d' % i] for i in range(24)])
    return np.square(model1 + model2 + model3 - data) + 0.0 * np.std(setpoints)
# create a set of Parameters
# Hourly thermostat setpoints t0..t23, bounded to a plausible range (°F).
params = Parameters()
for i in range(24):
    params.add('t%d' % i, value=70, min=60, max=90)
# Linear coefficients a1..a6 consumed by the various fcn2min_* objectives.
for i in range(1, 7):
    params.add('a%d' % i, value=1)
#params.add('constant', value=1)
x = hour_usage_df.T.values
data = energy.values
result = minimize(fcn2min_time_fixed, params, args=(x, data))
# Reconstruct the fitted prediction from the residuals.
# (Bug fix: this statement was duplicated in the original.)
final = data + result.residual
# write error report
report_fit(params)
SAVE = False
setpoints = [params['t%d' % i].value for i in range(24)]
energy_df = pd.DataFrame({"energy": energy})
energy_hourly_mean_df = get_hourly_aggregate(energy_df)
temp_hourly_mean_df = get_hourly_aggregate(hour_usage_df[["temperature"]])
from common_functions import latexify, format_axes
latexify(columns=1, fig_height=3.0)
# Panel (a): predicted vs actual scatter; panel (b): one example day.
fig, ax = plt.subplots(nrows=2)
ax[0].scatter(data, final, color="gray", alpha=0.4, s=2)
ax[0].set_xlabel("Actual energy consumption(kWh)\n(a)")
ax[0].set_ylabel("Predicted energy\n consumption(kWh)")
ax[1].plot(data[:24], label='Actual')
ax[1].plot(final[:24], label='Predicted')
ax[1].legend(loc="upper center")
ax[1].set_xlabel("Hours\n(b)")
ax[1].set_ylabel("Energy (kWh)")
format_axes(ax[0])
format_axes(ax[1])
plt.tight_layout()
import os
plt.savefig(os.path.expanduser("~/git/nilm-actionable/figures/hvac/model.pdf"))
plt.savefig(os.path.expanduser("~/git/nilm-actionable/figures/hvac/model.png"))
"""
if SAVE:
plt.savefig("pred_actual.png")
fig, ax = plt.subplots(nrows=3, sharex=True)
setpoints = [params['t%d' % i].value for i in range(24)]
ax[0].plot(range(24), setpoints)
ax[0].set_ylabel("Predicted setpoint")
#plt.ylim((50, 90))
ax[1].plot(range(24), energy_hourly_mean_df.values)
ax[1].set_ylabel("Hourly mean energy consumption")
ax[2].plot(range(24), temp_hourly_mean_df.values)
ax[2].set_ylabel("Hourly mean temperature")
plt.xlabel("Hour of day")
if SAVE:
plt.savefig("setpoint.png")
""" |
import requests
# Spoof a desktop browser User-Agent so Sogou serves the normal page.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.66'
}
url = 'https://www.sogou.com/web'
# The search keyword becomes both the 'query' parameter and the filename.
kw = input('enter a word:')
query_params = {'query': kw}
response = requests.get(url=url, params=query_params, headers=headers)
filename = kw + '.html'
# Persist the result page as UTF-8 HTML.
with open(filename, 'w', encoding='utf-8') as fp:
    fp.write(response.text)
print(filename, '保存成功!')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 14 21:19:31 2019
@author: jorge
"""
#Geolocalización
import geopy
import time
import math
##Creamos función distancia para calcularla
def distancia(lugar1, lugar2):
    """Geocode two place names and print the distance between them.

    Returns None (the function only prints its result).
    """
    # Bug fix: the original referenced an undefined global `geolocator`,
    # which raised NameError at call time. Build the geocoder locally.
    from geopy.geocoders import Nominatim
    import geopy.distance
    geolocator = Nominatim(user_agent="distancia-script")
    origen = geolocator.geocode(lugar1)
    destino = geolocator.geocode(lugar2)
    # Bug fix: geopy.distance expects (latitude, longitude); the original
    # passed (longitude, latitude), producing wrong distances.
    coord_origen = (origen.latitude, origen.longitude)
    coord_destino = (destino.latitude, destino.longitude)
    distancia = geopy.distance.distance(coord_origen, coord_destino)
    return print("La distancia entre " + lugar1 + " y " + lugar2 + " es de " + str(distancia))
# Call the function with user-supplied origin/destination cities (interactive).
lugar1 = input ("Introduce ciudad de Origen: ")
lugar2 = input ("Introduce ciudad de Destino: ")
distancia(lugar1,lugar2) |
from flask import Flask, render_template, flash, redirect, url_for, session, logging,request, session
from flask_mysqldb import MySQL
from wtforms import Form, StringField, TextAreaField, PasswordField, validators,DateTimeField,IntegerField
from twilio.rest import Client
from functools import wraps
import smtplib
app = Flask(__name__)
# Config MySQL
# NOTE(review): credentials are hard-coded; move to environment variables.
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = '123456'
app.config['MYSQL_DB'] = 'myflaskapp'
app.config['MYSQL_CURSORCLASS'] = 'DictCursor'
# init MYSQL (bug fix: dropped the dead `mysql = MySQL()` binding that was
# immediately shadowed by this app-bound instance)
mysql = MySQL(app)
#function to send mail to host and guest
def sendmail(message, sender, receiver, password):
    """Send `message` from `sender` to `receiver` via Gmail SMTP (STARTTLS)."""
    smtp_conn = smtplib.SMTP('smtp.gmail.com', 587)
    smtp_conn.starttls()
    smtp_conn.login(sender, password)
    smtp_conn.sendmail(sender, receiver, message)
    smtp_conn.quit()
#function to send message to host and guest
def sendmsg(message, receiver):
    """Send `message` as an SMS to `receiver` through the Twilio REST API."""
    # NOTE(review): the SID/token are redacted placeholders; load real
    # credentials from configuration or the environment.
    account_sid = 'AC6#########################'
    auth_token = '76c#######################'
    twilio_client = Client(account_sid, auth_token)
    twilio_client.messages.create(
        body=message,
        from_='+14124447699',
        to=receiver,
    )
@app.route('/')
def home():
    """Render the landing page."""
    return render_template('home.html')
#Register form class
class RegisterForm(Form):
    """WTForms form for registering a guest visit (guest, host and times).

    Check-in/check-out are free-text strings; their HH:MM_AM shape is
    validated separately inside the /register view.
    """
    guestname = StringField('Guest Name', [validators.Length(min=1, max=100)])
    guestphone = StringField('Guest Contact',[validators.Length(10)])
    guestemail = StringField('Guest Email', [validators.Length(min=6, max=100)])
    hostname = StringField('Host Name', [validators.Length(min=1, max=100)])
    hostphone = StringField('Host Contact', [validators.Length(10)])
    hostemail = StringField('Host Email', [validators.Length(min=6, max=100)])
    checkin = StringField('Check In Time', [validators.Length(8)])
    checkout = StringField('Check Out Time', [validators.Length(8)])
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Handle visitor registration: validate, persist, and notify the host."""
    form = RegisterForm(request.form)
    if request.method == 'POST' and form.validate():
        #get form fields
        guestname = form.guestname.data
        guestphone = form.guestphone.data
        guestemail = form.guestemail.data
        hostname = form.hostname.data
        hostphone = form.hostphone.data
        hostemail = form.hostemail.data
        # check to make sure time in particular format
        if(form.checkin.data[2]==':' and (form.checkin.data[6:]=='AM' or form.checkin.data[6:]=='PM')):
            checkin = form.checkin.data
        else:
            flash('Time should be in HH:MM_AM')
            # Bug fix: the original fell through without returning, then hit
            # UnboundLocalError on `checkin` further down.
            return render_template('register.html', form=form)
        if(form.checkout.data[2]==':' and (form.checkout.data[6:]=='AM' or form.checkout.data[6:]=='PM')):
            checkout = form.checkout.data
        else:
            flash('Time should be in HH:MM_AM')
            # Bug fix: same missing return as the check-in branch above.
            return render_template('register.html', form=form)
        # Stash visit details in the session so /checkout can build the
        # guest's summary message later.
        session['guestphone'] = guestphone
        session['guestemail'] = guestemail
        session['guestname'] = guestname
        session['hostname'] = hostname
        session['checkin'] = checkin
        visitedaddress = 'ABCD'
        #visitor details to be sent to host
        msg_host = 'Visitor Details' + '\n'+'Name - '+guestname+'\n' + 'Email - ' +guestemail + '\n' + 'Phone - ' + guestphone + '\n' + 'Checkin Time - ' + checkin + '\n' + 'Checkout Time - ' + checkout
        db = mysql.connection.cursor()
        # Execute query to save data to database
        db.execute("INSERT INTO visitor2(guestname,guestemail,guestphone,hostname,hostemail,hostphone,checkin,checkout,visitedaddress) VALUES(%s, %s, %s, %s, %s, %s,%s, %s, %s)",(guestname,guestemail,guestphone,hostname,hostemail,hostphone,checkin,checkout,visitedaddress))
        # Commit to DB
        mysql.connection.commit()
        # Close connection
        db.close()
        #sending message to host
        sendmsg(msg_host, '+91'+ hostphone)
        #sending mail to host
        sendmail(msg_host, 'neerajtesting1234@gmail.com', hostemail, '##############')
        flash('Registration Succesful! Have a Good Day')
        return redirect(url_for('home'))
    return render_template('register.html', form=form)
@app.route('/about')
def about():
    """Render the static about page."""
    return render_template('about.html')
@app.route('/checkout' , methods = ['GET', 'POST'])
def checkout():
    """Record a guest's check-out time and send them a visit summary."""
    if request.method == 'POST':
        # Get Form Fields
        guestphone = request.form['guestphone']
        guestemail = request.form['guestemail']
        checkout1 = request.form['checkout']
        visitedaddress = 'ABCD'
        if(checkout1[2]==':' and (checkout1[6:]=='AM' or checkout1[6:]=='PM')):
            checkout = checkout1
        else:
            flash('Time should be in this format HH:MM_AM')
            return render_template('login.html')
        # Create cursor
        cur = mysql.connection.cursor()
        # Get user by username
        result = cur.execute("SELECT * FROM visitor2 WHERE guestphone = %s", [guestphone])
        result1 = cur.execute("SELECT * FROM visitor2 WHERE guestemail = %s", [guestemail])
        if result > 0 and result1 > 0:
            # Bug fix: the original statement compared/assigned columns to
            # themselves ("SET checkout = checkout WHERE guestemail =
            # guestemail"), so the new time was never stored. Bind the
            # Python values as parameters and commit the change.
            cur.execute("UPDATE visitor2 SET checkout = %s WHERE guestemail = %s", (checkout, guestemail))
            mysql.connection.commit()
            # message body to be sent to guest
            session['msg_guest'] = 'Meeting Details' + '\n' + 'Name - '+ session['guestname'] +'\n' + 'Email - ' + session['guestemail'] + '\n' + 'Phone - ' + session['guestphone'] + '\n' + 'Check Time - ' + session['checkin'] +'\n'+'checkout Time - ' + checkout +'\n' + 'Host Name - ' + session['hostname'] +'\n' + 'Address Visited - ' + visitedaddress
            #sending message to guest
            sendmsg(session['msg_guest'], '+91'+ session['guestphone'])
            #sending mail to guest
            sendmail(session['msg_guest'], 'neerajtesting1234@gmail.com' ,session['guestemail'], '#####' )
            flash('Succesfully Checked Out')
            cur.close()
            return render_template('home.html')
        else:
            error = 'Username not found'
            return render_template('login.html', error=error)
    return render_template('login.html')
if __name__ == '__main__':
    # environment variables are more secure, but for easy testing the
    # session secret is hard-coded here.
    app.secret_key='secret123'
    app.run(debug=True)
|
# -*- coding: utf-8 -*-
"""File containing a Windows Registry plugin to parse the USB Device key."""
from parsers.logs import general
class WindowsUSBDeviceEventData(general.PlasoGeneralEvent):
    """Windows USB device event data attribute container.

    Attributes:
        key_path (str): Windows Registry key path.
        product (str): product of the USB device.
        serial (str): serial number of the USB device.
        subkey_name (str): name of the Windows Registry subkey.
        vendor (str): vendor of the USB device.
    """

    DATA_TYPE = 'windows:registry:usb'

    def __init__(self):
        """Initializes event data."""
        super(WindowsUSBDeviceEventData, self).__init__(data_type=self.DATA_TYPE)
        self.key_path = None
        self.product = None
        self.serial = None
        # TODO: rename subkey_name to something that closer matches its purpose.
        self.subkey_name = None
        self.vendor = None

    def SetEventAttribute(self, event):
        """Populates the container from a parsed event mapping.

        Args:
            event (dict): must contain 'key_path', 'serial' and 'subkey_name';
                'product' and 'vendor' are optional.
        """
        self.key_path = event['key_path']
        # Idiom fix: membership tests use `in event`, not `in event.keys()`.
        if 'product' in event:
            self.product = event['product']
        self.serial = event['serial']
        # TODO: rename subkey_name to something that closer matches its purpose.
        self.subkey_name = event['subkey_name']
        if 'vendor' in event:
            self.vendor = event['vendor']

    def __eq__(self, other):
        """Instances compare equal when all attribute values match."""
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # Bug fix: the original negated __eq__ directly, turning a
        # NotImplemented result into False instead of propagating it.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial user-app migration: custom BaseUser, VerifyCode and the
    AdaptorUser multi-table child. Field literals must not be edited by
    hand — they reflect the recorded migration state.
    """

    dependencies = [
        ('auth', '0006_require_contenttypes_0002'),
    ]

    operations = [
        # Custom user model replacing django.contrib.auth's default.
        migrations.CreateModel(
            name='BaseUser',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('modify_date', models.DateTimeField(auto_now=True)),
                ('username', models.CharField(default='\u672a\u586b\u5199', unique=True, max_length=50, verbose_name='\u59d3\u540d')),
                ('email', models.EmailField(max_length=255, null=True, verbose_name='\u7535\u5b50\u90ae\u7bb1')),
                ('is_active', models.BooleanField(default=True, verbose_name='\u662f\u5426\u6709\u6548')),
                ('is_admin', models.BooleanField(default=False, verbose_name='\u662f\u5426\u4e3a\u7ba1\u7406\u5458')),
                ('is_staff', models.BooleanField(default=False, verbose_name='\u53ea\u53ef\u4ee5\u67e5\u770b\u6d3b\u52a8\u7684\u5458\u5de5')),
                ('is_head_portrait', models.BooleanField(default=False, verbose_name='\u662f\u5426\u4fdd\u5b58\u4e86\u4e0a\u4f20\u540e\u7684\u5934\u50cf')),
                ('head_portrait', models.ImageField(default=b'/media/portrait/no_img/no_portrait1.jpg', upload_to=b'portrait', verbose_name='\u9009\u62e9\u5934\u50cf')),
                ('email_verified', models.BooleanField(default=False, verbose_name='\u662f\u5426\u4fdd\u5b58\u4e86\u90ae\u7bb1')),
                ('social_user_status', models.IntegerField(default=0, verbose_name='\u7b2c\u4e09\u65b9\u7528\u6237\u72b6\u6001')),
                ('social_site_name', models.IntegerField(default=0, verbose_name='\u7b2c\u4e09\u65b9\u540d\u79f0')),
                ('social_user_id', models.CharField(default='\u672a\u586b\u5199', max_length=255, verbose_name='\u7b2c\u4e09\u65b9\u7528\u6237ID')),
                ('thumbnail_portait', models.ImageField(default=b'/media/portrait/no_img/no_portrait1.jpg', upload_to=b'portrait', verbose_name='\u5934\u50cf\u7f29\u7565\u56fe')),
                ('msg_mark', models.BooleanField(default=False, verbose_name='\u6709\u65b0\u6d88\u606f')),
                ('phone', models.CharField(unique=True, max_length=128, verbose_name='\u7535\u8bdd')),
            ],
            options={
                'permissions': (('admin_management', 'manage group, permission and user'), ('staff', 'Check attendecies.')),
            },
        ),
        # One-time verification codes delivered by email or phone.
        migrations.CreateModel(
            name='VerifyCode',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('email', models.EmailField(max_length=255, null=True)),
                ('phone', models.CharField(default='', max_length=20, verbose_name='Phone')),
                ('code', models.CharField(default='', max_length=50, verbose_name='code')),
                ('type', models.CharField(default=b'0', max_length=5, verbose_name='type')),
            ],
        ),
        # Multi-table inheritance child of BaseUser, stored in table 'user'.
        migrations.CreateModel(
            name='AdaptorUser',
            fields=[
                ('baseuser_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='appuser.BaseUser')),
                ('cart_num', models.IntegerField(default=0)),
            ],
            options={
                'db_table': 'user',
            },
            bases=('appuser.baseuser',),
        ),
        # M2M fields added after creation so the auth tables exist first.
        migrations.AddField(
            model_name='baseuser',
            name='groups',
            field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups'),
        ),
        migrations.AddField(
            model_name='baseuser',
            name='user_permissions',
            field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions'),
        ),
    ]
|
print ("Exercise 3")
a = 10
b = 20
c = 30
avg = (a + b + c)/ 3
print ("Average= ", avg)
# Ordered checks, most specific first; only the first true one is printed.
# (The average of three numbers can never exceed all three unless they are
# equal, so the branches below cover the realistic outcomes.)
messages = (
    (avg > a and avg > b and avg > c, "Average is higher than a,b,c"),
    (avg > a and avg > b, "Average is higher than a, b"),
    (avg > a and avg > c, "Average is higher than a, c"),
    (avg > b and avg > c, "Average is higher than b, c"),
    (avg > a, "Average is just higher than a"),
    (avg > b, "Average is just higher than b"),
    (avg > c, "Average is just higher than c"),
)
for condition, message in messages:
    if condition:
        print (message)
        break
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = "Marcelo Souza"
__license__ = "GPL"
# Config
from configobj import ConfigObj, ConfigObjError
# Parsed main configuration (ConfigObj) — populated by the application at runtime.
cfg = None
# Parsed plugin configuration — populated alongside `cfg`.
plugin_cfg = None
#
# Read ConfigObj from file
#
def read_cfg(filename):
    """Parse `filename` into a ConfigObj.

    Returns:
        ConfigObj on success, or None when the file is missing or malformed.
    """
    try:
        return ConfigObj(filename, raise_errors=True, file_error=True)
    except (ConfigObjError, OSError):
        # file_error=True raises IOError/OSError for a missing file and
        # ConfigObjError for parse failures; anything else is a real bug
        # and should propagate.  TODO - surface the error to the caller.
        return None
|
#!/usr/bin/env python3
# encoding utf-8
from Environment import HFOEnv
import multiprocessing as mp
import argparse
from Networks import ValueNetwork
from SharedAdam import SharedAdam
from Worker import *
from ctypes import c_bool, c_double
import os
import matplotlib.pyplot as plt
import time
try:
from subprocess import DEVNULL # Python 3.
except ImportError:
DEVNULL = open(os.devnull, 'wb')
# Use this script to handle arguments and
# initialize important components of your experiment.
# These might include important parameters for your experiment, and initialization of
# your models, torch's multiprocessing methods, etc.
if __name__ == "__main__":
    # Kill any stale HFO server instances before spawning fresh workers.
    os.system("killall -9 rcssserver")
    parser = argparse.ArgumentParser()
    parser.add_argument('--save', type=str, default="MODEL_7")
    parser.add_argument('--numEpisodes', type=int, default=10000000)
    parser.add_argument('--numWorkers', type=int, default=4)
    # Bug fix: these hyper-parameters are fractional but were declared with
    # type=int, so any value passed on the command line either crashed
    # (int("0.9") raises ValueError) or was silently truncated.
    parser.add_argument('--initEpsilon', type=float, default=0.95)
    parser.add_argument('--updateTarget', type=int, default=5000)
    parser.add_argument('--trainIter', type=int, default=500)
    parser.add_argument('--lr', type=float, default=0.0005)
    parser.add_argument('--weightDecay', type=float, default=0.00001)
    parser.add_argument('--discountFactor', type=float, default=0.99)
    args = parser.parse_args()
    path = os.path.join('v', args.save)
    save_plots_every = 100000
    #PLOT
    # Shared primitives so workers can publish progress to this monitor loop.
    done = mp.Value(c_bool, False)
    print_eps = mp.Value(c_double ,args.initEpsilon)
    print_lr = mp.Value(c_double,args.lr)
    # Queues carrying per-episode stats emitted by the workers.
    time_goal = mp.Queue()
    goals = mp.Queue()
    cum_rew = mp.Queue()
    all_time_goal= []
    all_goals = []
    all_cum_rew=[]
    # Exponential-moving-average state for the three tracked metrics.
    avg_time_goal= 500
    avg_goals = 0.5
    avg_cum_rew=0
    avg_coef = 0.0005
    last_time = time.time()
    last_saved_plot = 0
    # Live 2x2 dashboard: time-to-goal, goal rate, cumulative reward, params.
    f, ax = plt.subplots(2, 2, figsize=(12, 8))
    ax = ax.flatten()
    time_line = ax[0].plot([0],[0])[0]
    ax[0].set_title("avg Time-steps to score a goal")
    goal_line = ax[1].plot([0],[0])[0]
    ax[1].set_title("Goal probability")
    rew_line = ax[2].plot([0],[0])[0]
    ax[2].set_title("Cumulative reward")
    text_params = ax[3].text(0.5,0.5,'TESTP')
    ax[3].set_title("Parameters")
    plt.ion()
    plt.show()
    #CREATE NETWORKS
    value_network = ValueNetwork()
    target_value_network = ValueNetwork()
    # Start the target network as an exact copy of the online network.
    hard_update(target_value_network, value_network)
    optimizer = SharedAdam(value_network.parameters(),lr=args.lr, weight_decay=args.weightDecay)
    counter = mp.Value('i', 0)
    iter_update = counter.value
    games_counter = mp.Value('i', 0)
    lock = mp.Lock()
    processes =[]
    #Start Training
    for idx in range(0, args.numWorkers):
        trainingArgs = (idx, args, value_network, target_value_network, optimizer, lock, counter, games_counter, done,time_goal, goals, cum_rew,print_eps, print_lr)
        p = mp.Process(target=train, args=(trainingArgs))
        p.start()
        processes.append(p)
    # Monitor loop: drain the stat queues, maintain EMAs and redraw plots
    # until a worker flips `done`.
    while True:
        #Print update
        time.sleep(0.001)
        if not time_goal.empty():
            # Warm-up uses a larger, decaying coefficient so the EMA locks on faster.
            c_coef = avg_coef*2 if len(all_time_goal)>500 else 0.025*np.exp(-len(all_cum_rew)/200)
            new_time_goal = time_goal.get()
            avg_time_goal = (1-c_coef)*(avg_time_goal) + c_coef*new_time_goal
            all_time_goal.append(avg_time_goal)
        if not goals.empty():
            c_coef = avg_coef if len(all_cum_rew)>500 else 0.01*np.exp(-len(all_cum_rew)/200)
            new_goals = goals.get()
            avg_goals = (1-c_coef)*(avg_goals) + c_coef*new_goals
            all_goals.append(avg_goals)
        if(not cum_rew.empty()):
            c_coef = avg_coef*2 if len(all_cum_rew)>500 else 0.025*np.exp(-len(all_cum_rew)/200)
            new_cum_rew = cum_rew.get()
            avg_cum_rew = (1-c_coef)*(avg_cum_rew) + c_coef*new_cum_rew
            all_cum_rew.append(avg_cum_rew)
        # Redraw the dashboard at most every 2 seconds.
        if(time.time()-last_time>2):
            time_line.set_ydata(all_time_goal)
            time_line.set_xdata(range(len(all_time_goal)))
            goal_line.set_ydata(all_goals)
            goal_line.set_xdata(range(len(all_goals)))
            rew_line.set_ydata(all_cum_rew)
            rew_line.set_xdata(range(len(all_cum_rew)))
            [axxx.relim() for axxx in ax[:-1]]
            [axxx.autoscale_view() for axxx in ax[:-1]]
            text_params.set_text('Game Counter: {}\nCounter: {}\nEpsilon: {}\nLearning Rate {}\niterations in update: {}\nTime left{}'.format(games_counter.value,counter.value,print_eps.value,print_lr.value, counter.value-iter_update,(args.numEpisodes-counter.value)/((counter.value+0.01-iter_update)/2)/3600))
            iter_update = counter.value
            f.canvas.draw()
            f.canvas.flush_events()
            last_time = time.time()
            plt.show()
        # Periodically persist the dashboard to disk.
        if(counter.value - last_saved_plot > save_plots_every):
            f.tight_layout()
            if not os.path.exists(path):
                os.makedirs(path)
            f.savefig(os.path.join(path, 'plot.png'))
            last_saved_plot = counter.value
        if(done.value):
            break
    # Reap the worker processes once training signalled completion.
    for p in processes:
        print("\nKILLING {} IT AT {}\n".format(p,counter.value)*100)
        p.join()
|
# -*- coding: utf-8 -*-
from selenium import webdriver
import eCenter_buttons
import eCenter_Login
import eCenter_Create_Data
import time
from eCenter_buttons import xpaths
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.ui import Select
#--------------------------------------------------------------------------------------------------------------------------------------------------------#
# XPath for the "End Spelling" button, looked up once at import time.
End_Spelling_Button = eCenter_buttons.xpaths['End_Spell_Btn']
def Check_for_Spellcheck():
    """Dismiss the spell-check dialog by clicking 'End Spelling' if present.

    Bug fix: the original guard compared End_Spelling_Button to itself,
    which is always True, so the click was attempted unconditionally and
    raised when the dialog was absent. Use find_elements (plural) so an
    absent button is an empty list instead of an exception.
    """
    buttons = eCenter_buttons.mydriver.find_elements_by_xpath(xpaths['End_Spell_Btn'])
    if buttons:
        buttons[0].click()
def Create_TimeSlip(pClientName, pActivityName, pReferenceName):
    """Open the new-timeslip form, fill client/task/reference, mark it
    complete and submit."""
    driver = eCenter_buttons.mydriver
    driver.find_element_by_xpath(xpaths['Menu_NewTS']).click()
    # Type each value into its combo box, in the form's tab order.
    for xpath_key, text in (
            ('Client_Combo', pClientName),
            ('Task_Combo', pActivityName),
            ('Ref_Combo', pReferenceName)):
        driver.find_element_by_xpath(xpaths[xpath_key]).send_keys(text)
    driver.find_element_by_xpath(xpaths['Complete_box']).click()
    driver.find_element_by_xpath(xpaths['Slp_Submit_btn']).click()
#def Client_combo_lists(x):
#clients = Select(eCenter_buttons.mydriver.find_element_by_xpath(xpaths['Client_Combo'])).options
#for index, value in enumerate(clients, 1):
#break
# Log in and open the new-timeslip page so the client combo can be read.
eCenter_Login.eCenterLogin('jknox2', 'password')
eCenter_buttons.mydriver.find_element_by_xpath(xpaths['Menu_NewTS']).click()
clients = Select(eCenter_buttons.mydriver.find_element_by_xpath(xpaths['Client_Combo'])).options
# NOTE(review): the option list is re-read from the live page and is lost on
# navigation — it should be cached (see surrounding TODO about dynamic lists).
for position, client in enumerate(clients):
    print(client.text)
    # Skip the first (placeholder) entry; create a slip for every real client.
    if position > 0:
        eCenter_Create_Data.Create_TimeSlip(client.text, 'bid', 'aaa-project', 'DTU Extra TIME', 'This is a test for the DTU Smoketest Timeslips', '04/1/2017', '01:02:03')
#Create_TimeSlip(clients[i], 'BID', 'aaa-project')
##Create_baseslip(pClientName, pReferenceName, pExtra, pDescription, pDate)
#eCenter_buttons.mydriver.find_element_by_xpath(xpaths['Extra_Field']).send_keys(pExtra)
#eCenter_buttons.mydriver.find_element_by_xpath(xpaths['Desc_Field']).clear()
#eCenter_buttons.mydriver.find_element_by_xpath(xpaths['Desc_Field']).send_keys(pDescription)
#eCenter_buttons.mydriver.find_element_by_xpath(xpaths['Date_Field']).clear()
#eCenter_buttons.mydriver.find_element_by_xpath(xpaths['Date_Field']).send_keys(pDate)
#eCenter_buttons.mydriver.find_element_by_xpath(xpaths['Time_Spent']).clear()
#eCenter_buttons.mydriver.find_element_by_xpath(xpaths['Time_Spent']).send_keys(pTimeSpent)
#eCenter_buttons.mydriver.find_element_by_xpath(xpaths['Complete_box']).click()
#eCenter_buttons.mydriver.find_element_by_xpath(xpaths['Slp_Submit_btn']).click()
#test
|
# Copyright 2019 Bruno P. Kinoshita
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
from tempfile import TemporaryDirectory
import pytest
from protobuf_uml_diagram import PathPath, Diagram
def test_path_path():
    """Test the converter used for the command line args."""
    converter = PathPath()
    converted = converter.convert(value="blue", param="color", ctx=None)
    assert isinstance(converted, Path)
class TestDiagramBuilder:
    """Exercises the fluent Diagram builder's validation and happy paths."""

    def test_from_file_raises(self):
        with pytest.raises(ValueError) as excinfo:
            Diagram().from_file('')
        assert 'Missing proto file' in str(excinfo.value)

    def test_to_file_raises(self):
        with pytest.raises(ValueError) as excinfo:
            Diagram().to_file(None)
        assert 'Missing output location' in str(excinfo.value)

    def test_with_format_raises(self):
        with pytest.raises(ValueError) as excinfo:
            Diagram().with_format(None)
        assert 'Missing file' in str(excinfo.value)

    def test_build_raises(self):
        # No source proto at all.
        with pytest.raises(ValueError) as excinfo:
            Diagram().build()
        assert 'No Protobuf' in str(excinfo.value)
        # Source given, but no output destination.
        with pytest.raises(ValueError) as excinfo:
            Diagram().from_file('test_data.data_messages.proto').build()
        assert 'No output' in str(excinfo.value)
        # Source and destination given, but the file format cleared.
        with pytest.raises(ValueError) as excinfo:
            builder = Diagram().from_file('test_data.data_messages.proto').to_file(Path('abc'))
            builder._file_format = None
            builder.build()
        assert 'No file format' in str(excinfo.value)

    def test_happy_path(self):
        with TemporaryDirectory() as workdir:
            target = os.path.join(workdir, 'diagram.png')
            Diagram().from_file('test_data.data_messages.proto').to_file(Path(target)).with_format('png').build()
            assert os.path.getsize(target) > 0

    def test_homonymous(self):
        """A test for when you have two 'subclasses' with same names."""
        with TemporaryDirectory() as workdir:
            target = os.path.join(workdir, 'diagram.png')
            Diagram().from_file('test_data.issue_10.proto').to_file(Path(target)).with_format('png').build()
            assert os.path.getsize(target) > 0

    def test_logs_module_not_found(self):
        with pytest.raises(ModuleNotFoundError) as excinfo:
            Diagram().from_file('piracicaba').build()
        assert 'piracicaba' in str(excinfo)

    def test_contains_dot_proto_in_middle_of_the_name(self):
        """A test where the input data contains .proto, but doesn't end with it."""
        with TemporaryDirectory() as workdir:
            target = os.path.join(workdir, 'diagram.png')
            Diagram().from_file('test_data.issue_27.proto.configs_data_pb2').to_file(Path(target)).with_format('png').build()
            assert os.path.getsize(target) > 0
|
from django.shortcuts import render
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from collections import Counter
@api_view(['POST'])
def lambda_function(request):
    """Return the elements of `question` sorted by frequency, most
    frequent first, preserving multiplicity.

    Ties keep first-encountered order, matching the original's stable sort.
    """
    if request.method == 'POST':
        data = request.data.get('question')
        tally = Counter(data)
        # Perf fix: the original expanded the Counter and re-sorted with
        # key=list.count (O(n^2)). most_common() already yields descending
        # counts with ties in insertion order — the identical ordering.
        response = [element for element, count in tally.most_common() for _ in range(count)]
        return Response({'solution': response}, status=status.HTTP_200_OK)
|
from bc4py.config import P, NewInfo
from bc4py.chain import Block, TX
from bc4py.contract.watch.checkdata import *
import logging
from threading import Thread
def start_contract_watch():
    """Flip the watch flag and spawn the daemon thread running loop()."""
    assert P.F_WATCH_CONTRACT is False
    P.F_WATCH_CONTRACT = True
    watcher = Thread(target=loop, name='Watch', daemon=True)
    watcher.start()
def loop():
    """Consume 'watch contract' events until shutdown is requested.

    Dispatches TX and Block objects to the check_* handlers; everything
    else is ignored. Runs until P.F_STOP or the watch flag is cleared.
    """
    logging.info("Watching contract start.")
    while not P.F_STOP and P.F_WATCH_CONTRACT:
        try:
            item = NewInfo.get(channel='watch contract', timeout=2)
            if isinstance(item, TX):
                check_new_tx(tx=item)
            elif isinstance(item, Block):
                check_new_block(block=item)
        except NewInfo.empty:
            # No event within the timeout — just poll again.
            pass
        except CheckWatchError as e:
            logging.error(e)
        except Exception as e:
            # Unexpected failure: log with traceback but keep watching.
            logging.error(e, exc_info=True)
    logging.info("Close watching contract.")
def close_contract_watch():
    """Signal the watcher thread to stop after its current poll."""
    assert P.F_WATCH_CONTRACT is True
    P.F_WATCH_CONTRACT = False
# Public surface: names re-exported from the watch submodules plus this
# module's start/stop helpers.
__all__ = [
    "C_Conclude",
    "C_Validator",
    "C_RequestConclude",
    "C_FinishConclude",
    "C_FinishValidator",
    "watching_tx",
    "start_contract_watch",
    "close_contract_watch",
    "CheckWatchError",
]
|
import logging
import requests
from flask import Flask
# WSGI application object served by gunicorn on App Engine (see app.yaml).
app = Flask(__name__)
@app.route('/')
@app.route('/')
def index():
    """Fetch an identity token from the GCE metadata server.

    Falls back to 'localhost' when the metadata server is unreachable,
    i.e. when running outside Google Cloud.
    """
    try:
        resp = requests.get(
            'http://metadata/computeMetadata/v1/instance/service-accounts/default/identity?audience=https%3A%2F%2Fexample.com%2F',
            headers={'Metadata-Flavor': 'Google'},
            timeout=2)
        return "ID Token %s" % resp.text
    except requests.RequestException:
        logging.info('Metadata server could not be reached, assuming local.')
        return 'localhost'
if __name__ == '__main__':
# This is used when running locally. Gunicorn is used to run the
# application on Google App Engine. See entrypoint in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True) |
#!/usr/bin/env python
"""
test file for codingbat module
This version used unittest
"""
import unittest
from codingbat import sleep_in
class Test_sleep_in(unittest.TestCase):
    """Truth table for sleep_in(weekday, vacation): we sleep in unless it
    is a weekday and not vacation."""

    def test_false_false(self):
        # Not a weekday: sleeping in is allowed.
        self.assertTrue( sleep_in(False, False) )

    def test_true_false(self):
        # Weekday and no vacation: must get up.
        self.assertFalse( sleep_in(True, False) )

    def test_false_true(self):
        self.assertTrue( sleep_in(False, True) )

    def test_true_true(self):
        # Vacation overrides the weekday.
        self.assertTrue( sleep_in(True, True) )
if __name__ == "__main__":
    # Run the suite when executed directly.
    unittest.main()
|
import abc
class AbstractAgent(abc.ABC):
    """Interface for game-playing agents: parameter export plus a policy
    mapping states to action probabilities."""

    @abc.abstractmethod
    def get_parameters(self):
        """ Returns a dictionary containing all agent parameters.

        Returns:
            Dict: the agent parameters.
        """

    @abc.abstractmethod
    def get_action_probabilities(self, state, available_actions):
        """Return the agent's policy for the given state.

        Args:
            state (np.array): current observable state
            available_actions (list[src.game.Action]): the list of available actions.

        Returns:
            Dict[src.game.Action, float]: probabilities for each actions.
        """
|
#!/usr/bin/env python
import os, sys
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
from importlib import import_module
from PhysicsTools.NanoAODTools.postprocessing.framework.postprocessor import PostProcessor
from wgFakePhotonModule import *
from PhysicsTools.NanoAODTools.postprocessing.modules.common.countHistogramsModule import *
from PhysicsTools.NanoAODTools.postprocessing.modules.common.PrefireCorr import *
from PhysicsTools.NanoAODTools.postprocessing.modules.common.puWeightProducer import *
from PhysicsTools.NanoAODTools.postprocessing.framework.crabhelper import inputFiles,runsAndLumis
# Run the NanoAOD post-processing chain over the CRAB-supplied input files,
# keeping/dropping branches per the two selection text files.
# NOTE: Python 2 syntax (print statement) — runs under a CMSSW python2 env.
p=PostProcessor(".",inputFiles(),None,"wg_fake_photon_keep_and_drop.txt",[countHistogramsModule(),wgFakePhotonModule()],provenance=True,justcount=False,noOut=False,fwkJobReport=True,jsonInput=runsAndLumis(),outputbranchsel = "wg_fake_photon_output_branch_selection.txt")
# Alternative configuration with prefire/PU weight modules, kept for reference:
#p=PostProcessor(".",inputFiles(),None,"wg_fake_photon_keep_and_drop.txt",[countHistogramsModule(),wgFakePhotonModule(),PrefCorr(),puWeight_2017()],provenance=True,justcount=False,noOut=False,fwkJobReport=True,jsonInput=runsAndLumis(),outputbranchsel = "wg_fake_photon_output_branch_selection.txt")
p.run()
print "DONE"
|
''' Find coordinate of Closest Point on Shapely Polygon '''
from shapely.geometry import Point
from shapely.geometry import Polygon
from shapely.geometry import LinearRing

# Query point and a rectangular polygon.
point = Point(0.0, 0.0)
poly = Polygon([(-1, 1), (2, 1), (2, 2), (-1, 2)])
# Work on the polygon's exterior ring so we can project onto the boundary.
exterior = LinearRing(poly.exterior.coords)
# project() gives the distance along the ring to the nearest point;
# interpolate() converts that distance back into a coordinate.
dist = exterior.project(point)
closest_point = exterior.interpolate(dist)
# (Removed a stray trailing "|" artifact that broke the syntax of this line.)
print(closest_point)
import numpy as np
import pandas as pd
import os
from PIL import Image
import glob
import torch
import torchfile
from os.path import join as pjoin
from utils.util import label_colormap
from utils.util import pad_and_crop
from scipy.io import loadmat
from torchvision import transforms
import torchvision.transforms.functional as TF
from torch.utils.data.dataset import Dataset
from data_loader.augmentations import get_composed_augmentations
import torch.nn.functional as F
import pickle
from io import BytesIO
import sys
from pathlib import Path
import matplotlib
class PcaAug(object):
    """AlexNet-style PCA color augmentation: add a random lighting shift
    along the RGB principal components to a (3, H, W) tensor."""

    _eigval = torch.Tensor([0.2175, 0.0188, 0.0045])
    _eigvec = torch.Tensor([
        [-0.5675, 0.7192, 0.4009],
        [-0.5808, -0.0045, -0.8140],
        [-0.5836, -0.6948, 0.4203],
    ])

    def __init__(self, alpha=0.1):
        # Scale of the gaussian weights applied along the eigenvectors.
        self.alpha = alpha

    def __call__(self, im):
        weights = torch.randn(3) * self.alpha
        shift = (self._eigvec * weights.expand(3, 3) * self._eigval.expand(3, 3)).sum(1)
        return im + shift.reshape(3, 1, 1)
class JPEGNoise(object):
    """Augmentation: round-trip a PIL image through an in-memory JPEG at a
    random quality to inject compression artifacts (also jitters scale
    before encoding)."""

    def __init__(self, low=30, high=99):
        # Bounds for the random JPEG quality passed to torch.randint
        # (low inclusive, high exclusive).
        self.low = low
        self.high = high

    def __call__(self, im):
        H = im.height
        W = im.width
        # Random rescale factor, at least 0.8*W. NOTE(review): both target
        # dims come from W, so the intermediate image is square — presumably
        # inputs are already square crops; confirm with callers.
        rW = max(int(0.8 * W), int(W * (1 + 0.5 * torch.randn([]))))
        im = TF.resize(im, (rW, rW))
        # Encode to JPEG in memory at a random quality, then decode again.
        buf = BytesIO()
        im.save(buf, format='JPEG', quality=torch.randint(self.low, self.high,
                                                          []).item())
        im = Image.open(buf)
        # Restore the original (H, W) size.
        im = TF.resize(im, (H, W))
        return im
def kp_normalize(H, W, kp):
    """Map pixel-space keypoints (..., 2) with (x, y) order into [-1, 1]
    coordinates (the convention used by grid_sample). Returns a copy."""
    out = kp.clone()
    out[..., 0] = 2. * out[..., 0] / (W - 1) - 1
    out[..., 1] = 2. * out[..., 1] / (H - 1) - 1
    return out
def kp_unnormalize(H, W, kp):
    """Scale keypoints given in fractional [0, 1] (x, y) coordinates up to
    pixel units for an (H, W) image. Returns a copy."""
    scaled = kp.clone()
    scaled[..., 0] = W * scaled[..., 0]
    scaled[..., 1] = H * scaled[..., 1]
    return scaled
class AnimalBase(Dataset):
    """Base dataset over a list of image files.

    In pair mode (unsupervised contrastive learning) each item is two
    independently-augmented views of the same image concatenated on the
    channel axis; otherwise it is one image plus its keypoints.

    Subclasses must set: filenames, transforms, initial_transforms,
    pair_image, crop, visualize (and keypoints/visible in supervised mode).
    """

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, index):
        im = Image.open(self.filenames[index]).convert("RGB")
        # Sentinel values for fields that only exist in supervised mode.
        kp = -1
        kp_normalized = -1  # None
        visible = -1
        if self.pair_image:  # unsupervised contrastive learning
            # randomresizecrop is the key to generate pairs of images
            img1 = self.transforms(self.initial_transforms(im))
            img2 = self.transforms(self.initial_transforms(im))
            data = torch.cat([img1, img2], dim=0)
            if self.crop != 0:  # maybe useful for datasets other than celebA/MAFL
                data = data[:, self.crop:-self.crop, self.crop:-self.crop]
        else:  # supervised postprocessing
            kp = self.keypoints[index].copy()
            data = self.transforms(self.initial_transforms(im))
            if self.crop != 0:  # maybe useful for datasets other than celebA/MAFL
                data = data[:, self.crop:-self.crop, self.crop:-self.crop]
                kp = kp - self.crop
            kp = torch.as_tensor(kp)
            C, H, W = data.shape
            kp = kp_unnormalize(H, W, kp)  # the initial form of kp is normalized to [0,1]
            kp_normalized = kp_normalize(H, W, kp)
            visible = self.visible[index]
        if self.visualize:
            # Fix: `plt` was never bound at module level (the file only does
            # `import matplotlib`), so this branch raised NameError; import
            # pyplot locally, next to the other debug-only import.
            import matplotlib.pyplot as plt
            from utils.visualization import norm_range
            plt.clf()
            fig = plt.figure()
            if self.pair_image:
                im1, im2 = torch.split(data, [3, 3], dim=0)
                ax = fig.add_subplot(121)
                ax.imshow(norm_range(im1).permute(1, 2, 0).cpu().numpy())
                ax = fig.add_subplot(122)
                ax.imshow(norm_range(im2).permute(1, 2, 0).cpu().numpy())
                print(im1.shape, im2.shape)
            else:
                ax = fig.add_subplot(111)
                ax.imshow(norm_range(data).permute(1, 2, 0).cpu().numpy())
                kp_x = kp[visible][:, 0].numpy()
                kp_y = kp[visible][:, 1].numpy()
                ax.scatter(kp_x, kp_y)
                print(data.shape)
            # NOTE(review): `vis_name` is a global defined only in the
            # __main__ block below — this branch assumes it exists; confirm
            # before enabling visualize outside that script.
            plt.savefig(os.path.join('sanity_check', vis_name + '.png'), bbox_inches='tight')
            print(self.filenames[index])
            plt.close()
        return data, visible, kp_normalized, index
class InatAve(AnimalBase):
    """iNaturalist Aves images for unsupervised (pair-image) training;
    no keypoint annotations."""

    def __init__(self, root, train=True, pair_image=True, imwidth=224, crop=0,
                 do_augmentations=True, visualize=False, imagelist=None, **kwargs):
        self.root = root
        self.imwidth = imwidth
        self.train = train
        self.pair_image = pair_image
        self.visualize = visualize
        self.crop = crop
        # Image paths come either from an explicit list file or from
        # globbing <root>/*/ for jpg files.
        if imagelist is not None:
            print('Load data from %s' % imagelist)
            with open(imagelist, 'r') as handle:
                self.filenames = [line.strip() for line in handle]
        else:
            print('Load data from %s' % self.root)
            self.filenames = glob.glob(os.path.join(self.root, '*', '*jpg'))
        print('Number of images from Inat Ave.: %d' % len(self.filenames))
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        if train and do_augmentations:
            augmentations = [
                JPEGNoise(),
                # the only augmentation added on the top of DVE
                transforms.RandomResizedCrop(self.imwidth, scale=(0.2, 1.)),
                transforms.transforms.ColorJitter(.4, .4, .4),
                transforms.ToTensor(),
                PcaAug(),
            ]
        else:
            augmentations = [transforms.ToTensor()]
        self.initial_transforms = transforms.Resize((self.imwidth, self.imwidth))
        self.transforms = transforms.Compose(augmentations + [normalize])

    def __len__(self):
        return len(self.filenames)
class CUB(AnimalBase):
    """CUB-200-2011 birds with 15 keypoints per image."""
    # place the annotations file under /datasets/CUB-200-2011/anno
    # place the train/val/test text files under /datasets/CUB-200-2011/split

    def load_annos(self):
        """Load train+val keypoint files into {filename: {kp_id: (x, y, vis)}}."""
        train_annos = torchfile.load(os.path.join(self.root, 'anno', 'train.dat'))
        val_annos = torchfile.load(os.path.join(self.root, 'anno', 'val.dat'))
        train_val_annos = {**train_annos, **val_annos}
        annos = {}
        for name, kp in train_val_annos.items():
            # torchfile yields bytes keys; decode them to str.
            name = name.decode()
            annos[name] = {}
            for idx, loc in kp.items():
                annos[name][int(idx.decode())] = tuple(loc)
        return annos

    def load_txt(self, imagetxt):
        """Return the lines of *imagetxt* without trailing newlines.

        Fix: the original called open() without closing the file handle;
        a context manager guarantees it is released.
        """
        with open(imagetxt) as f:
            return [line.rstrip('\n') for line in f]

    def __init__(self, root, train=False, val=False, test=False, imagelist=None,
                 pair_image=False, imwidth=224, crop=0, visualize=False, **kwargs):
        self.root = root
        self.imwidth = imwidth
        self.train = train
        self.val = val
        self.test = test
        self.pair_image = pair_image
        self.visualize = visualize
        self.crop = crop
        self.kp_num = 15
        # load training/val/test txt
        #train_path = os.path.join(root, 'split', 'train.txt')
        val_path = os.path.join(root, 'split', 'val.txt')
        test_path = os.path.join(root, 'split', 'test.txt')
        # get the imagelist
        annos = self.load_annos()
        if train:
            prefix = 'Train'
            print('Load image from %s' % imagelist)
            self.filenames = self.load_txt(imagelist)
        elif val:
            prefix = 'Val'
            print('Load image from %s' % val_path)
            self.filenames = self.load_txt(val_path)
        elif test:
            prefix = 'Test'
            print('Load image from %s' % test_path)
            self.filenames = self.load_txt(test_path)
        # Re-pack annotations: per image, a (15, 2) keypoint array plus a
        # boolean visibility mask (invisible keypoints are stored as (0, 0)).
        self.keypoints = []
        self.visible = []
        for fname in self.filenames:
            keypoints = []
            visible = []
            kps = annos[fname.split('/')[-1]]  # change the format of keypoints
            for idx in range(self.kp_num):
                if int(kps[idx + 1][2]) == 1:  # the keyvalue is from 1 to 15
                    keypoints.append([kps[idx + 1][0], kps[idx + 1][1]])
                    visible.append(True)
                else:
                    keypoints.append([0, 0])
                    visible.append(False)
            self.keypoints.append(np.array(keypoints))
            self.visible.append(np.array(visible))
        print('%s: number of images: %d; number of keypoints: %d' % (prefix, len(self.filenames), len(self.keypoints)))
        normalize = transforms.Normalize(mean = [0.485, 0.456, 0.406],
                                         std = [0.229, 0.224, 0.225])
        self.initial_transforms = transforms.Resize((self.imwidth, self.imwidth))
        self.transforms = transforms.Compose([transforms.ToTensor(), normalize])

    def __len__(self):
        return len(self.filenames)
if __name__ == '__main__':
    # Manual sanity check: build the requested dataset class by name and pull
    # one sample (visualize=True makes __getitem__ save a figure under
    # sanity_check/<vis_name>.png).
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", default="Helen")
    parser.add_argument("--train", action="store_true")
    parser.add_argument("--val", action="store_true")
    parser.add_argument("--test", action="store_true")
    parser.add_argument("--pair_image", action="store_true")
    parser.add_argument("--do_augmentations", action="store_true")
    parser.add_argument("--vis_name", type=str, default='check_dataloader')
    parser.add_argument("--imagelist", type=str, default='list of image to load')
    args = parser.parse_args()
    imwidth = 96
    crop = 0
    # Module-level global read by AnimalBase.__getitem__ when visualize=True.
    vis_name = args.vis_name
    kwargs = {
        "train": args.train,
        "val": args.val,
        "test": args.test,
        "pair_image": args.pair_image,
        'do_augmentations': args.do_augmentations,
        'vis_name': args.vis_name,
        "visualize": True,
        "imwidth": imwidth,
        "crop": crop,
        'imagelist': args.imagelist
    }
    # Look the dataset class up by name in this module and fetch one item.
    dataset = globals()[args.dataset](**kwargs)
    dataset[0]
|
import tensorflow as tf
class Prediction:
    """
    Classification-prediction wrapper around a trained TF1 model.
    es 2018-10-10
    """
    def __init__(self, model, width, height, channels, classes, model_path):
        self.load_model(model, width, height, channels, classes, model_path)
    def load_model(self, model, width, height, channels, classes, model_path):
        """
        Build the inference graph and restore the best saved checkpoint.
        """
        with tf.variable_scope('placeholder'):
            X = tf.placeholder(tf.float32, [None, width, height, channels])
            Y = tf.placeholder(tf.float32, [None, classes])
            Dropout = tf.placeholder(tf.float32)
            Is_training = tf.placeholder("bool")
        with tf.variable_scope('prediction'):
            logits, prediction = model.build(X, width, height, channels, classes,
                                             dropout=Dropout,
                                             is_training=Is_training)
        with tf.variable_scope('accuracy'):
            correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        # NOTE(review): `init` is assigned twice (here and again below before
        # sess.run) — the first assignment is redundant.
        init = tf.global_variables_initializer()
        # Saver used to restore the best checkpoint.
        saver = tf.train.Saver()
        # Start a session and initialize variables before restoring weights.
        sess = tf.Session()
        init = tf.global_variables_initializer()
        sess.run(init)
        # Restore the best model weights from disk.
        saver.restore(sess, model_path)
        print("[INFO] load best model: %s" % model_path)
        self.sess = sess
        # Keep graph handles for building feed_dicts later.
        self.graphs = {
            "X": X,
            "Y": Y,
            "Dropout": Dropout,
            "Is_training": Is_training,
            "logits": logits,
            "prediction": prediction,
            "accuracy": accuracy
        }
    def check_model(self, vatX, vatY):
        """
        Verify the model was loaded correctly by checking validation accuracy.
        """
        sess = self.sess
        graphs = self.graphs
        # Accuracy sanity check (inference mode: no dropout, not training).
        acc_vat = sess.run(graphs["accuracy"], feed_dict={graphs["X"]: vatX,
                                                          graphs["Y"]: vatY,
                                                          graphs["Dropout"]: 1.0,
                                                          graphs["Is_training"]: False})
        print("[INFO] best model ok: {:.6f}".format(acc_vat))
        return acc_vat > 0.5
    def prediction(self, sample):
        """
        Predict class probabilities for *sample*.
        """
        sess = self.sess
        graphs = self.graphs
        # Compute the probability of each class.
        prediction = sess.run(graphs["prediction"],
                              feed_dict={graphs["X"]: sample,
                                         graphs["Dropout"]: 1.0,
                                         graphs["Is_training"]: False})
        return prediction
|
from tkinter import *
from tkinter import ttk, filedialog
import cv2
import matplotlib
from PIL import ImageTk, Image
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
class Widget(Frame):
    """Image-processing GUI: load an image, apply an OpenCV filter chosen
    from a combobox (tuned by a slider), and plot per-channel histograms."""
    # Original RGB image (numpy array) and the filtered version shown on the
    # right-hand panel.
    IMG = None
    TempIMG = None
    def __init__(self, parent=None):
        Frame.__init__(self, parent)
        self.parent = parent
        self.pack()
        self.make_widgets()
    def make_widgets(self):
        """Build the widget tree: two image canvases, filter controls, and an
        embedded matplotlib figure for the histogram."""
        self.winfo_toplevel().title("GUI")
        self.winfo_toplevel().geometry("1200x800")
        self.LeftFrame = LabelFrame(self, text="Oryginal image", height=500, width=500)
        self.LeftFrame.grid(row=0, column=0, columnspan=5, rowspan=5)
        self.ImagePanel = Canvas(self.LeftFrame, height=500, width=300)
        self.ImagePanel.pack(expand=YES, fill=BOTH)
        self.ImageOnPanel = self.ImagePanel.create_image(0, 0, anchor=NW)
        self.Load = Button(self, text="Select an image", command=self.select_image)
        self.Load.grid(row=6, column=0)
        self.RightFrame = LabelFrame(self, text="Modified image", height=500, width=500)
        self.RightFrame.grid(row=0, column=5, columnspan=5, rowspan=5)
        self.ImagePanel2 = Canvas(self.RightFrame, height=500, width=300)
        self.ImagePanel2.pack(expand=YES, fill=BOTH)
        self.ImageOnPanel2 = self.ImagePanel2.create_image(0, 0, anchor=NW)
        self.Choose = ttk.Combobox(self, values=["Edges", "Binary Threshold", "Binary Threshold Inverse", "To Zero",
                                                 "To Zero Inverse", "Adaptive Mean Thresholding",
                                                 "Adaptive Gaussian Thresholding", "Gaussian Blur", "Avrege Blur"])
        self.Choose.current(0)
        self.Choose.grid(row=6, column=1, rowspan=3)
        self.Confirm = Button(self, text="Confirm", command=self.confirm)
        self.Confirm.grid(row=6, column=4)
        self.PlotFrame = LabelFrame(self, text="Plot", height=500, width=200)
        self.PlotFrame.grid(row=0, column=10, columnspan=5, rowspan=5)
        self.Fig = Figure()
        self.Plot = self.Fig.add_subplot(1, 1, 1)
        self.canvas = FigureCanvasTkAgg(self.Fig, self.PlotFrame)
        self.canvas.draw()
        self.canvas.get_tk_widget().pack()
        self.Hist = Button(self, text="Calculate Histogram", command=self.calc_hist)
        self.Hist.grid(row=6, column=11)
        # Threshold / strength parameter used by confirm().
        self.Slider = Scale(self, from_=0, to=255, orient=HORIZONTAL)
        self.Slider.set(128)
        self.Slider.grid(row=6, column=5)
    def select_image(self):
        """Ask for an image file, load it as RGB, and apply the current filter."""
        filename = filedialog.askopenfilename(initialdir="/", title="Select file", filetypes=(
            ("jpeg files", "*.jpg"),
            ("all files", "*.*")
        ))
        if len(filename) > 0:
            print(filename)
            tmp = cv2.imread(filename)
            # OpenCV loads BGR; convert once so everything downstream is RGB.
            self.IMG = IMG = cv2.cvtColor(tmp, cv2.COLOR_BGR2RGB)
            self.confirm()
    def show_pic(self):
        """Scale both images to fit their canvases and refresh the panels.

        NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
        Image.LANCZOS there) — confirm the pinned Pillow version.
        """
        if self.IMG.size > 0:
            img = Image.fromarray(self.IMG)
            img2 = Image.fromarray(self.TempIMG)
            h = self.ImagePanel.winfo_height()
            w = self.ImagePanel.winfo_width()
            h_ratio = h / self.IMG.shape[0]
            w_ratio = w / self.IMG.shape[1]
            if (h_ratio < 1.0) | (w_ratio < 1.0):
                if h_ratio < w_ratio:
                    ratio = h_ratio * w / w_ratio
                    img = img.resize((round(ratio), round(h)), Image.ANTIALIAS)
                    img2 = img2.resize((round(ratio), round(h)), Image.ANTIALIAS)
                else:
                    ratio = w_ratio * h / h_ratio
                    img = img.resize((round(w), round(ratio)), Image.ANTIALIAS)
                    img2 = img2.resize((round(w), round(ratio)), Image.ANTIALIAS)
            # Keep PhotoImage references on the canvases so they are not
            # garbage-collected while displayed.
            self.ImagePanel.ImgCatch = ImageTk.PhotoImage(img)
            self.ImagePanel.itemconfigure(self.ImageOnPanel, image=self.ImagePanel.ImgCatch)
            self.ImagePanel2.ImgCatch = ImageTk.PhotoImage(img2)
            self.ImagePanel2.itemconfigure(self.ImageOnPanel2, image=self.ImagePanel2.ImgCatch)
            self.canvas.draw()
    def confirm(self):
        """Apply the combobox-selected filter to IMG into TempIMG and refresh.

        NOTE(review): options 5 and 6 pass ADAPTIVE_THRESH_* constants to
        cv2.threshold, which expects a THRESH_* type flag — true adaptive
        thresholding would use cv2.adaptiveThreshold; confirm intent.
        """
        if (self.Choose.current() == 0):
            self.TempIMG = cv2.Canny(self.IMG, 50, 100)
        if (self.Choose.current() == 1):
            res, ret = cv2.threshold(
                cv2.cvtColor(self.IMG, cv2.COLOR_RGB2GRAY),
                self.Slider.get(), 255, cv2.THRESH_BINARY)
            self.TempIMG = ret
        if (self.Choose.current() == 2):
            res, ret = cv2.threshold(
                cv2.cvtColor(self.IMG, cv2.COLOR_RGB2GRAY),
                self.Slider.get(), 255, cv2.THRESH_BINARY_INV)
            self.TempIMG = ret
        if (self.Choose.current() == 3):
            res, ret = cv2.threshold(
                cv2.cvtColor(self.IMG, cv2.COLOR_RGB2GRAY),
                self.Slider.get(), 255, cv2.THRESH_TOZERO)
            self.TempIMG = ret
        if (self.Choose.current() == 4):
            res, ret = cv2.threshold(
                cv2.cvtColor(self.IMG, cv2.COLOR_RGB2GRAY),
                self.Slider.get(), 255, cv2.THRESH_TOZERO_INV)
            self.TempIMG = ret
        if (self.Choose.current() == 5):
            res, ret = cv2.threshold(
                cv2.cvtColor(self.IMG, cv2.COLOR_RGB2GRAY),
                self.Slider.get(), 255, cv2.ADAPTIVE_THRESH_MEAN_C)
            self.TempIMG = ret
        if (self.Choose.current() == 6):
            res, ret = cv2.threshold(
                cv2.cvtColor(self.IMG, cv2.COLOR_RGB2GRAY),
                self.Slider.get(), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C)
            self.TempIMG = ret
        if (self.Choose.current() == 7):
            # Slider scales the gaussian sigma.
            self.TempIMG = cv2.GaussianBlur(self.IMG, (0, 0), self.Slider.get() / 10 + 1)
        if (self.Choose.current() == 8):
            # Slider scales the averaging-kernel size relative to the image.
            kernel = (self.Slider.get() + 1) / 255
            self.TempIMG = cv2.blur(self.IMG, (int(kernel * self.IMG.shape[0]), int(kernel * self.IMG.shape[1])))
        self.show_pic()
    def calc_hist(self):
        """Plot per-channel (r, g, b) histograms of the loaded image."""
        if self.IMG.size > 0:
            self.Plot.cla()
            color = ('r', 'g', 'b')
            for i, col in enumerate(color):
                histr = cv2.calcHist([self.IMG], [i], None, [256], [0, 256])
                self.Plot.plot(histr, color=col)
            self.show_pic()
if __name__ == "__main__":
    # Build the Tk root window, attach the Widget, and start the event loop.
    root = Tk()
    something = Widget(root)
    root.mainloop()
|
#-*- coding: utf-8 -*-
import sys
import replaceparameter
import os.path
# Usage: script.py <parameter> <value> — rewrites <parameter> in every file
# named "inputparameters.<number>" in the current directory.
parameter = sys.argv[1]
value = sys.argv[2]
# NOTE(review): `unicode` is Python-2-only; under Python 3 this raises
# NameError (str.isdigit would be the closest 2/3-compatible check).
for filename in os.listdir("."):
    if filename.split(".")[0] == "inputparameters" and \
       unicode(filename.split(".")[1]).isnumeric():
        replaceparameter.replace(filename, parameter, value)
|
from django.urls import path
from .views import (
HomePageView,
AboutPageView,
BookPageView,
CodingPageView,
MusicPageView,
PeoplePageView,
PerspectivePageView,
TravelPageView,
)
# Static-page routes; the catch-all home route must stay last so the
# prefixed paths are matched first.
urlpatterns = [
    path('travel/', TravelPageView.as_view(), name='travel'),
    path('perspective/', PerspectivePageView.as_view(), name='perspective'),
    path('people/', PeoplePageView.as_view(), name='people'),
    path('music/', MusicPageView.as_view(), name='music'),
    path('coding/', CodingPageView.as_view(), name='coding'),
    path('book/', BookPageView.as_view(), name='book'),
    path('about/', AboutPageView.as_view(), name='about'),
    path('', HomePageView.as_view(), name='home'),
]
|
import heapq


def solve(A):
    """Return max over splits of (sum of the N largest elements picked from a
    prefix) minus (sum of the N smallest picked from the rest), where
    len(A) == 3*N and each side must contribute exactly N elements.

    Two sliding heaps give O(N log N): a min-heap keeps the current best N
    from the prefix, a (negated) max-heap the current best N from the suffix.
    """
    N = len(A) // 3
    # prefix[i]: maximum sum of N elements chosen from A[:N+i], i = 0..N.
    window = A[0:N]
    total = sum(window)
    prefix = [total]
    heapq.heapify(window)  # root = smallest kept element
    for i in range(N, 2 * N):
        heapq.heappush(window, A[i])
        smallest = heapq.heappop(window)
        total += A[i] - smallest
        prefix.append(total)
    # suffix[k]: minimum sum of N elements chosen from A[2N-k:], k = 0..N;
    # a max-heap is emulated by storing negated values.
    window = [-e for e in A[2 * N:]]
    total = -sum(window)
    suffix = [total]
    heapq.heapify(window)
    for i in range(2 * N - 1, N - 1, -1):
        heapq.heappush(window, -A[i])
        largest = -heapq.heappop(window)
        total += A[i] - largest
        suffix.append(total)
    # prefix[i] covers A[:N+i] and suffix[j] covers A[2N-j:]; the regions
    # are disjoint exactly when j == N - i.
    best = -10 ** 15
    for i, p in enumerate(prefix):
        best = max(best, p - suffix[N - i])
    return best


if __name__ == "__main__":
    N = int(input())  # implied by len(A); read to consume the line
    A = list(map(int, input().split()))
    print(solve(A))
import argparse
import threading
from queue import Queue
import cv2
import imutils
from imutils.video import FPS
from imutils.video import FileVideoStream
from scripts.all_behaviours import AllBehaviours
# CLI: two required video/camera sources, processed by two parallel threads.
ap = argparse.ArgumentParser()
ap.add_argument("-i1", "--input1", required=True, type=str)
ap.add_argument("-i2", "--input2", required=True, type=str)
args = vars(ap.parse_args())
def preprocess(frame, width=500):
    """Resize *frame* to a fixed width (imutils preserves the aspect ratio).

    The width is now a parameter — defaulting to 500, the previously
    hard-coded value — so callers can tune the processing resolution.
    """
    return imutils.resize(frame, width=width)
def overlayAndShow(frame, outputs):
    """Draw each behaviour's detections onto *frame* and return it.

    *outputs* is a list of per-behaviour results; each entry starts with the
    behaviour name followed by its detection tuples.
    """
    # Unpack all behaviours outputs
    for cur_behav_out in outputs:
        behav_name = cur_behav_out[0]
        cur_behav_out = cur_behav_out[1:]
        if behav_name == "Face-mask":
            for box in cur_behav_out:
                (x1, y1, x2, y2, total_conf, cls_pred) = box  # cls_pred == 0 means MASK
                cls_pred = int(cls_pred)
                # Green box for mask (cls_pred == 0), red for no-mask.
                # NOTE(review): cv2.rectangle needs integer coords — assumes
                # the detector already returns ints; confirm upstream.
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255 * (1 - cls_pred), 255 * cls_pred), 2)
    return frame
class CustomBehaviourThread(threading.Thread):
    """Worker thread: runs one behaviour (index `indexB`) for one camera
    (`camind`), reading frames from the shared input queue and pushing
    results to outputQs[camind][indexB]."""
    def __init__(self, inQ, indexB, camind, obj):
        super(CustomBehaviourThread, self).__init__()
        self.inQ = inQ
        self.indexB = indexB
        self.camind = camind
        self.obj = obj
    def run(self):
        global outputQs, NUM_VIDS, NUM_BEHAVIOURS, BEHAVIOURS_NAMES
        # NOTE(review): busy-wait loop with no sleep, and it peeks the head
        # of inQ without dequeueing (the main thread consumes the same frame)
        # — this relies on Queue's internal deque being readable.
        while True:
            if not self.inQ.empty():
                frame = list(self.inQ.queue)[0]  # basically its just inQ.get() but without removing!
                # A None frame is the end-of-stream sentinel.
                if frame is None:
                    break
                frame = preprocess(frame)
                if BEHAVIOURS_NAMES[self.indexB] == "Face-mask":
                    out = self.obj.faceMaskDetector(frame)
                    # Just for testing, uncomment below line, and comment out above line, for having fixed random
                    # output!
                    # out = ["Face-mask", (1, 1, 20, 20, 0.9, 1), (50, 50, 100, 100, 0.8, 0)]
                    outputQs[self.camind][self.indexB].put(out)
# Create a subclass of the threading class. This creates a thread for each camera, and overlays our two behaviours
# onto it. And then outputs the image.
class CustomMainThread(threading.Thread):
    """Per-camera driver thread: spawns one CustomBehaviourThread per
    behaviour, then reads frames, collects the behaviours' outputs, overlays
    them, and displays the result while tracking FPS."""
    def __init__(self, src, ind):
        super(CustomMainThread, self).__init__()
        self.src = src  # the input camera/video source link
        self.fvs = FileVideoStream(self.src, queue_size=64).start()
        self.ind = ind
        # Behaviour workers peek this same queue (see CustomBehaviourThread).
        self.inputQ = self.fvs.Q
        self.obj = AllBehaviours()
    def run(self):
        global outputQs, NUM_VIDS, NUM_BEHAVIOURS, BEHAVIOURS_NAMES
        BehavList = []
        for i in range(NUM_BEHAVIOURS):
            t = CustomBehaviourThread(self.inputQ, i, self.ind, self.obj)
            t.daemon = True
            t.start()
            BehavList.append(t)
        fpstot = FPS().start()
        while self.fvs.more():
            # take input
            frame = self.fvs.read()
            if frame is None:
                break
            frame = preprocess(frame)
            # Block until every behaviour has produced output for this frame.
            outs = []
            for i in range(NUM_BEHAVIOURS):
                out = outputQs[self.ind][i].get()
                outs.append(out)
            frame = overlayAndShow(frame, outs)
            cv2.imshow(f"cam {self.ind}", frame)
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break
            fpstot.update()
        self.fvs.stop()
        fpstot.stop()
        print(f"Fps is {round(fpstot.fps(), 2)} for video {self.ind}")
        cv2.destroyWindow(f"cam {self.ind}")
# Shared state: outputQs[camera][behaviour] carries each behaviour's results
# back to that camera's main thread.
outputQs = []
NUM_VIDS = 2
NUM_BEHAVIOURS = 1
BEHAVIOURS_NAMES = ["Face-mask"]
# One result queue per (camera, behaviour) pair.
for _ in range(NUM_VIDS):
    Blist = []
    for _ in range(NUM_BEHAVIOURS):
        q = Queue()
        Blist.append(q)
    outputQs.append(Blist)
# Launch one driver thread per video source and wait for both to finish.
src1 = args["input1"]
src2 = args["input2"]
t1 = CustomMainThread(src1, 0)
t2 = CustomMainThread(src2, 1)
t1.start()
t2.start()
t1.join()
t2.join()
|
import tensorflow as tf
import numpy as np
import os
# Force CPU execution: hide all CUDA devices from TensorFlow.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# Tiny TF1 graph demo: mul = input1 * (input2 + input3).
input1 = tf.constant(3.0)
input2 = tf.constant(2.0)
input3 = tf.constant(5.0)
intermed = tf.add(input2, input3)
mul = tf.multiply(input1, intermed)
# Fetch both tensors in a single run call; prints [15.0, 7.0].
# (Removed a stray trailing "|" artifact that broke the syntax of the print.)
with tf.Session() as sess:
    result = sess.run([mul, intermed])
    print(result)
#!/usr/bin/python
# -*- coding: UTF-8 -*-
try:
import RPi.GPIO as GPIO
except:
print ("Erreur: RPi.GPIO n'est pas disponible");
exit();
import libi2c
class Port:
    """A single Raspberry Pi GPIO pin configured for input or output,
    addressed by its physical board position."""
    IN = GPIO.IN
    OUT = GPIO.OUT

    def __init__(self, num, mode):
        self._nump = num
        self._status = mode
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(num, mode)

    def read(self):
        """Return the pin's current logic level."""
        return GPIO.input(self._nump)

    def write(self, v):
        """Drive the pin high/low; rejects non-boolean values."""
        if (v == True or v == False):
            GPIO.output(self._nump, v)
        else:
            # Fix: `print "..."` is Python-2-only syntax; the function form
            # (single argument) behaves identically on Python 2 and 3.
            print("Error : Bad value")

    def close(self):
        """Release this pin back to the system."""
        GPIO.cleanup(self._nump)
# Register map for the MCP23017 I2C I/O-expander chip:
# IODIR = direction, GPIO = input level, GPPU = pull-up, OLAT = output latch
# (A/B suffixes are the two 8-bit ports).
IODIRA=0x00;
IODIRB=0x01;
GPIOA=0x12;
GPIOB=0x13;
GPPUA=0x0C;
GPPUB=0x0D;
OLATA=0x14;
OLATB=0x15;
class ExtPort(Port):
    """A pin (0-15) on an MCP23017 I/O expander reached over I2C.
    Pins 0-7 live on port A, pins 8-15 on port B.

    Direction bit convention: 1 = input, 0 = output (MCP23017 IODIR)."""
    IN=1;
    OUT=0;
    def __init__(self, num, addr, mode):
        assert num<16 and num>=0, "Bad pin number";
        self._nump=num;
        self._i2c=libi2c.I2C(addr);
        self.config(mode);
    def _changeBit(self, bits, pos, v):
        # Return *bits* with bit *pos* cleared (v == 0) or set (v != 0).
        if (v==0):
            return bits & ~(1<<pos);
        else:
            return bits | (1<<pos);
    def config(self, mode):
        """Program this pin's direction bit in the IODIR register and cache
        the full direction byte in self._portmap."""
        if (self._nump<8):
            iodir=IODIRA;
            pos=self._nump;
        else:
            iodir=IODIRB;
            pos=self._nump%8;
        self._portmap=self._i2c.readU8(iodir);
        self._portmap=self._changeBit(self._portmap, pos, mode);
        self._i2c.writeU8(iodir, self._portmap);
    def pullup(self, branch):
        """Enable/disable the internal pull-up (input pins only)."""
        assert not (self._portmap&(1<<self._nump%8)==0),"Bad mode";
        if (self._nump<8):
            gppu=GPPUA;
            pos=self._nump;
        else:
            gppu=GPPUB;
            pos=self._nump%8;
        gppuv=self._i2c.readU8(gppu);
        gppuv=self._changeBit(gppuv, pos, branch);
        self._i2c.writeU8(gppu, gppuv);
    def read(self):
        """Return the pin's input level (input pins only)."""
        assert not (self._portmap&(1<<self._nump%8)==0),"Bad mode";
        if (self._nump<8):
            val=self._i2c.readU8(GPIOA);
            return (val>>self._nump)&(0x1);
        else:
            val=self._i2c.readU8(GPIOB);
            return (val>>(self._nump-8))&(0x1);
    def write(self, v):
        """Drive the pin (output pins only); reads the output latch first so
        only this pin's bit changes."""
        assert (self._portmap&(1<<(self._nump%8)))==0,"Bad mode";
        assert (v==1 or v==0), "Bad value";
        if (self._nump<8):
            gpioav=self._i2c.readU8(OLATA);
            gpioav=self._changeBit(gpioav, self._nump, v);
            self._i2c.writeU8(GPIOA, gpioav);
        else:
            gpiobv=self._i2c.readU8(OLATB);
            gpiobv=self._changeBit(gpiobv, self._nump%8, v);
            self._i2c.writeU8(GPIOB, gpiobv);
|
# -*- coding: utf-8 -*-
# Read an employee number, hours worked, and hourly rate, then print the pay.
# NOTE(review): raw_input and the bare print statements are Python-2-only;
# parenthesizing these prints would change the output (tuple repr), so a
# port to Python 3 must rewrite them, not just add parentheses.
number = raw_input()
salary = int(raw_input()) * float(raw_input())
print 'NUMBER =', number
print 'SALARY = U$ %.2f' % salary
|
"""
import csv
f = open('gender.csv')
data = csv.reader(f)
m = []
f = []
"""
import csv
import matplotlib.pyplot as plt
f = open('daegu2000.csv', encoding='cp949')
data = csv.reader(f)
next(data)
result = []
for row in data:
if row[-1] != '': #값이 존재한다면
if row[0].split('.')[1] == '4' and row[0].split('.')[2] == '18':
result.append(float(row[-1]))
plt.plot(result, 'hotpink')
plt.show() |
# coding=utf-8
from PIL import Image
import numpy as np
import shutil
import os
'''
_noise
Add salt noise to an image: set 5000 random pixels to white.
'''
def addNoise(img):
    """Return a PIL image equal to *img* (an H x W x C uint8-like array)
    with 5000 randomly chosen pixels set to 255.

    Fix: operate on a copy — the original aliased the input (mutating the
    caller's array) and set the writeable flag only after writing, which
    had no effect.
    """
    rows, cols, dims = img.shape
    noise_img = img.copy()  # copies are always writeable; input stays intact
    for i in range(5000):
        x = np.random.randint(0, rows)
        y = np.random.randint(0, cols)
        noise_img[x, y, :] = 255
    return Image.fromarray(np.uint8(noise_img))
def saveNoiseLabel(name):
    """Duplicate the label file `<name>.txt` as `<name>_noise.txt`.

    Keeps the noisy image's label identical to the original image's label.
    """
    src = name + ".txt"
    dst = name + "_noise.txt"
    shutil.copyfile(src, dst)
"""
This script is written for CASA 4.5.3.
Note: if you do everything in this script, you'll use up about 260 GB of space.
The final calibrated continuum MS is 3.8 GB.
"""
# Labeling setups
SB_field = 'Wa_Oph_6'
LB_field = 'Wa_Oph_6'
all_field = 'Wa_Oph_6'
SB_tag = 'SB'
LB_tag = 'LB'
all_tag = 'all'
SB_data = '/data/sandrews/LP/2016.1.00484.L/science_goal.uid___A001_Xbd4641_X1e/group.uid___A001_Xbd4641_X22/member.uid___A001_Xbd4641_X23/calibrated/calibrated_final.ms'
LB_data = '/data/sandrews/LP/2016.1.00484.L/science_goal.uid___A001_X8c5_X68/group.uid___A001_X8c5_X69/member.uid___A001_X8c5_X6a/calibrated/calibrated_final.ms'
SB_refant = 'DA49, DA59'
all_refant = 'DV09, DV24, DA61, DA59, DA49'
SB_contspws = '0~3'
LB_contspws = '0~7'
all_contspws = '0~15'
SB_mask = 'circle[[258pix,238pix], 1.4arcsec]'
all_mask = 'ellipse[[1581pix, 1378pix], [1.3arcsec,1.0arcsec], 162deg]'
##################################################################
##################################################################
## short baseline (SB) data
##################################################################
##################################################################
# split out all the data from the given field
SB_ms = SB_field+'_'+SB_tag+'.ms'
os.system('rm -rf '+SB_ms+'*')
split2(vis=SB_data, field = SB_field, outputvis=SB_ms, datacolumn='data')
# @@ initial inspection of data before spectral averaging
plotms(vis=SB_ms, xaxis='channel', yaxis='amplitude', field=SB_field,
ydatacolumn='data', avgtime='1e8', avgscan=True, avgbaseline=True,
iteraxis='spw')
# flag the CO 2-1 line
flagmanager(vis=SB_ms, mode='save', versionname='before_cont_flags')
flagchannels = '0:1700~2200'
flagdata(vis=SB_ms, mode='manual', spw=flagchannels, flagbackup=False,
field=SB_field)
# spectral averaging for continuum MS
SB_initcont = SB_field+'_'+SB_tag+'_initcont.ms'
os.system('rm -rf '+SB_initcont+'*')
split2(vis=SB_ms, field = '', spw=SB_contspws, outputvis=SB_initcont,
width=[480,8,8,8], datacolumn='data')
# restore flagged CO 2-1 line channels in the original MS
flagmanager(vis=SB_ms, mode='restore', versionname='before_cont_flags')
# @@ check that amplitude vs. uvdist looks normal
plotms(vis=SB_initcont, xaxis='uvdist', yaxis='amp', coloraxis='spw',
avgtime='30', avgchannel='1000')
### only a single EB; all looks ok
# initial imaging
SB_initcontimage = SB_field+'_'+SB_tag+'_initcontinuum'
os.system('rm -rf '+SB_initcontimage+'.*')
clean(vis=SB_initcont, imagename=SB_initcontimage, mode='mfs',
multiscale=[0,10,30,50], psfmode='clark', imagermode='csclean',
weighting='briggs', robust=0.5, gain=0.1, imsize=500, cell='0.03arcsec',
mask=SB_mask, interactive=True)
"""
cleaned for 2 cycles (200 iterations)
peak = 39.4 mJy/beam, flux = 160 mJy, rms = 153 uJy/beam, beam = 0.26 x 0.23"
peak SNR = 258
"""
# @@ Gaussian image fit for astrometry later
imfit(imagename=SB_initcontimage+'.image', mask=SB_initcontimage+'.mask')
# SELF-CALIBRATION
# make a copy of the MS
SB_selfcalp0 = SB_field+'_'+SB_tag+'_selfcalp0.ms'
os.system('rm -rf '+SB_selfcalp0)
os.system('cp -r '+SB_initcont+' '+SB_selfcalp0)
# first round of phase-only self-cal
SB_p1 = SB_field+'_'+SB_tag+'.p1'
os.system('rm -rf ' + SB_p1)
gaincal(vis=SB_selfcalp0, caltable=SB_p1, gaintype='T', combine='spw',
spw=SB_contspws, refant=SB_refant, calmode='p', solint='inf',
minsnr=2.0, minblperant=4)
# @@ look at solutions
plotcal(caltable=SB_p1, xaxis='time', yaxis='phase', spw='',
iteration='antenna', subplot=221, plotrange=[0,0,-180,180])
# apply the calibration table
applycal(vis=SB_selfcalp0, spw=SB_contspws, spwmap=[0]*4, gaintable=[SB_p1],
calwt=True, applymode='calonly', flagbackup=True, interp='linearPD')
# split out the corrected MS for another round
SB_selfcalp1 = SB_field+'_'+SB_tag+'_selfcalp1.ms'
os.system('rm -rf '+SB_selfcalp1)
split2(vis=SB_selfcalp0, outputvis=SB_selfcalp1, datacolumn='corrected')
# imaging
SB_contimagep1 = SB_field+'_'+SB_tag+'_continuump1'
os.system('rm -rf '+SB_contimagep1+'.*')
clean(vis=SB_selfcalp1, imagename=SB_contimagep1, mode='mfs', psfmode='clark',
multiscale=[0,10,30,50], imagermode='csclean', weighting='briggs',
robust=0.5, gain=0.1, imsize=500, cell='0.03arcsec', mask=SB_mask,
interactive=True)
"""
cleaned for 2 cycles (200 iterations)
peak = 40.4 mJy/beam, flux = 163 mJy, rms = 59 uJy/beam, beam = 0.26 x 0.23"
peak SNR = 685
"""
# second round of phase-only self-cal
SB_p2 = SB_field+'_'+SB_tag+'.p2'
os.system('rm -rf '+SB_p2)
# FIX: combine axis is 'scan' (singular); 'scans' is not a valid gaincal
# combine value, and every other gaincal call in this script uses 'spw,scan'
gaincal(vis=SB_selfcalp1, caltable=SB_p2, gaintype='T', combine='spw,scan',
        spw=SB_contspws, refant=SB_refant, calmode='p', solint='30s',
        minsnr=2.0, minblperant=4)
# @@ look at solutions
plotcal(caltable=SB_p2, xaxis='time', yaxis='phase', spw='',
iteration='antenna', subplot=221, plotrange=[0,0,-180,180])
# apply the calibration table
applycal(vis=SB_selfcalp1, spw=SB_contspws, spwmap=[0]*4, gaintable=[SB_p2],
calwt=True, applymode='calonly', flagbackup=True, interp='linearPD')
# split out corrected MS for another round
SB_selfcalp2 = SB_field+'_'+SB_tag+'_selfcalp2.ms'
os.system('rm -rf '+SB_selfcalp2)
split2(vis=SB_selfcalp1, outputvis=SB_selfcalp2, datacolumn='corrected')
# imaging
SB_contimagep2 = SB_field+'_'+SB_tag+'_continuump2'
os.system('rm -rf '+SB_contimagep2+'.*')
clean(vis=SB_selfcalp2, imagename=SB_contimagep2, mode='mfs', psfmode='clark',
multiscale=[0,10,30,50], imagermode='csclean', weighting='briggs',
robust=0.5, gain=0.1, imsize=500, cell='0.03arcsec', mask=SB_mask,
interactive=True)
"""
cleaned for 2 cycles (200 iterations)
peak = 40.9 mJy/beam, flux = 164 mJy, rms = 57 uJy/beam, beam = 0.26 x 0.23"
peak SNR = 717
"""
# one round of amplitude self-cal
SB_ap1 = SB_field+'_'+SB_tag+'.ap1'
os.system('rm -rf '+SB_ap1)
gaincal(vis=SB_selfcalp2, caltable=SB_ap1, gaintype='T', combine='spw',
spw=SB_contspws, refant=SB_refant, calmode='ap', gaintable=[SB_p2],
spwmap=[0]*4, solint='inf', minsnr=3.0, minblperant=4)
# @@ look at solutions
plotcal(caltable=SB_ap1, xaxis='time', yaxis='amp', spw='',
iteration='antenna', subplot=221, plotrange=[0,0,0,2])
# apply the calibration tables
applycal(vis=SB_selfcalp2, spw=SB_contspws, spwmap=[[0]*4,[0]*4],
gaintable=[SB_p2,SB_ap1], calwt=True, applymode='calonly',
flagbackup=True, interp='linearPD')
# split out a corrected MS
SB_selfcalap1 = SB_field+'_'+SB_tag+'_selfcalap1.ms'
os.system('rm -rf '+SB_selfcalap1)
split2(vis=SB_selfcalp2, outputvis=SB_selfcalap1, datacolumn='corrected')
# imaging
SB_contimageap1 = SB_field+'_'+SB_tag+'_continuumap1'
os.system('rm -rf '+SB_contimageap1+'.*')
clean(vis=SB_selfcalap1, imagename=SB_contimageap1, mode='mfs',
multiscale=[0,10,30], psfmode='clark', imagermode='csclean',
weighting='briggs', robust=0.5, gain=0.1, imsize=500, cell='0.03arcsec',
mask=SB_mask, interactive=True)
"""
cleaned for 5 cycles (500 iterations); (note different multiscale)
peak = 40.2 mJy/beam, flux = 165 mJy, rms = 45 uJy/beam, beam = 0.26 x 0.23"
peak SNR = 893
"""
# @@ Gaussian image fit for astrometry
imfit(imagename=SB_contimageap1+'.image', mask=SB_contimageap1+'.mask')
##################################################################
##################################################################
## initial look at long baseline (LB) data
##################################################################
##################################################################
# make a local copy of the long-baseline calibrated MS
LB_ms = LB_field+'_'+LB_tag+'.ms'
os.system('rm -rf '+LB_ms+'*')
os.system('cp -r '+LB_data+' '+LB_ms)
# flag the CO 2-1 line (based on its location in the short baseline data)
flagmanager(vis=LB_ms, mode='save', versionname='before_cont_flags')
flagchannels = '3:1700~2200, 7:1700~2200'
flagdata(vis=LB_ms, mode='manual', spw=flagchannels, flagbackup=False,
field=LB_field)
# spectral and time averaging for continuum MS
LB_initcont = LB_field+'_'+LB_tag+'_initcont.ms'
os.system('rm -rf '+LB_initcont+'*')
split2(vis=LB_ms, field = '', spw=LB_contspws, outputvis=LB_initcont,
width=[8,8,8,480,8,8,8,480], timebin='6s', datacolumn='data')
# restore flagged CO 2-1 line channels in this MS
flagmanager(vis=LB_ms, mode='restore', versionname='before_cont_flags')
# @@ check that amplitude vs. uvdist looks normal
plotms(vis=LB_initcont, xaxis='uvdist', yaxis='amp', coloraxis='observation',
avgtime='30', avgchannel='1000')
### the overlap between EBs looks pretty good
# initial imaging for LB execution block 0 = LB0, 2017/09/09 (C40-8)
LB0_initcontimage = LB_field+'_'+LB_tag+'0_initcontinuum'
os.system('rm -rf '+LB0_initcontimage+'.*')
clean(vis=LB_initcont, imagename=LB0_initcontimage, observation='0',
mode='mfs', multiscale=[0,10,25,50,100], psfmode='hogbom',
imagermode='csclean', weighting='briggs', robust=0.5, gain=0.3,
niter=50000, cyclefactor=5, imsize=1800, cell='0.003arcsec',
interactive=True)
"""
cleaned for 5 cycles (500 iterations); mask defined interactively
peak = 4.1 mJy/beam, flux = 155 mJy, rms = 46 uJy/beam, beam = 90 x 44 mas
peak SNR = 89
"""
# initial imaging for LB execution block 1 = LB1, 2017/09/20 (C40-9)
LB1_initcontimage = LB_field+'_'+LB_tag+'1_initcontinuum'
os.system('rm -rf '+LB1_initcontimage+'.*')
clean(vis=LB_initcont, imagename=LB1_initcontimage, observation='1',
mode='mfs', multiscale = [0,10,25,50,100], psfmode='hogbom',
imagermode='csclean', weighting='briggs', robust=0.5, gain=0.3,
niter=50000, cyclefactor=5, imsize=1800, cell='0.003arcsec',
mask=LB0_initcontimage+'.mask', interactive=True)
"""
cleaned for 5 cycles (500 iterations)
peak = 4.5 mJy/beam, flux = 149 mJy, rms = 30 uJy/beam, beam = 56 x 30 mas
peak SNR = 150
"""
# @@ Gaussian image fits for astrometry
imfit(imagename=LB0_initcontimage+'.image', mask=LB0_initcontimage+'.mask')
imfit(imagename=LB1_initcontimage+'.image', mask=LB1_initcontimage+'.mask')
##################################################################
##################################################################
## spatial alignment
##################################################################
##################################################################
# Some astrometric analysis to calculate positional shifts:
# phase centers (from listobs): [all aligned]
pc_SB = au.radec2deg('16:48:45.638000, -14.16.35.90000')
pc_LB0 = au.radec2deg('16:48:45.638000, -14.16.35.90000')
pc_LB1 = au.radec2deg('16:48:45.638000, -14.16.35.90000')
# disk centroids (guided by imfit, but estimated manually in this case):
peak_SB = au.radec2deg('16:48:45.621, -14.16.36.264')
peak_LB0 = au.radec2deg('16:48:45.618, -14.16.36.227')
peak_LB1 = au.radec2deg('16:48:45.621, -14:16:36.261')
# measure position shifts
pkoff_SB = au.angularSeparation(peak_SB[0], peak_SB[1], pc_SB[0], pc_SB[1],
True)
pkoff_LB0 = au.angularSeparation(peak_LB0[0], peak_LB0[1], pc_LB0[0],
pc_LB0[1], True)
pkoff_LB1 = au.angularSeparation(peak_LB1[0], peak_LB1[1], pc_LB1[0],
pc_LB1[1], True)
# peak offsets relative to phase centers (RA, DEC):
# SB : -0.255", -0.364"
# LB0 : -0.300", -0.327"
# LB1 : -0.255", -0.361"
# measure position shifts
shift_SB_LB0 = au.angularSeparation(peak_LB0[0], peak_LB0[1], peak_SB[0],
peak_SB[1], True)
shift_SB_LB1 = au.angularSeparation(peak_LB1[0], peak_LB1[1], peak_SB[0],
peak_SB[1], True)
shift_LB0_LB1 = au.angularSeparation(peak_LB1[0], peak_LB1[1], peak_LB0[0],
peak_LB0[1], True)
# absolute peak shifts between observations
# SB-LB0 : -45, +37 mas
# SB-LB1 : 0, +3 mas
# LB0-LB1 : +45, -34 mas
# you can see the LB0-LB1 offset, but it is made difficult by the asymmetric
# peak in the LB0 data (the peaks are clearly shifted, as is the larger-scale
# emission pattern; we need to try and correct for this)
# We need to manually align before trying a combined self-cal solution.
# The plan is to shift the LB0 data to the LB1 position (best measurement). No
# shift is necessary for the SB dataset.
# manual astrometric shift for the LB0 data
# split the LB0 data into a new MS
LB0_shift = LB_field+'_'+LB_tag+'0_shift.ms'
os.system('rm -rf '+LB0_shift)
split2(vis=LB_initcont, outputvis=LB0_shift, observation='0',
datacolumn='data')
# compute shifted phase center to account for offsets
ra_LB0_new = pc_LB0[0] - shift_LB0_LB1[1]
dec_LB0_new = pc_LB0[1] - shift_LB0_LB1[2]
# do the shift
au.deg2radec(ra_LB0_new, dec_LB0_new)
# 16:48:45.635000, -14:16:35.86600
fixvis(vis=LB0_shift, outputvis=LB0_shift, field='4',
phasecenter='ICRS 16h48m45.635s -14d16m35.866s')
# @@ check that the MS was properly updated
listobs(LB0_shift)
# now re-assign the phase center (LB1 phase center, in J2000)
radec_pc_LB1 = au.deg2radec(pc_LB1[0], pc_LB1[1])
au.ICRSToJ2000(radec_pc_LB1)
# Separation: radian = 7.66094e-08, degrees = 0.000004, arcsec = 0.015802
# Out[182]: '16:48:45.63874, -014:16:35.888425'
fixplanets(vis=LB0_shift, field='4',
direction='J2000 16h48m45.63874s -14d16m35.888425s')
# @@ check that the MS was properly updated
listobs(LB0_shift)
# We are not doing a shift on SB or LB1, but we need to convert to J2000
# split the LB1 data (only) into a new MS
LB1_ref = LB_field+'_'+LB_tag+'1_ref.ms'
os.system('rm -rf '+LB1_ref)
split2(vis=LB_initcont, outputvis=LB1_ref, observation='1', datacolumn='data')
# now re-assign the phase center
fixplanets(vis=LB1_ref, field='4',
direction='J2000 16h48m45.63874s -14d16m35.888425s')
# @@ check that the MS was properly updated
listobs(LB1_ref)
# split the SB data into a new MS
SB_ref = SB_field+'_'+SB_tag+'_ref.ms'
os.system('rm -rf '+SB_ref)
split2(vis=SB_selfcalap1, outputvis=SB_ref, datacolumn='data')
# now re-assign the phase center
fixplanets(vis=SB_ref, field='0',
direction='J2000 16h48m45.63874s -14d16m35.888425s')
# @@ check that the MS was properly updated
listobs(SB_ref)
##################################################################
##################################################################
## flux alignment
##################################################################
##################################################################
"""
Before moving on to a combined self-calibration, I want to check the quality of
the flux calibration by comparing the visibility profiles. First, I do a full
spectral average to condense the datasets, then I export the MS into a numpy
save file using the script 'ExportMS.py' and compare the deprojected,
azimuthally-averaged visibility profiles.
"""
# full spectral averaging + visibility output
os.system('rm -rf '+SB_tag+'_quick.ms*')
split2(vis=SB_ref, field='', spw='', outputvis='SB_quick.ms',
width=[8,16,16,16], datacolumn='data')
execfile('ExportMS.py') # for MSname = 'SB_quick'
os.system('rm -rf '+LB_tag+'0_quick.ms*')
split2(vis=LB0_shift, field='4', spw='0~3', outputvis='LB0_quick.ms',
width=[16,16,16,8], datacolumn='data')
execfile('ExportMS.py') # for MSname = 'LB0_quick'
os.system('rm -rf '+LB_tag+'1_quick.ms*')
split2(vis=LB1_ref, field='4', spw='4~7', outputvis='LB1_quick.ms',
width=[16,16,16,8], datacolumn='data')
execfile('ExportMS.py') # for MSname = 'LB1_quick'
# (offx, offy) = (-0.255, -0.361)" [i.e., to the SW]
# Can also make crude estimate of viewing geometry from Gaussian fits:
# deconvolved minor/major axis ratio = 240 / 328 = 0.732, so i = 42.7 degrees.
# The deconvolved PA = 161.6 degrees.
"""
With that information, we can directly examine the visibility profiles using
the script 'check_visprofiles.py' (outside CASA).
A comparison of these profiles shows that there is non-trivial discrepancies
between the calibrations. If we use SB as a reference, then we find that
LB1 is too low (by about 10%). LB0 is more problematic: it is too low by about
10% at short baselines, but much worse at long baselines. I suspect that this
is really a phase noise problem in LB0.
For the flux (amplitude) calibration, the pipeline used:
SB: J1733-1304, mean Fnu = 1.46 Jy @ 238.8 GHz
LB0: J1733-1304, mean Fnu = 1.49 Jy @ 238.8 GHz
LB1: J1733-1304, mean Fnu = 1.56 Jy @ 238.8 GHz
Updated queries (see below) find:
SB: J1733-1304, Fnu = 1.50 +/- 0.13 Jy
LB0: J1733-1304, Fnu = 1.40 +/- 0.15 Jy
LB1: J1733-1304, Fnu = 1.85 +/- 0.16 Jy
au.getALMAFlux('J1733-1304', 238.8, date='2017/05/09')
au.getALMAFlux('J1733-1304', 238.8, date='2017/09/09')
au.getALMAFlux('J1733-1304', 238.8, date='2017/09/20')
So, this suggests that the SB and LB0 calibrations are not terrible. Indeed,
I see about 10% increases in LB0 and LB1 would give good overlap on shorter
spacings, which is roughly in line with the updated calibration numbers.
We will manually scale the LB flux calibrations up into alignment.
"""
# re-scale the LB0 flux calibration
sf = 1./sqrt(1.1)
os.system('rm -rf scale_LB0.gencal*')
gencal(vis=LB0_shift, caltable='scale_LB0.gencal', caltype='amp',
parameter=[sf])
applycal(vis=LB0_shift, gaintable=['scale_LB0.gencal'], calwt=T, flagbackup=T)
# now extract the re-scaled LB0 visibilities
LB0_rescaled = LB_field+'_'+LB_tag+'0_rescaled.ms'
os.system('rm -rf '+LB0_rescaled+'*')
split2(vis=LB0_shift, outputvis=LB0_rescaled, datacolumn='corrected')
# re-scale the LB1 flux calibration
sf = 1./sqrt(1.1)
os.system('rm -rf scale_LB1.gencal*')
gencal(vis=LB1_ref, caltable='scale_LB1.gencal', caltype='amp', parameter=[sf])
applycal(vis=LB1_ref, gaintable=['scale_LB1.gencal'], calwt=T, flagbackup=T)
# now extract the re-scaled LB1 visibilities
LB1_rescaled = LB_field+'_'+LB_tag+'1_rescaled.ms'
os.system('rm -rf '+LB1_rescaled+'*')
split2(vis=LB1_ref, outputvis=LB1_rescaled, datacolumn='corrected')
##################################################################
##################################################################
## combine data and self-calibration
##################################################################
##################################################################
# concatenate all datasets
ms_list = [SB_ref, LB0_rescaled, LB1_rescaled]
all_concat = LB_field + '_concat.ms'
os.system('rm -rf '+all_concat+'*')
concat(vis=ms_list, concatvis=all_concat)
# set some imaging parameters (useful for experimentation)
robust = 0.5
gain = 0.1
imsize = 3000
cell = '0.003arcsec'
npercycle = 100
niter = 50000
cyclefactor = 5
multiscale = [0, 20, 40, 80, 160]
# initial clean
all_initcontimage = all_field +'_'+all_tag+'_initcontinuum'
os.system('rm -rf '+all_initcontimage+'.*')
clean(vis=all_concat, imagename=all_initcontimage, mode='mfs',
multiscale=multiscale, psfmode='hogbom', imagermode='csclean',
weighting='briggs', robust=robust, gain=gain, niter=niter,
cyclefactor=cyclefactor, npercycle=npercycle, imsize=imsize, cell=cell,
interactive=True, usescratch=True, mask=all_mask)
"""
cleaned for 8 cycles (800 iterations)
peak: 5.4 mJy/beam, flux: 214 mJy, rms: 27 uJy/beam, beam = 70 x 40 mas
peak SNR: 200
"""
# make a copy of the concatenated MS
all_selfcalp0 = all_field+'_'+all_tag+'_selfcalp0.ms'
os.system('rm -rf '+all_selfcalp0+'*')
os.system('cp -r '+all_concat+' '+all_selfcalp0)
# first round of phase-only self-cal
all_p1 = all_field+'_'+all_tag+'.p1'
os.system('rm -rf '+all_p1)
gaincal(vis=all_selfcalp0, caltable=all_p1, gaintype='T', combine='spw,scan',
spw=all_contspws, refant=all_refant, calmode='p', field='0',
solint='300s', minsnr=2.0, minblperant=4)
# @@ look at the solutions
plotcal(caltable=all_p1,xaxis='time',yaxis='phase',
spw='',iteration='antenna',subplot=221,plotrange=[0,0,-180,180],
timerange='2017/05/08/00~2017/05/10/00')
plotcal(caltable=all_p1,xaxis='time',yaxis='phase',
spw='',iteration='antenna',subplot=221,plotrange=[0,0,-180,180],
timerange='2017/09/08/00~2017/09/10/00')
plotcal(caltable=all_p1,xaxis='time',yaxis='phase',
spw='',iteration='antenna',subplot=221,plotrange=[0,0,-180,180],
timerange='2017/09/19/00~2017/09/21/00')
# apply the calibration table
applycal(vis=all_selfcalp0, spw=all_contspws, spwmap=[0]*12,
gaintable=[all_p1], calwt=True, applymode='calonly', flagbackup=True,
interp='linearPD')
# split out a corrected MS for another round
all_selfcalp1 = all_field+'_'+all_tag+'_selfcalp1.ms'
os.system('rm -rf '+all_selfcalp1+'*')
split2(vis=all_selfcalp0, outputvis=all_selfcalp1, datacolumn='corrected')
# image
all_contimagep1 = all_field +'_'+all_tag+'_continuum_p1'
os.system('rm -rf '+all_contimagep1+'.*')
clean(vis=all_selfcalp1, imagename=all_contimagep1, mode='mfs',
multiscale=multiscale, psfmode='hogbom', imagermode='csclean',
weighting='briggs', robust=robust, gain=gain, niter=niter,
cyclefactor=cyclefactor, npercycle=npercycle, imsize=imsize, cell=cell,
interactive=True, usescratch=True, mask=all_mask)
"""
cleaned for 12 cycles (1200 iterations)
peak: 6.8 mJy/beam, flux: 176 mJy, rms: 16.9 uJy/beam, beam = 70 x 30 mas
peak SNR: 402
"""
# second round of phase-only self-cal
all_p2 = all_field+'_'+all_tag+'.p2'
os.system('rm -rf '+all_p2)
gaincal(vis=all_selfcalp1, caltable=all_p2, gaintype='T', combine='spw,scan',
spw=all_contspws, refant=all_refant, calmode='p', field='0',
solint='120s', minsnr=2.0, minblperant=4)
# flagging <5% of the solutions for low SNR
# @@ look at the solutions
plotcal(caltable=all_p2,xaxis='time',yaxis='phase',
spw='',iteration='antenna',subplot=221,plotrange=[0,0,-180,180],
timerange='2017/05/08/00~2017/05/10/00')
plotcal(caltable=all_p2,xaxis='time',yaxis='phase',
spw='',iteration='antenna',subplot=221,plotrange=[0,0,-180,180],
timerange='2017/09/08/00~2017/09/10/00')
plotcal(caltable=all_p2,xaxis='time',yaxis='phase',
spw='',iteration='antenna',subplot=221,plotrange=[0,0,-180,180],
timerange='2017/09/19/00~2017/09/21/00')
# apply the calibration table
applycal(vis=all_selfcalp1, spw=all_contspws, spwmap=[0]*12,
gaintable=[all_p2], calwt=True, applymode='calonly', flagbackup=True,
interp='linearPD')
# split out a corrected MS for another round
all_selfcalp2 = all_field+'_'+all_tag+'_selfcalp2.ms'
os.system('rm -rf '+all_selfcalp2+'*')
split2(vis=all_selfcalp1, outputvis=all_selfcalp2, datacolumn='corrected')
# image
all_contimagep2 = all_field +'_'+all_tag+'_continuum_p2'
os.system('rm -rf '+all_contimagep2+'.*')
clean(vis=all_selfcalp2, imagename=all_contimagep2, mode='mfs',
multiscale=multiscale, psfmode='hogbom', imagermode='csclean',
weighting='briggs', robust=robust, gain=gain, niter=niter,
cyclefactor=cyclefactor, npercycle=npercycle, imsize=imsize, cell=cell,
interactive=True, usescratch=True, mask=all_mask)
"""
cleaned for 18 cycles (1800 iterations)
peak: 7.37 mJy/beam, flux: 170 mJy, rms: 15.4 uJy/beam, beam = 70 x 30 mas
peak SNR: 479
"""
# third round of phase-only self-cal
all_p3 = all_field+'_'+all_tag+'.p3'
os.system('rm -rf '+all_p3)
gaincal(vis=all_selfcalp2, caltable=all_p3, gaintype='T', combine='spw,scan',
spw=all_contspws, refant=all_refant, calmode='p', field='0',
solint='60s', minsnr=2.0, minblperant=4)
# flagging <10% of the solutions for low SNR
# @@ look at the solutions
plotcal(caltable=all_p3,xaxis='time',yaxis='phase',
spw='',iteration='antenna',subplot=221,plotrange=[0,0,-180,180],
timerange='2017/05/08/00~2017/05/10/00')
plotcal(caltable=all_p3,xaxis='time',yaxis='phase',
spw='',iteration='antenna',subplot=221,plotrange=[0,0,-180,180],
timerange='2017/09/08/00~2017/09/10/00')
plotcal(caltable=all_p3,xaxis='time',yaxis='phase',
spw='',iteration='antenna',subplot=221,plotrange=[0,0,-180,180],
timerange='2017/09/19/00~2017/09/21/00')
# apply the calibration table
applycal(vis=all_selfcalp2, spw=all_contspws, spwmap=[0]*12,
gaintable=[all_p3], calwt=True, applymode='calonly', flagbackup=True,
interp='linearPD')
# split out a corrected MS for another round
all_selfcalp3 = all_field+'_'+all_tag+'_selfcalp3.ms'
os.system('rm -rf '+all_selfcalp3+'*')
split2(vis=all_selfcalp2, outputvis=all_selfcalp3, datacolumn='corrected')
# image
all_contimagep3 = all_field +'_'+all_tag+'_continuum_p3'
os.system('rm -rf '+all_contimagep3+'.*')
clean(vis=all_selfcalp3, imagename=all_contimagep3, mode='mfs',
multiscale=multiscale, psfmode='hogbom', imagermode='csclean',
weighting='briggs', robust=robust, gain=gain, niter=niter,
cyclefactor=cyclefactor, npercycle=npercycle, imsize=imsize, cell=cell,
interactive=True, usescratch=True, mask=all_mask)
"""
cleaned for 20 cycles (2000 iterations)
peak: 7.72 mJy/beam, flux: 169 mJy, rms: 15.1 uJy/beam, beam = 70 x 30 mas
peak SNR: 511
"""
# fourth round of phase-only self-cal
all_p4 = all_field+'_'+all_tag+'.p4'
os.system('rm -rf '+all_p4)
gaincal(vis=all_selfcalp3, caltable=all_p4, gaintype='T', combine='spw,scan',
spw=all_contspws, refant=all_refant, calmode='p', field='0',
solint='30s', minsnr=2.0, minblperant=4)
# flagging about 20% of the solutions for low SNR
# @@ look at the solutions
plotcal(caltable=all_p4,xaxis='time',yaxis='phase',
spw='',iteration='antenna',subplot=221,plotrange=[0,0,-180,180],
timerange='2017/05/08/00~2017/05/10/00')
plotcal(caltable=all_p4,xaxis='time',yaxis='phase',
spw='',iteration='antenna',subplot=221,plotrange=[0,0,-180,180],
timerange='2017/09/08/00~2017/09/10/00')
plotcal(caltable=all_p4,xaxis='time',yaxis='phase',
spw='',iteration='antenna',subplot=221,plotrange=[0,0,-180,180],
timerange='2017/09/19/00~2017/09/21/00')
# apply calibration table
applycal(vis=all_selfcalp3, spw=all_contspws, spwmap=[0]*12,
gaintable=[all_p4], calwt=True, applymode='calonly', flagbackup=True,
interp='linearPD')
# split out a corrected MS for another round
all_selfcalp4 = all_field+'_'+all_tag+'_selfcalp4.ms'
os.system('rm -rf '+all_selfcalp4+'*')
split2(vis=all_selfcalp3, outputvis=all_selfcalp4, datacolumn='corrected')
# image
all_contimagep4 = all_field +'_'+all_tag+'_continuum_p4'
os.system('rm -rf '+all_contimagep4+'.*')
clean(vis=all_selfcalp4, imagename=all_contimagep4, mode='mfs',
multiscale=multiscale, psfmode='hogbom', imagermode='csclean',
weighting='briggs', robust=robust, gain=gain, niter=niter,
cyclefactor=cyclefactor, npercycle=npercycle, imsize=imsize, cell=cell,
interactive=True, usescratch=True, mask=all_mask)
"""
cleaned for 25 cycles (2500 iterations)
peak: 7.92 mJy/beam, flux: 168 mJy, rms: 15.1 uJy/beam, beam = 70 x 30 mas
peak SNR: 525
"""
# stopping phase-only self-cal here; improvements are modest, and when we go to
# shorter solution intervals there are too many flagged solutions
# first round of amplitude self-cal
all_ap1 = all_field+'_'+all_tag+'.ap1'
os.system('rm -rf '+all_ap1)
gaincal(vis=all_selfcalp4, caltable=all_ap1, gaintype='T', combine='spw,scan',
spw=all_contspws, refant=all_refant, calmode='ap', gaintable=[all_p4],
spwmap=[0]*12, solint='300s', minsnr=3.0, minblperant=4)
# @@ look at the solutions
plotcal(caltable=all_ap1,xaxis='time',yaxis='amp',
spw='',iteration='antenna',subplot=221,plotrange=[0,0,0,2],
timerange='2017/05/08/00~2017/05/10/00')
plotcal(caltable=all_ap1,xaxis='time',yaxis='amp',
spw='',iteration='antenna',subplot=221,plotrange=[0,0,0,2],
timerange='2017/09/08/00~2017/09/10/00')
plotcal(caltable=all_ap1,xaxis='time',yaxis='amp',
spw='',iteration='antenna',subplot=221,plotrange=[0,0,0,2],
timerange='2017/09/19/00~2017/09/21/00')
# apply calibration tables
applycal(vis=all_selfcalp4, spw=all_contspws, spwmap=[[0]*12,[0]*12],
gaintable=[all_p4,all_ap1], calwt=True, applymode='calonly',
flagbackup=True, interp='linearPD')
# split out a corrected MS
all_selfcalap1 = all_field+'_'+all_tag+'_selfcalap1.ms'
os.system('rm -rf '+all_selfcalap1)
split2(vis=all_selfcalp4, outputvis=all_selfcalap1, datacolumn='corrected')
# image
all_contimageap1 = all_field +'_'+all_tag+'_continuum_ap1'
os.system('rm -rf '+all_contimageap1+'.*')
clean(vis=all_selfcalap1, imagename=all_contimageap1, mode='mfs',
multiscale=multiscale, psfmode='hogbom', imagermode='csclean',
weighting='briggs', robust=robust, gain=gain, niter=niter,
cyclefactor=cyclefactor, npercycle=npercycle, imsize=imsize, cell=cell,
interactive=True, usescratch=True, mask=all_mask)
"""
cleaned deep for 30 cycles (3000 iterations); <30 uJy/beam residuals
peak: 7.55 mJy/beam, flux: 168 mJy, rms: 14.3 uJy/beam, beam = 70 x 30 mas
peak SNR = 528.
"""
# split out the "FINAL" self-calibrated MS
all_selfcalfinal = all_field+'_'+'combined'+'_selfcal_final.ms'
os.system('rm -rf '+all_selfcalfinal)
split2(vis=all_selfcalap1, outputvis=all_selfcalfinal, datacolumn='data')
"""
Worthwhile to take a look at the self-calibrated visibilities:
os.system('rm -rf combined_quick.ms*')
split2(vis='Wa_Oph_6_combined_selfcal_final.ms', field='0', spw='0~11',
outputvis='combined_quick.ms',
width=[8,16,16,16, 16,16,16,8, 16,16,16,8],
datacolumn='data')
execfile('ExportMS.py') # for MSname = 'combined_quick'
pc_all = au.radec2deg('16:48:45.638470, -14.16.35.88842')
peak_all = au.radec2deg('16:48:45.622, -14.16.36.248')
offsets = au.angularSeparation(peak_all[0], peak_all[1], pc_all[0],
pc_all[1], True)
A Gaussian image-plane fit finds some updated geometric parameters and offsets:
incl = 44.7
PA = 169.
offx = -0.2471
offy = -0.3596
Its pretty awesome-looking.
"""
# play with the imaging...
# ------ Imaging ------ #
robust = 0.0
cyclefactor = 5
npercycle = 1000
uvtaper = True
taper0 = ['30mas']
taper1 = ['23mas']
# FIX: the original read `outertaper = taper2`, but only taper0/taper1 are
# defined (NameError); the `comments` tag below says 'taper1', so use taper1
outertaper = taper1
multiscale0 = [0, 20, 40, 80, 120, 360]
multiscale1 = [0, 12, 35, 60]
multiscale2 = [0, 12, 24, 48, 96, 192]
multiscale3 = [0, 12, 24, 48, 96, 192, 384]
multiscale = multiscale0
comments = 'rob00_ms0_taper1'
# --------------------- #
# image
test_image = LB_field +'_'+'all'+'_continuum_'+comments
os.system('rm -rf '+test_image+'.*')
clean(vis=all_selfcalfinal, imagename=test_image, mode='mfs',
      multiscale=multiscale, psfmode='hogbom', imagermode='csclean',
      weighting='briggs', robust=robust, gain=gain, niter=niter,
      cyclefactor=cyclefactor, npercycle=npercycle, imsize=imsize, cell=cell,
      uvtaper=uvtaper, outertaper=outertaper, interactive=True,
      usescratch=True, mask=all_mask)
"""
Comments on imaging tests: TBD
"""
|
from django.db import models
class Employee(models.Model):
    """Minimal employee record: numeric id, short name, integer salary."""

    eid = models.IntegerField()
    ename = models.CharField(max_length=10)
    esal = models.IntegerField()
|
from django.db import models
# Create your models here
class Contact(models.Model):
    """One contact-form submission (auto-numbered)."""

    srno = models.AutoField(primary_key=True)
    name = models.CharField(max_length=250)
    email = models.CharField(max_length=250)
    message = models.TextField()

    def __str__(self):
        # shown in the admin list, e.g. "Message from Alice"
        return f'Message from {self.name}'
|
# -*- coding: utf-8 -*-
from dp_tornado.engine.schema import Table as dpTable
from dp_tornado.engine.schema import Schema as dpSchema
from dp_tornado.engine.schema import Attribute as dpAttribute
class FieldsSchema(dpTable):
    """dp_tornado schema table exercising every supported dpAttribute column type.

    Declarative only: each attribute below defines one column of the MyISAM
    table `fields`, named after the MySQL data type it demonstrates.
    """
    __table_name__ = 'fields'
    __engine__ = 'MyISAM'
    # auto-increment, unsigned, non-null primary key
    PK = dpAttribute.field(dpAttribute.DataType.BIGINT, ai=True, pk=True, nn=True, un=True, comment='Primary Key')
    # integer family
    INT = dpAttribute.field(dpAttribute.DataType.INT)
    TINYINT = dpAttribute.field(dpAttribute.DataType.TINYINT)
    SMALLINT = dpAttribute.field(dpAttribute.DataType.SMALLINT)
    MEDIUMINT = dpAttribute.field(dpAttribute.DataType.MEDIUMINT)
    BIGINT = dpAttribute.field(dpAttribute.DataType.BIGINT)
    # floating point / fixed point (DECIMAL has precision 10, scale 2)
    DOUBLE = dpAttribute.field(dpAttribute.DataType.DOUBLE)
    FLOAT = dpAttribute.field(dpAttribute.DataType.FLOAT)
    DECIMAL = dpAttribute.field(dpAttribute.DataType.DECIMAL(10, 2))
    # character / text types (CHAR and VARCHAR carry explicit lengths)
    CHAR = dpAttribute.field(dpAttribute.DataType.CHAR(8))
    VARCHAR = dpAttribute.field(dpAttribute.DataType.VARCHAR(32))
    TEXT = dpAttribute.field(dpAttribute.DataType.TEXT)
    TINYTEXT = dpAttribute.field(dpAttribute.DataType.TINYTEXT)
    MEDIUMTEXT = dpAttribute.field(dpAttribute.DataType.MEDIUMTEXT)
    LONGTEXT = dpAttribute.field(dpAttribute.DataType.LONGTEXT)
    # closed value set
    ENUM = dpAttribute.field(dpAttribute.DataType.ENUM('A', 'B', 'C', 'D'))
    # binary blob family
    BLOB = dpAttribute.field(dpAttribute.DataType.BLOB)
    LONGBLOB = dpAttribute.field(dpAttribute.DataType.LONGBLOB)
    MEDIUMBLOB = dpAttribute.field(dpAttribute.DataType.MEDIUMBLOB)
    TINYBLOB = dpAttribute.field(dpAttribute.DataType.TINYBLOB)
    # date / time family
    DATETIME = dpAttribute.field(dpAttribute.DataType.DATETIME)
    DATE = dpAttribute.field(dpAttribute.DataType.DATE)
    TIME = dpAttribute.field(dpAttribute.DataType.TIME)
    TIMESTAMP = dpAttribute.field(dpAttribute.DataType.TIMESTAMP)
    YEAR = dpAttribute.field(dpAttribute.DataType.YEAR)
    # fixed / variable length binary
    BINARY = dpAttribute.field(dpAttribute.DataType.BINARY(6))
    VARBINARY = dpAttribute.field(dpAttribute.DataType.VARBINARY(64))
    # FULLTEXT indexes over the two string demo columns
    idx_fields_char = dpAttribute.index(dpAttribute.IndexType.FULLTEXT, 'CHAR')
    idx_fields_varchar = dpAttribute.index(dpAttribute.IndexType.FULLTEXT, 'VARCHAR')
|
# -*- coding: utf-8 -*-
# LtData
# Copyright (C) 2010 Salvo "LtWorf" Tomaselli
#
# Relation is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# author Salvo "LtWorf" Tomaselli <tiposchi@tiscali.it>
import json
import sys
import os
config = {}
messages = {}
def init():
    """Initialize the plugin by loading the reaction database from disk."""
    load()
#[16:42] <tosky> e che è, "Frau Blücher" ?
#[16:42] <Straker> Iiiihhh.
#[16:42] <LtWorf> iiiiiiiiiiiiiiiihhhhh
#[16:42] <salvin> hihihih
def save():
    """Persist the in-memory reaction table to disk as JSON.

    Replaces the 'reacts' database file under config['files'].
    """
    # `open` + context manager instead of the Python-2-only `file()` builtin;
    # guarantees the handle is closed even if json.dump raises.
    with open("%s/reacts" % config['files'], 'w') as f:
        json.dump(messages, f)
def load():
    """Load the reaction table from disk into the module-level `messages` dict."""
    global messages
    # `open` + context manager instead of the Python-2-only `file()` builtin.
    with open("%s/reacts" % config['files']) as f:
        messages = json.load(f)
def sendmsg(source, recip, text):
    """Handle an incoming message.

    "<control>addreact key#reply" stores a reaction (an empty reply deletes
    the key); any other text is matched against the stored keys and every
    matching reply is returned, joined by newlines.
    """
    if text.startswith(config['control'] + "addreact "):
        react = text.split(" ", 1)[1].strip()
        parts = react.split('#', 1)
        if len(parts) != 2:
            return "Grazie del tuo contributo %s, nessuno si ricorderà di te" % source
        # Bug fix: keys are stored lowercased, but the delete branch compared
        # the raw key, so "Foo#" could never remove the "foo" entry.
        key = parts[0].lower()
        if (not parts[1]) and key in messages:
            # An empty reply means "delete this reaction".
            del messages[key]
            save()
        else:
            messages[key] = parts[1]
            save()
        return "Vuoi pure che ti dica grazie? Gli altri ti odieranno per quello che hai fatto."
    text = text.lower()
    values = [messages[k] for k in messages if k in text]
    return '\n'.join(values)
def help():
    """Return the one-line usage hint for the addreact command."""
    # The trailing unreachable `pass` from the original was dead code.
    return "{0}addreact stringa#risposta".format(config['control'])
|
#!/usr/bin/env python
# my own teleop code!
from __future__ import print_function # for python2 users
import rospy
# imports for keys
import tty
import select
import sys
import termios
# imports for actions
from geometry_msgs.msg import Twist, Vector3
# import for emergency stop with lost telometry
import atexit
# scaffholding:
# PublishAction obtains and interprets keyboard input in an angular and linear velocity for the Neato.
# This info is published to the 'cmd_vel' topic
class PublishAction(object):
    """Keyboard teleoperation node for the Neato.

    Reads one raw keystroke at a time from stdin, maps it to linear/angular
    velocities and publishes them as a Twist on 'cmd_vel'.
    Keys: w/s = forward/back, a/d = turn left/right, e = zero linear,
    f = zero angular, any other key = full stop; Ctrl+C exits.
    """

    def __init__(self, speed=0.3, turn=0.3):
        # speed/turn generalize the previously hard-coded 0.3 magnitudes;
        # defaults preserve the original behavior.
        rospy.init_node('teleop_node')
        self.key = None
        # Remember cooked-mode settings so raw mode can be undone per keypress.
        self.settings = termios.tcgetattr(sys.stdin)
        self.pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)
        self.speed = speed
        self.turn = turn
        self.linearVel = 0.0
        self.angularVel = 0.0
        # Emergency stop when the process exits, whatever the cause.
        atexit.register(self.exit_handler)

    def interpretKey(self):
        """Map the last key read into the velocity fields."""
        if self.key == 'w':
            self.linearVel = self.speed
        elif self.key == 's':
            self.linearVel = -self.speed
        elif self.key == 'd':
            self.angularVel = -self.turn
        elif self.key == 'a':
            self.angularVel = self.turn
        elif self.key == 'f':
            self.angularVel = 0.0
        elif self.key == 'e':
            self.linearVel = 0.0
        else:
            # Any unmapped key is a full stop.
            self.linearVel = 0.0
            self.angularVel = 0.0

    def getKey(self):
        """Block until one key is available on stdin; store it in self.key."""
        tty.setraw(sys.stdin.fileno())
        select.select([sys.stdin], [], [], 0)
        self.key = sys.stdin.read(1)
        # Restore cooked mode so the terminal behaves between reads.
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)

    def run(self):
        """Main loop: read a key, interpret it, publish the velocity command."""
        print('WASD keys. E stops linear, F stops angular. All else cancels.')
        while self.key != '\x03':  # loop until Ctrl+C
            self.getKey()
            self.interpretKey()
            self.pub.publish(Twist(linear=Vector3(x=self.linearVel),
                                   angular=Vector3(z=self.angularVel)))

    def exit_handler(self):
        """Emergency stop on process exit.

        Bug fix: the original only zeroed the local velocity fields, which
        publishes nothing once run() has returned, so the robot would keep
        executing its last command. Publish an explicit zero Twist instead.
        """
        self.linearVel = 0.0
        self.angularVel = 0.0
        try:
            self.pub.publish(Twist(linear=Vector3(x=0.0), angular=Vector3(z=0.0)))
        except Exception:
            # Best effort: ROS may already be shut down at interpreter exit.
            pass
if __name__ == '__main__':
    # Instantiate the teleop node and hand control to its key loop.
    teleop = PublishAction()
    teleop.run()
|
import pygame as pg
from settings import *
import pytweening as tween
vec=pg.math.Vector2
from random import randrange,choice
class Button:
    """Menu button drawn from the game's preloaded button images."""

    def __init__(self, game):
        self.game = game
        self.image = game.button
        self.rect = self.image.get_rect()

    def _render(self):
        # Blit the current image and draw the caption on top of it.
        self.game.screen.blit(self.image, self.rect)
        self.game.draw_text(self.text, self.game.title_font, 40, WHITE,
                            self.rect.x + 100, self.rect.y + 30, align='center')

    def draw_button(self, text, x, y):
        """Position the button at (x, y) with the given caption and draw it."""
        self.rect.x = x
        self.rect.y = y
        self.text = text
        self._render()

    def on_button(self):
        """Draw the hover state (alternate image)."""
        self.image = self.game.button2
        self._render()

    def off_button(self):
        """Revert to the normal image (takes effect on the next draw)."""
        self.image = self.game.button
class Wight_Button:
    """Plain rectangular button rendered as a filled 100x50 Surface."""

    def __init__(self, game):
        self.game = game
        self.image = pg.Surface((100, 50))
        self.image.fill(LIGHTGREY)
        self.rect = self.image.get_rect()
        self.delete = False

    def _render(self):
        # Blit the surface and draw the caption centered inside it.
        self.game.screen.blit(self.image, self.rect)
        self.game.draw_text(self.text, self.game.title_font, 20, WHITE,
                            self.rect.x + self.rect.width / 2,
                            self.rect.y + self.rect.height / 2,
                            align='center')

    def draw_button(self, text, x, y):
        """Position the button at (x, y) with the given caption and draw it."""
        self.rect.x = x
        self.rect.y = y
        self.text = text
        self._render()

    def on_button(self, deld):
        """Draw the hover state unless the button is flagged as deleted."""
        self.delete = deld
        if not self.delete:
            self.image.fill(LIGHTGREY)
            self._render()

    def off_button(self):
        """Swap in a fresh dark surface for the idle state."""
        self.image = pg.Surface((100, 50))
        self.image.fill(DARKGREY)
class Menu_img(pg.sprite.Sprite):
    """Animated menu decoration sprite.

    Plays the idle menu animation until the mouse first touches it, then
    permanently switches to the 'unsleep' animation (the hover flag latches
    and is never reset, matching the original behavior).
    """

    def __init__(self, game, x, y):
        self.game = game
        self.group = game.menu_sprites, game.for_menu_image
        self._layer = 0
        pg.sprite.Sprite.__init__(self, self.group)
        self.image = self.game.menu_animation[0]
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        self.current_frame = 0
        self.last_update = 0
        self.point = False  # latches True once the mouse ever hovers the sprite

    def _advance(self, frames, frame_count, now):
        # Step to the next frame every 250 ms, keeping the sprite centered.
        if now - self.last_update > 250:
            self.last_update = now
            self.current_frame = (self.current_frame + 1) % frame_count
            self.image = frames[self.current_frame]
            center = self.rect.center
            self.rect = self.image.get_rect()
            self.rect.center = center

    def update(self):
        now = pg.time.get_ticks()
        mouse_x, mouse_y = pg.mouse.get_pos()
        if self.rect.collidepoint((mouse_x, mouse_y)):
            self.point = True
        if self.point:
            self._advance(self.game.unsleep_menu_animation,
                          len(UNSLEEP_MENU_ANIMATION), now)
        else:
            self._advance(self.game.menu_animation, len(MENU_ANIMATION), now)
class Main_fire_fly(pg.sprite.Sprite):
    """Decorative menu firefly.

    Drifts upward with an eased side-to-side bobbing motion, wraps back to
    mid-map when it leaves the top, and is re-layered in front of or behind
    the menu image it overlaps depending on its scale.
    """

    # Possible integer scale factors; fixes the original's shadowing of the
    # builtin `list` (and drops the dead `self.num = 1` / `self.pos = pos`).
    SCALE_CHOICES = (2, 3, 4, 5)

    def __init__(self, game, pos):
        self.game = game
        self.group = game.menu_sprites
        self._layer = 1
        pg.sprite.Sprite.__init__(self, self.group)
        self.image = game.firefly_animation[0]
        self.rect = self.image.get_rect()
        self.rect.center = pos
        self.pos = vec(pos)
        self.hit_rect = self.rect
        self.current_frame = 0
        self.last_update = 0
        self.tween = tween.easeInOutSine
        self.step = 0
        self.dir = 1
        self.vel = randrange(100, 200)
        self.prime_width = self.rect.width  # unscaled width, used for layering
        self.num = choice(self.SCALE_CHOICES)
        self.image = pg.transform.scale(self.image,
                                        (int(self.rect.width * self.num), int(self.rect.height * self.num)))
        self.rect = self.image.get_rect()

    def update(self):
        self.animate()
        # Drift upward at the firefly's own speed.
        self.pos.y -= self.vel * self.game.dt
        self.rect.centery = self.pos.y
        self.hit_rect.centery = self.pos.y
        # Bobbing motion: eased horizontal offset, flipping direction each sweep.
        offset = BOB_RANGE * (self.tween(self.step / BOB_RANGE) - 0.5)
        self.rect.centerx = self.pos.x + offset * self.dir
        self.hit_rect.centerx = self.pos.x + offset * self.dir
        self.step += BOB_SPEED
        if self.step > BOB_RANGE:
            self.step = 0
            self.dir *= -1
        # Recycle once it floats well above the map (re-scaled on next animate()).
        if self.rect.top < self.game.map_rect.top - 200:
            self.vel = randrange(100, 200)
            self.num = choice(self.SCALE_CHOICES)
            self.pos.y = self.game.map_rect.bottom / 2
        # Layer small fireflies behind the menu image and large ones in front.
        hits = pg.sprite.spritecollide(self, self.game.for_menu_image, False)
        for sprite in hits:
            if self.rect.width / 2 == self.prime_width or self.rect.width / 3 == self.prime_width:
                self.game.menu_sprites.change_layer(self, sprite._layer - 1)
            else:
                self.game.menu_sprites.change_layer(self, sprite._layer + 1)

    def animate(self):
        """Advance the flap animation every 400 ms, re-applying the scale."""
        now = pg.time.get_ticks()
        if now - self.last_update > 400:
            self.last_update = now
            self.current_frame = (self.current_frame + 1) % len(FIREFLY_ANIMATION)
            self.image = self.game.firefly_animation[self.current_frame]
            self.rect = self.image.get_rect()
            self.image = pg.transform.scale(self.image,
                                            (int(self.rect.width * self.num), int(self.rect.height * self.num)))
            self.rect = self.image.get_rect()
|
import numpy as np
from scipy.optimize import linear_sum_assignment
# Recursive Hungarian Algorithm for Dynamic Environments
class RHA2:
    """Recursive Hungarian Algorithm for multi-round agent/task assignment.

    Repeatedly solves a linear assignment (Hungarian) problem, moving agents
    onto tasks and deducting travel energy, until the remaining tasks no
    longer cover all agents. Infeasible pairs get cost `oo` (a large sentinel).
    """
    def __init__(self, agent_pos, agent_energy, task_pos, task_importance):
        self.agent_pos = agent_pos
        self.agent_energy = agent_energy
        self.task_pos = task_pos
        self.task_importance = task_importance
        # Drop placeholder tasks marked with the (-1, -1, -1) sentinel.
        col = []
        for i in range(len(self.task_pos)):
            if all(self.task_pos[i] == np.array([-1, -1, -1])):
                col.append(i)
        self.task_pos = np.delete(self.task_pos, col, axis=0)
        self.task_importance = np.delete(self.task_importance, col, axis=0)
        self.m = len(self.agent_pos)      # number of agents
        self.n = len(self.task_pos)       # number of (valid) tasks
        self.oo = 0x3f3f3f3f              # "infinite" cost sentinel
    def deal(self):
        """Run the assignment; returns a list of agent-position snapshots,
        one per round (index 0 is the starting positions)."""
        # Initialization
        useful_m = self.m
        max_mn = max(self.m, self.n)
        cost = np.zeros([max_mn, max_mn])
        for i in range(self.m):
            for j in range(self.n):
                dis = np.linalg.norm(self.agent_pos[i]-self.task_pos[j])
                if dis > self.agent_energy[i]:
                    cost[i][j] = self.oo
                else:
                    cost[i][j] = 1 / (self.task_importance[j] - dis)  # cost is importance minus distance; inverted since smaller is better for the solver
        uavs_pos_record = []
        uavs_pos_record.append(self.agent_pos.copy())
        # Start
        # 1. #agents <= #tasks: assign over multiple rounds, one agent serves several tasks
        if self.m <= self.n:
            while useful_m <= self.n:
                # Pad the cost matrix with virtual agents.
                for i in range(self.m, self.n):
                    for j in range(self.n):
                        cost[i][j] = self.oo
                col_del = []
                row_ind, col_ind = linear_sum_assignment(cost)
                for i in range(self.m):
                    if cost[i][col_ind[i]] < self.oo:
                        # Update remaining energy.
                        self.agent_energy[i] -= np.linalg.norm(self.agent_pos[i]-self.task_pos[col_ind[i]])
                        # Update position.
                        self.agent_pos[i] = self.task_pos[col_ind[i]]
                        col_del.append(col_ind[i])
                    else:
                        useful_m -= 1
                print(self.agent_energy)  # debug trace of remaining energies
                self.task_pos = np.delete(
                    self.task_pos, col_del, axis=0)  # remove served tasks
                self.task_importance = np.delete(
                    self.task_importance, col_del, axis=0)  # and their importances
                self.n = self.n - len(col_del)
                uavs_pos_record.append(self.agent_pos.copy())
                # Rebuild the cost matrix for the next round.
                max_mn = max(self.m, self.n)
                cost = np.zeros([max_mn, max_mn])
                for i in range(self.m):
                    for j in range(self.n):
                        dis = np.linalg.norm(self.agent_pos[i]-self.task_pos[j])
                        if dis > self.agent_energy[i]:
                            cost[i][j] = self.oo
                        else:
                            cost[i][j] = 1 / (self.task_importance[j] - dis)
            # Fewer tasks than agents remain: final round.
            # Pad the cost matrix with virtual tasks.
            for i in range(self.m):
                for j in range(self.n, self.m):
                    cost[i][j] = self.oo
            row_ind, col_ind = linear_sum_assignment(cost)
            tmp = np.zeros(self.agent_pos.shape)
            for i in range(self.m):
                if col_ind[i] < self.n:
                    tmp[i] = self.task_pos[col_ind[i]]  # move agent onto its task
                else:
                    tmp[i] = self.agent_pos[i]
            # self.agent_pos = self.task_pos[col_ind[:self.m]]
            uavs_pos_record.append(tmp.copy())
        # 2. #agents > #tasks: several agents share one task, assigned in batches of n
        else:
            k = self.m // self.n
            tmp = np.zeros(self.agent_pos.shape)
            for t in range(k):
                row_ind, col_ind = linear_sum_assignment(cost[t*self.n:(t+1)*self.n,:self.n])
                tmp[t*self.n:(t+1)*self.n] = self.task_pos[col_ind[:]]
            if self.m%self.n != 0:
                # Leftover agents: pad the residual cost block to square.
                cost_res = np.zeros([self.n, self.n])
                cost_res[:self.m%self.n] = cost[k*self.n:, :self.n]
                cost_res[self.m%self.n:] = self.oo * np.ones([self.n-self.m%self.n, self.n])
                row_ind, col_ind = linear_sum_assignment(cost_res)
                tmp[k*self.n:] = self.task_pos[col_ind[:self.m%self.n]]
            self.agent_pos = tmp
            uavs_pos_record.append(tmp.copy())
        return uavs_pos_record
def gen_task_lists(task_pos, uavs_pos_record):
    """Recover, per agent, the ordered list of task indices it visited.

    task_pos: sequence of task coordinates.
    uavs_pos_record: per-round snapshots of agent positions; index 0 holds
    the starting positions and is skipped.
    """
    index_of = {tuple(pos): idx for idx, pos in enumerate(task_pos)}
    n_agents = len(uavs_pos_record[0])
    task_lists = [[] for _ in range(n_agents)]
    for agent in range(n_agents):
        visited = task_lists[agent]
        for snapshot in uavs_pos_record[1:]:
            task_idx = index_of[tuple(snapshot[agent])]
            if task_idx not in visited:
                visited.append(task_idx)
    return task_lists
if __name__ == "__main__":
import matplotlib.pyplot as plt
agent_pos = np.array([(-19, -4, 0), (-1, -1, -1), (-3, 13, 0), (14, -15, 0), (16, 0, 0)])
agent_energy = np.array([100, 100, 100, 30, 30])
task_pos = np.array([(-13, 8, 0), (-12, 20, 0), (4, 4, 0), (18, -12, 0), (4, -19, 0),
(-19, 11, 0), (19, -8, 0), (-1, 9, 0), (-8, -8, 0), (11, -6, 0),
(-18, -17, 0), (-7, -16, 0), (12, 4, 0), (7, -1, 0)])
task_importance = np.array([110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 200, 201, 202, 203])
m, n = len(agent_pos), len(task_pos)
r = RHA2(agent_pos, agent_energy, task_pos, task_importance)
uavs_pos_record = r.deal()
print(uavs_pos_record)
task_lists = gen_task_lists(task_pos, uavs_pos_record)
print(task_lists)
path_len = len(uavs_pos_record)
path_x = np.empty([m, path_len])
path_y = np.empty([m, path_len])
for i in range(path_len):
for j in range(m):
path_x[j][i] = uavs_pos_record[i][j][0]
path_y[j][i] = uavs_pos_record[i][j][1]
for i in range(m):
plt.plot(path_x[i], path_y[i])
for i in range(m):
plt.annotate('U{}'.format(i),
xy=(uavs_pos_record[0][i][0],
uavs_pos_record[0][i][1]), xycoords='data',
xytext=(0, +5), textcoords='offset points', fontsize=10)
for i in range(n):
plt.annotate('{}'.format(i),
xy=(task_pos[i][0],
task_pos[i][1]), xycoords='data',
xytext=(0, +5), textcoords='offset points', fontsize=10)
plt.show()
|
from gturtle import *
# TigerJython/gturtle script: draws a 12-petal rosette of hexagons and lets
# the user flood-fill a clicked region.
# NOTE(review): `repeat n:` below is TigerJython syntax, not standard Python;
# this file only runs under TigerJython.
def onMouseHit(x, y):
    # Flood-fill the region around the clicked point.
    fill(x, y)
makeTurtle(mouseHit = onMouseHit)
hideTurtle()
addStatusBar(30)
setStatusText("Click to fill a region!")
repeat 12:
    repeat 6:
        forward(80)
        right(60)
    left(30)
|
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import subprocess
PIPE = subprocess.PIPE
# Parameter sweeps for the external `unconfined` solver (Python 2 script).
zdvec = np.linspace(0.0,1.0,5) # piezometer locations
sigmavec = np.logspace(np.log10(2.5E+1),np.log10(2.5E+5),5) # Sy/(Ss*b)
kappavec = np.logspace(-3,1,5) # Kz/Kr
# Plot styling: colors cycle within a linestyle; linestyle advances per cycle.
c = ['red','green','blue','cyan','magenta','orange']
l = ['dashed','dotted','dashdot']
infn = 'malama-test-z.in'
# Input-deck template for ./unconfined; the %-placeholders are filled with
# (model, kappa, Ss, Sy, beta, zD, output filename) in that order.
inputfile = """0 %i T T T :: quiet output?, model choice, dimensionless output?, timeseries?, piezometer?
2.0D-2 :: Q (volumetric pumping rate) [L^3/T]
6.0D-1 4.0D-1 :: l/b, d/b (normalized __depth__ to bottom/top of screened interval)
2.54D-2 2.54D-2 :: rw, rc (radius of well casing and tubing)
1.0D+0 :: gamma (dimensionless wellbore skin)
001 0.0D+0 1.0D+0 :: pumping well time behavior and parameters
1.00D+1 :: b (initial saturated thickness)
1.0D-4 %.4e :: Kr,kappa (radial K and ratio Kz/Kr)
%.4e %.4e :: Ss,Sy
%.4e :: beta Malama linearization parameter
2.95D+0 3.7D-1 2.0D+0 2.2D-1 2.0D+1 :: Mishra/Neuman 2010; a_c,a_k, psi_a,psi_k, L
10 1.0D-8 1.0D-9 :: deHoog invlap; M,alpha,tol
7 5 :: tanh-sinh quad; 2^k-1 order, # extrapollation steps
1 1 10 50 :: G-L quad; min/max zero split, # zeros to integrate, # abcissa/zero
timedata.dat 15.5 :: file to read time data from (and default t otherwise)
spacedata.dat 2.5D0 :: file to read space data from (and default r otherwise)
%.4e 0.0D0 5 2.54D-2 20.0 :: relative top obs well screen OR piezometer loc, bottom screen, quadrature order across screen (not for piezometer)
%s"""
# **************************************************
bval = 10.0 # assume thickness of 10.0
Sy = 0.25 # assume specific yield of 25%
beta = 0.0
# **************************************************
# One comparison figure per (sigma, kappa) pair: run the Hantush (model 1)
# and Neuman (model 5) solutions at each piezometer depth, then plot
# drawdown (top axes) and its log-time derivative (bottom axes).
for sigma in sigmavec:
    for kappa in kappavec:
        fig = plt.figure(1,figsize=(14,12))
        ax1 = fig.add_subplot(211)
        ax2 = fig.add_subplot(212)
        for i,zval in enumerate(zdvec):
            # Write the input deck for the early-time Hantush run at this depth.
            fh = open(infn,'w')
            outhantfn = 'early-hantush-test.out'
            mod = 1
            Ss = Sy/(sigma*bval)
            fh.write(inputfile % (mod,kappa,Ss,Sy,beta,zval,outhantfn))
            fh.close()
            args = ['./unconfined',infn]
            print 'running early hantush',zval
            stdout,stderr = subprocess.Popen(args,stdout=PIPE,stderr=PIPE).communicate()
            # Output columns: time, drawdown, log-time derivative of drawdown.
            t,h,dh = np.loadtxt(outhantfn,skiprows=20,unpack=True)
            ax1.loglog(t,h, linestyle='solid',color=c[i%len(c)],label='Hantush $z_D=$ %.1f' %zval)
            ax2.loglog(t,dh,linestyle='solid',color=c[i%len(c)],label='Hantush $z_D=$ %.1f' %zval)
            ##%## # **************************************************
            ##%## fh = open(infn,'w')
            ##%## outhantfn = 'late-hantush-test.out'
            ##%## mod = 1
            ##%## beta = 0.0
            ##%## fh.write(inputfile % (mod,kappa,Sy,Sy,beta,zval,outhantfn))
            ##%## fh.close()
            ##%##
            ##%## print 'running late hantush',zval
            ##%## stdout,stderr = subprocess.Popen(args,stdout=PIPE,stderr=PIPE).communicate()
            ##%## t,lh,ldh = np.loadtxt(outhantfn,skiprows=20,unpack=True)
            ##%##
            ##%## ax1.loglog(t,h, linestyle='dashdot',color=c[i%len(c)],label='late H $z_D=$ %.1f' %zval)
            ##%## ax2.loglog(t,dh,linestyle='dashdot',color=c[i%len(c)],label='late H $s_D=$ %.1f' %zval)
            # Same depth, Neuman model (model 5), reusing the solver binary.
            fh = open(infn,'w')
            outneumanfn = 'neuman-test.out'
            mod = 5
            beta = 0.0
            fh.write(inputfile % (mod,kappa,Ss,Sy,beta,zval,outneumanfn))
            fh.close()
            print 'running',sigma,kappa,zval
            stdout,stderr = subprocess.Popen(args,stdout=PIPE,stderr=PIPE).communicate()
            t,h,dh = np.loadtxt(outneumanfn,skiprows=20,unpack=True)
            ax1.loglog(t,h, linestyle=l[i//len(c)],color=c[i%len(c)],label='$z_D=$%.1f' % zval)
            ax2.loglog(t,dh,linestyle=l[i//len(c)],color=c[i%len(c)],label='$z_D=$%.1f' % zval)
        ax1.set_title('$\\sigma=$%.2e $\\kappa=$%.2e' % (sigma,kappa))
        ax2.set_xlabel('$t_D$')
        ax1.set_ylabel('$s_D$')
        ax2.set_ylabel('$d s_D/\\log(t)$')
        ax2.set_ylim([3.0E-6,300])
        ax1.set_ylim([3.0E-6,300])
        # ax2.set_xlim([1.0E-4,1.0E+8])
        # ax1.set_xlim([1.0E-4,1.0E+8])
        plt.legend(loc='lower right')
        plt.savefig('neuman-%.2e-%.2e-compare.png' % (sigma,kappa))
        plt.close(1)
|
# Import Dash packages
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
# Import extra packages
import numpy as np
import time
# Import app
from app import app
from common import data
# Static layout: a "Reload scenarios" button plus a spinner-wrapped output
# area that the callback below populates.
component = dbc.Col([
    html.H5('Reload scenarios'),
    html.Br(),
    html.Div(dbc.Button('Reload scenarios', color='primary', id='settings-reloadscenario-button')),
    dbc.Spinner([html.Br(), html.Div(id='settings-reloadscenario-output'), html.Br()], color='primary')
])
@app.callback(
[Output('settings-reloadscenario-output', 'children'),
Output('common-lastmodified-store', 'data')],
[Input('settings-reloadscenario-button', 'n_clicks')]
)
def reload_scenarios(value):
    """Dash callback: reload scenario data once the button has been clicked.

    Returns a status message and a fresh timestamp for the
    last-modified store (empty message before any click).
    """
    if not value:
        return "", time.time()
    data.load_data()
    row_count = len(data.all_scenarios)
    scenario_count = len(data.all_scenarios.groupby(data.params+['withInertia', 'carbonbudget']))
    message = "Scenarios have been reloaded. Currently {} rows available ({} scenarios)".format(
        row_count,
        scenario_count
    )
    return message, time.time()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 6 23:19:22 2017
class Perceptron as AND, OR & NAND gates
@author: Esteban Reyes de Jong
"""
class Perceptron:
    """A two-input perceptron: fires iff w1*x[0] + w2*x[1] + bias > 0."""

    def __init__(self, w1, w2, b):
        """Store the two input weights and the bias.

        w1, w2: input weights (float); b: bias (float).
        """
        self.weight1 = w1
        self.weight2 = w2
        self.bias = b

    def act(self, x):
        """Return 1 if the weighted sum of the two inputs exceeds 0, else 0.

        x: two-element sequence of binary inputs.
        """
        activation = x[0] * self.weight1 + x[1] * self.weight2 + self.bias
        return 1 if activation > 0 else 0
"""
Classes AND, OR, NAND, inherited from perceptron class
Used to inicialice weight according to distinct gates
"""
class AND(Perceptron):
    """Perceptron wired as a logical AND gate (fires only on [1, 1])."""
    def __init__(self):
        super().__init__(0.5, 0.5, -0.5)
class OR(Perceptron):
    """Perceptron wired as a logical OR gate (fires unless both inputs are 0)."""
    def __init__(self):
        super().__init__(0.5, 0.5, 0)
class NAND(Perceptron):
    """Perceptron wired as a logical NAND gate (fires unless both inputs are 1)."""
    def __init__(self):
        super().__init__(-0.5, -0.5, 0.75)
# Small smoke tests for each gate.
if __name__ == "__main__":
    # Exercise each gate once; results are computed but not printed.
    and_out = AND().act([1, 1])
    or_out = OR().act([0, 0])
    nand_out = NAND().act([0, 0])
# Generated by Django 2.1.1 on 2018-11-07 16:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: set `member` to CharField(max_length=40)
    on both RealTeamMember and TeamMember."""
    dependencies = [
        ('myapp', '0012_auto_20181107_2349'),
    ]
    operations = [
        migrations.AlterField(
            model_name='realteammember',
            name='member',
            field=models.CharField(max_length=40),
        ),
        migrations.AlterField(
            model_name='teammember',
            name='member',
            field=models.CharField(max_length=40),
        ),
    ]
|
class Solution:
    def reverseString(self, s):
        """Return the reverse of string s.

        :type s: str
        :rtype: str

        Python strings are immutable, so an in-place reversal is impossible;
        slicing builds the reversed copy in O(n) instead of the original
        O(n^2) character-by-character concatenation.
        """
        reversed_s = s[::-1]
        print(reversed_s)  # kept for parity with the original's debug output
        return reversed_s
# Quick manual check of the reverser.
solver = Solution()
solver.reverseString('I am luffy !')
|
from django.urls import path
from . import views
app_name = "user"
urlpatterns = [
path("", views.UserListView.as_view(), name="user_list"),
path("<int:pk>/", views.UserDetailView.as_view(), name="user_detail"),
path(
"update/<int:pk>", views.UserUpdateView.as_view(), name="user_update"
),
path("create/", views.UserCreateView.as_view(), name="user_create"),
path(
"create_and_book/",
views.UserCreateAndBookView.as_view(),
name="create_and_book",
),
path(
"organizer_book/<int:pk>/",
views.OrganizerBookView.as_view(),
name="organizer_book"
),
path(
"organizations/",
views.OrganizationListView.as_view(),
name="organization_list",
),
path(
"organization/create/",
views.OrganizationCreateView.as_view(),
name="organization_create",
),
path(
"organization/<int:pk>/update/",
views.OrganizationUpdateView.as_view(),
name="organization_update",
),
path(
"organization/<int:orga_pk>/<slug>/events/<int:page>/",
views.OrganizationEventsListView.as_view(),
name="organization_all_events",
),
path(
"organization/<int:pk>/",
views.OrganizationDeleteView.as_view(),
name="organization_delete",
),
path(
"organization/<int:pk>/add-admin",
views.AddAdminToOrganization.as_view(),
name="organization_add_admin",
),
path(
"organization/<int:pk>/add-active",
views.AddActiveToOrganization.as_view(),
name="organization_add_active",
),
path(
"organization/<int:pk>/add-volunteer",
views.AddVolunteerToOrganization.as_view(),
name="organization_add_volunteer",
),
path(
"organization/<int:pk>/<int:user_pk>/remove-from-actives",
views.RemoveActiveFromOrganization.as_view(),
name="remove_from_actives",
),
path(
"organization/<int:pk>/<int:user_pk>/remove-from-volunteers",
views.RemoveVolunteerFromOrganization.as_view(),
name="remove_from_volunteers",
),
path(
"organization/<int:pk>/<int:user_pk>/remove-from-admins",
views.RemoveAdminFromOrganization.as_view(),
name="remove_from_admins",
),
path(
"present/<int:pk>",
views.PresentCreateUserView.as_view(),
name="present_create_user",
),
path(
"present/<int:event_pk>/<int:pk>/",
views.PresentMoreInfoView.as_view(),
name="present_with_more_info",
),
path(
"organization/<int:pk>/add-member",
views.AddMemberToOrganization.as_view(),
name="organization_add_member",
),
path(
"organization/<int:orga_pk>/update-member/<int:pk>",
views.UpdateMemberView.as_view(),
name="organization_update_member",
),
path(
"fee/<int:pk>/delete/",
views.FeeDeleteView.as_view(),
name="fee_delete",
),
]
|
from django.conf.urls import url
from . import views
app_name = "tbapp"
urlpatterns = [
url(r"^$", views.IndexView.as_view(), name="index"),
url(r"^login$", views.login_view, name="login"),
url(r"^logout$", views.logout_view, name="logout"),
url(r"^events$", views.all_events_view, name="events"),
url(r"^venues$", views.all_venues_view, name="venues"),
url(r"^tickets$", views.all_tickets_view, name="tickets"),
url(r"^users$", views.all_users_view, name="users"),
url(r"^purchase_tickets$", views.purchase_tickets, name="purchase_tickets"),
url(r"^create_event$", views.create_event, name="create_event"),
url(r"^register_user$", views.register_user, name="register_user"),
url(r"^create_venue$", views.create_venue, name="create_venue"),
]
|
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
register = template.Library()
def getAb(context):
    """Fetch the A/B-test object from the template context, or None.

    The context key comes from settings.AB_TEST_CONTEXT_NAME, falling back
    to AB_TEST_DEFAULT_KEY_NAME and finally to 'ab'.
    """
    #todo: for any reason I am not able to import abTest.settings
    contextName = getattr(
        settings, 'AB_TEST_CONTEXT_NAME',
        getattr(settings, 'AB_TEST_DEFAULT_KEY_NAME', 'ab'))
    # `in` instead of the Python-2-only dict.has_key().
    if contextName not in context:
        return None
    return context.get(contextName)
def getExperiment(context, experiment):
    """Return the variant assigned for `experiment`, or "" if unavailable."""
    ab = getAb(context)
    if ab is None:
        return ""
    # `in` instead of the Python-2-only dict.has_key().
    if experiment not in ab:
        return ""
    return ab.get(experiment)
@register.simple_tag(takes_context=True)
def reachedGoalUrl(context, goalName):
    """Resolve the goal-tracking URL for the named goal, or "" if unknown."""
    ab = getAb(context)
    if ab is None:
        return ""
    for experiment in ab:
        for goal in experiment.goals:
            if goal.name == goalName:
                return reverse("abTest:reachedGoal", args=[goal.slug,])
    return ""
# Expose getExperiment as an assignment tag ({% getExperiment ... as var %}).
register.assignment_tag(getExperiment, takes_context=True)
import json
import traceback
from urllib import urlencode
from zope.interface import implements
from twisted.web.iweb import IBodyProducer
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twisted.web.client import Agent, readBody
from twisted.web.http_headers import Headers
from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.ssl import ClientContextFactory
from .base import Coin, build_sign, Order
class StringProducer(object):
    """Twisted IBodyProducer that writes a fixed, in-memory request body."""
    implements(IBodyProducer)

    def __init__(self, body):
        self.body = body
        self.length = len(body)

    def startProducing(self, consumer):
        # The whole body is written synchronously in one shot.
        consumer.write(self.body)
        return succeed(None)

    def pauseProducing(self):
        # Nothing to pause: production completes immediately.
        pass

    def stopProducing(self):
        pass
class WebClientContextFactory(ClientContextFactory):
    # Ignore hostname/port so the same SSL context is reused for every host.
    # NOTE(review): this skips per-host certificate checks -- the standard
    # old-Twisted HTTPS-client recipe; confirm acceptable for this use.
    def getContext(self, hostname, port):
        return ClientContextFactory.getContext(self)
class OKCoinApiMixin(object):
    """Async client for the OKEX v1 futures REST API (Twisted, Python 2).

    NOTE(review): every request builds a fresh Agent, and the exception
    handlers only print a traceback -- callers then receive an implicit None.
    """
    @inlineCallbacks
    def future_ticker(self, symbol, contract_type):
        """Fetch the last traded price for the given futures contract."""
        try:
            response = yield Agent(reactor, WebClientContextFactory()).request(
                'GET',
                'https://www.okex.com/api/v1/future_ticker.do?symbol={}&contract_type={}'.format(symbol, contract_type)
            )
            result = yield readBody(response)
            data = json.loads(result)
            returnValue(float(data['ticker']['last']))
        except Exception as exc:
            # Swallowed: callers receive None on any failure.
            traceback.print_exc()
    @inlineCallbacks
    def get_order_info(self, user, symbol, contract_type, order_id):
        """Look up a single order; returns an Order, or None on failure."""
        sign = build_sign(user.secret_key,
                          api_key=user.api_key,
                          symbol=symbol,
                          contract_type=contract_type,
                          order_id=order_id
                          )
        try:
            body = urlencode({
                'symbol': symbol,
                'contract_type': contract_type,
                'api_key': user.api_key,
                'sign': sign,
                'order_id': order_id
            })
            response = yield Agent(reactor, WebClientContextFactory()).request(
                'POST',
                'https://www.okex.com/api/v1/future_order_info.do',
                Headers({
                    'Content-Type': ['application/x-www-form-urlencoded;charset=utf-8']
                }),
                bodyProducer=StringProducer(body)
            )
            result = yield readBody(response)
            # Python 2 debug print of the raw API response.
            print result
            data = json.loads(result)
            if not data['result']:
                returnValue(None)
            else:
                ret_order = data['orders'][0]
                order = Order.build(ret_order['order_id'], ret_order['type'], ret_order['amount'])
                order.deal(ret_order['price_avg'], ret_order['deal_amount'], ret_order['status'])
                returnValue(order)
        except Exception as exc:
            traceback.print_exc()
            returnValue(None)
    @inlineCallbacks
    def trade(self, user, symbol, contract_type, price, amount, type, match_price, lever_rate):
        """Place a futures order; `type` codes are set by the do_* helpers
        below. On success returns the resulting Order (looked up by id)."""
        sign = build_sign(user.secret_key,
                          api_key=user.api_key,
                          symbol=symbol,
                          contract_type=contract_type,
                          price=price,
                          amount=amount,
                          type=type,
                          match_price=match_price,
                          lever_rate=lever_rate
                          )
        try:
            body = urlencode({
                'symbol': symbol,
                'contract_type': contract_type,
                'price': price,
                'amount': amount,
                'type': type,
                'match_price': match_price,
                'lever_rate': lever_rate,
                'api_key': user.api_key,
                'sign': sign
            })
            response = yield Agent(reactor, WebClientContextFactory()).request(
                'POST',
                'https://www.okex.com/api/v1/future_trade.do',
                Headers({"Content-Type" : ["application/x-www-form-urlencoded;charset=utf-8"]}),
                bodyProducer=StringProducer(body)
            )
            result = yield readBody(response)
            data = json.loads(result)
            if not data['result']:
                returnValue(None)
            else:
                returnValue((yield self.get_order_info(user, symbol, contract_type, data['order_id'])))
        except Exception as exc:
            traceback.print_exc()
    @inlineCallbacks
    def do_long(self, user, symbol, contract_type, price, amount, match_price=0, lever_rate=10):
        # type 1 order.
        returnValue((yield self.trade(user, symbol, contract_type, price, amount, 1, match_price, lever_rate)))
    @inlineCallbacks
    def do_stop_long(self, user, symbol, contract_type, price, amount, match_price=0, lever_rate=10):
        # type 3 order.
        returnValue((yield self.trade(user, symbol, contract_type, price, amount, 3, match_price, lever_rate)))
    @inlineCallbacks
    def do_short(self, user, symbol, contract_type, price, amount, match_price=0, lever_rate=10):
        # type 2 order.
        returnValue((yield self.trade(user, symbol, contract_type, price, amount, 2, match_price, lever_rate)))
    @inlineCallbacks
    def do_stop_short(self, user, symbol, contract_type, price, amount, match_price=0, lever_rate=10):
        # type 4 order.
        returnValue((yield self.trade(user, symbol, contract_type, price, amount, 4, match_price, lever_rate)))
class OKEXBTCFutureSeasonCoin(Coin, OKCoinApiMixin):
    """Strategy driver for the OKEX BTC quarterly ('quarter') futures contract."""
    __name__ = 'OKEX BTC Future Season Coin'
    @inlineCallbacks
    def check_price(self):
        """Poll the ticker and fire every queued operation whose trigger
        price has been crossed (max_operations on the way up, min_operations
        on the way down)."""
        price = yield self.future_ticker('btc_usd', 'quarter')
        while not self.max_operations.is_empty() and price >= self.max_operations.peek().price:
            self.process_operation(self.max_operations.pop())
        while not self.min_operations.is_empty() and price <= self.min_operations.peek().price:
            self.process_operation(self.min_operations.pop())
        yield price
    def _handle_result(self, operation, order):
        # Bind the exchange order to the operation and track it until dealt.
        if order is None:
            return
        operation.bind_order(order)
        self.add_to_dealing_operation(operation)
    @inlineCallbacks
    def long(self, operation):
        """Open a long position for the operation's price/amount."""
        order = yield self.do_long(operation.user, 'btc_usd', 'quarter', operation.price, operation.amount)
        self._handle_result(operation, order)
    @inlineCallbacks
    def stop_long(self, operation):
        """Close a long position for the operation's price/amount."""
        order = yield self.do_stop_long(operation.user, 'btc_usd', 'quarter', operation.price, operation.amount)
        self._handle_result(operation, order)
    @inlineCallbacks
    def short(self, operation):
        """Open a short position for the operation's price/amount."""
        order = yield self.do_short(operation.user, 'btc_usd', 'quarter', operation.price, operation.amount)
        self._handle_result(operation, order)
    @inlineCallbacks
    def stop_short(self, operation):
        """Close a short position for the operation's price/amount."""
        order = yield self.do_stop_short(operation.user, 'btc_usd', 'quarter', operation.price, operation.amount)
        self._handle_result(operation, order)
    @inlineCallbacks
    def check_deal(self):
        """Walk dealing operations: queue follow-ups for filled ones, cancel
        their linked siblings, and refresh unfilled ones from the exchange."""
        for operation in self.dealing_operations:
            if operation.dealed:
                for op in operation.callback_ops:
                    op.bind_user(operation.user)
                    self.add_operation(op)
                # Cancel the rest of the operation's linked ring.
                next_op = operation.next_op
                while next_op != operation and next_op is not None:
                    next_op.cancel()
                    next_op = next_op.next_op
                # NOTE(review): removes from the list being iterated -- confirm intended.
                self.dealing_operations.remove(operation)
            else:
                yield self.get_order_info(user=operation.user, symbol='btc_usd', contract_type='quarter', order_id=operation.order.order_id)
    def __str__(self):
        return self.__name__
if __name__ == '__main__':
    from twisted.internet import reactor
    from twisted.internet.defer import inlineCallbacks
    from ecoin_trade.main import User
    from ecoin_trade.commands import MaxLongCommand
    @inlineCallbacks
    def test_long():
        """Manual smoke test: decode a max-long command and open the position."""
        coin = OKEXBTCFutureSeasonCoin()
        op = MaxLongCommand.decode(['maxlong', '4360', '1', '4370', '4350'])
        # SECURITY: hard-coded API credentials committed to source -- rotate
        # these keys and load them from the environment instead.
        op.bind_user(User('fdbd4121-9f49-4bf3-89e1-acdd1a4f2bf4', 'B7C77280354B9BB02C1668DDC33FC3EE'))
        result = yield coin.long(op)
    reactor.callLater(0, test_long)
    reactor.run()
#! /usr/bin/env python2
"""
This will simulate doing a "tee": having stdout directly to the console as well as saving it in a list for later processing.
"""
import os
import sys
import time
import select
import logging
import threading
import subprocess
class CopyThread(threading.Thread):
    """Background thread that mirrors every line read from `pipe` to
    `real_stdout` while also accumulating the text in `self.buf`."""

    def __init__(self, real_stdout, pipe):
        self.buf = ''
        self.real_stdout = real_stdout
        self.pipe = pipe
        self.stopped = False
        super(CopyThread, self).__init__()

    def run(self):
        # Poll with a 0.1 s timeout so stop() is honoured promptly.
        while not self.stopped:
            read_ready, _, _ = select.select([self.pipe], [], [], 0.1)
            if not read_ready:
                continue
            line = self.pipe.readline()
            if not line:
                break  # EOF on the pipe
            self.real_stdout.write(line)
            self.buf += line

    def stop(self):
        """Ask run() to exit at its next poll."""
        self.stopped = True
def see(expr):
    """Evaluate `expr` (a Python expression string) and print "expr: repr(value)".

    NOTE(review): eval() on an arbitrary string -- fine for this debugging
    helper, but never feed it untrusted input.
    """
    value = eval(expr)
    # Parenthesized single-argument print works under both Python 2 and 3,
    # unlike the original Python-2-only print statement.
    print('{expr}: {value!r}'.format(expr=expr, value=value))
# Duplicate fd 1 so we keep a handle on the real console stdout.
original_stdout_fd = os.dup(sys.stdout.fileno())
original_stdout = os.fdopen(original_stdout_fd, 'w', 0)
sys.stdout.close()
# Route fd 1 into a pipe; the CopyThread drains the read end and tees it.
(read_fd, write_fd) = os.pipe()
os.dup2(write_fd, 1)
sys.stdout = os.fdopen(1, 'w', 0)
thread = CopyThread(original_stdout, os.fdopen(read_fd, 'r', 0))
thread.start()
# Everything printed -- by Python or by child processes -- now goes through
# the tee: shown on the console AND captured in thread.buf.
print 'This is a test'
subprocess.Popen(['date']).wait()
time.sleep(5)
thread.stop()
while thread.is_alive():
    time.sleep(.1)
# Restore the real stdout and show what was captured.
sys.stdout = original_stdout
see('thread.buf')
exit(0)
|
#!/usr/bin/python
#
# Program To Demonstrate Inheritance
# furnishings.py
#
# Created by: Jason M Wolosonovich
# 6/04/2015
#
# Lesson 7 - Project Attempt 1
"""
furnishings.py: Demonstrates inheritance
@author: Jason M. Wolosonovich
"""
import sys
class Furnishing(object):
    """Base class for anything placed in a room of the home."""

    def __init__(self, room):
        # Name of the room this item lives in.
        self.room = room
# Concrete furnishing kinds; they differ only by class name, which
# counter() uses for its report.
class Sofa(Furnishing):
    pass
class Bookshelf(Furnishing):
    pass
class Bed(Furnishing):
    pass
class Table(Furnishing):
    pass
def counter(home):
    """Count the furnishings in *home* by class name.

    Prints "<plural name>: <count>" for each distinct furnishing type and
    returns a list of (class_name, count) tuples (order unspecified, as in
    the original set-based implementation).

    Args:
        home: list of Furnishing instances.

    Returns:
        list of (str, int) tuples.

    Raises:
        TypeError: if *home* is not a list.
    """
    # Local import keeps the module's import block unchanged.
    from collections import Counter
    if not isinstance(home, list):
        raise TypeError("argument 'home' should be of type <list>")
    # dict for plurals
    plural_lookup = {'Furnishing': 'Furnishings',
                     'Sofa': 'Sofas',
                     'Bookshelf': 'Bookshelves',
                     'Bed': 'Beds',
                     'Table': 'Tables'}
    # Counter does one pass instead of calling list.count() twice per
    # unique item (the original was O(unique * n)).
    counts = Counter(item.__class__.__name__ for item in home)
    item_tuples = []
    for item, count in counts.items():
        item_tuples.append((item, count))
        print("{0}: {1}".format(plural_lookup[item], count))
    return item_tuples
def map_the_home(home):
    """Group the furnishings in *home* by room.

    Prints and returns a dict mapping each room name to the list of
    furnishing objects found in that room (in input order).

    Raises:
        TypeError: if *home* is not a list.
    """
    if not isinstance(home, list):
        raise TypeError("argument 'home' should be of type <list>")
    home_map = {}
    for furnishing in home:
        # setdefault creates the room's list on first sight, then we
        # extend it in place for every later item in the same room.
        room_items = home_map.setdefault(furnishing.room, [])
        room_items.append(furnishing)
    print(home_map)
    return home_map
if __name__=="__main__":
home = []
home.append(Bed('Bedroom'))
home.append(Sofa('Living Room'))
home.append(Table('Living Room'))
home.append(Table('Kitchen'))
home.append(Bookshelf('Living Room'))
# map the home
home_map = map_the_home(home)
# get item counts
item_counts = counter(home)
#goodbye
sys.exit() |
# %%
import difflib
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
def merge_fixtures(fpl_path, understat_path, data_path):
    """Load the understat and FPL CSV exports and join fixture/team info onto fpl.

    Args:
        fpl_path: directory containing ``merged_gw.csv``.
        understat_path: directory containing ``all_understat_players.csv``.
        data_path: directory containing ``fixtures.csv`` and ``teams.csv``.

    Returns:
        (understat, fpl): the understat dataframe with ``date`` coerced to
        dates, and the fpl gameweek dataframe with fixture difficulty and
        team-strength columns merged in and team ids replaced by names.
    """
    understat = pd.read_csv(understat_path + 'all_understat_players.csv')
    understat['date']=pd.to_datetime(understat['date']).dt.date
    fixtures = pd.read_csv(data_path + 'fixtures.csv')
    fixtures = fixtures[['id', 'team_a', 'team_a_difficulty', 'team_h', 'team_h_difficulty']]
    # fixtures.id is the FPL fixture id; fpl carries the same id as 'fixture'.
    fixtures.rename(columns={'id':'fixture'}, inplace=True)
    teams = pd.read_csv(data_path + 'teams.csv')
    teams = teams[['id', 'name', 'short_name', 'strength', 'strength_attack_away', 'strength_attack_home', 'strength_defence_away', 'strength_defence_home', 'strength_overall_away', 'strength_overall_home']]
    fpl = pd.read_csv(fpl_path + 'merged_gw.csv')
    fpl.rename(columns = {'name':'player_name'},inplace = True)
    # Drop the time component so dates line up with understat's date column.
    fpl['kickoff_time'] = pd.to_datetime(fpl['kickoff_time']).dt.date
    fpl = pd.merge(fpl, fixtures, on='fixture')
    fpl = pd.merge(fpl, teams, left_on='team', right_on='name')
    # Replace integer team ids with team names throughout.
    fpl = rename_teams(teams, fpl)
    return understat, fpl
def rename_teams(teams, fpl, cols = ['team', 'opponent_team', 'team_a', 'team_h']):
    """Replace integer FPL team ids in *cols* with the team names from *teams*.

    E.g. 1 -> 'Manchester United'. Values not present in ``teams['id']``
    are left unchanged. The input dataframe is not modified.

    Args:
        teams: dataframe with 'id' and 'name' columns.
        fpl: gameweek dataframe containing the columns in *cols*.
        cols: columns to translate (default list is never mutated, so the
            mutable default is safe here).

    Returns:
        A new dataframe with ids replaced by names.
    """
    fpl = fpl.copy()
    # Build the id -> name mapping once; the original looped over every
    # team for every column and wrote through a chained `.loc` assignment
    # (which relies on view semantics and shadowed the builtin `id`).
    id_to_name = dict(zip(teams['id'], teams['name']))
    for col in cols:
        fpl[col] = fpl[col].replace(id_to_name)
    return fpl
def intersect(a, b):
    """Return the elements common to *a* and *b*.

    Used to find player names present in both the FPL and understat data.
    Order of the result is unspecified (set semantics).
    """
    common = set(a).intersection(b)
    return list(common)
def rename_fpl_teams(fpl, features = ['team', 'team_a', 'team_h']):
    """Replace FPL team short names with the full names used by understat.

    E.g. 'Spurs' -> 'Tottenham', 'Man City' -> 'Manchester City'.

    Args:
        fpl: dataframe whose *features* columns hold FPL team strings.
        features: columns to translate.

    Returns:
        The same dataframe, mutated in place, with the columns mapped.

    NOTE:
        New teams from different seasons need to be added to team_reps.
        Series.map sends any value missing from the dict to NaN, so an
        unlisted promoted team silently becomes NaN here.
    """
    team_reps = {
        'Man City':'Manchester City',
        'Man Utd': 'Manchester United',
        'West Brom':'West Bromwich Albion',
        'Spurs':'Tottenham',
        'Sheffield Utd':'Sheffield United',
        'West Ham':'West Ham',
        'Wolves':'Wolverhampton Wanderers',
        'Brighton':'Brighton',
        'Chelsea':'Chelsea',
        'Newcastle':'Newcastle',
        'Everton':'Everton',
        'Fulham':'Fulham',
        'Arsenal':'Arsenal',
        'Leeds':'Leeds',
        'Liverpool':'Liverpool',
        'Leicester':'Leicester',
        'Southampton':'Southampton',
        'Crystal Palace':'Crystal Palace',
        'Aston Villa':'Aston Villa',
        'Burnley':'Burnley',
        'Watford':'Watford',
        'Bournemouth':'Bournemouth',
        'Norwich':'Norwich'
        }
    for feature in features:
        fpl[feature] = fpl[feature].map(team_reps)
    return fpl
def get_matching_names(understat_names, fpl_names):
    """Pair every unique understat player name with its most similar FPL name.

    Similarity is difflib's SequenceMatcher ratio over the lower-cased
    names. All (understat, fpl) pairs are scored, then the best-scoring
    FPL name per understat name is kept.

    Args:
        understat_names: dataframe with a 'player_name' column.
        fpl_names: dataframe with a 'player_name' column.

    Returns:
        Dataframe with columns 'understat', 'fpl', 'similarity' -- one row
        per unique understat name.
    """
    u_names = understat_names['player_name'].unique()
    f_names = fpl_names['player_name'].unique()
    matcher = difflib.SequenceMatcher()
    scored_pairs = []
    # O(len(u_names) * len(f_names)) scoring pass.
    for u_name in u_names:
        for f_name in f_names:
            matcher.set_seqs(u_name.lower(), f_name.lower())
            scored_pairs.append((u_name, f_name, matcher.ratio()))
    scores = pd.DataFrame(scored_pairs, columns=['understat', 'fpl', 'similarity'])
    # Keep, for each understat name, the row with the highest similarity.
    best_rows = scores.loc[scores.groupby('understat')['similarity'].idxmax()].copy()
    return best_rows
def exact_matches(understat, fpl, join = 'inner'):
    """First matching pass: merge rows whose player name AND date agree exactly.

    Args:
        understat: dataframe with 'player_name' and 'date'.
        fpl: dataframe with 'player_name' and 'kickoff_time'.
        join: merge strategy passed to pandas (default 'inner').

    Returns:
        The merged dataframe (fpl columns first, understat columns second).
    """
    # Names present in both sources (set intersection, inlined).
    shared_names = set(understat['player_name'].unique()) & set(fpl['player_name'].unique())
    fpl_subset = fpl[fpl['player_name'].isin(shared_names)]
    understat_subset = understat[understat['player_name'].isin(shared_names)]
    merged = pd.merge(
        fpl_subset, understat_subset,
        left_on=['player_name', 'kickoff_time'],
        right_on=['player_name', 'date'],
        how=join,
    )
    return merged
def remove_matched_names(fpl, understat, exact_merge):
    """Drop the players already merged in the exact pass.

    Args:
        fpl: full fpl dataframe.
        understat: full understat dataframe.
        exact_merge: result of exact_matches(); its 'player_name' values
            identify players already handled.

    Returns:
        (fpl_not_matched, understat_not_matched): rows whose player name
        did not appear in *exact_merge*.
    """
    merged_names = exact_merge['player_name'].unique()
    fpl_remaining = fpl[~fpl['player_name'].isin(merged_names)]
    understat_remaining = understat[~understat['player_name'].isin(merged_names)]
    return fpl_remaining, understat_remaining
def map_similar_names(similarity_matched_df, understat_not_matched, fpl_not_matched, season):
    """Second matching pass: apply the vetted similarity mapping.

    A manually curated per-season list of understat names whose "most
    similar" FPL name is wrong is excluded; the remaining understat names
    are renamed to their best-matching FPL name.

    Args:
        similarity_matched_df: output of get_matching_names()
            ('understat', 'fpl', 'similarity' columns).
        understat_not_matched: understat rows left after the exact pass.
        fpl_not_matched: fpl rows left after the exact pass.
        season: season string, e.g. '2021-22'.

    Returns:
        (understat_no_similar, understat_similar, fpl_similar, fpl_no_similar)

    Raises:
        ValueError: for a season with no curated list.

    NOTE:
        New names from different seasons need to be added here.
    """
    # BUGFIX: the original condition was `season == '2020-21' or '2021-22'`,
    # which is always true ('2021-22' is truthy); it only worked because the
    # 2019-20 branch happened to overwrite the list afterwards.
    if season in ('2020-21', '2021-22'):
        wrongly_matched_names = ['Adrián', 'Alisson', 'Allan', 'André Gomes', 'Bernard', 'Bernardo', 'Bernardo Silva', 'David Luiz', 'Ederson', 'Emerson',
                                 'Fabinho', 'Felipe Anderson', 'Fred', 'Hélder Costa', 'Joelinton', 'Jonny', 'Jorginho', 'Kepa', 'Lucas Moura', 'Raphinha',
                                 'Ricardo Pereira', 'Rodri', 'Rúben Dias','Rúben Vinagre', 'Semi Ajayi', 'Trézéguet', 'Wesley', 'Willian']
    elif season == '2019-20':
        wrongly_matched_names = ['Adrián','Alisson','André Gomes','Angelino', 'Bernard', 'Bernardo', 'Bernardo Silva','Borja Bastón',
                                 'Chicharito','David Luiz','Ederson', 'Emerson', 'Fabinho', 'Felipe Anderson', 'Fred','Joelinton', 'Jonny',
                                 'Jorginho','Jota', 'Kepa','Kiko Femenía','Pedro', 'Ricardo Pereira', 'Rodri','Rúben Vinagre','Trézéguet','Wesley','Willian']
    else:
        raise ValueError("no curated name list for season %r" % (season,))
    # Similarity rows we trust (wrongly matched names removed).
    similar_rename = similarity_matched_df[~similarity_matched_df['understat'].isin(wrongly_matched_names)]
    understat_no_similar = understat_not_matched[understat_not_matched['player_name'].isin(wrongly_matched_names)]
    # .copy() so the rename below does not write through a slice view
    # (the original triggered pandas' chained-assignment warning).
    understat_similar = understat_not_matched[~understat_not_matched['player_name'].isin(wrongly_matched_names)].copy()
    fpl_similar = fpl_not_matched[fpl_not_matched['player_name'].isin(similar_rename['fpl'].unique())]
    fpl_no_similar = fpl_not_matched[~fpl_not_matched['player_name'].isin(similar_rename['fpl'].unique())]
    name_mapper = dict(zip(similar_rename['understat'], similar_rename['fpl']))
    understat_similar['player_name'] = understat_similar['player_name'].map(name_mapper)
    return understat_no_similar, understat_similar, fpl_similar, fpl_no_similar
def final_rename(understat_no_similar, fpl_no_similar, join = 'inner'):
    """Third and final matching pass: hand-curated nickname -> full-name mapping.

    Each understat nickname below was manually investigated (team checked,
    real name looked up) and mapped to the corresponding FPL full name;
    the renamed understat rows are then merged on name + date.

    Args:
        understat_no_similar: understat rows with no trusted similar match.
        fpl_no_similar: fpl rows with no trusted similar match.
        join: merge strategy (default 'inner').

    Returns:
        The merged dataframe.

    NOTE:
        New names from different seasons need to be added here.
        This mutates understat_no_similar's 'player_name' column in place;
        names absent from the mapping become NaN.
    """
    name_mapper = {'Adrián':'Adrián Bernabé', # Contains both seasons corrections
                   'Alisson':'Alisson Ramses Becker',
                   'Allan':'Allan Marques Loureiro',
                   'André Gomes':'André Filipe Tavares Gomes',
                   'Angelino':'José Ángel Esmorís Tasende',
                   'Bernard':'Bernard Anício Caldeira Duarte', # Everton
                   'Bernardo Silva':'Bernardo Mota Veiga de Carvalho e Silva', # Manchester City
                   'Bernardo':'Bernardo Fernandes da Silva Junior', #
                   'Borja Bastón':'Borja González Tomás',
                   'Chicharito':'Javier Hernández Balcázar',
                   'David Luiz':'David Luiz Moreira Marinho',
                   'Ederson':'Ederson Santana de Moraes',
                   'Emerson':'Emerson Palmieri dos Santos',
                   'Fabinho':'Fabio Henrique Tavares',
                   'Felipe Anderson':'Felipe Anderson Pereira Gomes',
                   'Fred':'Frederico Rodrigues de Paula Santos', # Manchester United
                   'Hélder Costa': 'Hélder Wander Sousa de Azevedo e Costa', # Leeds
                   'Joelinton':'Joelinton Cássio Apolinário de Lira', # Chelsea
                   'Jonny':'Jonathan Castro Otto', # Wolves
                   'Jorginho':'Jorge Luiz Frello Filho', # Chelsea
                   'Jota':'José Ignacio Peleteiro Romallo',
                   'Kepa':'Kepa Arrizabalaga',
                   'Kiko Femenía':'Francisco Femenía Far',
                   'Lucas Moura':'Lucas Rodrigues Moura da Silva',
                   'Pedro': 'Pedro Rodríguez Ledesma', # Chelsea
                   'Raphinha':'Raphael Dias Belloli',
                   'Ricardo Pereira':'Ricardo Domingos Barbosa Pereira',
                   'Rodri':'Rodrigo Hernandez',
                   'Rúben Dias':'Rúben Santos Gato Alves Dias',
                   'Rúben Vinagre':'Rúben Gonçalo Silva Nascimento Vinagre',
                   'Semi Ajayi':'Oluwasemilogo Adesewo Ibidapo Ajayi',
                   'Trézéguet':'Mahmoud Ahmed Ibrahim Hassan', # Aston Villa
                   'Wesley':'Wesley Moraes',
                   'Willian':'Willian Borges Da Silva',
                   }
    understat_no_similar['player_name'] = understat_no_similar['player_name'].map(name_mapper)
    manual_merge = pd.merge(fpl_no_similar, understat_no_similar, left_on=['player_name', 'kickoff_time'],
                            right_on=['player_name', 'date'], how=join) # Merge using player name and date of game
    return manual_merge
def final_merge_understat(exact_merge, similar_merge, manual_merge, understat):
    """Stack the three matching passes into the final understat dataframe.

    Args:
        exact_merge: rows matched exactly on name + date.
        similar_merge: rows matched via the similarity mapping.
        manual_merge: rows matched via the hand-curated renames.
        understat: the original understat dataframe (kept in the signature
            for backward compatibility; not used here).

    Returns:
        The concatenation of the three merged dataframes.
    """
    merged_passes = [exact_merge, similar_merge, manual_merge]
    understat_final = pd.concat(merged_passes)
    return understat_final
def join_data(fpl, understat, season):
    """Match understat rows onto fpl rows in three passes.

    Pass 1: exact player-name + date merge. Pass 2: best similarity match,
    with per-season manual vetting. Pass 3: hand-curated renames for the
    remaining players.

    Args:
        fpl: fpl gameweek dataframe (returned unchanged).
        understat: understat dataframe.
        season: season string, e.g. '2021-22'.

    Returns:
        (fpl, understat_final)
    """
    exact_merge = exact_matches(understat, fpl)  # Data merged on player name and match date
    fpl_not_matched, understat_not_matched = remove_matched_names(fpl, understat, exact_merge)  # Those names that did not match previously
    similarity_matched_df = get_matching_names(understat_not_matched, fpl_not_matched)
    understat_no_similar, understat_similar, fpl_similar, fpl_no_similar = map_similar_names(similarity_matched_df, understat_not_matched, fpl_not_matched, season)  # Note: Manual investigation
    similar_merge = pd.merge(fpl_similar, understat_similar, left_on=['player_name', 'kickoff_time'], right_on=['player_name', 'date'])
    # The former extra get_matching_names(understat_no_similar, fpl_no_similar)
    # call here was unused (result discarded) and has been removed -- it only
    # repeated an O(n*m) difflib scoring pass.
    manual_merge = final_rename(understat_no_similar, fpl_no_similar)
    understat_final = final_merge_understat(exact_merge, similar_merge, manual_merge, understat)
    return fpl, understat_final
def main(season):
    """Build and write the merged training CSVs for one season.

    Paths are hard-coded to this developer's Windows machine; outputs
    'fpl.csv' and 'understat_merged.csv' under the season's training dir.
    """
    fpl_path = f'C://Users//jd-vz//Desktop//Code//data//{season}//gws//'
    understat_path = f'C://Users//jd-vz//Desktop//Code//data//{season}//understat//'
    data_path = f'C://Users//jd-vz//Desktop//Code//data//{season}//'
    training_path = f'C://Users//jd-vz//Desktop//Code//data//{season}//training//'
    understat, fpl = merge_fixtures(fpl_path, understat_path, data_path)
    fpl, understat = join_data(fpl, understat, season)
    fpl.to_csv(training_path + 'fpl.csv', index = False)
    understat.to_csv(training_path + 'understat_merged.csv', index = False)
if __name__ == "__main__":
main(season='2021-22') # Successful execution
# main(season='2020-21') # Successful execution
# main(season='2019-20') # Successful execution
print('Success!')
# %%
|
from eth_utils import ValidationError
import pytest
from eth2.beacon.constants import FAR_FUTURE_EPOCH, GENESIS_EPOCH
from eth2.beacon.helpers import compute_start_slot_at_epoch
from eth2.beacon.state_machines.forks.serenity.block_validation import (
_validate_eligible_exit_epoch,
_validate_validator_has_not_exited,
_validate_validator_minimum_lifespan,
_validate_voluntary_exit_signature,
validate_voluntary_exit,
)
from eth2.beacon.tools.builder.validator import create_mock_voluntary_exit
@pytest.mark.parametrize(
    (
        "validator_count",
        "slots_per_epoch",
        "target_committee_size",
        "shard_committee_period",
    ),
    [(40, 2, 2, 16)],
)
def test_validate_voluntary_exit(
    genesis_state, keymap, slots_per_epoch, shard_committee_period, config
):
    """Happy path: a freshly built voluntary exit validates without raising."""
    # Advance the chain shard_committee_period epochs past genesis so the
    # validator satisfies the minimum-lifespan requirement.
    state = genesis_state.set(
        "slot",
        compute_start_slot_at_epoch(
            GENESIS_EPOCH + shard_committee_period, slots_per_epoch
        ),
    )
    validator_index = 0
    valid_voluntary_exit = create_mock_voluntary_exit(
        state, config, keymap, validator_index
    )
    # Must not raise ValidationError.
    validate_voluntary_exit(
        state, valid_voluntary_exit, slots_per_epoch, shard_committee_period
    )
@pytest.mark.parametrize(
    ("validator_count", "slots_per_epoch", "target_committee_size"), [(40, 2, 2)]
)
@pytest.mark.parametrize(
    ("validator_exit_epoch", "success"),
    # FAR_FUTURE_EPOCH means "has not exited"; any smaller epoch means exited.
    [(FAR_FUTURE_EPOCH, True), (FAR_FUTURE_EPOCH - 1, False)],
)
def test_validate_validator_has_not_exited(
    genesis_state, validator_exit_epoch, success
):
    """A validator with a concrete exit_epoch must be rejected."""
    state = genesis_state
    validator_index = 0
    validator = state.validators[validator_index].set(
        "exit_epoch", validator_exit_epoch
    )
    if success:
        _validate_validator_has_not_exited(validator)
    else:
        with pytest.raises(ValidationError):
            _validate_validator_has_not_exited(validator)
@pytest.mark.parametrize(
    ("validator_count", "slots_per_epoch", "target_committee_size"), [(40, 2, 2)]
)
@pytest.mark.parametrize(
    ("max_seed_lookahead", "current_epoch", "voluntary_exit_epoch", "success"),
    # An exit epoch in the future (8 + 1 > 8) is not yet eligible.
    [(4, 8, 8, True), (4, 8, 8 + 1, False)],
)
def test_validate_eligible_exit_epoch(
    genesis_state,
    keymap,
    current_epoch,
    voluntary_exit_epoch,
    slots_per_epoch,
    config,
    success,
):
    """The exit's epoch must not be later than the state's current epoch."""
    state = genesis_state.set(
        "slot", compute_start_slot_at_epoch(current_epoch, slots_per_epoch)
    )
    validator_index = 0
    signed_voluntary_exit = create_mock_voluntary_exit(
        state, config, keymap, validator_index, exit_epoch=voluntary_exit_epoch
    )
    # The helper under test takes the unsigned message.
    voluntary_exit = signed_voluntary_exit.message
    if success:
        _validate_eligible_exit_epoch(
            voluntary_exit.epoch, state.current_epoch(slots_per_epoch)
        )
    else:
        with pytest.raises(ValidationError):
            _validate_eligible_exit_epoch(
                voluntary_exit.epoch, state.current_epoch(slots_per_epoch)
            )
@pytest.mark.parametrize(
    ("current_epoch", "shard_committee_period", "activation_epoch", "success"),
    # Activation must be at least shard_committee_period epochs in the past.
    [(16, 4, 16 - 4, True), (16, 4, 16 - 4 + 1, False)],
)
def test_validate_validator_minimum_lifespan(
    genesis_state,
    keymap,
    current_epoch,
    activation_epoch,
    slots_per_epoch,
    shard_committee_period,
    success,
):
    """A validator activated too recently may not voluntarily exit."""
    state = genesis_state.set(
        "slot", compute_start_slot_at_epoch(current_epoch, slots_per_epoch)
    )
    validator_index = 0
    validator = state.validators[validator_index].set(
        "activation_epoch", activation_epoch
    )
    # Write the modified validator back into the (immutable) state.
    state = state.transform(["validators", validator_index], validator)
    if success:
        _validate_validator_minimum_lifespan(
            validator, state.current_epoch(slots_per_epoch), shard_committee_period
        )
    else:
        with pytest.raises(ValidationError):
            _validate_validator_minimum_lifespan(
                validator, state.current_epoch(slots_per_epoch), shard_committee_period
            )
@pytest.mark.parametrize(
    (
        "validator_count",
        "slots_per_epoch",
        "target_committee_size",
        "max_seed_lookahead",
    ),
    [(40, 2, 2, 2)],
)
@pytest.mark.parametrize(("success",), [(True,), (False,)])
def test_validate_voluntary_exit_signature(genesis_state, keymap, config, success):
    """A correctly signed exit validates; a corrupted signature raises."""
    slots_per_epoch = config.SLOTS_PER_EPOCH
    state = genesis_state
    validator_index = 0
    voluntary_exit = create_mock_voluntary_exit(state, config, keymap, validator_index)
    validator = state.validators[validator_index]
    if success:
        _validate_voluntary_exit_signature(
            state, voluntary_exit, validator, slots_per_epoch
        )
    else:
        # Use wrong signature
        voluntary_exit = voluntary_exit.set(
            "signature", b"\x12" * 96
        )  # wrong signature
        with pytest.raises(ValidationError):
            _validate_voluntary_exit_signature(
                state, voluntary_exit, validator, slots_per_epoch
            )
|
# Generated by Django 3.1.5 on 2021-01-21 14:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace the UserBalance model with a standalone ShopUser and repoint Cart.user at it."""
    # NOTE(review): auto-generated migration -- change the schema via new
    # migrations rather than editing this one.
    dependencies = [
        ('shop', '0002_auto_20210121_1726'),
    ]
    operations = [
        migrations.CreateModel(
            name='ShopUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=50)),
                # Stored as a plain CharField -- presumably unhashed; verify against the app's auth handling.
                ('password', models.CharField(max_length=50)),
                ('balance', models.FloatField(default=0.0)),
            ],
        ),
        migrations.DeleteModel(
            name='UserBalance',
        ),
        migrations.AlterField(
            model_name='cart',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.shopuser'),
        ),
    ]
|
#tkinter implementaion
from tkinter import *
from tkinter import messagebox
import pyqrcode
# Module-level Tk root window used by the callbacks below.
ws = Tk()
ws.title("PythonGuides")
ws.config(bg='#F25252')  # window background color
def generate_QR():
    """Render the text currently in ``user_input`` as a QR code bitmap.

    Side effects: sets the module globals ``qr`` (pyqrcode object) and
    ``img`` (Tk BitmapImage), then refreshes the on-screen label via
    display_code(). Shows a warning dialog when the entry is empty.
    """
    if len(user_input.get()) != 0:
        global qr, img
        qr = pyqrcode.create(user_input.get())
        # scale=8 controls the rendered module size of the XBM bitmap.
        img = BitmapImage(data=qr.xbm(scale=8))
        # Refresh only after a code was generated. The original called this
        # unconditionally inside a bare `except: pass`, which also swallowed
        # genuine Tk errors on the success path.
        display_code()
    else:
        messagebox.showwarning('warning', 'All Fields are Required!')
def display_code():
    """Show the generated QR bitmap and caption it with the input text.

    Relies on module globals ``img`` (set by generate_QR) and the widgets
    ``img_lbl``, ``output`` and ``user_input`` defined elsewhere in the
    script (not visible in this chunk).
    """
    img_lbl.config(image = img)
    output.config(text="QR code of " + user_input.get())
|
# encoding: utf-8
#!/usr/bin/python
import bluetooth
import os
import time
from threading import Thread # @UnusedWildImport
os.system("echo "+str(os.getpid())+">>.tmp")
address_list = ["00:14:35:00:17:DC", "11:11:11:11:11:11"]
target_address = None
sendComanndNow = False
port = 3
def sendComannd(command):
"""Método que faz a modificação do comando
:Param command: O novo comando
:Type command: String
"""
try:
nearby_devices = bluetooth.discover_devices()
if (len(nearby_devices) > 0):
for bdaddr in nearby_devices:
if bdaddr in address_list:
print "ip encontrado"
target_address = bdaddr
break
if target_address is not None:
print "Dispositivo encontrado com o endereço ", target_address
sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM )
sock.connect((target_address, port))
print("conexao aceita")
sock.send(command)
sock.close()
return True
else:
print "Nenhum dispositivo encontrado."
except Exception as e:
print "except",e
return False
return False
def bluetoothHasConnected():
    """Check whether a local bluetooth adapter is available.

    Shells out to `hcitool dev`; when at least one adapter exists the
    output contains more than the bare "Devices:" header.

    :returns: True when an adapter is listed
    :rtype: bool
    """
    listing = os.popen("hcitool dev").read()
    adapters = listing.replace("Devices:\n", "").strip()
    return adapters != ""
def sendSignalBluetooth(nameRoom):
    """Send *nameRoom* as a command over bluetooth in a background thread.

    :param nameRoom: name of the room, used as the command payload
    :type nameRoom: str
    """
    # Fire-and-forget: the thread handle is not kept, so errors surface
    # only via sendComannd's own prints.
    sendSignal = Thread(target = sendComannd, args = (nameRoom, ))
    sendSignal.start()
|
# https://leetcode.com/problems/minimum-swaps-to-make-strings-equal/
class Solution:
    """LeetCode 1247: minimum swaps to make two x/y strings equal."""

    def minimumSwap(self, s1: str, s2: str) -> int:
        """Return the minimum number of character swaps, or -1 when impossible.

        Only positions where the strings differ matter: an ('x','y') pair
        and a ('y','x') pair each cost one swap when paired with a pair of
        the same kind, and two swaps when one of each kind is left over.
        """
        if len(s1) != len(s2):
            return -1
        # Equalization is impossible when the total number of 'x's is odd.
        if (s1.count('x') + s2.count('x')) % 2 == 1:
            return -1
        mismatched = [(a, b) for a, b in zip(s1, s2) if a != b]
        # Mirror the original classification: a mismatch counts as "xy"
        # when s1 holds 'x', otherwise as "yx".
        xy_pairs = sum(1 for a, _ in mismatched if a == 'x')
        yx_pairs = len(mismatched) - xy_pairs
        swaps = xy_pairs // 2 + yx_pairs // 2
        if xy_pairs % 2 == 1 and yx_pairs % 2 == 1:
            # One leftover of each kind needs two swaps to resolve.
            swaps += 2
        return swaps
def main():
    """Exercise Solution.minimumSwap against the known LeetCode examples (prints True per passing case)."""
    sol = Solution()
    print(sol.minimumSwap('xxyyxyxyxx', 'xyyxyxxxyx') == 4)
    print(sol.minimumSwap('xx', 'xy') == -1)
    print(sol.minimumSwap('xy', 'yx') == 2)
    print(sol.minimumSwap('xx', 'yy') == 1)
if __name__ == '__main__':
    main()
|
import os
import sys
import cPickle
from string import Template
#
# read Tina's b efficiency variations for msugra on heplx* and assemble
# them in one pickle file
#
#
# get effect of scale factor (defined as SF1 / SF0)
# if necessary : sum over b-tag bins
#
def getFactor(dic, signal, btags):
    """Return the scale-factor effect SF1/SF0 for *signal*.

    Normalisations are summed over all lepton flavours in *dic* and over
    the given b-tag bins. Returns None when the SF0 normalisation is
    (numerically) zero, i.e. the bin is empty.
    """
    total_sf0 = 0.
    total_sf1 = 0.
    for lepton in dic:
        entry = dic[lepton][signal]
        for btag in btags:
            total_sf0 += entry['Norm_sf0'][btag][0]
            total_sf1 += entry['Norm_sf1'][btag][0]
    if total_sf0 < 0.000001:
        return None
    return total_sf1 / total_sf0
#
# get variation on SF1 (up-down)/(2*nominal)
# if necessary : sum over b-tag bins
#
def getVar (dic,signal,btags,mode):
    """Return the relative systematic variation (up-down)/(2*norm) on SF1.

    Sums are taken over all lepton flavours in *dic* and over the given
    b-tag bins; *mode* selects the 'b' or 'l' (light-flavour) variation.
    For a single b-tag bin the result is cross-checked against the stored
    per-bin delta; on mismatch a message is printed (Python 2 print
    statement) and None is returned.
    """
    norm_sf1 = 0.
    up = 0.
    down = 0.
    strup = 'Up_'+mode+'_sf1'
    strdown = 'Down_'+mode+'_sf1'
    s1 = 0.  # independent recomputation used for the single-bin cross-check
    for lep in dic:
        for btag in btags:
            norm_sf1 += dic[lep][signal]['Norm_sf1'][btag][0]
            up += dic[lep][signal][strup][btag][0]
            down += dic[lep][signal][strdown][btag][0]
            if mode == 'b':
                s1 += dic[lep][signal]['Norm_sf1'][btag][0]* \
                    dic[lep][signal]['BDelta/(2Norm_sf1)'][btags[0]][0]
            else:
                s1 += dic[lep][signal]['Norm_sf1'][btag][0]* \
                    dic[lep][signal]['LDelta/(2Norm_sf1)'][btags[0]][0]
    var = (up-down)/2./norm_sf1
    if len(btags) == 1:
        # Cross-check only meaningful for a single bin.
        if abs(var-s1/norm_sf1) > 0.0001:
            print "Mismatch in variations: ",btags[0],mode,var,s1/norm_sf1
            return None
    return var
#
# Template for directory / file name
#
# Template for the per-lepton/HT/MET input file path (filled in below).
template = Template('/data/trauner/Counts/Counts_MSUGRA_${lepton}_BGEff_newMCEff_Eff24Feb/${lepton}_MSUGRA_${ht}_ht_${met}_barepfmet_allJets_withEffgt500_eff24Feb_absoluteErr_BGEff.py')
#
# definition of flavours, HT and MET regions
#
leptons = [ 'Muon', 'Electron' ]
hts = [ 750, 1000 ]
mets = [ 250, 350, 450, 550 ]
#
# translate btag bin labels: output label -> list of input bin keys to sum
#
btagLabels = { 'b0' : [ '0' ], 'b1' : [ '1' ], 'b1p' : [ '1' , '>=2' ], 'b2' : [ '>=2' ] }
#
# create dictionary and loop over flavours, HT and MET cuts
#
# Assemble effDict[btag][ht][met][msugra_point] = {sfFactor, relVarB, relVarL}
# by looping over all flavour / HT / MET input files (Python 2 script:
# execfile, iteritems, print statements).
effDict = {}
for ht in hts:
    # if not ht in effDict: effDict[ht] = {}
    for met in mets:
        # if not met in effDict[ht]: effDict[ht][met] = {}
        inDict = { 'Muon' : {}, 'Electron' : {} }
        for lepton in inDict:
            ifname = template.substitute(lepton=lepton,ht=str(ht),met=str(met))
            if not os.path.exists(ifname):
                print "No such file ",ifname
            print ifname
            # execute input file: defines the module-level name `nbtags`
            execfile(ifname)
            labels = nbtags.keys()
            assert(len(labels)==1)
            inDict[lepton] = nbtags[labels[0]]
        # unused label
        for label in nbtags:
            # msugra points
            for signal in nbtags[label]:
                # translate to standard string
                msugraString = signal.replace("signal_","msugra_")
                msugraString += "_10_0_1"
                # if not msugraString in effDict[ht][met]: effDict[ht][met][msugraString] = {}
                # btags = nbtags[label][signal]['originalMC'].keys()
                # print lep,ht,met,msugraString
                # btag bins (output / input notation)
                for btagOut, btagsIn in btagLabels.iteritems():
                    # scaling after application of SF
                    sfFactor = getFactor(inDict,signal,btagsIn)
                    # skip empty bins
                    if sfFactor == None: continue
                    # Create nested levels on demand.
                    if not btagOut in effDict: effDict[btagOut] = {}
                    if not ht in effDict[btagOut]: effDict[btagOut][ht] = {}
                    if not met in effDict[btagOut][ht]: effDict[btagOut][ht][met] = {}
                    if not msugraString in effDict[btagOut][ht][met]:
                        effDict[btagOut][ht][met][msugraString] = {}
                    # add correction factor and variations to dictionary
                    # if not btagOut in effDict[ht][met][msugraString]:
                    #   effDict[btagOut][ht][met][msugraString] = {}
                    effDict[btagOut][ht][met][msugraString]['sfFactor'] = sfFactor
                    effDict[btagOut][ht][met][msugraString]['relVarB'] = getVar(inDict,signal,btagsIn,'b')
                    effDict[btagOut][ht][met][msugraString]['relVarL'] = getVar(inDict,signal,btagsIn,'l')
# print effDict
# sys.exit(0)
#
# write dictionary
#
fout = open("msugraBeffSyst.pkl","wb")
cPickle.dump(effDict,fout)
fout.close()
|
from matplotlib import pyplot as plt
# Chinese font support
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']  # render CJK axis/title text correctly
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with a CJK font
x_values = [x for x in range(-100, 100)]
y_values = [x ** 2 for x in x_values]
# Pick a style; available names: plt.style.available
plt.style.use('ggplot')
fig, ax = plt.subplots()
# Color each point by its y value on a red colormap.
ax.scatter(x_values, y_values, c=y_values, cmap=plt.cm.Reds, s=10)
# Title and axis labels (the Chinese strings are intentional runtime text:
# "Squares" / "value" / "square of value").
ax.set_title("平方数", fontsize=18)
ax.set_xlabel("值", fontsize=12)
ax.set_ylabel("值的平方", fontsize=12)
# Tick-label size
ax.tick_params(axis='both', labelsize=12)
# Save before show() -- some backends clear the figure on show; TODO confirm
plt.savefig("scatter.png", bbox_inches='tight')
# Display the figure
plt.show()
|
"""pandemic URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import path, include
from . import views
from django.contrib.auth import views as auth_views
from rest_framework_nested import routers
from accounts import views as acc_views
from soap import urls as soap_urls
from game import urls as game_urls
from chat import views as chat_views
from game import views as game_views
from rest_framework.documentation import include_docs_urls
# Top-level REST router plus nested routers for chat messages and
# per-session game state.
# NOTE(review): `base_name` is the pre-DRF-3.9 spelling of `basename`;
# upgrading DRF will require renaming these keyword arguments.
api = routers.SimpleRouter()
api.register(r'chat', chat_views.ChatViewSet, base_name='chat')
api.register(r'cities', game_views.CityViewSet, base_name='city')
api.register(r'pawn', game_views.PawnViewSet, base_name='pawn')
api.register(r'card', game_views.CardViewSet, base_name='card')
# /chat/<chat>/messages/
chat_router = routers.NestedSimpleRouter(api, r'chat', lookup='chat')
chat_router.register(r'messages', chat_views.MessageViewSet, base_name='chat-message')
api.register(r'session', game_views.SessionViewSet, base_name='session')
# /session/<session_hash>/... sub-resources for live game state.
session_router = routers.NestedSimpleRouter(api, r'session', lookup='session_hash')
session_router.register(r'session_state', game_views.SessionStateViewSet, base_name='session-state')
session_router.register(r'users', game_views.UserViewSet, base_name='session-users')
session_router.register(r'owner', game_views.OwnerViewSet, base_name='session-owner')
session_router.register(r'player_state', game_views.PlayerStateViewSet, base_name='player-state')
session_router.register(r'city_state', game_views.CityStateViewSet, base_name='city-state')
session_router.register(r'card_state', game_views.CardStateViewSet, base_name='card-state')
session_router.register(r'cure_state', game_views.CureStateViewSet, base_name='cure-state')
session_router.register(r'disease_state', game_views.DiseaseStateViewSet, base_name='disease-state')
api.register('districts', acc_views.DistrictViewSet)
urlpatterns = [
    path('admin/', admin.site.urls),
    # USER RELATED
    path('login/', auth_views.LoginView.as_view(redirect_authenticated_user=True), name='login'),
    path('logout/', auth_views.LogoutView.as_view(next_page="/"), name='logout'),
    path('reset-password/', auth_views.PasswordResetView.as_view(), name='password_reset'),
    path('reset-sent/', auth_views.PasswordResetDoneView.as_view(), name='password_reset_done'),
    path('reset-password-confirm/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
    path('password-reset-complete/', auth_views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
    path('register/', acc_views.register, name='register'),
    path('activate/<uidb64>/<token>/', acc_views.registration_activate, name='registration_activate'),
    path('user/<username>/', acc_views.profile, name='profile'),
    path('edit-profile/', acc_views.edit_profile, name='edit_profile'),
    path('change-password/', acc_views.change_password, name='change_password'),
    # END OF USER RELATED
    path('how-to/', views.howto, name="howtoplay"),
    path('about-us/', views.aboutus, name="aboutus"),
    # REST API
    path('api/', include(api.urls)),
    path('api/', include(chat_router.urls)),
    path('api/', include(session_router.urls)),
    path('api-docs/', include_docs_urls(title="Pandemic REST API")),
    # GAME
    path("game/", include(game_urls)),
    # SOAP
    path("soap/", include(soap_urls)),
    # Root
    path('', views.frontpage, name='frontpage'),
]
|
from __future__ import absolute_import
from .classification import ResNet20V2ASKC, ResNet50_v1bASKC, ResNextASKC, ResNet110V2ASKC, CIFARResNextASKC
|
from _20180425 import *
def output_20180425():
    """Run the 2018-04-25 exercise outputs; all demos except the tuple one are disabled."""
    # except_sentence.except_output()
    # about_dim.dim_output()
    # sort_alg.sort_output()
    # q_fibonacci.q_fibonacci_output()
    # about_set.set_output()
    # dictionary.dictionary_output()
    # NOTE(review): `tuple` here is presumably a module from the star-import
    # of _20180425, shadowing the builtin -- verify.
    tuple.tuple_output()
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for model."""
import lingvo.compat as tf
from lingvo.core import py_utils
def ComputeSplits(batch_size, num_splits):
  """Creates a tensor of size num_splits of number of values per split.

  Assigns each split floor(batch_size/num_splits) and round-robins
  the remainder (if any) to each split.

  Example::

    batch_size: [5]
    num_splits: 3
    returns: [2, 2, 1]

  Args:
    batch_size: tensor of rank 0, size of tensor to be split
    num_splits: number of splits to split tensor into

  Returns:
    tensor of length num_splits containing sizes of each split
  """
  # Base size for every split: floor(batch_size / num_splits).
  # tf.math.floordiv replaces the deprecated tf.div, consistent with the
  # tf.math.floormod call below.
  values = tf.tile(
      tf.math.floordiv([batch_size], num_splits),
      tf.constant(
          [num_splits], dtype=tf.int32))
  # One extra element for each of the first (batch_size % num_splits) splits.
  mods = tf.tile(tf.constant([1]), tf.math.floormod([batch_size], num_splits))
  zeros = tf.tile(tf.constant([0]),
                  tf.subtract(tf.shape(values), tf.shape(mods)))
  mods = tf.concat([mods, zeros], 0)
  ret = tf.add(values, mods)
  # for some reason TF erases shape information if num_splits is 1
  if num_splits == 1:
    ret.set_shape([1])
  return ret
def SplitTensors(xs, num_splits):
  """Splits tensors in `xs` evenly into num_splits along the 1st dimension.

  Args:
    xs: A tuple of tensors. Each tensor's 1st dimension is the same size.
    num_splits: A python integer.

  Returns:
    A tuple of lists of tensors, num elements in the tuple = len(xs).
    i-th element in each list corresponds to i-th split of each tensor in xs
    along the first dimension of each tensor.
  """
  # assert first dim of all tensors in xs is equal
  batch_dims = [tf.shape(x)[0] for x in xs]
  all_batch_dims = tf.stack(batch_dims)
  # Runtime graph assertions: every tensor shares the same leading dim, and
  # that dim is at least num_splits so no split comes out empty.
  all_batch_dims = py_utils.with_dependencies([
      py_utils.assert_equal(
          all_batch_dims,
          tf.shape(xs[0])[0],
          message='first dim of tensors in xs must match'),
      py_utils.assert_greater_equal(
          tf.shape(xs[0])[0],
          num_splits,
          message='first dim of tensors in xs must be greater than num_splits')
  ], all_batch_dims)
  splits = ComputeSplits(tf.shape(xs[0])[0], num_splits)
  # add the above assertion into the compute graph
  splits = py_utils.with_dependencies([all_batch_dims], splits)
  split_xs = [tf.split(axis=0, num_or_size_splits=splits, value=x) for x in xs]
  return split_xs
def SplitDictOfTensors(t_dict, num_splits):
  """Splits tensors in `t_dict` evenly into `num_splits` along the 1st dimension.

  Args:
    t_dict: A dictionary of tensors. Each tensor's 1st dimension is the same
      size.
    num_splits: A python integer.

  Returns:
    A list of dictionaries of tensors, num elements in the list = num_splits
    i-th dictionary in the list corresponds to i-th split of each tensor
    along the first dimension of each tensor for each key in the original dict.
  """
  # Iterate keys in sorted order so splits line up deterministically.
  sorted_keys = sorted(t_dict)
  sorted_values = tuple(t_dict[key] for key in sorted_keys)
  per_key_splits = SplitTensors(sorted_values, num_splits)
  assert all(len(lst) == len(per_key_splits[0]) for lst in per_key_splits)
  # Transpose: one dict per split, mapping each key to its s-th piece.
  return [
      {key: pieces[s] for key, pieces in zip(sorted_keys, per_key_splits)}
      for s in range(num_splits)
  ]
|
# Software Name: MOON
# Version: 5.4
# SPDX-FileCopyrightText: Copyright (c) 2018-2020 Orange and its contributors
# SPDX-License-Identifier: Apache-2.0
# This software is distributed under the 'Apache License 2.0',
# the text of which is available at 'http://www.apache.org/licenses/LICENSE-2.0.txt'
# or see the "LICENSE" file for more details.
"""
Plugin to request OpenStack infrastructure:
- Nova
"""
from moon_manager.plugins.moon_openstack_plugin import *
LOGGER = logging.getLogger("moon.manager.plugins.moon_nova_plugin")
# Plugin category consumed by the moon_manager plugin loader.
PLUGIN_TYPE = "information"
# Identity stand-in for a gettext-style translation function.
_ = str
# Nova exceptions
class NovaError(MoonError):
    """Base error for failures when talking to the Nova service."""
    description = _("There is an error connecting to Nova.")
    code = 400
    title = 'Nova error'
    logger = "ERROR"
class NovaProjectError(NovaError):
    """Raised when projects cannot be retrieved from Nova."""
    description = _("There is an error retrieving projects from the Nova service.")
    code = 400
    title = 'Nova project error'
    logger = "ERROR"
class NovaUserError(NovaError):
    """Raised when users cannot be retrieved from Nova."""
    description = _("There is an error retrieving users from the Nova service.")
    code = 400
    title = 'Nova user error'
    logger = "ERROR"
class NovaUserConflict(NovaUserError):
    """Raised when creating a user whose name is already taken."""
    # Fixed grammar in the user-facing message ("exist" -> "exists").
    # NOTE(review): HTTP 409 Conflict would be the conventional status for a
    # duplicate resource; kept at 400 to preserve existing API behavior.
    description = _("A user with that name already exists.")
    code = 400
    title = 'Nova user error'
    logger = "ERROR"
class NovaConnector(OpenStackConnector):
    """Read-only connector exposing Nova servers as information items."""
    def get_items(self, item_id=None, **kwargs):
        # item_id is accepted for interface compatibility but unused:
        # the full server list is always returned.
        return self._get(endpoint="/servers", _exception=NovaProjectError)
    def add_item(self, object_id=None, **kwargs):
        raise NotImplementedError() # pragma: no cover
    def update_item(self, item_id, **kwargs):
        raise NotImplementedError() # pragma: no cover
    def delete_item(self, item_id, **kwargs):
        raise NotImplementedError() # pragma: no cover
class Connector(NovaConnector):
    """Entry-point class name expected by the plugin loader."""
    pass
|
"""Utilities for loading raw data"""
import struct
import numpy as np
from PIL import Image, ImageEnhance
# Specify the path to the ETL character database files
ETL_PATH = 'ETLC'
def read_record(database, f):
    """Load image from ETL binary
    Args:
        database (string): 'ETL8B2' or 'ETL1C'. Read the ETL documentation to add support
            for other datasets.
        f (opened file): binary file
    Returns:
        img_out (PIL image): unpacked record fields with the image appended last
    """
    # ETL glyphs are 64x63 pixels.
    W, H = 64, 63
    if database == 'ETL8B2':
        # 512-byte record: 2 uint16, 4-byte code, 504 bytes of 1-bit image data.
        s = f.read(512)
        r = struct.unpack('>2H4s504s', s)
        i1 = Image.frombytes('1', (W, H), r[3], 'raw')
        img_out = r + (i1,)
        return img_out
    elif database == 'ETL1C':
        # 2052-byte record; field 18 holds 4-bit grayscale image data.
        s = f.read(2052)
        r = struct.unpack('>H2sH6BI4H4B4x2016s4x', s)
        iF = Image.frombytes('F', (W, H), r[18], 'bit', 4)
        iP = iF.convert('P')
        # NOTE(review): writes 'iP.png' into the CWD on every record — looks
        # like leftover debug output; confirm before removing.
        iP.save('iP.png')
        enhancer = ImageEnhance.Brightness(iP)
        iE = enhancer.enhance(40)
        # Pad the image by scaling up, then crop the center back to (W, H).
        size_add = 12
        iE = iE.resize((W + size_add, H + size_add))
        iE = iE.crop((size_add / 2,
                      size_add / 2,
                      W + size_add / 2,
                      H + size_add / 2))
        img_out = r + (iE,)
        return img_out
def get_ETL_data(dataset, categories, writers_per_char,
                 database='ETL8B2',
                 starting_writer=None,
                 vectorize=False,
                 resize=None,
                 img_format=False,
                 get_scripts=False,
                 ):
    """Load Japanese characters into a list of PIL images or numpy arrays.
    Args:
        dataset (string): the dataset index for the corresponding database. This will be the
            index that shows up in the name for the binary file.
        categories (iterable): the characters to return
        writers_per_char (int): the number of different writers to return, for each character.
        database (str, optional): database name
        starting_writer (int, optional): specify the index for a starting writer
        vectorize (bool, optional): True will return as a flattened numpy array
        resize (tuple, optional): (W,H) tuple to specify the output image dimensions
        img_format (bool, optional): True will return as PIL image
        get_scripts (bool, optional): True will also return a label for the type of Japanese script
    Returns:
        output (X, Y [, scriptTypes]): tuple containing the data, labels, and the script type if get_scripts=True
    """
    W, H = 64, 64
    # Blank 1-bit canvas each record is pasted onto (records are 64x63).
    new_img = Image.new('1', (W, H))
    if database == 'ETL8B2':
        name_base = ETL_PATH + '/ETL8B/ETL8B2C'
    elif database == 'ETL1C':
        name_base = ETL_PATH + '/ETL1/ETL1C_'
    # dataset may be a string or an int index; fall back to str() for ints.
    try:
        filename = name_base + dataset
    except:
        filename = name_base + str(dataset)
    X = []
    Y = []
    scriptTypes = []
    # Accept a single category as well as an iterable of categories.
    try:
        iter(categories)
    except:
        categories = [categories]
    for id_category in categories:
        with open(filename, 'rb') as f:
            # Seek past the header record to this category's first record;
            # record counts/sizes differ per database (160x512 vs 1411x2052).
            if database == 'ETL8B2':
                f.seek((id_category * 160 + 1) * 512)
            elif database == 'ETL1C':
                f.seek((id_category * 1411 + 1) * 2052)
            for i in range(writers_per_char):
                try:
                    # skip records
                    if starting_writer:
                        for j in range(starting_writer):
                            read_record(database, f)
                    # start outputting records
                    r = read_record(database, f)
                    new_img.paste(r[-1], (0, 0))
                    # Invert pixels (character becomes foreground).
                    iI = Image.eval(new_img, lambda x: not x)
                    # resize images
                    if resize:
                        # new_img.thumbnail(resize, Image.ANTIALIAS)
                        iI.thumbnail(resize)
                        shapes = resize[0], resize[1]
                    else:
                        shapes = W, H
                    # output formats
                    if img_format:
                        outData = iI
                    elif vectorize:
                        outData = np.asarray(iI.getdata()).reshape(
                            shapes[0] * shapes[1])
                    else:
                        outData = np.asarray(iI.getdata()).reshape(
                            shapes[0], shapes[1])
                    X.append(outData)
                    if database == 'ETL8B2':
                        Y.append(r[1])
                        # Category index 75 splits hiragana (0) from kanji (2).
                        if id_category < 75:
                            scriptTypes.append(0)
                        else:
                            scriptTypes.append(2)
                    elif database == 'ETL1C':
                        Y.append(r[3])
                        scriptTypes.append(1)
                # Bare except is a deliberate best-effort: stop reading this
                # category on short/ill-formed records (end of data).
                except:
                    break
    output = []
    if img_format:
        output += [X]
        output += [Y]
    else:
        X, Y = np.asarray(X, dtype=np.int32), np.asarray(Y, dtype=np.int32)
        output += [X]
        output += [Y]
    if get_scripts:
        output += [scriptTypes]
    return output
|
# Generated by Django 3.0.8 on 2020-07-24 18:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Customer, Invoice, Product and InvoiceItem tables."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('email', models.CharField(max_length=255)),
                ('phone', models.CharField(max_length=255)),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Invoice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('purchase_date', models.DateTimeField(auto_now_add=True)),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='customers', to='api.Customer')),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField()),
                ('image_link', models.TextField()),
                ('price', models.FloatField()),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        # Join table linking invoices to purchased products.
        migrations.CreateModel(
            name='InvoiceItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField()),
                ('amount_paid', models.FloatField()),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
                ('invoice', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='invoices', to='api.Invoice')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='products', to='api.Product')),
            ],
        ),
    ]
|
#!/usr/bin/env python3
# Animation: https://imgur.com/a/nyVEK2j
DAY_NUM = 5
DAY_DESC = 'Day 5: Supply Stacks'
class TrackableLetter:
    """A crate letter whose identity survives changes to its displayed value.

    `letter_id` uniquely identifies the crate so its shown character can be
    swapped later (e.g. for animations) while still tracking the same object.
    """

    def __init__(self, value, letter_id):
        self.value, self.letter_id = value, letter_id

    def __str__(self):
        return str(self.value)
def calc(log, values, mode, draw=False, info={}, get_info=False, replace_pattern=None):
    """Simulate the crate moves; returns the top-of-stack letters (or info).

    mode 1 moves crates one at a time, mode 2 moves whole groups.
    NOTE(review): the mutable default info={} is shared across calls; all call
    sites here pass info explicitly, so it is harmless in practice.
    """
    if draw:
        from grid import Grid
        animated = Grid()
    # Work on a copy so the caller's list is not consumed.
    values = values[:]
    letters = []
    stacks = []
    # Parse the crate drawing until the "1 2 3 ..." column-label row.
    while True:
        row = values.pop(0)
        if row.strip().startswith("1"):
            break
        for i, val in enumerate(row):
            if 'A' <= val <= 'Z':
                # Crate letters sit every 4 columns, starting at column 1.
                i = int((i - 1) / 4)
                while len(stacks) <= i:
                    stacks.append([])
                val = TrackableLetter(val, len(letters))
                letters.append(val)
                stacks[i].append(val)
                if replace_pattern is not None:
                    val.value = replace_pattern[val.letter_id]
    # The drawing lists the top crate first; reverse so index -1 is the top.
    for i in range(len(stacks)):
        stacks[i] = stacks[i][::-1]
    def save_grid(target=None):
        # Dump the current stacks into the animation grid and/or a flat list
        # (column-major, top row first; None marks empty cells).
        if draw or target is not None:
            for x in range(len(stacks)):
                for y in range(info['max_height'] - 1, -1, -1):
                    if draw:
                        animated[(x, info['max_height']-y)] = " " if y >= len(stacks[x]) else str(stacks[x][y])
                    if target is not None:
                        target.append(None if y >= len(stacks[x]) else stacks[x][y])
            if draw:
                animated.save_frame()
    import re
    if draw:
        # Only the last two frames of the previous dry run are animated slowly.
        slow_at = info['frames'] - 2
        info['frames'] = 0
        get_info = True
    for row in values:
        m = re.search("move ([0-9]+) from ([0-9]+) to ([0-9]+)", row)
        if m is not None:
            steps = list(map(int, m.groups()))
            if mode == 1:
                # Part 1: move crates one at a time (reverses their order).
                for _ in range(steps[0]):
                    if draw:
                        if info['frames'] >= slow_at:
                            save_grid()
                    stacks[steps[2]-1].append(stacks[steps[1]-1].pop())
                    if get_info:
                        info['max_height'] = max(info.get('max_height', 0), max(len(x) for x in stacks))
                        save_grid()
                    if get_info:
                        info['frames'] = info.get('frames', 0) + 1
            else:
                # Part 2: move the whole group at once (order preserved).
                temp = []
                for _ in range(steps[0]):
                    temp.append(stacks[steps[1]-1].pop())
                stacks[steps[2]-1] += temp[::-1]
    # The answer is the letter on top of each stack, left to right.
    ret = ""
    for cur in stacks:
        ret += str(cur[-1])
    if get_info:
        info['tracked'] = []
        save_grid(target=info['tracked'])
    if draw:
        animated.draw_frames(cell_size=(15, 15))
    if get_info:
        return info
    return ret
def other_draw_nggyu(describe, values):
    """Render the animation with crate letters replaced so the final frame
    spells out the Rick Astley lyrics."""
    if describe:
        return "Draw this, in the style of Rick Astley"
    from dummylog import DummyLog
    import animate
    animate.prep()
    # Dry run: collect frame count, max stack height and final crate layout.
    info = calc(DummyLog(), values, 1, get_info=True)
    words = "NEVERGONNAGIVEYOUUPNEVERGONNALETYOUDOWNNEVERGONNARUNAROUNDANDDESERTYOUNEVERGONNAMAKEYOUCRYNEVERGONNASAYGOODBYENEVERGONNATELLALIEANDHURTYOU"
    # Map each crate (by identity) to the lyric letter at its final position.
    pattern = {}
    for cur in info['tracked']:
        if cur is not None:
            pattern[cur.letter_id] = words[0]
            words = words[1:]
    calc(DummyLog(), values, 1, draw=True, info=info, replace_pattern=pattern)
    animate.create_mp4(DAY_NUM, rate=10, final_secs=5)
def other_draw(describe, values):
    """Render the crate moves as an animated mp4."""
    if describe:
        return "Draw this"
    from dummylog import DummyLog
    import animate
    animate.prep()
    # Dry run collects frame count and max stack height for the canvas size.
    info = calc(DummyLog(), values, 1, get_info=True)
    calc(DummyLog(), values, 1, draw=True, info=info)
    animate.create_mp4(DAY_NUM, rate=10, final_secs=5)
def test(log):
    """Verify both parts against the worked example from the puzzle text."""
    values = log.decode_values("""
        [D]
    [N] [C]
    [Z] [M] [P]
     1   2   3
    move 1 from 2 to 1
    move 3 from 1 to 3
    move 2 from 2 to 1
    move 1 from 1 to 2
    """)
    log.test(calc(log, values, 1), 'CMZ')
    log.test(calc(log, values, 2), 'MCD')
def run(log, values):
    """Solve both parts of the puzzle and log each answer."""
    for mode in (1, 2):
        log(calc(log, values, mode))
if __name__ == "__main__":
    import sys, os
    def find_input_file():
        # Look for the input beside the script and in common puzzle folders;
        # returns None when nothing matches.
        for fn in sys.argv[1:] + ["input.txt", f"day_{DAY_NUM:0d}_input.txt", f"day_{DAY_NUM:02d}_input.txt"]:
            for dn in [[], ["Puzzles"], ["..", "Puzzles"]]:
                cur = os.path.join(*(dn + [fn]))
                if os.path.isfile(cur): return cur
    fn = find_input_file()
    if fn is None: print("Unable to find input file!\nSpecify filename on command line"); exit(1)
    print(f"Using '{fn}' as input file:")
    # Strip only line endings: leading spaces are significant in the drawing.
    with open(fn) as f: values = [x.strip("\r\n") for x in f.readlines()]
    print(f"Running day {DAY_DESC}:")
    run(print, values)
|
import functools
import logging
import h5py
import numpy as np
import pandas as pd
import torch
from dcase_util.data import ProbabilityEncoder
from genericpath import exists
from scipy.signal import medfilt
from torch.utils.data import DataLoader, Dataset
from evaluation_measures import ConfusionMatrix, compute_metrics, compute_psds_from_operating_points, psds_score
def median_filt_1d(event_roll, filt_span=7):
    """Apply a median filter to an event roll.

    Args:
        event_roll: event activity array, shape [T] or [T, C].
        filt_span: odd median filter span; an int applies the same span to
            every class, a list gives one span per class (2-D input only).

    Returns:
        The filtered event roll; 2-D input is filtered column-wise in place.
    """
    assert isinstance(filt_span, (int, list))
    if event_roll.ndim == 1:
        return medfilt(event_roll, filt_span)
    num_classes = event_roll.shape[1]
    if isinstance(filt_span, int):
        spans = [filt_span] * num_classes
    else:
        assert num_classes == len(filt_span)
        spans = filt_span
    for col, span in enumerate(spans):
        event_roll[:, col] = medfilt(event_roll[:, col], span)
    return event_roll
def fill_up_gap(event_roll, accept_gap=5):
    """Fill short inactive gaps between consecutive events of each class.

    Args:
        event_roll: binary event roll, shape [T, C]; modified in place.
        accept_gap: maximum gap length (frames) to fill; an int applies to
            all classes, a list gives a per-class limit.

    Returns:
        The event roll with accepted gaps set to 1.
    """
    assert isinstance(accept_gap, (int, list))
    num_classes = event_roll.shape[1]
    # Zero-pad one frame on both ends so edge events still yield transitions.
    padded = np.vstack((
        np.zeros((1, num_classes)),
        event_roll,
        np.zeros((1, num_classes)),
    ))
    transitions = np.diff(padded, axis=0)
    for cls in range(num_classes):
        gap_limit = accept_gap if isinstance(accept_gap, int) else accept_gap[cls]
        starts = np.where(transitions[:, cls] == 1)[0]
        ends = np.where(transitions[:, cls] == -1)[0]
        # A gap lies between the end of event k-1 and the start of event k.
        for k in range(1, starts.shape[0]):
            if starts[k] - ends[k - 1] <= gap_limit:
                event_roll[ends[k - 1]:starts[k], cls] = 1
    return event_roll
def remove_short_duration(event_roll, reject_duration=10):
    """Remove events whose duration is at most ``reject_duration`` frames.

    Args:
        event_roll: binary event roll, shape [T, C]; modified in place.
        reject_duration: maximum duration to reject as a short event; an int
            applies to all classes, a list gives a per-class value.

    Returns:
        The event roll with short events zeroed out.
    """
    assert isinstance(reject_duration, (int, list))
    num_classes = event_roll.shape[1]
    # Zero-pad one frame on both ends so edge events still yield transitions.
    event_roll_ = np.append(
        np.append(np.zeros((1, num_classes)), event_roll, axis=0),
        np.zeros((1, num_classes)),
        axis=0,
    )
    aux_event_roll = np.diff(event_roll_, axis=0)
    for i in range(event_roll.shape[1]):
        onsets = np.where(aux_event_roll[:, i] == 1)[0]
        offsets = np.where(aux_event_roll[:, i] == -1)[0]
        for j in range(onsets.shape[0]):
            # BUG FIX: the original compared onsets[j] - offsets[j] (always
            # negative, so always True) and zeroed the empty slice
            # offsets[j]:onsets[j] — no event was ever removed. An event
            # spans onsets[j]..offsets[j], so its duration is the difference.
            duration = offsets[j] - onsets[j]
            if isinstance(reject_duration, int):
                if duration <= reject_duration:
                    event_roll[onsets[j] : offsets[j], i] = 0
            elif isinstance(reject_duration, list):
                if duration <= reject_duration[i]:
                    event_roll[onsets[j] : offsets[j], i] = 0
    return event_roll
class ScoreDataset(Dataset):
    """Dataset of per-clip posterior scores loaded from an HDF5 file.

    Each item is a dict holding the clip id, strong (framewise) and weak
    (clipwise) predictions, plus the target labels when has_label is True.
    The whole file is loaded into memory up front.
    """

    def __init__(self, score_h5_path, has_label=True):
        self.has_label = has_label
        self.dataset = {}
        with h5py.File(score_h5_path, "r") as h5:
            self.data_ids = list(h5.keys())
            for data_id in self.data_ids:
                entry = dict(
                    pred_strong=h5[data_id]["pred_strong"][()],
                    pred_weak=h5[data_id]["pred_weak"][()],
                )
                if has_label:
                    entry["target"] = h5[data_id]["target"][()]
                self.dataset[data_id] = entry

    def __getitem__(self, index):
        data_id = self.data_ids[index]
        entry = self.dataset[data_id]
        item = dict(
            data_id=data_id,
            pred_strong=entry["pred_strong"],
            pred_weak=entry["pred_weak"],
        )
        if self.has_label:
            item["target"] = entry["target"]
        return item

    def __len__(self):
        return len(self.data_ids)
class PostProcess:
    """Posterior post-processing and threshold tuning for sound event detection.

    On construction, runs ``model`` over ``iterator`` once and caches sigmoid
    posteriors to ``output_dir / "posterior.h5"``; the threshold / median-span
    searches then re-read the cached scores instead of re-running the model.
    """

    def __init__(
        self,
        model: torch.nn.Module,
        iterator,
        output_dir,
        options,
    ):
        self.model = model
        self.iterator = iterator
        self.options = options
        self.device = options.device
        self.decoder = options.decoder
        self.pooling_time_ratio = options.pooling_time_ratio
        self.sample_rate = options.sample_rate
        self.hop_size = options.hop_size
        self.thresholds = [0.5]
        self.validation_df = options.validation_df
        self.durations_validation = options.durations_validation
        # label name -> class index
        self.labels = {key: value for value, key in enumerate(options.classes)}
        self.output_dir = output_dir
        self.get_posterior(save_h5_path=output_dir / "posterior.h5")
        self.data_loader = DataLoader(ScoreDataset(output_dir / "posterior.h5", has_label=True))

    @torch.no_grad()
    def get_posterior(self, save_h5_path) -> None:
        """Run the model over the iterator and dump sigmoid posteriors to HDF5."""
        with h5py.File(save_h5_path, "w") as h5:
            self.model.eval()
            for (batch_input, batch_target, data_ids) in self.iterator:
                predicts = self.model(batch_input.to(self.device))
                predicts["strong"] = torch.sigmoid(predicts["strong"]).cpu().data.numpy()
                predicts["weak"] = torch.sigmoid(predicts["weak"]).cpu().data.numpy()
                for data_id, pred_strong, pred_weak, target in zip(
                    data_ids, predicts["strong"], predicts["weak"], batch_target.numpy()
                ):
                    h5.create_group(data_id)
                    h5[data_id].create_dataset("pred_strong", data=pred_strong)
                    h5[data_id].create_dataset("pred_weak", data=pred_weak)
                    h5[data_id].create_dataset("target", data=target)

    def get_prediction_dataframe(
        self,
        post_processing=None,
        save_predictions=None,
        transforms=None,
        mode="validation",
        threshold=0.5,
        binarization_type="global_threshold",
    ):
        """Decode cached posteriors into an event-level prediction dataframe.

        Args:
            post_processing: optional list of callables applied to each clip's
                binarized strong predictions,
                e.g. [functools.partial(median_filt_1d, filt_span=39)]
            save_predictions: unused; kept for interface compatibility.
            transforms: unused; kept for interface compatibility.
            mode: unused; kept for interface compatibility.
            threshold: scalar (global) or per-class list of decision thresholds.
            binarization_type: 'global_threshold' or 'class_threshold'.

        Returns:
            pandas.DataFrame with columns
            [event_label, onset, offset, filename], onset/offset in seconds.
        """
        prediction_df = pd.DataFrame()
        for batch_idx, data in enumerate(self.data_loader):
            output = {}
            output["strong"] = data["pred_strong"].cpu().data.numpy()
            output["weak"] = data["pred_weak"].cpu().data.numpy()
            # Binarize score into predicted label
            if binarization_type == "class_threshold":
                for i in range(output["strong"].shape[0]):
                    output["strong"][i] = ProbabilityEncoder().binarization(
                        output["strong"][i],
                        binarization_type=binarization_type,
                        threshold=threshold,
                        time_axis=0,
                    )
            elif binarization_type == "global_threshold":
                output["strong"] = ProbabilityEncoder().binarization(
                    output["strong"],
                    binarization_type=binarization_type,
                    threshold=threshold,
                )
            else:
                raise ValueError("binarization_type must be 'class_threshold' or 'global_threshold'")
            for pred, data_id in zip(output["strong"], data["data_id"]):
                # Apply post processing if exists
                if post_processing is not None:
                    for post_process_fn in post_processing:
                        pred = post_process_fn(pred)
                pred = self.decoder(pred)
                pred = pd.DataFrame(pred, columns=["event_label", "onset", "offset"])
                # Convert frame indices to seconds, clipped to the clip length.
                pred.loc[:, ["onset", "offset"]] *= self.pooling_time_ratio / (self.sample_rate / self.hop_size)
                pred.loc[:, ["onset", "offset"]] = pred[["onset", "offset"]].clip(0, self.options.max_len_seconds)
                pred["filename"] = data_id
                prediction_df = prediction_df.append(pred, ignore_index=True)
        return prediction_df

    def search_best_threshold(self, step, target="Event"):
        """Grid-search a per-class decision threshold maximizing event-based F1.

        Args:
            step: grid step in (0, 1); thresholds step, 2*step, ... are tried.
            target: kept for interface compatibility; only event-based F1 is
                computed.

        Returns:
            (best_th, best_f1): per-class dicts of best threshold and its F1.
        """
        assert 0 < step < 1.0
        assert target in ["Event", "Frame"]
        best_th = {k: 0.0 for k in self.labels}
        best_f1 = {k: 0.0 for k in self.labels}
        for th in np.arange(step, 1.0, step):
            logging.info(f"threshold: {th}")
            prediction_df = self.get_prediction_dataframe(
                threshold=th,
                binarization_type="global_threshold",
                save_predictions=None,
            )
            events_metric, segments_metric, psds_m_f1 = compute_metrics(
                prediction_df, self.validation_df, self.durations_validation
            )
            for i, label in enumerate(self.labels):
                f1 = events_metric.class_wise_f_measure(event_label=label)["f_measure"]
                if f1 > best_f1[label]:
                    best_th[label] = th
                    best_f1[label] = f1
        # Re-evaluate once with the per-class winners applied together.
        thres_list = [0.5] * len(self.labels)
        for i, label in enumerate(self.labels):
            thres_list[i] = best_th[label]
        prediction_df = self.get_prediction_dataframe(
            post_processing=None,
            threshold=thres_list,
            binarization_type="class_threshold",
        )
        # Compute evaluation metrics
        events_metric, segments_metric, psds_m_f1 = compute_metrics(
            prediction_df, self.validation_df, self.durations_validation
        )
        macro_f1_event = events_metric.results_class_wise_average_metrics()["f_measure"]["f_measure"]
        macro_f1_segment = segments_metric.results_class_wise_average_metrics()["f_measure"]["f_measure"]
        # BUG FIX: the segment-based figure previously logged macro_f1_event.
        logging.info(f"Event-based F1:{macro_f1_event * 100:.4}\tSegment-based F1:{macro_f1_segment * 100:.4}")
        logging.info(f"best_th: {best_th}")
        logging.info(f"best_f1: {best_f1}")
        return best_th, best_f1

    def search_best_median(self, spans, best_th=None, target="Event"):
        """Grid-search a per-class median-filter span maximizing event-based F1.

        Args:
            spans: iterable of candidate (odd) filter spans.
            best_th: optional per-class threshold dict from search_best_threshold.
            target: kept for interface compatibility; only event-based F1 is
                computed.

        Returns:
            (best_span, best_f1): per-class dicts of best span and its F1.
        """
        best_span = {k: 1 for k in self.labels}
        best_f1 = {k: 0.0 for k in self.labels}
        for span in spans:
            logging.info(f"median filter span: {span}")
            post_process_fn = [functools.partial(median_filt_1d, filt_span=span)]
            if best_th is not None:
                prediction_df = self.get_prediction_dataframe(
                    post_processing=post_process_fn,
                    threshold=list(best_th.values()),
                    binarization_type="class_threshold",
                )
            else:
                prediction_df = self.get_prediction_dataframe(post_processing=post_process_fn)
            events_metric, segments_metric, psds_m_f1 = compute_metrics(
                prediction_df, self.validation_df, self.durations_validation
            )
            for i, label in enumerate(self.labels):
                f1 = events_metric.class_wise_f_measure(event_label=label)["f_measure"]
                if f1 > best_f1[label]:
                    best_span[label] = span
                    best_f1[label] = f1
        # Re-evaluate once with the per-class winning spans applied together.
        post_process_fn = [functools.partial(median_filt_1d, filt_span=list(best_span.values()))]
        if best_th is not None:
            prediction_df = self.get_prediction_dataframe(
                post_processing=post_process_fn,
                # BUG FIX: was len(best_th.values()) — a single int — instead
                # of the per-class threshold list (cf. the same call above).
                threshold=list(best_th.values()),
                binarization_type="class_threshold",
            )
        else:
            prediction_df = self.get_prediction_dataframe(post_processing=post_process_fn)
        # Compute evaluation metrics
        events_metric, segments_metric, psds_m_f1 = compute_metrics(
            prediction_df, self.validation_df, self.durations_validation
        )
        macro_f1_event = events_metric.results_class_wise_average_metrics()["f_measure"]["f_measure"]
        macro_f1_segment = segments_metric.results_class_wise_average_metrics()["f_measure"]["f_measure"]
        # Log both macro scores, matching search_best_threshold.
        logging.info(f"Event-based F1:{macro_f1_event * 100:.4}\tSegment-based F1:{macro_f1_segment * 100:.4}")
        logging.info(f"best_span: {best_span}")
        logging.info(f"best_f1: {best_f1}")
        return best_span, best_f1

    def show_best(self, pp_params, save_predictions=None):
        """Evaluate and log metrics with the given post-processing parameters.

        Args:
            pp_params: dict with optional keys 'threshold', 'median_filtering'
                and 'fill_up_gap', each mapping class label -> parameter value.
            save_predictions: unused; kept for interface compatibility.
        """
        # Set applying post-processing functions
        post_processing_fn = []
        if "threshold" in pp_params.keys():
            threshold = list(pp_params["threshold"].values())
            binarization_type = "class_threshold"
        else:
            threshold = 0.5
            binarization_type = "global_threshold"
        if "median_filtering" in pp_params.keys():
            filt_span = list(pp_params["median_filtering"].values())
            post_processing_fn.append(functools.partial(median_filt_1d, filt_span=filt_span))
        if "fill_up_gap" in pp_params.keys():
            accept_gap = list(pp_params["fill_up_gap"].values())
            post_processing_fn.append(functools.partial(fill_up_gap, accept_gap=accept_gap))
        if len(post_processing_fn) == 0:
            post_processing_fn = None
        prediction_df = self.get_prediction_dataframe(
            post_processing=post_processing_fn,
            threshold=threshold,
            binarization_type=binarization_type,
        )
        # Compute evaluation metrics
        events_metric, segments_metric, psds_m_f1 = compute_metrics(
            prediction_df, self.validation_df, self.durations_validation
        )
        macro_f1_event = events_metric.results_class_wise_average_metrics()["f_measure"]["f_measure"]
        macro_f1_segment = segments_metric.results_class_wise_average_metrics()["f_measure"]["f_measure"]
        logging.info(f"Event-based macro F1: {macro_f1_event}")
        logging.info(f"Segment-based macro F1: {macro_f1_segment}")

    def compute_psds(self):
        """Compute PSDS over a grid of decision thresholds and plot the ROC."""
        logging.info("Compute psds scores")
        # Compute psds scores with multiple thresholds (more accurate).
        # n_thresholds could be increased.
        n_thresholds = 50
        out_nb_frames_1s = self.sample_rate / self.hop_size / self.pooling_time_ratio
        # median_window = max(int(0.45 * out_nb_frames_1s), 1)
        post_processing_fn = [functools.partial(median_filt_1d, filt_span=3)]
        # Example of 5 thresholds: 0.1, 0.3, 0.5, 0.7, 0.9
        list_thresholds = np.arange(1 / (n_thresholds * 2), 1, 1 / n_thresholds)
        prediction_dfs = {}
        for threshold in list_thresholds:
            prediction_dfs[threshold] = self.get_prediction_dataframe(
                post_processing=post_processing_fn,
                threshold=threshold,
                binarization_type="global_threshold",
            )
        pred_thresh = []
        for key in prediction_dfs:
            pred_thresh.append(prediction_dfs[key])
        # NOTE(review): with a single threshold this collapses to a bare
        # DataFrame and the zip below would iterate column names — harmless
        # at n_thresholds=50, but confirm before lowering it.
        if len(pred_thresh) == 1:
            pred_thresh = pred_thresh[0]
        # save predictions
        (self.output_dir / "predictions_thresh").mkdir(exist_ok=True)
        for th, pred_df in zip(list_thresholds, pred_thresh):
            pred_df.to_csv(
                self.output_dir / "predictions_thresh" / f"{th}.csv",
                index=False,
                sep="\t",
                float_format="%.3f",
            )
        psds = compute_psds_from_operating_points(pred_thresh, self.validation_df, self.durations_validation)
        psds_score(psds, filename_roc_curves=self.output_dir / "psds_roc.png")

    def tune_all(
        self,
    ):
        """Search thresholds then median spans; log and return combined params."""
        best_th, best_f1 = self.search_best_threshold(
            step=0.1,
        )
        best_fs, best_f1 = self.search_best_median(spans=list(range(1, 31, 2)), best_th=best_th)
        pp_params = {
            "threshold": best_th,
            "median_filtering": best_fs,
        }
        self.show_best(
            pp_params=pp_params,
        )
        logging.info("===================")
        logging.info(f"best_th: {best_th}")
        logging.info(f"best_fs: {best_fs}")
        return pp_params
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 8 14:33:16 2019
@author: Lee
"""
import numpy as np
# Load precomputed SRM filter kernels from disk and print the first one as a
# sanity check. (NOTE(review): 'SRM' presumably refers to Spatial Rich Model
# high-pass kernels — confirm against the file's producer.)
SRM_Kernels = np.load('SRM_Kernels.npy')
print(SRM_Kernels[:1])
################################################################
# pp.client-plone
# (C) 2013, ZOPYX Limited, D-72074 Tuebingen, Germany
################################################################
from zope.interface import Interface
from zope import schema
from pp.client.plone.i18n import MessageFactory as _
class IPPContent(Interface):
    """ Marker interface for Plone content to be considered as
        content for Produce & Publish. Carries no methods or attributes.
    """
class IArchiveFolder(Interface):
    """ Marker interface for a folder with archived content that will
        be ignored inside @@asHTML. Carries no methods or attributes.
    """
class IPloneClientConnectorLayer(Interface):
    """A browser layer specific to my product """
class IPPClientPloneSettings(Interface):
    """ pp.client-plone settings: connection details for the Produce &
        Publish webservice (registry-backed).
    """
    server_url = schema.TextLine(
        title=_(u'URL of Produce & Publish webservice'),
        description=_(u'URL of Produce & Publish webservice'),
        default=u'https://pp-server.zopyx.com'
    )
    # Credentials are optional; anonymous access is the default.
    server_username = schema.TextLine(
        title=_(u'Username for webservice'),
        description=_(u'Username for webservice'),
        required=False,
        default=u''
    )
    server_password = schema.TextLine(
        title=_(u'Password for webservice'),
        description=_(u'Password for webservice'),
        required=False,
        default=u''
    )
|
import sys
from PIL import Image
# Helper Functions #
def region3x3(img, x, y):
    """Return the 3x3 neighborhood of pixel (x, y) as a flat list.

    Order: [C, N, S, E, W, NW, NE, SE, SW]. Out-of-bounds neighbors are
    clamped to the nearest edge pixel by getpixel().
    """
    C = getpixel(img, x, y)
    NW = getpixel(img, x - 1, y - 1)
    N = getpixel(img, x, y - 1)
    NE = getpixel(img, x + 1, y - 1)
    E = getpixel(img, x + 1, y)
    SE = getpixel(img, x + 1, y + 1)
    S = getpixel(img, x, y + 1)
    SW = getpixel(img, x - 1, y + 1)
    # BUG FIX: W (west) previously read (x-1, y+1), duplicating SW;
    # the west neighbor is (x-1, y).
    W = getpixel(img, x - 1, y)
    return [C, N, S, E, W, NW, NE, SE, SW]
def getpixel(img, x, y):
    """Return the pixel at (x, y), clamping coordinates to the image bounds."""
    width, height = img.size
    x = min(max(x, 0), width - 1)
    y = min(max(y, 0), height - 1)
    return img.load()[x, y]
def filter(img, f):
    """Apply f to every pixel's 3x3 region and return a new filtered image.

    NOTE: shadows the builtin filter(); the name is kept for existing callers.
    """
    width, height = img.size
    result = img.copy()
    out_pixels = result.load()
    for col in range(width):
        for row in range(height):
            out_pixels[col, row] = int(f(region3x3(img, col, row)))
    return result
def open(filepath):
    """Load the image at *filepath* and convert it to 8-bit grayscale ('L').

    Exits the process when the path is too short to be a real filename.
    NOTE: shadows the builtin open(); PIL is still reachable via Image.open.
    """
    if len(filepath)<=1:
        print ("missing image filename")
        sys.exit(1)
    img = Image.open(filepath)
    img = img.convert("L")
    return img
def showimg(filepath):
    """Load the image at *filepath* and display it in the default viewer."""
    img = open(filepath) # load file specified on the command line
    img.show()
# For Sharpen #
#define laplace function
def laplace(regionlist):
    """Discrete Laplacian: (N + S + E + W) minus four times the center pixel."""
    center = regionlist[0]
    north, south, east, west = regionlist[1:5]
    return (north + south + east + west) - 4 * center
def minus(A, B):
    """Pixel-wise subtraction A - B; returns a new image shaped like A."""
    width, height = A.size
    result = A.copy()
    res_px = result.load()
    a_px = A.load()
    b_px = B.load()
    for col in range(width):
        for row in range(height):
            res_px[col, row] = a_px[col, row] - b_px[col, row]
    return result
def sharpen(img):
    """Sharpen by subtracting the Laplacian edge map from the original image."""
    edges = filter(img, laplace)
    imgdup = minus(img, edges)
    return imgdup
# For Flip #
def flip(img):
    """Return a horizontally mirrored copy of img."""
    width, height = img.size
    mirrored = img.copy()
    src = img.load()
    dst = mirrored.load()
    for row in range(height):
        for col in range(width):
            dst[col, row] = src[width - col - 1, row]
    return mirrored
# For Denoise #
def median(regionlist):
    """Return the median pixel value (middle element after sorting)."""
    ordered = sorted(regionlist)
    # BUG FIX: len()/2 yields a float under Python 3 and raises TypeError
    # when used as an index; use integer floor division.
    return ordered[len(ordered) // 2]
def denoise(img):
    """Remove salt-and-pepper noise by median-filtering each 3x3 region."""
    imgdup = filter(img, median)
    return imgdup
# For Blur #
def avg(regionlist):
    """Arithmetic mean of the region's pixel values."""
    total = sum(regionlist)
    return total / len(regionlist)
def blur(img):
    """Blur by replacing each pixel with the mean of its 3x3 region."""
    imgdup = filter(img, avg)
    return imgdup
|
class Solution:
    def minDistance(self, word1, word2):
        """
        :type word1: str
        :type word2: str
        :rtype: int

        Minimum number of single-character insertions/deletions to turn
        word1 into word2, via a rolling one-row dynamic program.
        """
        # Keep word2 as the shorter string so the DP row stays minimal.
        if len(word2) > len(word1):
            word1, word2 = word2, word1
        # prev[j] = distance between word1[:i-1] and word2[:j].
        prev = list(range(len(word2) + 1))
        for i in range(1, len(word1) + 1):
            curr = [i] + [0] * len(word2)
            for j in range(1, len(word2) + 1):
                if word1[i - 1] == word2[j - 1]:
                    curr[j] = prev[j - 1]
                else:
                    curr[j] = 1 + min(curr[j - 1], prev[j])
            prev = curr
        return prev[-1]
# Quick smoke test: a single insertion ("z") separates the two words.
s = Solution()
print(s.minDistance("abcdef", "abzcdef"))
|
#!/usr/bin/env python3
from QuantumCircuits import QuantumPrograms
from qiskit import QuantumProgram
from QConfig import QConfig
from SignalUtils import tryExecuteWithTimeout
import CsvDataWriter
from random import randint
import time
import sys
def setup_quantum_program():
    """Build a QuantumPrograms wrapper configured for local simulation.

    Reads the IBM Quantum Experience API token from ./.qiskit_api_token when
    present, otherwise prompts for it interactively.

    Returns:
        QuantumPrograms: runner bound to the local QASM simulator backend.
    """
    timeout = 210  # 3.5 minutes
    # timeout = 80 # for debugging
    shots = 1024
    backend = 'local_qasm_simulator'
    program = 'factorize_N'
    engine = QuantumProgram()
    try:
        # BUG FIX: the original bare `except:` swallowed every exception
        # (including KeyboardInterrupt) and leaked the file handle; only a
        # missing/unreadable token file should trigger the prompt.
        with open("./.qiskit_api_token", "r") as token_file:
            apiToken = token_file.read()
    except OSError:
        apiToken = input("Enter your IBM Quantum Experience API token: \n> ")
    engine.set_api(apiToken, 'https://quantumexperience.ng.bluemix.net/api')
    config = QConfig(backend, shots, timeout, program)
    return QuantumPrograms(engine, config)
def run_benchmark(qp: QuantumPrograms, numberToFactor: int):
    """Run one factorization of *numberToFactor* and return the elapsed
    wall-clock time in seconds (as measured by ``time.perf_counter``)."""
    start = time.perf_counter()
    qp.factorize_N(numberToFactor)
    elapsed = time.perf_counter() - start
    return elapsed
def random_with_N_digits(n):
    """Return a uniformly random integer with exactly *n* decimal digits."""
    lowest = 10 ** (n - 1)
    highest = 10 ** n - 1
    return randint(lowest, highest)
if __name__ == "__main__":
    num_inputs = 20
    # num_inputs = 3 # for debugging
    # 15 is always benchmarked; the remaining inputs are random numbers of
    # growing digit count (3 .. num_inputs+1 digits).
    results = {15: []}
    for digits in range(3, num_inputs + 2):
        results[random_with_N_digits(digits)] = []
    num_trials = 10
    # num_trials = 3 # for debugging
    engine = setup_quantum_program()
    for target in results.keys():
        for trial in range(num_trials):
            def run_experiment():
                results[target].append(run_benchmark(engine, target))
            tryExecuteWithTimeout(run_experiment, engine.qconf.timeout, f"Failed to factorize {target} within {engine.qconf.timeout} seconds.")
            if len(results[target]) <= trial:
                results[target].append(-1)  # -1 marks a timed-out trial
    CsvDataWriter.write_data(results)
    print("Done benchmarking!")
    # Exit as cleanly as the host environment allows; fall back silently
    # if neither sys.exit() nor quit() is effective.
    try:
        sys.exit()
    except:
        try:
            quit()
        except:
            pass
|
# Generated by Django 3.0.3 on 2020-03-01 20:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter ``maintasks.task`` into a ForeignKey onto ``core.Tasks``
    (CASCADE on delete, form-optional via ``blank=True``, default pk 1)."""

    # Must run after the migration that introduced the MainTasks model.
    dependencies = [
        ('core', '0010_maintasks'),
    ]

    operations = [
        migrations.AlterField(
            model_name='maintasks',
            name='task',
            # related_name='Tarefa' is the reverse accessor on core.Tasks.
            field=models.ForeignKey(blank=True, default=1, on_delete=django.db.models.deletion.CASCADE, related_name='Tarefa', to='core.Tasks'),
        ),
    ]
|
#!/usr/bin/env python
#
# Author: yasser hifny
#
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
import sys
import codecs
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.layers import Embedding, Dense, Input, LSTM, GlobalAveragePooling1D, GlobalMaxPooling1D
from tensorflow.keras.layers import SpatialDropout1D, Dropout, LSTM, GRU, Bidirectional, TimeDistributed
from tensorflow.keras.models import Model, load_model
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.optimizers import RMSprop, Adam, Nadam
from tensorflow.keras.layers import Conv1D, MaxPooling1D
from tensorflow.keras import initializers
from tensorflow.keras.preprocessing import sequence
import tensorflow.keras
from tensorflow.keras import optimizers
from tensorflow.keras.models import load_model
from tensorflow_asr.models.encoders.conformer import ConformerEncoder
import glob
import os
# import tensorflow as tf
from tensorflow.keras.callbacks import Callback
import warnings
# import tensorflow as tf
import random
random.seed(9001)
import argparse
from numpy import newaxis
#from gated_cnn import GatedConvBlock
from GCNN import GatedConv1D
from novograd import NovoGrad
from my_layers import ContextExpansion
from transformer import Position_Embedding, MultiHeadAttention, LayerNormalization, Gelu
from tensorflow.keras.utils import get_custom_objects
from kaldi.asr import MappedLatticeFasterRecognizer
from kaldi.decoder import LatticeFasterDecoderOptions
#from kaldi.itf import DecodableInterface
from kaldi.matrix import Matrix
from kaldi.util.table import SequentialMatrixReader, MatrixWriter
import config_kaldi as config
#from tensorflow.keras_transformer.transformer import TransformerBlock
print(config.graph_file)
print(config.words_mapping_file)

# Build the Kaldi lattice-faster recognizer from the trained transition
# model, the decoding graph (HCLG) and the word symbol table.
decoder_opts = LatticeFasterDecoderOptions()
decoder_opts.beam = 13
decoder_opts.max_active = 7000
asr = MappedLatticeFasterRecognizer.from_files(
    config.final_model, config.graph_file, config.words_mapping_file,
    acoustic_scale=1.0, decoder_opts=decoder_opts)
print(asr)
def normalize(feature, feats_mean, feats_std, eps=1e-14):
    """Mean/variance-normalize *feature*.

    *eps* keeps the division finite when the standard deviation is zero.
    """
    centered = feature - feats_mean
    return centered / (feats_std + eps)
# Register custom layers/optimizers globally so Keras can deserialize them.
get_custom_objects().update({
    #'GatedConvBlock': GatedConvBlock,
    'NovoGrad': NovoGrad,
    'ContextExpansion': ContextExpansion,
    'Position_Embedding': Position_Embedding,
})

# Load the trained acoustic model; every non-standard layer must be named
# here for deserialization.
custom_layers = {
    #'GatedConvBlock': GatedConvBlock,
    'NovoGrad': NovoGrad,
    'ContextExpansion': ContextExpansion,
    'Position_Embedding': Position_Embedding,
    'MultiHeadAttention': MultiHeadAttention,
    'LayerNormalization': LayerNormalization,
    'Gelu': Gelu,
    'ConformerEncoder': ConformerEncoder,
}
kalid_model = load_model(sys.argv[1], custom_objects=custom_layers)

# Expose only the triphone-posterior head of the loaded network.
first_input = kalid_model.input[0] if type(kalid_model.input) is list else kalid_model.input
model = Model(inputs=[first_input], outputs=[kalid_model.get_layer('output_tri').output])
model.summary()

# Feature normalization statistics (fMLLR mean/std).
feat_norm_file = "mean_std_fmllr.npz"
feats_mean = np.load(feat_norm_file)['mean']
feats_std = np.load(feat_norm_file)['std']

# Choose the dev or test feature archive.
feats_rspecifier = config.fmllr_dev_feats_rspecifier
if sys.argv[2] == 'test':
    feats_rspecifier = config.fmllr_test_feats_rspecifier

# Class priors used to turn posteriors into scaled likelihoods.
priors = np.genfromtxt(sys.argv[3], delimiter=',')
# Transcript output file.
out_file = open(sys.argv[4], "w")
# Writer for the per-utterance log-likelihood matrices.
posterior_writer = MatrixWriter("ark:" + sys.argv[4] + '.ark')
# Decode every utterance: forward the features through the network, convert
# posteriors to scaled log-likelihoods, and run the Kaldi decoder.
with SequentialMatrixReader(feats_rspecifier) as f:
    for (fkey, feats) in f:
        print('processing: ', fkey, flush=True)
        # Add a batch axis for the Keras model.
        feats = feats.numpy()[newaxis, ...]
        # Hybrid ASR: divide posteriors by class priors before taking logs.
        loglikes = np.log(model.predict(feats)[0, :, :] / priors)
        # Clamp -inf (zero posteriors) so the decoder sees finite scores.
        loglikes[loglikes == -np.inf] = -100
        out = asr.decode(Matrix(loglikes))
        out_file.write("%s %s\n" % (fkey, out["text"]))
        posterior_writer[fkey] = Matrix(loglikes)
posterior_writer.close()
# BUGFIX: the transcript file was never closed, risking lost buffered writes.
out_file.close()
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 29 20:18:22 2019
@author: zhuguohua
"""
import sympy as sp
sp.init_printing(use_unicode=True)

# Vehicle parameters: mass, yaw inertia, CG-to-axle distances, front/rear
# cornering stiffnesses, longitudinal speed, curve radius, feedforward steer.
m, I_z, l_f, l_r, C_alpha_f, C_alpha_r, V_x, R, delta_ff = sp.symbols(
    'm I_z l_f l_r C_alpha_f C_alpha_r V_x R delta_ff')
# State-feedback gains and the Laplace variable.
k1, k2, k3, k4 = sp.symbols('k1 k2 k3 k4')
s = sp.symbols('s')

# Lateral-error dynamics matrix of the linear bicycle model.
A = sp.Matrix([
    [0, 1, 0, 0],
    [0, -2*(C_alpha_f + C_alpha_r)/(m*V_x), 2*(C_alpha_f + C_alpha_r)/m, -2*(C_alpha_f*l_f - C_alpha_r*l_r)/(m*V_x)],
    [0, 0, 0, 1],
    [0, -2*(C_alpha_f*l_f - C_alpha_r*l_r)/(I_z*V_x), 2*(C_alpha_f*l_f - C_alpha_r*l_r)/I_z, -2*(C_alpha_f*l_f**2 + C_alpha_r*l_r**2)/(I_z*V_x)]
])
print("A:\r\n", sp.latex(A))

# Steering input column.
B1 = sp.Matrix([
    [0],
    [2*C_alpha_f/m],
    [0],
    [2*l_f*C_alpha_f/I_z]
])
print("B1:\r\n", sp.latex(B1))

# Disturbance (path curvature / yaw-rate) input column.
B2 = sp.Matrix([
    [0],
    [-2*(C_alpha_f*l_f - C_alpha_r*l_r)/(m*V_x) - V_x],
    [0],
    [-2*(C_alpha_f*l_f**2 + C_alpha_r*l_r**2)/(I_z*V_x)]
])

# s*I, used to form the resolvent (sI - A)^-1.
S = s * sp.eye(4)
print("B2:\r\n", sp.latex(B2))

K = sp.Matrix([[k1, k2, k3, k4]])
print("K:\r\n", sp.latex(K))

# Resolvent of the state matrix.
D = (S - A)**-1
print("D:\r\n", sp.latex(D))

# Steady-state response to the steering input.
E = B1
X_ss = D*E
X_factor = sp.factor(X_ss)
print("x_ss:\r\n", sp.latex(X_factor))
|
from avx.devices.net import TCPDevice
class Tivo(TCPDevice):
    """A network-controllable TiVo set-top box.

    Speaks the TiVo TCP remote-control protocol (default port 31339).
    Developed against Virgin Media UK's TiVo boxes.
    """
    socket = None

    def __init__(self, deviceID, ipAddress, port=31339, **kwargs):
        super(Tivo, self).__init__(deviceID, ipAddress, port, **kwargs)

    def sendIRCode(self, ircode):
        """Send a single CR-terminated IRCODE command over the socket."""
        command = 'IRCODE %s\r' % ircode
        self.send(command)

    # -- Playback transport controls --

    def pause(self):
        self.sendIRCode("PAUSE")

    def play(self):
        self.sendIRCode("PLAY")

    def rewind(self):
        self.sendIRCode("REVERSE")

    def fastForward(self):
        self.sendIRCode("FORWARD")

    def replay(self):
        self.sendIRCode("REPLAY")

    def skip(self):
        self.sendIRCode("ADVANCE")

    def slow(self):
        self.sendIRCode("SLOW")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.