from settings.models import system_settings
from nginx.views import *
from functools import reduce
import iptc
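# Convert a dotted-quad IPv4 address string into its 32-bit integer value.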
def ip_into_int(ip):
return reduce(lambda x,y:(x<<8)+y,map(int,ip.split('.')))
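# True when the address lies in an RFC 1918 private range (10.0.0.0/8, 172.16.0.0/12 or 192.168.0.0/16).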
def is_internal_ip(ip):
ip = ip_into_int(ip)
net_a = ip_into_int('10.255.255.255') >> 24
net_b = ip_into_int('172.31.255.255') >> 20
net_c = ip_into_int('192.168.255.255') >> 16
return ip >> 24 == net_a or ip >> 20 == net_b or ip >> 16 == net_c
def set_internal_firewall(network, port_list):
    # Allow the listed TCP ports only from the internal network; everything else is dropped by the policy set below.
    chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'INPUT')
    chain.flush()
    for port in port_list:
        rule = iptc.Rule()
        rule.target = iptc.Target(rule, 'ACCEPT')
        rule.src = network
        rule.protocol = 'tcp'
        match = iptc.Match(rule, 'tcp')
        match.dport = str(port)
        rule.add_match(match)
        chain.insert_rule(rule)
rule = iptc.Rule()
rule.target = iptc.Target(rule,"ACCEPT")
match = iptc.Match(rule,'state')
match.state= "RELATED,ESTABLISHED"
rule.add_match(match)
chain.insert_rule(rule)
rule = iptc.Rule()
rule.target = iptc.Target(rule,"ACCEPT")
rule.in_interface = "lo"
chain.insert_rule(rule)
chain.set_policy("DROP")
def set_public_firewall(port_list):
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER),"INPUT")
for port in port_list:
rule = iptc.Rule()
rule.target = iptc.Target(rule, "ACCEPT")
rule.protocol = "tcp"
match = iptc.Match(rule, "tcp")
match.dport = str(port)
rule.add_match(match)
chain.insert_rule(rule)
def set_firewall():
if system_settings.objects.all().count() != 0:
internal_inc = system_settings.objects.all()[0].internal_nic
if internal_inc != "":
internal_port = [22,8000]
public_port = []
for proxy in proxy_config.objects.all():
public_port.append(proxy.listen)
public_port = list(set(public_port))
address = ""
nics = get_sys_info()['nic']
for nic in nics:
if nic['nic'] == internal_inc:
for i in nic['address'].split('.')[:2]:
address += i + '.'
address += '0.0/16'
                    set_internal_firewall(address, internal_port)
set_public_firewall(public_port)
|
from prime import Prime
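# Count the circular primes below one million: a prime is circular if every rotation of its digits is also prime.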
def ans():
circular_primes = set([2])
for p in Prime.gen_nums(1000000):
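        # Any multi-digit prime containing an even digit cannot be circular (some rotation would end in an even digit); 2 is seeded above.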
if any(x in str(p) for x in '02468'):
continue
is_circular = True
for i in range(len(str(p))):
if not Prime.contains(int(str(p)[i:] + str(p)[:i])):
is_circular = False
break
if is_circular:
circular_primes.add(p)
return len(circular_primes)
if __name__ == '__main__':
print(ans())
|
try:
from lapjv import lapjv
# from scipy.optimize import linear_sum_assignment
segment = False
except ImportError:
    print('Module lapjv not found, emulating with the much slower scipy.optimize.linear_sum_assignment')
segment = True
from scipy.optimize import linear_sum_assignment
import random
import numpy as np
import keras.backend as K
from keras.utils import Sequence
class TrainingData(Sequence):
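    """Keras Sequence that re-pairs the training samples every epoch.

    A linear assignment problem on the negated score matrix picks hard
    unmatched pairs, while matched pairs come from a random derangement of
    each whale's samples.
    """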
def __init__(self, score, train, id2samples, train_idx, steps=1000, batch_size=32):
super(TrainingData, self).__init__()
        # Maximizing the score is the same as minimizing -score.
self.score = -score
self.train = train
self.dims = train.shape[1]
self.steps = steps
self.batch_size = batch_size
self.id2samples = id2samples
self.train_idx = train_idx
t2i = {}
for i,t in enumerate(train_idx): t2i[t] = i
for ts in id2samples.values():
idxs = [t2i[t] for t in ts]
for i in idxs:
for j in idxs:
# Set a large value for matching whales -- eliminates this potential pairing
self.score[i,j] = 10000.0
self.on_epoch_end()
def on_epoch_end(self):
# Skip this on the last epoch.
if self.steps <= 0: return
self.steps -= 1
self.match = []
self.unmatch = []
if segment:
# Using slow scipy. Make small batches.
tmp = []
batch = 512
            for start in range(0, self.score.shape[0], batch):
                end = min(self.score.shape[0], start + batch)
_, x = linear_sum_assignment(self.score[start:end, start:end])
tmp.append(x + start)
x = np.concatenate(tmp)
else:
# Solve the linear assignment problem
# _,_, x = lapjv(self.score)
# import ipdb; ipdb.set_trace()
x, _, _ = lapjv(self.score)
y = np.arange(len(x), dtype=np.int32)
# Compute a derangement for matching whales
for ts in self.id2samples.values():
d = ts.copy()
while True:
random.shuffle(d)
                if not np.any(np.asarray(ts) == np.asarray(d)): break
for ab in zip(ts, d): self.match.append(ab)
if 1:
# Construct unmatched pairs from the LAP solution.
for i,j in zip(x,y):
if i == j:
print(f'i {i} == j {j}')
# print(self.score)
print(x)
print(y)
assert i != j
self.unmatch.append((self.train_idx[i], self.train_idx[j]))
# Force a different choice for an eventual next epoch.
self.score[x,y] = 10000.0
self.score[y,x] = 10000.0
random.shuffle(self.match)
random.shuffle(self.unmatch)
assert len(self.match) == len(self.train) and len(self.unmatch) == len(self.train)
def __len__(self):
return (len(self.match) + len(self.unmatch) + self.batch_size - 1) // self.batch_size
def __getitem__(self, index):
start = self.batch_size * index
end = min(start + self.batch_size, len(self.match) + len(self.unmatch))
size = end - start
assert size > 0
a = np.zeros((size,) + (self.dims,), dtype=K.floatx())
b = np.zeros((size,) + (self.dims,), dtype=K.floatx())
c = np.zeros((size,1), dtype=K.floatx())
j = start//2
for i in range(0, size, 2):
a[i, :] = self.train[self.match[j][0]]
b[i, :] = self.train[self.match[j][1]]
# This is a match
c[i, 0] = 1
a[i+1,:] = self.train[self.unmatch[j][0]]
b[i+1,:] = self.train[self.unmatch[j][1]]
# unmatch
c[i+1,0] = 0
j += 1
return [a[:,None,],b[:,None,]],c
if __name__ == '__main__':
from utils import load_cache, group_label, shuffle_idxs
train, y_, _, _ = load_cache('../../')
score = np.random.random_sample(size=(len(train), len(train)))
id2samples = group_label(y_)
train_idx, _ = shuffle_idxs(train)
data = TrainingData(score, train, id2samples, train_idx)
import ipdb; ipdb.set_trace()
    print(data)
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for making predictions on AI Platform and parsing results."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import sys
from googleapiclient import discovery
from constants import constants # pylint: disable=g-bad-import-order
def _parse_arguments(argv):
"""Parses execution arguments and replaces default values.
Args:
argv: Input arguments from sys.
Returns:
Dictionary of parsed arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--project",
required=True,
help="GCP project name.")
parser.add_argument(
"--model",
help="Model name.",
required=True)
parser.add_argument(
"--version",
help="Model version.")
parser.add_argument(
"--usernames",
required=True,
help="File where username queries are stored.")
args, _ = parser.parse_known_args(args=argv[1:])
return args
def _get_instances(usernames_file):
"""Creates inputs for prediction given is file with usernames.
Args:
usernames_file: path to a file containing usernames of users to get
recommendations for.
Returns:
A list of input dicts to be passed to the model for prediction.
"""
with open(usernames_file, "r") as f:
lines = f.read().splitlines()
instances = []
for line in lines:
instance = {
constants.USER_KEY: "",
constants.ITEM_KEY: "",
}
instance.update(json.loads(line))
instances.append(instance)
return instances
def _predict_json(project, model, instances, version):
"""Send json data to a deployed model for prediction.
Args:
project: project where the Cloud AI Platform Model is deployed.
model: model name.
instances: Keys should be the names of Tensors
your deployed model expects as inputs. Values should be datatypes
convertible to Tensors, or (potentially nested) lists of datatypes
convertible to tensors.
version: version of the model to target.
Returns:
Dictionary of prediction results defined by the model.
Raises:
RuntimeError: predictions couldn't be made.
"""
service = discovery.build("ml", "v1")
name = "projects/{}/models/{}".format(project, model)
if version is not None:
name += "/versions/{}".format(version)
response = service.projects().predict(
name=name,
body={"instances": instances}
).execute()
if "error" in response:
raise RuntimeError(response["error"])
return response["predictions"]
def _print_predictions(instances, predictions):
"""Prints top k titles predicted for each user.
Args:
    instances: prediction inputs.
predictions: a list of prediction dicts returned by the model.
"""
for inputs, pred in zip(instances, predictions):
if inputs[constants.USER_KEY]:
print("Recommended songs for", pred[constants.USER_KEY])
print("Title | Similarity")
for item_id, sim in zip(pred["user_top_k"], pred["user_sims"]):
print(item_id, "|", sim)
print("------")
if inputs[constants.ITEM_KEY]:
print("Recommended songs for", pred[constants.ITEM_KEY])
print("Title | Similarity")
for item_id, sim in zip(pred["item_top_k"], pred["item_sims"]):
print(item_id, "|", sim)
print("------")
def main():
"""Uses the usernames in the given usernames file to produce predictions.
The model expects all features to be populated, although predictions are only
made with usernames. So, as a preprocessing step, stubbed features are passed
to the model along with each username.
"""
params = _parse_arguments(sys.argv)
instances = _get_instances(params.usernames)
predictions = _predict_json(params.project, params.model, instances,
params.version)
_print_predictions(instances, predictions)
if __name__ == "__main__":
main()
|
# Deep Learning: Training via BP + Pseudo-inverse
import pandas as pd
import numpy as np
import my_utility as ut
# Softmax's training
def train_softmax(x,y,param):
w=ut.iniW(y.shape[0],x.shape[0])
costo=[]
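    # Inferred from their use below: param[0] is the iteration count and param[1] the learning rate; param[2] is forwarded to softmax_grad.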
for i in range(param[0]):
G=ut.softmax_grad(x,y,w,param[2])
gradW=G[0]
c=G[1]
costo.append(c)
w=w-param[1]*gradW
return w,costo
# AE's Training
def train_ae(x,hnode,param):
w1 = ut.iniW(hnode, x.shape[0])
z = np.dot(w1, x)
a = 1/(1 + np.exp(-z))
w2 = ut.pinv_ae(x, w1, param[2])
for i in range(param[3]):
w1 = ut.backward_ae(x,w1,w2,param[1])
return(w1)
def train_sae(x,param):
W={}
    for hn in range(4, len(param)):  # assumed: hidden-layer sizes are stored in param[4:]
        w1 = train_ae(x, param[hn], param)
W[hn-4] = w1
x = ut.act_sigmoid(np.dot(w1,x))
return(W,x)
# Beginning ...
def main():
par_sae,par_sft = ut.load_config()
xe = ut.load_data_csv('train_x.csv')
ye = ut.load_data_csv('train_y.csv')
W,Xr = train_sae(xe,par_sae)
Ws, cost = train_softmax(Xr,ye,par_sft)
ut.save_w_dl(W,Ws,cost,'w_dl.npz','cost_sofmax.csv')
if __name__ == '__main__':
main()
|
import numpy as np
import pandas as pd
import pylab as plt
from studyResults import customaxis, ratio
# These are the "Tableau 20" colors as RGB.
cols = [(3,43,122),(31,119,180),(174,199,232),(255,186,120),(177,3,24),(31, 119, 180),
(174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229),
(1, 119, 180), (1, 199, 232), (1, 127, 14), (1, 187, 120),
(1, 160, 44), (1, 223, 138), (1, 39, 40), (1, 152, 150),
(1, 103, 189), (1, 176, 213), (1, 86, 75), (1, 156, 148),
(1, 119, 194), (1, 182, 210), (1, 127, 127), (1, 199, 199),
(1, 189, 34), (1, 219, 141), (1, 190, 207), (1, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(cols)):
r, g, b = cols[i]
cols[i] = (r / 255., g / 255., b / 255.)
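# Parse one optimisation log file: returns [protein-level distribution, candidate parameters], the sensing value and the fitness (sign-flipped so larger is better).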
def extractInfoFromFile(fnameF,noise,dim,candFlag = False):
from scipy.stats import lognorm,gamma
df = np.asarray(pd.read_csv(fnameF))
maxI = len(df[:,1])-1
cand = np.mean(df[-5:-1,3:-1],0)
fit = df[-1,1]
sense = df[-1,-1]
if noise > 0:
for line in range(10, len(df[:,1])):
fit = 0.8*fit + 0.2*df[line,1]
sense = 0.8*sense + 0.2*df[line,-1]
cand = 0.9*cand + 0.1*df[line,3:-1]
#print df[line,3:-1]
if candFlag: return cand
fit *= -1
th = 100
if dim == 100: d = cand/np.sum(cand)
else:
x = np.linspace(0.01, 10000., num=100) # values for x-axis
d = np.zeros(100)
w = 0
for jj in range(0,len(cand)-1,3):
d += cand[jj]*gamma.cdf(x, cand[jj+1], loc=0, scale=cand[jj+2]) # probability distribution
w += cand[jj]
d = np.diff(np.concatenate([[0],d]))
d = d/w
return [d,cand],sense,fit
def main2_4(tit,dim=1,noise=0,numE2=2):
"""
version >4
"""
x1 = [1,3,10,30,100]
if dim == 100: end = "2STDobs.txt"
else: end = "2obs.txt"
numS = 5
numE = 5
allFit = np.zeros([numS,numE])
allSk = np.zeros([numS,numE])
allKut = np.zeros([numS,numE])
allMe = np.zeros([numS,numE])
allFa = np.zeros([numS,numE])
allSt = np.zeros([numS,numE])
allSense = np.zeros([numS,numE])
propLowHigh = np.zeros([numS,numE])
MeanAll = np.zeros([numS,numE,100])
i = -1
for a in range(numS) :
i += 1
j = -1
for b in np.arange(numE2):
j += 1
#open("./dataDE/"+str(noise)+args+str(dim)+str(suddenness)+str(numChanges)+"obs.txt","w")
fnameF = "./dataDE/"+str(noise)+tit+str(dim)+str(b)+str(x1[a])+end
[cand,gammaP],sense,fit = extractInfoFromFile(fnameF,noise,dim)
Mean = np.nansum(np.linspace(0,10000,100)*cand)
Variance = np.nansum(cand*(np.linspace(0,10000,100) - Mean)**2)
allSense[i,j] = np.round(sense)
allFit[i,j] = fit
MeanAll[i,j,:] = cand
allMe[i,j] += Mean
allFa[i,j] += np.sqrt(Variance/Mean)
allSt[i,j] += np.sqrt(Variance)
propLowHigh[i,j] = np.sum(MeanAll[i,j,40:])*100.
if 0:#propLowHigh[i,j] == 0:
plt.plot(cand)
plt.title(str(a)+str(b))
                print(tit)
plt.show()
np.savetxt("./dataDE/EN_Oct19_allFit_v4"+str(noise)+str(dim)+tit+".txt",allFit)
allFit[np.isnan(allFit)] = 0
    with open('./dataDE/EN_Oct19_MeanAll_v4'+str(noise)+str(dim)+tit+'.txt', 'w') as outfile:
outfile.write('# Array shape: {0}\n'.format(MeanAll.shape))
for data_slice in MeanAll:
np.savetxt(outfile, data_slice, fmt='%-7.2e')
outfile.write('# New slice\n')
allMe[np.isnan(allMe)] = 0
allFa[np.isnan(allFa)] = 0
allSense[np.isnan(allSense)] = 0
propLowHigh[np.isnan(propLowHigh)] = 0
np.savetxt("./dataDE/EN_Oct19_allMe_v4"+str(noise)+str(dim)+tit+".txt",allMe)
np.savetxt("./dataDE/EN_Oct19_allFa_v4"+str(noise)+str(dim)+tit+".txt",allFa)
np.savetxt("./dataDE/EN_Oct19_allSense_v4"+str(noise)+str(dim)+tit+".txt",allSense)
np.savetxt("./dataDE/EN_Oct19_propLowHigh_v4"+str(noise)+str(dim)+tit+".txt",propLowHigh)
def printAB(tit,dim=1,noise=0):
try:
x1 = [1,3,10,30,100]
i = -1
if dim == 100: end = "STDobs.txt"
else: end = "obs.txt"
a1,a2,a3,a4 = 0,0,0,0
for a in range(5) :
i += 1
j = -1
for b in np.arange(2):
j += 1
#open("./dataDE/"+str(noise)+args+str(dim)+str(suddenness)+str(numChanges)+"obs.txt","w")
fnameF = "./dataDE/"+str(noise)+tit+str(dim)+str(b)+str(x1[a])+end
df = np.asarray(pd.read_csv(fnameF))
maxI = len(df[:,1])-1
cand = df[-1,3:-1]
fit = df[-1,1]
sense = df[-1,-1]
for line in range(10, len(df[:,1])):
fit = 0.8*fit + 0.2*df[line,1]
sense = 0.8*sense + 0.2*df[line,-1]
cand = 0.8*cand + 0.2*df[line,3:-1]
a1 += fit
a2 += sense
a3 += cand[1]
a4 += cand[2]
print "%f, %f, %f, %f" %(fit,sense,cand[1],cand[2])
except: pass
def plotImshow(numS,numE,MeanAll,allFit,allMe,allFa,allSense,propLowHigh,tit,limits=True):
i = -1
fig2 = plt.figure(2)
fig2.set_size_inches(2.*numS,2.*numE)
#10 Env for a in np.logspace(0.0,0.4,5)-1:#np.logspace(0.000,0.5,6)-1:
for a in range(numS) :
i += 1
j = -1
#10 Env for b in np.arange(5):#np.arange(0,7,1):#np.arange(1,10,1):
for b in np.arange(numE):
j += 1
x = numE*((numS-1) -i)+j+1
#print x, i, j
ax = fig2.add_subplot(numS,numE, x)
ax.plot(np.linspace(50,10000,100),MeanAll[i,j,:],linewidth=3,color='orange')
if i == 0:
if j == 0:
ax.set_ylabel('Frequency',fontsize=16)
ax.set_xlabel('Protein \nLevel',fontsize=16)
else:
ax.set_xlabel('Protein \nLevel',fontsize=16)
ax.axes.get_yaxis().set_visible(False)
elif j == 0:
ax.set_ylabel('Frequency',fontsize=16)
ax.axes.get_xaxis().set_visible(False)
else:
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
frame = plt.gca()
frame.axes.get_xaxis().set_ticks([])
frame.axes.get_yaxis().set_ticks([])
#ax.set_ylim((0,0.5))
ax.set_xlim((0,10050))
ax.set_xlim((-200,10000))
plt.subplots_adjust(hspace = .001, wspace=0.001)
tit = tit.replace('_','-')
plt.suptitle(tit,fontsize=24)
plt.savefig("./imagesDE/"+tit+'Hist.pdf', bbox_inches='tight' ,dpi=100)
#plt.show()
plt.clf()
def main2_4_Plot(tit,dim=1,noise=0,numE2=2):
"""
version >4
"""
x1 = [1,3,10,30,100]
numS = 5
numE = 5
#print np.shape(np.loadtxt("./data/EN_Oct19_meanAll_v4"+tit+".txt"))
MeanAll = np.loadtxt("./dataDE/EN_Oct19_MeanAll_v4"+str(noise)+str(dim)+tit+".txt").reshape((numS,numE,100))
allFit = np.loadtxt("./dataDE/EN_Oct19_allFit_v4"+str(noise)+str(dim)+tit+".txt")
allMe = np.loadtxt("./dataDE/EN_Oct19_allMe_v4"+str(noise)+str(dim)+tit+".txt")
allFa = np.loadtxt("./dataDE/EN_Oct19_allFa_v4"+str(noise)+str(dim)+tit+".txt")
allSense = np.loadtxt("./dataDE/EN_Oct19_allSense_v4"+str(noise)+str(dim)+tit+".txt")
propLowHigh = np.loadtxt("./dataDE/EN_Oct19_propLowHigh_v4"+str(noise)+str(dim)+tit+".txt")
plotImshow(numS,numE2,MeanAll,allFit,allMe,allFa,allSense,propLowHigh,str(noise)+str(dim)+tit)
def main2_4_PlotTogether(tit,dim=1,noise=0,numE2=2):
"""
version >4
"""
x1 = [1,3,10,30,100]
numS = 5
numE = 2
fig2 = plt.figure(2)
fig2.set_size_inches(2.,2.*5)
jjj = 0
#10 Env for a in np.logspace(0.0,0.4,5)-1:#np.logspace(0.000,0.5,6)-1:
for d in dim:
i = -1
jjj += 1
MeanAll = np.loadtxt("./dataDE/EN_Oct19_MeanAll_v4"+str(noise)+str(d)+tit+".txt").reshape((numS,5,100))
for a in range(numS) :
i += 1
j = -1
#10 Env for b in np.arange(5):#np.arange(0,7,1):#np.arange(1,10,1):
for b in np.arange(numE):
j += 1
x = numE*((numS-1) -i)+j+1
#print x, i, j
ax = fig2.add_subplot(numS,numE, x)
ax.plot(np.linspace(50,10000,100),MeanAll[i,j,:],linewidth=3,color=cols[jjj])
if i == 0:
if j == 0:
ax.set_ylabel('Frequency',fontsize=16)
ax.set_xlabel('Protein \nLevel',fontsize=16)
else:
ax.set_xlabel('Protein \nLevel',fontsize=16)
ax.axes.get_yaxis().set_visible(False)
elif j == 0:
ax.set_ylabel('Frequency',fontsize=16)
ax.axes.get_xaxis().set_visible(False)
else:
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
frame = plt.gca()
frame.axes.get_xaxis().set_ticks([])
frame.axes.get_yaxis().set_ticks([])
ax.set_ylim((0,0.05))
ax.set_xlim((0,10050))
ax.set_xlim((-200,10000))
#plt.yscale('log')
plt.subplots_adjust(hspace = .001, wspace=0.001)
#plt.xscale('log')
tit = tit.replace('_','-')
plt.suptitle(tit,fontsize=24)
plt.savefig("./imagesDE/"+tit+str(noise)+'Hist.pdf', bbox_inches='tight' ,dpi=100)
#plt.show()
plt.clf()
def figStrategiesRegion(titles,cost=1,dim=1,noise=0):
numE = 5
matrixToPlot = np.zeros((len(titles),numE))
i = -1
for tit in titles:
i += 1
propLowHigh = np.loadtxt("./dataDE/EN_Oct19_propLowHigh_v4"+str(noise)+str(dim)+tit+".txt")
#matrixToPlot[i,:] = ((propLowHigh[:,cost]))#
matrixToPlot[i,:] = np.log10((propLowHigh[:,cost])/(100.-propLowHigh[:,cost]))
"""
x = []
for cosa in ['1','3','10','30','100']:
fnameF = "./dataDE/"+str(noise)+tit+str(dim)+str(-1*(cost-1))+cosa+"0obs.txt"
d = extractInfoFromFile(fnameF,noise,dim,candFlag = True)
print d
x.append(d[0]/d[3])
print tit, x
matrixToPlot[i,:] = np.asarray(x)
"""
    print(matrixToPlot)
fig = plt.figure(1)
fig.set_size_inches(7./5*2.,2.)
ax = plt.subplot(1,1,1)
#plt.imshow(matrixToPlot,interpolation='none',cmap='Blues',vmin=-20,vmax=100)
if 1:#cost == 1:
plt.imshow(matrixToPlot,interpolation='none',cmap='Purples',vmin=-5,vmax=3)#,vmin=-3,vmax=2)#,vmin=-10,vmax=100)
else:
plt.imshow(matrixToPlot,interpolation='none',cmap='Purples',vmin=-3,vmax=3)#,vmin=-3,vmax=2)#,vmin=-10,vmax=100)
plt.xlabel('Environmental Transition Rate',fontsize=10)
plt.ylabel('Symmetry',fontsize=10)
plt.xticks(np.arange(5),['1','3','10','30','100'],rotation=90)
plt.yticks(np.arange(len(titles)),["%.0e" %(float(ratio(title)[ratio(title).find(":")+1:])/float(ratio(title)[:ratio(title).find(":")])) for title in titles])
plt.title('Proportion of cells in phenotype high',fontsize=10)
plt.colorbar(orientation='vertical')
customaxis(ax, c_left='none', c_bottom='none', c_right='none', c_top='none', lw=2, size=10, pad=8)
plt.savefig("./imagesDE/imshow"+title[0]+str(cost)+str(noise)+str(dim)+tit[-2:]+'.pdf', bbox_inches='tight' ,dpi=100)
plt.clf()
#plt.show()
def figStrategiesPropTo(titles,cost=1,scale='log',dim=1,noise=0):
numE = 5
path = './data/'
fig = plt.figure(1)
fig.set_size_inches(3.3,2.5)
ax = plt.subplot(1,1,1)
minX = 100
i = -1
for tit in titles:
i += 1
propLowHigh = np.loadtxt("./dataDE/EN_Oct19_propLowHigh_v4"+str(noise)+str(dim)+tit+".txt")
ax.plot([1,3,10,30,100],propLowHigh[:,cost],marker='.',ms=10,label=ratio(tit),linewidth=1.5,color = cols[i])
t = propLowHigh[:,cost]
minX = min(min(t[t>0]),minX)
plt.xscale('log')
plt.xlabel('Environmental Transition Rate',fontsize=10)
plt.ylabel('Percentage of cells with \n high protein expression',fontsize=10)
plt.yscale(scale)
plt.ylim(minX/10.,120)
plt.xlim(0.9,120)
plt.legend(loc=4,frameon=0,fontsize=10,ncol=2)
customaxis(ax, c_left='k', c_bottom='k', c_right='none', c_top='none', lw=1, size=10, pad=8)
#plt.show()
plt.show()
plt.savefig("./imagesDE/lines"+tit[0]+str(cost)+str(noise)+str(dim)+tit[-2:]+'.pdf', bbox_inches='tight' ,dpi=100)
plt.clf()
def figStrategiesAsymmetryo(titles,cost=1,scale='log',dim=1,noise=0):
numE = 5
path = './dataDE/'
fig = plt.figure(1)
fig.set_size_inches(0.8*3.3,0.8*2.5)
ax = fig.add_subplot(1,1,1)
x0 = []
x1 = []
x2 = []
x3 = []
x4 = []
y0 = []
y1 = []
y2 = []
y3 = []
y4 = []
i = -1
xaxis = []
xaxis2 = []
for tit in titles:
i += 1
propLowHigh = (np.loadtxt("./dataDE/EN_Oct19_propLowHigh_v4"+str(noise)+str(dim)+tit+".txt"))
        print(propLowHigh)
x0.append(propLowHigh[0,0])
x1.append(propLowHigh[1,0])
x2.append(propLowHigh[2,0])
x3.append(propLowHigh[3,0])
x4.append(propLowHigh[4,0])
y0.append(propLowHigh[0,1])
y1.append(propLowHigh[1,1])
y2.append(propLowHigh[2,1])
y3.append(propLowHigh[3,1])
y4.append(propLowHigh[4,1])
env = ratio(tit)
if tit[0] == "0":
xaxis.append(env)
else:
first = env[:env.find(":")]
second = env[env.find(":")+1:]
            print(float(env[:env.find(":")])/float(env[env.find(":")+1:]))
xaxis.append(float(env[env.find(":")+1:])/float(env[:env.find(":")]))
xaxis2.append(float(env[:env.find(":")]))
xaxis = np.asarray(xaxis)
xaxis2 = np.asarray(xaxis2)
"""
ax.plot(xaxis,x0,color=(214./255, 39./255, 40./255),marker='.',label='No Sensing, Env 1')
ax.plot(xaxis,x1,color=(214./255, 39./255, 40./255),marker='.',label='No Sensing, Env 3')
ax.plot(xaxis,x2,color=(214./255, 39./255, 40./255),marker='.',label='No Sensing, Env 10')
ax.plot(xaxis,x3,color=(214./255, 39./255, 40./255),marker='.',label='No Sensing, Env 30')
ax.plot(xaxis,x4,color=(214./255, 39./255, 40./255),marker='.',label='No Sensing, Env 100')
ax.plot((xaxis),y0,color=(31./255, 119./255, 180./255),alpha=1,marker='.',label='Sensing, Env 1')
ax.plot((xaxis),y1,color=(31./255, 119./255, 180./255),alpha=1,marker='.',label='Sensing, Env 3')
ax.plot((xaxis),y2,color=(31./255, 119./255, 180./255),alpha=1,marker='.',label='Sensing, Env 10')
ax.plot((xaxis),y3,color=(31./255, 119./255, 180./255),alpha=1,marker='.',label='Sensing, Env 30')
ax.plot((xaxis),y4,color=(31./255, 119./255, 180./255),alpha=1.,marker='.',label='Sensing, Env 100')
"""
ax.plot(1./(xaxis2/1),x0,color=(214./255, 39./255, 40./255),marker='.',label='No Sensing, Env 1')
ax.plot(1./(xaxis2/3),x1,color=(214./255, 39./255, 40./255),marker='.',label='No Sensing, Env 3')
ax.plot(1./(xaxis2/10),x2,color=(214./255, 39./255, 40./255),marker='.',label='No Sensing, Env 10')
ax.plot(1./(xaxis2/30),x3,color=(214./255, 39./255, 40./255),marker='.',label='No Sensing, Env 30')
ax.plot(1./(xaxis2/100),x4,color=(214./255, 39./255, 40./255),marker='.',label='No Sensing, Env 100')
ax.plot((1./(xaxis2/1)),y0,color=(31./255, 119./255, 180./255),alpha=1,marker='.',label='Sensing, Env 1')
ax.plot((1./(xaxis2/3)),y1,color=(31./255, 119./255, 180./255),alpha=1,marker='.',label='Sensing, Env 3')
ax.plot((1./(xaxis2/10)),y2,color=(31./255, 119./255, 180./255),alpha=1,marker='.',label='Sensing, Env 10')
ax.plot((1./(xaxis2/30)),y3,color=(31./255, 119./255, 180./255),alpha=1,marker='.',label='Sensing, Env 30')
ax.plot((1./(xaxis2/100)),y4,color=(31./255, 119./255, 180./255),alpha=1.,marker='.',label='Sensing, Env 100')
    print(y0)
"""
for i,j,z in zip(xaxis,y0,np.ones(len(y0))):
print "%d %d %d" %(i*1000,j*1000,z)
for i,j,z in zip(xaxis,y1,2+np.ones(len(y0))):
print "%d %d %d" %(i*1000,j*1000,z)
for i,j,z in zip(xaxis,y2,9+np.ones(len(y0)))[:-1]:
print "%d %d %d" %(i*1000,j*1000,z)
for i,j,z in zip(xaxis,y3,29+np.ones(len(y0)))[:-1]:
print "%d %d %d" %(i*1000,j*1000,z)
"""
ax.set_xlabel('Ratio High/Low Stress',fontsize=10)
ax.set_xlabel('Time High Stress',fontsize=10)
ax.set_ylabel('Percentage of cells with \n high protein expression',fontsize=10)
ax.set_yscale(scale)
ax.set_xscale(scale)
#plt.ylim(1E-3,120)
#plt.xlim(0. ax2.plot(1./(xaxis*1),y0,color='blue',alpha=1,marker='.',label='Sensing, Env 1')
#ax.legend(loc=4,frameon=0,fontsize=10,ncol=2)
customaxis(ax, c_left='k', c_bottom='k', c_right='none', c_top='none', lw=1, size=10, pad=8)
#plt.show()
plt.savefig("./imagesDE/linesAsymmetry"+tit[0]+str(cost)+str(noise)+str(dim)+tit[-2:]+'.pdf', bbox_inches='tight' ,dpi=100)
plt.clf()
def figStrategiesEnvironmentEffect(titles,noises = [0],cost= 0,dims = [2,1]):
numE = 5
path = './dataDE/'
fig = plt.figure(1)
fig.set_size_inches(3.3,2.5)
ax = fig.add_subplot(1,1,1)
if cost == 0:
titles = ["2Env_NN_PEEHSS"]
par = 2
else:
titles = ["2Env_NN_PEVHSS"]
par = 4
i = -1
for noise in noises:
for tit in titles:
i += 1
allFit = 100.*(np.loadtxt("./dataDE/EN_Oct19_allFit_v4"+str(noise)+str(dims[0])+tit+".txt")-np.loadtxt("./dataDE/EN_Oct19_allFit_v4"+str(noise)+str(dims[1])+tit+".txt"))#/np.loadtxt("./data/EN_Oct19_allFit_v4"+tit+".txt")
propLowHigh = np.loadtxt("./dataDE/EN_Oct19_propLowHigh_v4"+str(noise)+str(dims[0])+tit+".txt")[par,cost]
allFit = allFit[par,cost]
ax.plot(propLowHigh.reshape(-1),allFit.reshape(-1),marker='.',ms=15,label=ratio(tit)+"N"+str(noise),color=cols[i])
plt.ylim(-.1,2)
plt.xlim(-5,105)
ax.set_xlabel('Proportion of cells in High Stress')
ax.set_ylabel(r'$\Delta Fitness$')
plt.legend(loc=1,frameon=0,fontsize=10,ncol=2)
customaxis(ax, c_left='k', c_bottom='k', c_right='none', c_top='none', lw=1, size=10, pad=8)
#savefig('tempz.pdf', bbox_inches='tight' ,dpi=100)
plt.savefig("./imagesDE/deltaFitness"+tit[0]+str(cost)+str(noise)+tit[-2:]+'.pdf', bbox_inches='tight' ,dpi=100)
plt.clf()
def figStrategiesDFitnessSensing(titles,cost= [1,0],dims = 100):
numE = 5
path = './dataDE/'
fig = plt.figure(1)
fig.set_size_inches(3.3,2.5)
ax = fig.add_subplot(1,1,1)
i = -1
for tit in titles:
i += 1
allFit = 100.*(np.loadtxt("./dataDE/EN_Oct19_allFit_v4"+str(0)+str(dims)+tit+".txt"))
propLowHigh = np.loadtxt("./dataDE/EN_Oct19_propLowHigh_v4"+str(0)+str(dims)+tit+".txt")[:,1]
allFit = allFit[:,1]-allFit[:,0]
ax.plot(propLowHigh.reshape(-1),allFit.reshape(-1),marker='.',ms=15,label=ratio(tit)+"N"+str(0),color=cols[i])
#plt.ylim(-.1,2)
plt.xlim(-5,105)
ax.set_xlabel('Proportion of cells with \n high protein expression')
ax.set_ylabel(r'$\Delta Fitness$')
plt.legend(loc=1,frameon=0,fontsize=10,ncol=2)
customaxis(ax, c_left='k', c_bottom='k', c_right='none', c_top='none', lw=1, size=10, pad=8)
#savefig('tempz.pdf', bbox_inches='tight' ,dpi=100)
plt.savefig("./imagesDE/deltaFitnessSensing"+tit[0]+str(cost)+str(0)+tit[-2:]+'.pdf', bbox_inches='tight' ,dpi=100)
plt.clf()
numE = 5
path = './dataDE/'
fig = plt.figure(1)
fig.set_size_inches(0.8*4,0.8*2.5)
ax = fig.add_subplot(1,1,1)
y0 = []
y1 = []
y2 = []
y3 = []
y4 = []
i = -1
xaxis = []
xaxis2 = []
for tit in titles:
i += 1
propLowHigh = 100.*(np.loadtxt("./dataDE/EN_Oct19_allFit_v4"+str(0)+str(dims)+tit+".txt"))
propLowHigh[:,1] = propLowHigh[:,1]-propLowHigh[:,0]
y0.append(propLowHigh[0,1])
y1.append(propLowHigh[1,1])
y2.append(propLowHigh[2,1])
y3.append(propLowHigh[3,1])
y4.append(propLowHigh[4,1])
env = ratio(tit)
if tit[0] == "0":
xaxis.append(env)
else:
first = env[:env.find(":")]
second = env[env.find(":")+1:]
            print(float(env[:env.find(":")])/float(env[env.find(":")+1:]))
xaxis.append(float(env[env.find(":")+1:])/float(env[:env.find(":")]))
xaxis2.append(float(env[:env.find(":")]))
xaxis = np.asarray(xaxis)
xaxis2 = np.asarray(xaxis2)
ax.plot(xaxis,y0,color=(31./255, 119./255, 180./255),alpha=1,marker='.',label='Sensing, Env 1')
ax.plot(xaxis,y1,color=(31./255, 119./255, 180./255),alpha=1,marker='.',label='Sensing, Env 3')
ax.plot(xaxis,y2,color=(31./255, 119./255, 180./255),alpha=1,marker='.',label='Sensing, Env 10')
ax.plot(xaxis,y3,color=(31./255, 119./255, 180./255),alpha=1,marker='.',label='Sensing, Env 30')
ax.plot(xaxis,y4,color=(31./255, 119./255, 180./255),alpha=1.,marker='.',label='Sensing, Env 100')
plt.ylim([0,12])
ax.set_xlabel('Ratio High/Low Stress',fontsize=12)
ax.set_ylabel(r'$\Delta Fitness$',fontsize=12)
ax.set_yscale('linear')
ax.set_xscale('log')
customaxis(ax, c_left='k', c_bottom='k', c_right='none', c_top='none', lw=1, size=10, pad=8)
#plt.show()
plt.savefig("./imagesDE/linesAsymmetrxxxy"+tit[0]+str(cost)+str(0)+str(100)+tit[-2:]+'.pdf', bbox_inches='tight' ,dpi=100)
plt.clf()
def figNoiseFitnessDifference(names,noises,cost=1,dims=[2,1]):
numE = 5
path = './dataDE/'
fig = plt.figure(1)
fig.set_size_inches(3.3,2.5)
ax = fig.add_subplot(1,1,1)
colsInd = -1
for title in names:
colsInd += 1
i = -1
structureFitness = np.zeros((len(noises),5))
for noise in noises:
i += 1
            print(np.loadtxt("./dataDE/EN_Oct19_allFit_v4"+str(noise)+str(dims[0])+title+".txt"))
            print(np.loadtxt("./dataDE/EN_Oct19_allFit_v4"+str(noise)+str(dims[1])+title+".txt"))
allFit = 100.*(np.loadtxt("./dataDE/EN_Oct19_allFit_v4"+str(noise)+str(dims[0])+title+".txt")-
np.loadtxt("./dataDE/EN_Oct19_allFit_v4"+str(noise)+str(dims[1])+title+".txt"))#/np.loadtxt("./data/EN_Oct19_allFit_v4"+tit+".txt")
structureFitness[i,:] = allFit[:,cost].reshape(-1)
plt.title(title.replace("_","-"))
plt.savefig("./imagesDE/tempTogether3Env"+title.replace("_","-")+str(cost)+title[-2:]+'.pdf', bbox_inches='tight' ,dpi=100)
plt.clf()
for j in range(5):
if j == 0:
ax.plot(range(len(noises)),structureFitness[:,j],linewidth=2,color=cols[colsInd],label=ratio(title))
else:
ax.plot(range(len(noises)),structureFitness[:,j],linewidth=2,color=cols[colsInd])
ax.yaxis.set_ticks_position('both')
ax.vlines(range(len(noises)),0,2,linewidth=2,alpha=0.5)
plt.ylim((-0.25,2.1))
plt.xticks(range(len(noises)),noises)
ax.set_xlabel('Noise level')
ax.set_ylabel(r'$\Delta Fitness$')
customaxis(ax, c_left='k', c_bottom='none', c_right='none', c_top='none', lw=2, size=10, pad=8)
#plt.ylim(0,3)
plt.legend(loc=1,frameon=0,fontsize=10,ncol=2)
#savefig('tempw.pdf', bbox_inches='tight' ,dpi=100)
plt.savefig("./imagesDE/deltaFitnessNoise"+title[0]+str(cost)+title[-2:]+'.pdf', bbox_inches='tight' ,dpi=100)
plt.clf()
def figNoiseFitnessDifferenceOnlyOptimum(noises,cost=0,dims=[2,1]):
numE = 5
path = './dataDE/'
import statsmodels.api as sm
lowess = sm.nonparametric.lowess
fig = plt.figure(1)
fig.set_size_inches(3.5,2.5)
ax = fig.add_subplot(1,1,1)
noS = []
S = []
noS.append((10,"2Env_NN_PEAHSS"))
noS.append((10,"2Env_NN_PEEHSS"))
noS.append((10,"2Env_NN_PEVHSS"))
noS.append((10,"2Env_NN_PEHHSS"))
noS.append((10,"2Env_NN_PELHSS"))
S.append((100,"2Env_NN_PEAHSS"))
S.append((100,"2Env_NN_PEEHSS"))
S.append((100,"2Env_NN_PEVHSS"))
S.append((10,"2Env_NN_PEHHSS"))
S.append((1,"2Env_NN_PELHSS"))
allP = [noS,S]
param = allP[cost]
colsInd = -1
for freq,title in param:
colsInd += 1
i = -1
structureFitness = np.zeros((len(noises),6))
structureFitness1 = np.zeros((len(noises),1))
for noise in noises:
i += 1
fit0 = []
plt.subplot(10,1,i+1)
for iii in range(0,1):#[3,5]:
fnameF = "./dataDE/"+str(noise)+title+str(dims[0])+str(cost)+str(freq)+str(iii)+"obs.txt"
[cand,gammaP1],sense,fit = extractInfoFromFile(fnameF,noise,dims[0])
plt.plot(cand)
fit0.append(fit)
fit1 = []
for iii in range(0,1):#:
fnameF = "./dataDE/"+str(noise)+title+str(dims[1])+str(cost)+str(freq)+str(iii)+"obs.txt"
[cand,gammaP2],sense,fit = extractInfoFromFile(fnameF,noise,dims[1])
plt.plot(cand)
fit1.append(fit)
#print "2g: ", gammaP1, "1g: ", gammaP2
fit0 = np.asarray(fit0)
fit1 = np.asarray(fit1)
            print(cost, title[-5:-2], noise, fit0 - fit1)
structureFitness[i,:] = 100.*(fit0-fit1)
structureFitness1[i] = 100.*(np.median(fit0)-np.median(fit1))
#structureFitness[i] = 100.*np.max(fit0)
#structureFitness1[i] = 100.*np.max(fit1)
def smooth(y, box_pts):
box = np.ones(box_pts)/box_pts
y_smooth = np.convolve(y, box, mode='same')
return y_smooth
##plt.plot(np.repeat(noises,1),structureFitness.reshape(-1))
##plt.plot(np.repeat(noises,1),structureFitness1.reshape(-1))
#ax.plot(np.repeat(noises,6),structureFitness.reshape(-1),ms=10,marker='.',color=cols[colsInd],linewidth=1.5)
##ax.plot(np.repeat(noises,1),structureFitness1.reshape(-1),color=cols[colsInd]
##a = lowess(structureFitness1.reshape(-1),np.repeat(noises,1),frac = 0.6,delta=0.0)
##ax.plot(np.repeat(noises,6),structureFitness.reshape(-1),'.',color=cols[colsInd])
##ax.plot(a[:,0],a[:,1],linewidth=2,color=cols[colsInd])
plt.title(title.replace("_","-"))
plt.savefig("./imagesDE/tempTogetherNoise"+title.replace("_","-")+str(cost)+title[-2:]+'.pdf', bbox_inches='tight' ,dpi=100)
plt.clf()
ax.yaxis.set_ticks_position('both')
#ax.vlines(range(len(noises)),0,2,linewidth=2,alpha=0.5)
plt.ylim((-0.1,2.6))
#plt.xticks(range(len(noises)),noises)
ax.set_xlabel('Noise level')
ax.set_ylabel('Benefit of multistability')
customaxis(ax, c_left='k', c_bottom='none', c_right='none', c_top='none', lw=2, size=10, pad=8)
#plt.ylim(0,3)
#plt.legend(loc=1,frameon=0,fontsize=10,ncol=2)
plt.savefig("./imagesDE/deltaF2itnessNoiseOnlOptimum"+title[0]+str(cost)+title[-2:]+'.pdf', bbox_inches='tight' ,dpi=100)
plt.clf()
def figNoiseFitnessDifferenceOnlyOptimum3Env(noises,cost=0,dims=[2,1]):
numE = 5
path = './dataDE/'
import statsmodels.api as sm
lowess = sm.nonparametric.lowess
fig = plt.figure(1)
fig.set_size_inches(3.5,2.5)
ax = fig.add_subplot(1,1,1)
noS = []
S = []
noS.append((10,"3Env_0102_PEA"))
noS.append((10,"3Env_0102_PEE"))
noS.append((10,"3Env_0102_PEV"))
noS.append((10,"3Env_0102_PEH"))
noS.append((10,"3Env_0102_PEL"))
S.append((100,"3Env_0102_PEA"))
S.append((100,"3Env_0102_PEE"))
S.append((100,"3Env_0102_PEV"))
S.append((10,"3Env_0102_PEH"))
S.append((1,"3Env_0102_PEL"))
allP = [noS,S]
param = allP[cost]
colsInd = -1
for freq,title in param:
colsInd += 1
i = -1
structureFitness = np.zeros((len(["A","E","V","H","L"]),1))
structureFitness1 = np.zeros((len(["A","E","V","H","L"]),1))
for ending in ["A","E","V","H","L"][::-1]:
i += 1
plt.subplot(5,1,i+1)
fit0 = []
for iii in [0]:#"["",1,2,3,4,5]:
fnameF = "./dataDE/"+str(0)+title+str(ending)+"HSS"+str(dims[0])+str(cost)+str(freq)+str(iii)+"obs.txt"
[cand,gammaP],sense,fit = extractInfoFromFile(fnameF,0,dims[0])
plt.plot(cand)
fit0.append(fit)
fit1 = []
for iii in [0]:#["",1,2,3,4,5]:
fnameF = "./dataDE/"+str(0)+title+str(ending)+"HSS"+str(dims[1])+str(cost)+str(freq)+str(iii)+"obs.txt"
[cand,gammaP],sense,fit = extractInfoFromFile(fnameF,0,dims[1])
plt.plot(cand)
fit1.append(fit)
fit0 = np.asarray(fit0)
fit1 = np.asarray(fit1)
            print(fit0, fit1)
structureFitness[i,:] = 100.*(fit0-fit1)
#structureFitness1[i,:] = 100.*
plt.title(title.replace("_","-"))
plt.savefig("./imagesDE/tempTogether3Env"+title.replace("_","-")+str(cost)+title[-2:]+'.pdf', bbox_inches='tight' ,dpi=100)
plt.clf()
structure = [4,3,2,1,0][::-1]
#plt.plot(np.repeat(len(noises),1),structureFitness.reshape(-1))
#plt.plot(np.repeat(len(noises),1),structureFitness1.reshape(-1))
#a = lowess(structureFitness.reshape(-1),np.repeat(structure,6),frac = 0.66,delta=0.0)
ax.plot(np.repeat(structure,1),structureFitness.reshape(-1),ms=10,marker='.',color=cols[colsInd],linewidth=1.5)
#ax.plot(a[:,0],a[:,1],linewidth=2,color=cols[colsInd])
ax.yaxis.set_ticks_position('both')
#ax.vlines(range(len(noises)),0,2,linewidth=2,alpha=0.5)
plt.ylim((-0.1,2.6))
plt.xticks(range(len(structure)),structure)
ax.set_xlabel('Time in the Intermediate Environment')
ax.set_ylabel('Benefit of Multistability')
customaxis(ax, c_left='k', c_bottom='none', c_right='none', c_top='none', lw=2, size=10, pad=8)
#plt.ylim(0,3)
#plt.legend(loc=1,frameon=0,fontsize=10,ncol=2)
plt.savefig("./imagesDE/deltaF3ENVitnessNoiseOnlOptimum"+title[0]+str(cost)+title[-2:]+'.pdf', bbox_inches='tight' ,dpi=100)
plt.clf()
def fig3Strategies(names,cost=1,dims=[2,1]):
numE = 5
path = './dataDE/'
fig = plt.figure(1)
fig.set_size_inches(3.3,2.5)
ax = fig.add_subplot(1,1,1)
colsInd = -1
for n in ["A","E","V","H","L"][::-1]:
n2 = [_ for _ in names if "3Env_Noise_PE"+n in _][::-1]
colsInd += 1
i = -1
structureFitness = np.zeros((len(["A","E","V","H","L"]),5))
for title in n2:
i += 1
allFit = 100.*(np.loadtxt("./dataDE/EN_Oct19_allFit_v4"+str(0)+str(dims[0])+title+".txt")-
np.loadtxt("./dataDE/EN_Oct19_allFit_v4"+str(0)+str(dims[1])+title+".txt"))#/np.loadtxt("./data/EN_Oct19_allFit_v4"+tit+".txt")
structureFitness[i,:] = allFit[:,cost].reshape(-1)
if n == "A": lab= 10000
elif n == "E": lab= 1000
elif n == "V": lab= 100
elif n == "H": lab= 10
elif n == "L": lab= 1
for j in range(5):
if j == 0:
ax.plot(range(5),structureFitness[:,j],linewidth=2,color=cols[len(["A","E","V","H","L"])-colsInd],label=str(lab))
else:
ax.plot(range(5),structureFitness[:,j],linewidth=2,color=cols[len(["A","E","V","H","L"])-colsInd])
ax.yaxis.set_ticks_position('both')
ax.vlines(range(5),0,2,linewidth=2,alpha=0.5)
plt.xlim((0,3.1))
plt.ylim((-0.25,2.1))
plt.xticks(range(5),["1","10","100","1000","10000"])
ax.set_xlabel('Second environment')
ax.set_ylabel(r'$\Delta Fitness$')
customaxis(ax, c_left='k', c_bottom='none', c_right='none', c_top='none', lw=2, size=10, pad=8)
#plt.ylim(0,3)
plt.legend(loc=1,frameon=0,fontsize=10,ncol=2)
plt.savefig("./imagesDE/deltaFitnessSecondEnvir"+title[0]+str(cost)+str(0)+title[-2:]+'.pdf', bbox_inches='tight' ,dpi=100)
plt.clf()
def figStrategiesRegionDeltaFitness(titles,cost=1,dims = [2,1]):
numE = 5
matrixToPlot = np.zeros((len(titles),numE))
i = -1
for title in titles:
i += 1
allFit = 100.*(np.loadtxt("./dataDE/EN_Oct19_allFit_v4"+str(0)+str(dims[0])+title+".txt")-
np.loadtxt("./dataDE/EN_Oct19_allFit_v4"+str(0)+str(dims[1])+title+".txt"))#/np.loadtxt("./data/EN_Oct19_allFit_v4"+tit+".txt")
matrixToPlot[i,:] = (allFit[:,cost])
    print(matrixToPlot)
fig = plt.figure(1)
fig.set_size_inches(len(titles)/5.*2.,2.*2.)
ax = fig.add_subplot(1,1,1)
plt.imshow(matrixToPlot,interpolation='none',cmap='YlGn',vmin=-0.,vmax=1)
#plt.imshow(matrixToPlot,interpolation='none',cmap='BuPu',vmin=-0.2,vmax=1.5)
plt.xlabel('Environmental Transition Rate',fontsize=10)
plt.ylabel('Symmetry',fontsize=10)
plt.xticks(np.arange(5),['1','3','10','30','100'],rotation=90)
    print([float(ratio(title)[ratio(title).find(":")+1:])/float(ratio(title)[:ratio(title).find(":")]) for title in titles])
plt.yticks(np.arange(len(titles)),[float(ratio(title)[ratio(title).find(":")+1:])/float(ratio(title)[:ratio(title).find(":")]) for title in titles])
plt.title('Proportion of cells in phenotype high',fontsize=10)
plt.colorbar(orientation='vertical')
customaxis(ax, c_left='none', c_bottom='none', c_right='none', c_top='none', lw=2, size=10, pad=8)
plt.savefig("./imagesDE/imshow3DeltaFit"+title[0]+str(cost)+str(0)+str(21)+title[-2:]+'.pdf', bbox_inches='tight' ,dpi=100)
plt.clf()
#plt.show()
def figGamma(names,dims=[2,1]):
for cost in [0,1]:
numE = 5
path = './dataDE/'
import statsmodels.api as sm
lowess = sm.nonparametric.lowess
fig = plt.figure(1)
fig.set_size_inches(3.5,2.5)
ax = fig.add_subplot(1,1,1)
noS = []
S = []
fit0 = []
fit1 = []
colsInd = -1
for title in names:
freq = 100
colsInd += 1
i = -1
for ending in [""][::-1]:
i += 1
for iii in [0]:#"["",1,2,3,4,5]:
fnameF = "./dataDE/"+str(0)+title+str(ending)+str(dims[0])+str(cost)+str(freq)+"obs.txt"
[cand,gammaP],sense,fit = extractInfoFromFile(fnameF,0,dims[0])
fit0.append(fit)
for iii in [0]:#["",1,2,3,4,5]:
fnameF = "./dataDE/"+str(0)+title+str(ending)+str(dims[1])+str(cost)+str(freq)+"obs.txt"
[cand,gammaP],sense,fit = extractInfoFromFile(fnameF,0,dims[1])
fit1.append(fit)
fit0 = np.asarray(fit0)
fit1 = np.asarray(fit1)
arrayDiffFitness = 100.*(fit0-fit1)
st = np.array([0.001,0.01,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0])
st = st/1.5*11/10
        print(len(st), len(arrayDiffFitness))
#plt.plot(np.repeat(len(noises),1),structureFitness.reshape(-1))
#plt.plot(np.repeat(len(noises),1),structureFitness1.reshape(-1))
#a = lowess(structureFitness.reshape(-1),np.repeat(structure,6),frac = 0.66,delta=0.0)
ax.plot(st,arrayDiffFitness.reshape(-1),ms=10,marker='.',color=cols[cost],linewidth=1.5,label=str(cost))
#ax.plot(a[:,0],a[:,1],linewidth=2,color=cols[colsInd])
ax.yaxis.set_ticks_position('both')
#ax.vlines(range(len(noises)),0,2,linewidth=2,alpha=0.5)
plt.ylim((-0.1,1))
plt.xscale('log')
plt.xlim((6E-4,1.))
#plt.xticks(range(len(structure)),structure)
ax.set_xlabel('Mean stress levels')
ax.set_ylabel('Benefit of bistability')
customaxis(ax, c_left='k', c_bottom='none', c_right='none', c_top='none', lw=2, size=10, pad=8)
#plt.ylim(0,3)
plt.legend(loc=1,frameon=0,fontsize=10,ncol=2)
plt.savefig("./imagesDE/deltaFGamma"+title[0]+str(cost)+title[-2:]+'.pdf', bbox_inches='tight' ,dpi=100)
plt.show()
plt.clf()
def extractOptimum1Env():
from scipy.stats import gamma
gammaParameters = np.zeros((2,101))
for stress in range(0,101):
if stress < 10:
s = "0"+str(stress)
else:
s = str(stress)
title = "1Env_"+s+"SS"
fnameF = "./dataDE/"+str(0)+title+"1010obs.txt"
[d,cand],sense,fit = extractInfoFromFile(fnameF,0,1)
x = np.linspace(0.01, 10000., num=100) # values for x-axis
d = np.zeros(100)
w = 0
for jj in range(0,len(cand)-1,3):
d += cand[jj]*gamma.pdf(x, cand[jj+1], loc=0, scale=cand[jj+2]) # probability distribution
w += cand[jj]
d /= np.sum(d)
plt.plot(d)
gammaParameters[:,stress] = cand[1:]
        print(title, cand, fit)
plt.show()
np.savetxt("gamma1EnvOptimum.txt",gammaParameters)
def main():
"""
## Fig. 2
names = ["2Env_NN_PEAHSS","2Env_NN_PEEHSS","2Env_NN_PEVHSS","2Env_NN_PEHHSS","2Env_NN_PELHSS"]
possibleFactors = []
for numChanges in [1,3,10,30,100]:
for sudden in range(2):
for dim in [1,2]:
for noise in [0]:
main2_4(name,dim=dim,noise=noise,numE2=1)
#main2_4_Plot(name,dim=dim,noise=noise,numE2=2)
figStrategiesRegion(names,cost,dim,noise=0) #imshow
figStrategiesDFitnessSensing(names,cost=[1,0],dims=2) #lognormal difference as propto in xcaxis
"""
"""
## Fig. 3
names = ["2Env_NN_PEAHSS","2Env_NN_PEEHSS","2Env_NN_PEVHSS","2Env_NN_PEHHSS","2Env_NN_PELHSS"]
possibleFactors = []
for numChanges in [1,3,10,30,100]:
for sudden in range(2):
for dim in [1,2]:
for noise in [0]:
main2_4(name,dim=dim,noise=noise,numE2=1)
#main2_4_Plot(name,dim=dim,noise=noise,numE2=2)
figStrategiesRegionDeltaFitness(names,cost,dims=[2,1])
"""
"""
## Fig. 4
for cost in [0,1]:
## To plot distributions uncomment: subplot(...) and plot(cand) in the following files.
## For 3 environments, difference in fitness as a function of the second environment. Only for optimum
figNoiseFitnessDifferenceOnlyOptimum3Env([],cost=cost,dims=[2,1])
## Noise only for optimum
figNoiseFitnessDifferenceOnlyOptimum([0,0.25, 0.5,0.75,1,1.5,2,3,4,5],cost=cost)
"""
"""
## Fig. S2
names = ["2Env_NN_PEAHSS","2Env_NN_PEEHSS","2Env_NN_PEVHSS","2Env_NN_PEHHSS","2Env_NN_PELHSS"]
possibleFactors = []
for numChanges in [1,3,10,30,100]:
for sudden in range(2):
for dim in [100]:
for noise in [0]:
main2_4(name,dim=dim,noise=noise,numE2=1)
#main2_4_Plot(name,dim=dim,noise=noise,numE2=2)
figStrategiesRegion(names,cost,dim,noise=0) #imshow
"""
"""
## Fig. S3
names = ["2Env_NN_PEAHWS","2Env_NN_PEEHWS","2Env_NN_PEVHWS","2Env_NN_PEHHWS","2Env_NN_PELHWS"]
possibleFactors = []
for numChanges in [1,3,10,30,100]:
for sudden in range(2):
for dim in [1,2]:
for noise in [0]:
main2_4(name,dim=dim,noise=noise,numE2=1)
#main2_4_Plot(name,dim=dim,noise=noise,numE2=2)
figStrategiesRegion(names,cost,dim,noise=0) #imshow
figStrategiesDFitnessSensing(names,cost=[1,0],dims=2) #lognormal difference as propto in xcaxis
"""
"""
## Fig. S3 insets
names = ["2Env_NN_PEIHWS","2Env_NN_PEtHWS","2Env_NN_PEjHWS","2Env_NN_PEkHWS","2Env_NN_PEsHWS","2Env_NN_PEmHWS", "2Env_NN_PEnHWS"]
possibleFactors = []
for numChanges in [1,3,10,30,100]:
for sudden in range(2):
for dim in [1,2]:
for noise in [0]:
main2_4(name,dim=dim,noise=noise,numE2=1)
#main2_4_Plot(name,dim=dim,noise=noise,numE2=2)
figStrategiesRegion(names,cost,dim,noise=0) #imshow
"""
"""
## Fig. S5
names = ["3Env_0102_PEHAHSS","3Env_0102_PEHEHSS","3Env_0102_PEHVHSS","3Env_0102_PEHHHSS","3Env_0102_PEHLHSS"]
figGamma(names,dims=[2,1])
"""
"""
## OTHER STUFF.
## Extract data into something easily studied
names = ["3Env_Noise_PEAAHSS","3Env_Noise_PEAEHSS","3Env_Noise_PEAVHSS","3Env_Noise_PEAHHSS","3Env_Noise_PEALHSS",
"3Env_Noise_PEEAHSS","3Env_Noise_PEEEHSS","3Env_Noise_PEEVHSS","3Env_Noise_PEEHHSS","3Env_Noise_PEELHSS",
"3Env_Noise_PEVAHSS","3Env_Noise_PEVEHSS","3Env_Noise_PEVVHSS","3Env_Noise_PEVHHSS","3Env_Noise_PEVLHSS",
"3Env_Noise_PEHAHSS","3Env_Noise_PEHEHSS","3Env_Noise_PEHVHSS","3Env_Noise_PEHHHSS","3Env_Noise_PEHLHSS",
"3Env_Noise_PELAHSS","3Env_Noise_PELEHSS","3Env_Noise_PELVHSS","3Env_Noise_PELHHSS","3Env_Noise_PELLHSS"]
names = ["3Env_0102_PEAAHSS","3Env_0102_PEAEHSS","3Env_0102_PEAVHSS","3Env_0102_PEAHHSS","3Env_0102_PEALHSS",
"3Env_0102_PEEAHSS","3Env_0102_PEEEHSS","3Env_0102_PEEVHSS","3Env_0102_PEEHHSS","3Env_0102_PEELHSS",
"3Env_0102_PEVAHSS","3Env_0102_PEVEHSS","3Env_0102_PEVVHSS","3Env_0102_PEVHHSS","3Env_0102_PEVLHSS",
"3Env_0102_PEHAHSS","3Env_0102_PEHEHSS","3Env_0102_PEHVHSS","3Env_0102_PEHHHSS","3Env_0102_PEHLHSS",
"3Env_0102_PELAHSS","3Env_0102_PELEHSS","3Env_0102_PELVHSS","3Env_0102_PELHHSS","3Env_0102_PELLHSS"]
#names = ["2Env_NN_PEAHSS","2Env_NN_PEEHSS","2Env_NN_PEVHSS","2Env_NN_PEHHSS","2Env_NN_PELHSS"]
#names = sorted(["0Env_00010SS","0Env_0010SS","0Env_010SS","0Env_030SS","0Env_050SS","0Env_070SS","0Env_090SS","0Env_100SS","0Env_020SS","0Env_040SS","0Env_060SS","0Env_080SS"])
#names = ["2Env_NN_PEAHWS","2Env_NN_PEEHWS","2Env_NN_PEVHWS","2Env_NN_PEHHWS","2Env_NN_PELHWS"]
#names = ["2Env_NN_PEIHWS","2Env_NN_PEtHWS","2Env_NN_PEjHWS","2Env_NN_PEkHWS","2Env_NN_PEsHWS","2Env_NN_PEmHWS", "2Env_NN_PEnHWS"]
#names = ["2Env_NN_PEAHSS","2Env_NN_PEEHSS","2Env_NN_PEVHSS","2Env_NN_PEHHSS","2Env_NN_PELHSS"]
#names = ["2Env_NN_PEVHSS"]
#names = ["2Env_NN_PEAHWS","2Env_NN_PEEHWS","2Env_NN_PEVHWS","2Env_NN_PEHHWS","2Env_NN_PELHWS"]
"""
names = ["3Env_0102_PEHLHSS"]
names = []
for end in ["A","E","V","H","L"]:
names.append("3Env_0102_PEE"+end+"HSS")
names.append("3Env_0102_PEV"+end+"HSS")
"""
#names = ["2Env_NN_PEAHSS","2Env_NN_PEEHSS","2Env_NN_PEVHSS","2Env_NN_PEHHSS","2Env_NN_PELHSS"]
names = ["2Env_NN_PEIHWS","2Env_NN_PEtHWS","2Env_NN_PEjHWS","2Env_NN_PEkHWS","2Env_NN_PEsHWS","2Env_NN_PEmHWS", "2Env_NN_PEnHWS"]
#names = ["2Env_NN_PEAHWS","2Env_NN_PEEHWS","2Env_NN_PEVHWS","2Env_NN_PEHHWS","2Env_NN_PELHWS"]
#extractOptimum1Env()
#names = ["2Env_NN_PEIHWS","2Env_NN_PEtHWS","2Env_NN_PEjHWS","2Env_NN_PEkHWS","2Env_NN_PEsHWS","2Env_NN_PEmHWS", "2Env_NN_PEnHWS"]
#names = ["2Env_NN_PEAHWS","2Env_NN_PEEHWS","2Env_NN_PEVHWS","2Env_NN_PEHHWS","2Env_NN_PELHWS"]
## Extract data into something easily studied
dims = [2,100]
for name in names:
for noise in [0]:#,0.25, 0.5,0.75,1,1.5,2,3,4,5]:
for dim in dims:
pass
#main2_4(name,dim=dim,noise=noise,numE2=1)
#main2_4_Plot(name,dim=dim,noise=noise,numE2=2)
            # Superpose the 1- and 2-gamma fits
#main2_4_PlotTogether(name,dim=[1,2],noise=noise,numE2=5)
for name in names:
pass
#printAB(name)
for cost in [0]:#,2,3,4]:
for dim in dims:
pass
## imshow with prop of cells in high. asymmetry vs frequency
#figStrategiesRegion(names,cost,dim,noise=0) #imshow
## transition rate vs prop cells in high
#figStrategiesPropTo(names,cost,dim=dim,noise=0) #lines
for dim in [100]:
pass
## asymmetry vs prop cells in high. Only for 2 and 0 env
#figStrategiesAsymmetryo(names,'','log',dim=dim,noise=0)
## Difference in fitness sensing vs no sensing as a function of prop cells in high
#figStrategiesDFitnessSensing(names,cost=[1,0],dims=2) #lognormal difference as propto in xcaxis
for cost in [1]:
pass
## Difference in fitness as a function of prop cells in high
#figStrategiesEnvironmentEffect(names,noises = [0,0.25, 0.5,0.75,1,1.5,2,3,4,5],cost=cost,dims=[2,1]) #lognormal difference as propto in xcaxis
        ## Difference in fitness as a function of the noise in the environment. Only for two environments
#figNoiseFitnessDifference(names,[0,1.,5],cost=cost,dims=[2,1])
#figNoiseFitnessDifference(names,[0,0.25, 0.5,0.75,1,1.5,2,3,4,5],cost=cost,dims=[2,1])
## For 3 environments, difference in fitness as a function of the second environment. Only for optimum
#figNoiseFitnessDifferenceOnlyOptimum3Env(names,cost=cost,dims=[2,1])
# Noise only for optimum
#figNoiseFitnessDifferenceOnlyOptimum([0,0.25, 0.5,0.75,1,1.5,2,3,4,5],cost=cost)
#figGamma(names,dims=[2,1])
## For 3 environments, difference in fitness as a function of the second environment
for cost in [0,1]:
pass
        #fig3Strategies(names,cost=cost,dims=[2,1]) #lognormal difference as propto in x-axis
## For 2/3 environments. Difference in fitness imshow
for cost in [0,1]:
pass
## imshow with prop of cells in high. asymmetry vs frequency
#figStrategiesRegionDeltaFitness(names,cost,dims=[2,1])
"""
if __name__ == "__main__": main()
|
from sys import stdin
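# Count joltage differences of 1 and 3 across the sorted adapter chain (outlet at 0, device at max + 3) and print their product.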
def main():
adapters = [int(line) for line in stdin]
adapters.sort()
nums = [0] + adapters + [adapters[-1] + 3]
diffs = [nums[i] - nums[i - 1] for i in range(1, len(nums))]
print(diffs.count(1) * diffs.count(3))
if __name__ == "__main__":
main()
|
##
# Nathália Harumi Kuromiya - RA 175188
# Assignment 1
#
# This file is responsible for executing the problem to get statistics and
# compare searchers.
##
from modeling import *
p1 = robot((10, 10), (50, 50))
p2 = robot((50, 50), (10, 10))
p3 = robot((10, 50), (50, 10))
p4 = robot((50, 10), (10, 50))
p5 = robot((10,10), (10, 50))
p6 = robot((50, 10), (50, 50))
problems = [p1, p2, p3, p4, p5, p6]
searchers = [breadth_first_graph_search, depth_first_graph_search, astar_search]
headers = ['search algorithm', 'p1', 'p2', 'p3', 'p4', 'p5', 'p6']
for p in problems:
print(p, "\n")
result_1 = breadth_first_graph_search(p)
print(" BFS path cost: ", str(result_1.path_cost), "\n")
result_2 = depth_first_graph_search(p)
print(" DFS path cost: ", str(result_2.path_cost), "\n")
result_3 = astar_search(p)
print(" A* path cost: ", str(result_3.path_cost) + "\n\n")
compare_searchers(problems, headers, searchers)
|
from python.util import reverseComplement
class Node:
def __init__(self, value):
self.value = value
self.path = None
self.edges = set()
def __add__(self, other):
self.edges.add(other)
return self
def __str__(self):
return self.value
def __eq__(self, y):
return self.value == y.value
def __hash__(self):
return hash(self.value)
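# Build a de Bruijn graph over the reads: nodes are k-mers and edges join k-mers taken from consecutive positions within a read.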
def build_deBruijn_graph(un, k):
global data_len
result = {}
for r in un:
for i in range(0, data_len - k):
start = r[i:i + k]
startNode = result.get(start)
if not startNode:
startNode = Node(start)
result[start] = startNode
end = r[i + 1:i + k + 1]
endNode = result.get(end)
if not endNode:
endNode = Node(end)
result[end] = endNode
startNode += endNode
result[start] = startNode
result[end] = endNode
return result
def first(l):
for x in l:
return x
return None
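# Walk the graph from start_node, keeping the shortest known path to each node, and return the shortest cycle back to start_node that is long enough to span a full read.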
def traverseGraph(start_node, edges, visited, k):
global data_len
node = edges[start_node]
node.path = [node]
visited.add(start_node)
queue = [node]
minPath = None
while len(queue) > 0:
node = queue.pop()
for e in node.edges:
if not e.path:
visited.add(e.value)
e.path = node.path + [e]
queue.append(e)
elif len(e.path) < len(node.path) + 1:
e.path = node.path + [e]
if e.value == start_node and len(node.path) >= data_len - k + 1:
if not minPath or len(minPath) > len(node.path):
minPath = node.path
return minPath
if __name__ == '__main__':
s = []
while True:
l = input()
if not l: break
s.append(l)
un = set(s).union({reverseComplement(x) for x in s})
data_len = len(s[0])
k = data_len - 1
while k >= 1:
        edges = build_deBruijn_graph(un, k)
g_visited_nodes = set()
start_node = first(edges.keys())
path = None
while True:
visited_nodes = set()
path = traverseGraph(start_node, edges, visited_nodes, k)
if path:
break
g_visited_nodes |= visited_nodes
start_node = first(edges.keys() - g_visited_nodes)
if not start_node:
k -= 1
break
if path:
string = None
for c in path:
if not string:
string = c.value
else:
string += c.value[-1]
            i = 0
            overlap = 0
            while i < (len(string) // 2):
                if string[:i] == string[-i:]:
                    overlap = i
                i += 1
            print(string[:len(string) - overlap])
break
|
import sys
import T21
import T50
import queue
import threading
class TIS_100:
def __init__(self):
# Nodes
self.nodes={}
# Clock tick
self.tick=threading.Condition()
def add_T21_node(self, name, code):
'''Add a computation node named name, with code.'''
new_node=T21.T21()
new_node.name=name
new_node.code=code
self.nodes[name]=new_node
def add_T50_node(self, name, fd):
'''Add a file node, reading from file descriptor fd.'''
new_node=T50.T50()
new_node.name=name
new_node.fd=fd
self.nodes[name]=new_node
def add_port(self, source_node, source_name, dest_node, dest_name=None):
'''Add a port between a node and another, or a node and a file.
        If dest is a file, dest_name need not be defined.
'''
try:
new_queue=queue.Queue(maxsize=1)
self.nodes[source_node].add_out_port(source_name, new_queue)
self.nodes[dest_node].add_in_port(dest_name, new_queue)
except:
# No such node or file
print("Trying to add invalid port.") # Lol descriptive error messages.
raise
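    # Run func once per clock tick: every node thread blocks on the shared condition until exec_one() notifies it.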
def _exec_on_tick(self, func, _tick):
while True:
with _tick:
_tick.wait()
func()
def start(self):
for node in self.nodes:
t = threading.Thread(name=str(node), target=self._exec_on_tick, args=(self.nodes[node].exec_next,self.tick))
t.start()
def exec_one(self):
with self.tick:
self.tick.notify_all()
if __name__=="__main__":
import time
import sys
tis=TIS_100()
if False:
with open("../duplicator.100") as f:
dcode = f.read()
with open("../adder.100") as f:
acode = f.read()
tis.add_T21_node('duplicator',dcode)
tis.add_T21_node('adder',acode)
tis.add_T50_node('stdin_node', sys.stdin)
tis.add_T50_node('stdout_node', sys.stdout)
tis.add_port('stdin_node','foo','duplicator','IN')
tis.add_port('duplicator','DOWN','adder','UP')
tis.add_port('adder','OUT','stdout_node','bar')
if True:
with open("../test.100") as f:
tcode = f.read()
tis.add_T21_node('test_node',tcode)
tis.add_T50_node('stdin_node', sys.stdin)
tis.add_T50_node('stdout_node', sys.stdout)
tis.add_port('stdin_node','foo','test_node','IN')
tis.add_port('test_node','OUT','stdout_node','bar')
tis.start()
while True:
tis.exec_one()
print("tick")
time.sleep(0.2)
|
from wardmetrics.core_methods import eval_events, eval_segments
from wardmetrics.utils import *
from wardmetrics.visualisations import *
from metric.MyMetric import testMyMetric
ground_truth_test = [
(40, 60),
(70, 75),
(90, 100),
(125, 135),
(150, 157),
(187, 220),
]
detection_test = [
(10, 20),
(45, 52),
(65, 80),
(120, 180),
(195, 200),
(207, 213),
]
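# The definitions below supersede the test intervals above; only these values feed the evaluations that follow.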
ground_truth_test = [
(40, 60),
(73, 75),
(90, 100),
(125, 135),
(150, 157),
(190, 215),
(220, 230),
(235, 250),
(275, 292),
(340, 368),
]
detection_test = [
(10, 20),
(45, 52),
(70, 80),
(120, 180),
(195, 200),
(207, 213),
(221, 237),
(239, 243),
(245, 250),
]
testMyMetric(ground_truth_test,detection_test)
eval_start = 2
eval_end = 241
# Calculate segment results:
twoset_results, segments_with_scores, segment_counts, normed_segment_counts = eval_segments(ground_truth_test, detection_test, eval_start, eval_end)
# Print results:
print_detailed_segment_results(segment_counts)
print_detailed_segment_results(normed_segment_counts)
print_twoset_segment_metrics(twoset_results)
# Access segment results in other formats:
print("\nAbsolute values:")
print("----------------")
print(detailed_segment_results_to_list(segment_counts)) # segment scores as basic python list
print(detailed_segment_results_to_string(segment_counts)) # segment scores as string line
print(detailed_segment_results_to_string(segment_counts, separator=";", prefix="(", suffix=")\n")) # segment scores as string line
print("Normed values:")
print("--------------")
print(detailed_segment_results_to_list(normed_segment_counts)) # segment scores as basic python list
print(detailed_segment_results_to_string(normed_segment_counts)) # segment scores as string line
print(detailed_segment_results_to_string(normed_segment_counts, separator=";", prefix="(", suffix=")\n")) # segment scores as string line
# Access segment metrics in other formats:
print("2SET metrics:")
print("-------------")
print(twoset_segment_metrics_to_list(twoset_results)) # twoset_results as basic python list
print(twoset_segment_metrics_to_string(twoset_results)) # twoset_results as string line
print(twoset_segment_metrics_to_string(twoset_results, separator=";", prefix="(", suffix=")\n")) # twoset_results as string line
# Visualisations:
plot_events_with_segment_scores(segments_with_scores, ground_truth_test, detection_test)
plot_segment_counts(segment_counts)
plot_twoset_metrics(twoset_results)
# Run event-based evaluation:
gt_event_scores, det_event_scores, detailed_scores, standard_scores = eval_events(ground_truth_test, detection_test)
# Print results:
print_standard_event_metrics(standard_scores)
print_detailed_event_metrics(detailed_scores)
# Access results in other formats:
print(standard_event_metrics_to_list(standard_scores)) # standard scores as basic python list, order: p, r, p_w, r_w
print(standard_event_metrics_to_string(standard_scores)) # standard scores as string line, order: p, r, p_w, r_w
print(standard_event_metrics_to_string(standard_scores, separator=";", prefix="(", suffix=")\n")) # standard scores as string line, order: p, r, p_w, r_w
print(detailed_event_metrics_to_list(detailed_scores)) # detailed scores as basic python list
print(detailed_event_metrics_to_string(detailed_scores)) # detailed scores as string line
print(detailed_event_metrics_to_string(detailed_scores, separator=";", prefix="(", suffix=")\n")) # detailed scores as string line
# Show results:
plot_events_with_event_scores(gt_event_scores, det_event_scores, ground_truth_test, detection_test, show=False)
plot_event_analysis_diagram(detailed_scores)
|
#!/usr/bin/python3
import configparser
import pprint
import sys
from pymongo import MongoClient
config = configparser.ConfigParser()
config.read('mongoConf.ini')
src = config['PRIME']
dst = config['SECONDARY']
def check_db(client, db):
if not db in client.list_database_names():
sys.exit("On host: '{}' the DB: '{}' does not exist!".format(client.address, db))
def client_no_uri(node):
return MongoClient(host=node.get('host'),
port=node.getint('port'),
username=node.get('username'),
password=node.get('password'),
authSource=node.get('authSource'),
authMechanism=node.get('authMechanism'))
def client_uri(node):
client = "mongodb://{}:{}@{}:{}/?authSource={}&authMechanism=SCRAM-SHA-1".format(node.get('username'),
node.get('password'),
node.get('host'),
node.get('port'),
node.get('authSource'))
return MongoClient(client)
src_client = client_no_uri(src)
check_db(src_client, src.get('src_db'))
src_db = src_client[src.get('src_db')]
collections = src_db.list_collection_names()
dst_client = client_uri(dst)
dst_db = dst_client[dst.get('dst_db')]
for collection in collections:
src_col = src_db[collection]
src_list = []
for documents in src_col.find({}, {'_id': False}):
src_list.append(documents)
pprint.pprint(src_list)
    dst_col_ext = src_col.name + dst.get('collection_ext')
    dst_col = dst_db[dst_col_ext]
    if src_list:  # insert_many() raises InvalidOperation when given an empty list
        dst_col.insert_many(src_list)
|
# Generated by Django 2.2.4 on 2019-10-22 06:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('androidApis', '0002_remove_transactions_car'),
]
operations = [
migrations.RenameField(
model_name='customer',
old_name='uid',
new_name='username',
),
migrations.AddField(
model_name='customer',
name='password',
field=models.CharField(default='userName', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='transactions',
name='premiumId',
field=models.IntegerField(default=50),
preserve_default=False,
),
]
|
# This code refers to exercise 5
# Created on: 09/12/2019
# Author: Caroline Salgado Alves
# Version: 1.0
medida = float(input("Digite uma medida em cm: "))
nova = medida/100
print("A nova medida em metros é: ", nova)
|
#!/usr/bin/python
'''
File: arguments.py
Author: Matt Welch
Description: simplified functionality for getting arguments (Matlab style) and
selecting files
'''
import sys
import os # file select dialog
DEBUG_MODE = False
def getArgs():
# gets command line arguments & count Matlab style
nargin = len(sys.argv)
varargin = sys.argv
if DEBUG_MODE:
print('Number of arguments: {0}'.format(nargin))
print('Argument List: {0}'.format(varargin))
return varargin, nargin
def selectFile(extension='.dat'):
# selects a file matching the extension from the current directory
files = [f for f in os.listdir('.') if (os.path.isfile(f) and f.endswith(extension))]
files.sort(reverse=True) # sort files in descending order
count=1
print("Which '{0}' file from the current directory would you like to load?".format(extension))
for f in files:
print("[{0}] {1}".format(count, f))
count +=1
default_ans = '0'
user_answer = raw_input("Enter the number of the file to load (0 exits): (default=%s): " % default_ans) or default_ans
user_answer = int(user_answer)
if user_answer == 0:
print("User selected '0', Exiting...")
exit(1)
infile = files[user_answer-1]
print("User selected file number {0}: {1}".format(user_answer, infile))
return infile
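# A minimal usage sketch (note: selectFile() calls raw_input, so this module
# targets Python 2 as written):
if __name__ == '__main__':
    args, argc = getArgs()
    print('Number of arguments received: {0}'.format(argc))
    chosen = selectFile('.dat')
    print('Will load: {0}'.format(chosen))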
|
import falcon
import scrypt
from base64 import b64decode
from os import urandom
from kartotek.config import config
from kartotek.db import Database
_db = Database()
def new_password(password):
salt = urandom(256)
full_salt = salt + config['password_secret'].encode('utf-8')
return (salt, scrypt.hash(password, full_salt))
def verify_password(password, salt, password_hash):
return password_hash == scrypt.hash(password, salt + config['password_secret'].encode('utf-8'))
def auth(fn):
setattr(fn, '__auth', True)
return fn
class AuthMiddleware:
def process_resource(self, req, resp, resource, params):
        if not hasattr(getattr(resource, "on_%s" % req.method.lower()), '__auth'):
return
if req.auth is None:
raise falcon.HTTPUnauthorized()
auth_type, user_password = req.auth.split(" ")
if auth_type.lower() != "basic":
raise falcon.HTTPBadRequest()
username, password = b64decode(user_password).decode('utf-8').split(":")
credentials = _db.get_credentials(username)
if credentials is None:
raise falcon.HTTPUnauthorized()
if not verify_password(password, credentials['password_salt'], credentials['password_hash']):
raise falcon.HTTPUnauthorized()
req.user_id = credentials['user_id']
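# A minimal sketch showing how the two password helpers above fit together
# (run directly it just round-trips a password; it assumes config['password_secret']
# is set, as the rest of the module already requires). For comparing hashes of
# untrusted input, hmac.compare_digest could be preferred over '=='.
if __name__ == '__main__':
    salt, pw_hash = new_password('correct horse battery staple')
    assert verify_password('correct horse battery staple', salt, pw_hash)
    assert not verify_password('wrong password', salt, pw_hash)
    print('password helpers round-trip OK')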
|
import cdat_info
import cdms2
import ESMP
import numpy
import unittest
import sys
class TestGridTypes(unittest.TestCase):
"""
All test interpolate to the same grid or to curvilinear grid
"""
def setUp(self):
pass
def test_test1(self):
"""
2D gsRegrid
"""
u = cdms2.open(cdat_info.get_prefix() + '/sample_data/clt.nc')('u')[0, 0,...]
clt = cdms2.open(cdat_info.get_prefix() + '/sample_data/clt.nc')('clt')[0, ...]
ctlOnUGrid = clt.regrid( u.getGrid() )
#print 'ctlOnUGrid.getGrid() = ', type(ctlOnUGrid.getGrid())
self.assertRegexpMatches(str(type(ctlOnUGrid.getGrid())),
"cdms2.grid.TransientRectGrid")
def test_test2(self):
"""
2D ESMP
"""
u = cdms2.open(cdat_info.get_prefix() + '/sample_data/clt.nc')('u')[0, 0,...]
clt = cdms2.open(cdat_info.get_prefix() + '/sample_data/clt.nc')('clt')[0, ...]
ctlOnUGrid = clt.regrid( u.getGrid(), regridTool = "ESMP" )
#print 'ctlOnUGrid.getGrid() = ', type(ctlOnUGrid.getGrid())
self.assertRegexpMatches(str(type(ctlOnUGrid.getGrid())),
"cdms2.grid.TransientRectGrid")
if __name__ == '__main__':
print "" # Spacer
ESMP.ESMP_Initialize()
suite = unittest.TestLoader().loadTestsFromTestCase(TestGridTypes)
unittest.TextTestRunner(verbosity = 1).run(suite)
|
import matplotlib.pyplot as plt
import sys
def process(fileName):
# read in data from file
history = []
f = open(fileName)
i = 0
curGen = []
for line in f:
        # wrap in list() so the values can be indexed later (map() is lazy on Python 3)
        data = list(map(float, line.split()))
curGen.append(data)
i += 1
if i == 100:
history.append(curGen)
curGen = []
i = 0
# find averages
averages = [[] for x in range(9)]
for generation in history:
local_sum = [0]*9
# find local sum
for model in generation:
for param in range(9):
local_sum[param] += model[param]
# divide by number of members in population to get average
for param in range(9):
averages[param].append(local_sum[param]/100.0)
# plot the average chi values over generations
plt.figure(1)
# chis
"""
plt.subplot(221)
plt.plot(averages[8])
plt.xlabel('Generation')
plt.ylabel('Average Chi')
plt.title('Chi-Squared (not reduced)')
plt.subplot(223)
plt.plot(averages[6])
plt.xlabel('Generation')
plt.ylabel('Average Chi')
plt.title('SED')
plt.subplot(224)
plt.plot(averages[7])
plt.xlabel('Generation')
plt.ylabel('Average Chi')
plt.title('Visibilities')
"""
# DOES NOT WORK FOR NON-REDUCED
#plt.ylim(-10,100)
labels = ['Inner Radius', 'Outer Radius', 'Grain Size', 'Disk Mass', 'Surface Density', 'Beta']
for i in range(6):
plt.subplot(321+i)
plt.plot(averages[i])
plt.xlabel('Generation')
plt.ylabel('Average ' + labels[i])
plt.title(labels[i])
"""
plt.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
"""
# DOES THIS OVERWRITE? IF NO, HANDLE DIFFERENTLY
#plt.savefig('genetic/' + plotFile)
plt.tight_layout()
plt.show()
file_name = sys.argv[1]
process(file_name) |
#!/usr/bin/env python3
# Created by:Euel Yirga
# Created on:October 2019
# Constants for a PyBadge game: screen layout, sprite settings and button states
SCREEN_X = 160
SCREEN_Y = 120
SCREEN_GRID_Y = 16
SCREEN_GRID_X = 8
SPRITE_SIZE = 16
FPS = 60
SPRITE_MOVEMENT_SPEED = 1
# using for button state
button_state = {
"button_up": "up",
"button_just_pressed": "just pressed",
"button_still_pressed": "still pressed",
"button_released": "released"
} |
import json
import re
from datetime import datetime, timedelta
from http import HTTPStatus
from urllib.request import urlopen
from django.http import Http404, JsonResponse
from django.shortcuts import get_object_or_404
from config.settings.utils import get_env_var
from grid.models import Grid
from menu.models import Menu
def get_dong(user_address):
dong_pattern = re.compile(r'^[가-힣]+[1-9]*동$')
addrs = user_address.split(' ')
for addr in addrs:
if re.fullmatch(dong_pattern, addr):
return addr
def get_x_y_grid(user_dong):
try:
x_y_grid = get_object_or_404(Grid, name=user_dong)
return x_y_grid
    except Http404:
return JsonResponse(
{
"message": "동에 대한 x, y 좌표 정보가 없습니다.",
},
status=HTTPStatus.BAD_REQUEST,
)
def get_base_time(hour):
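    """Map the current hour to a forecast base-time string.

    The branches below select the latest 3-hourly slot (02, 05, 08, 11, 14,
    17, 20 or 23) for the given hour, e.g. get_base_time(10) returns '0500'
    and get_base_time(1) returns '2000'.
    """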
hour = int(hour)
if hour < 3:
temp_hour = '20'
elif hour < 6:
temp_hour = '23'
elif hour < 9:
temp_hour = '02'
elif hour < 12:
temp_hour = '05'
elif hour < 15:
temp_hour = '08'
elif hour < 18:
temp_hour = '11'
elif hour < 21:
temp_hour = '14'
elif hour < 24:
temp_hour = '17'
return temp_hour + '00'
def get_sky_info(data):
try:
weather_info = data['response']['body']['items']['item']
if weather_info[3]['category'] == 'SKY':
return weather_info[3]['fcstValue']
elif weather_info[5]['category'] == 'SKY':
return weather_info[5]['fcstValue']
except KeyError:
return JsonResponse(
{
"message": "기상청 서버로부터 날씨 정보를 가져오는 중 문제가 발생하여 날씨 정보를 받아오지 못했습니다.",
},
status=HTTPStatus.INTERNAL_SERVER_ERROR,
)
def get_user_dong_weather(nx, ny):
service_key = get_env_var('WEATHER_API_SERVICE_KEY')
now = datetime.now()
now_date = now.strftime('%Y%m%d')
now_hour = int(now.strftime('%H'))
    if now_hour < 6:
        # use the previous calendar day; plain integer subtraction on a
        # YYYYMMDD string breaks on the first day of a month
        base_date = (now - timedelta(days=1)).strftime('%Y%m%d')
    else:
        base_date = now_date
    base_time = get_base_time(now_hour)
    num_of_rows = '6'
nx = str(nx)
ny = str(ny)
_type = 'json'
api_url = 'http://newsky2.kma.go.kr/service/SecndSrtpdFrcstInfoService2/ForecastSpaceData?serviceKey={}' \
'&base_date={}&base_time={}&nx={}&ny={}&numOfRows={}&_type={}'.format(
service_key, base_date, base_time, nx, ny, num_of_rows, _type)
data = urlopen(api_url).read().decode('utf8')
json_data = json.loads(data)
sky = get_sky_info(json_data)
return sky
def get_restaurants(**kwargs):
restaurant_id = kwargs['restaurant_id']
category_id = kwargs['category_id']
menu = Menu.objects.filter(restaurant=restaurant_id).values(
'restaurant', 'pk', 'name', 'img', 'detail', 'price', 'type',
)
if not menu:
return JsonResponse(
{
"message": "메뉴가 존재하지 않습니다.",
},
status=HTTPStatus.NOT_FOUND,
)
    menu = list(menu)
data = {
'menu': menu,
'category_id': category_id,
}
return JsonResponse(data) |
from PIL import Image
import numpy as np
from scipy.ndimage import filters
from matplotlib.pylab import *
# Sobel filter
im=np.array(Image.open('C:\\Users\\T\\Downloads\\5.jpg').convert('L'))
# derivative in the x direction
imx=np.zeros(im.shape)
filters.sobel(im,1,imx)
# derivative in the y direction
imy=np.zeros(im.shape)
filters.sobel(im,0,imy)
# gradient magnitude of the image
magnitude=np.sqrt(imx**2+imy**2)
flt=figure()
gray()
flt.add_subplot(1,4,1)
imshow(im)
flt.add_subplot(1,4,2)
imshow(imx)
flt.add_subplot(1,4,3)
imshow(imy)
flt.add_subplot(1,4,4)
imshow(magnitude)
# Gaussian derivative filters
im=np.array(Image.open('C:\\Users\\T\\Downloads\\5.jpg').convert('L'))
sigma=5
# derivative in the x direction
imx=np.zeros(im.shape)
filters.gaussian_filter(im,(sigma,sigma),(0,1),imx)
# derivative in the y direction
imy=np.zeros(im.shape)
filters.gaussian_filter(im,(sigma,sigma),(1,0),imy)
# gradient magnitude of the image
magnitude=np.sqrt(imx**2+imy**2)
flt=figure()
gray()
flt.add_subplot(1,4,1)
imshow(im)
flt.add_subplot(1,4,2)
imshow(imx)
flt.add_subplot(1,4,3)
imshow(imy)
flt.add_subplot(1,4,4)
imshow(magnitude) |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-05-23 08:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('application', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='databaserestore',
old_name='aws_filename',
new_name='aws_bucket',
),
]
|
import media
import fresh_tomatoes
# construct six movie objects
titanic = media.Movie(
"Titanic",
"The World's Most Beloved and Acclaimed Film",
"https://upload.wikimedia.org/wikipedia/en/2/22/Titanic_poster.jpg",
"https://www.youtube.com/watch?v=2e-eXJ6HgkQ")
harry_potter = media.Movie(
"Harry Potter",
"The Grown-up Wizard Meets His Godfather",
"http://static.tvtropes.org/pmwiki/pub/images/harry_potter_and_the_prisoner_of_azkaban_ver5_4865.jpg", # noqa
"https://www.youtube.com/watch?v=lAxgztbYDbs")
wind = media.Movie(
"Gone with the Wind",
"Recalling Civil War and Plantation Days of South",
"https://upload.wikimedia.org/wikipedia/en/b/b3/Gone_With_The_Wind_1967_re-release.jpg", # noqa
"https://www.youtube.com/watch?v=8mM8iNarcRc")
roman = media.Movie(
"Roman Holiday",
"The Love between a Reporter and a Royal Princess",
"https://upload.wikimedia.org/wikipedia/en/b/b7/Roman_holiday.jpg",
"https://www.youtube.com/watch?v=9GzCG6lpFUw")
inception = media.Movie(
"Inception",
"Dream on",
"https://upload.wikimedia.org/wikipedia/en/2/2e/Inception_%282010%29_theatrical_poster.jpg", # noqa
"https://www.youtube.com/watch?v=66TuSJo4dZM")
god_father = media.Movie(
"The Godfather", "The Story of A New York Crime Family",
"http://celebmix.com/wp-content/uploads/2017/03/celebrating-45-years-of-the-godfather-01.jpg", # noqa
"https://www.youtube.com/watch?v=sY1S34973zA")
# the method of fresh tomatoes takes in a list of movies as its input
movies = [titanic, harry_potter, wind, roman, inception, god_father]
fresh_tomatoes.open_movies_page(movies)
|
"""create stakeholders
Revision ID: 9ccd4437c1e5
Revises:
Create Date: 2020-04-29 18:23:46.443633
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "9ccd4437c1e5"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"firm",
sa.Column("created_at", sa.DateTime(), nullable=False),
sa.Column("updated_at", sa.DateTime(), nullable=False),
sa.Column("UUID", sa.String(length=36), nullable=False),
sa.Column("HEBREW_NAME", sa.String(), nullable=True),
sa.Column("ENGLISH_NAME", sa.String(), nullable=True),
sa.Column("ID", sa.String(), nullable=False),
sa.Column("ID_TYPE", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("ID", "ID_TYPE"),
)
op.create_table(
"maya_stakeholder",
sa.Column("updated_at", sa.DateTime(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=False),
sa.Column("AccumulateHoldings", sa.String(), nullable=True),
sa.Column("AsmachtaDuachMeshubash", sa.String(), nullable=True),
sa.Column("IsRequiredToReportChange", sa.String(), nullable=True),
sa.Column("KodSugYeshut", sa.String(), nullable=True),
sa.Column("HolderOwner", sa.String(), nullable=True),
sa.Column("CapitalPct", sa.Float(), nullable=True),
sa.Column("CapitalPct_Dilul", sa.Float(), nullable=True),
sa.Column("ChangeSincePrevious", sa.Integer(), nullable=True),
sa.Column("CompanyName", sa.String(), nullable=True),
sa.Column("CompanyNameEn", sa.String(), nullable=True),
sa.Column("CompanyUrl", sa.String(), nullable=True),
sa.Column("CurrentAmount", sa.Integer(), nullable=True),
sa.Column("Date2", sa.Date(), nullable=True),
sa.Column("FullName", sa.String(), nullable=True),
sa.Column("FullNameEn", sa.String(), nullable=True),
sa.Column("HeaderMisparBaRasham", sa.String(), nullable=True),
sa.Column("HeaderSemelBursa", sa.String(), nullable=True),
sa.Column("IsFrontForOthers", sa.String(), nullable=True),
sa.Column("MaximumRetentionRate", sa.String(), nullable=True),
sa.Column("MezahehHotem", sa.Integer(), nullable=True),
sa.Column("MezahehTofes", sa.Integer(), nullable=True),
sa.Column("MezahehYeshut", sa.String(), nullable=True),
sa.Column("MinimumRetentionRate", sa.String(), nullable=True),
sa.Column("MisparNiarErech", sa.Integer(), nullable=True),
sa.Column("MisparZihui", sa.String(), nullable=False),
sa.Column("Nationality", sa.String(), nullable=True),
sa.Column("NeyarotErechReshumim", sa.String(), nullable=True),
sa.Column("Notes", sa.Text(), nullable=True),
sa.Column("Position", sa.String(), nullable=True),
sa.Column("PreviousAmount", sa.Integer(), nullable=True),
sa.Column("PreviousCompanyNames", sa.String(), nullable=True),
sa.Column("PumbiLoPumbi", sa.String(), nullable=True),
sa.Column("StockName", sa.String(), nullable=False),
sa.Column("SugMisparZihui", sa.String(), nullable=False),
sa.Column("TreasuryShares", sa.String(), nullable=True),
sa.Column("VotePower", sa.Float(), nullable=True),
sa.Column("VotePower_Dilul", sa.Float(), nullable=True),
sa.Column("company", sa.String(), nullable=True),
sa.Column("date", sa.DateTime(), nullable=True),
sa.Column("fix_for", sa.String(), nullable=True),
sa.Column("fixed_by", sa.String(), nullable=True),
sa.Column("id", sa.String(), nullable=True),
sa.Column("next_doc", sa.String(), nullable=True),
sa.Column("prev_doc", sa.String(), nullable=True),
sa.Column("stakeholder_type", sa.String(), nullable=True),
sa.Column("type", sa.String(), nullable=True),
sa.Column("url", sa.String(), nullable=True),
sa.PrimaryKeyConstraint(
"created_at", "MisparZihui", "StockName", "SugMisparZihui"
),
)
op.create_table(
"person",
sa.Column("created_at", sa.DateTime(), nullable=False),
sa.Column("updated_at", sa.DateTime(), nullable=False),
sa.Column("UUID", sa.String(length=36), nullable=False),
sa.Column("HEBREW_NAME", sa.String(), nullable=True),
sa.Column("ENGLISH_NAME", sa.String(), nullable=True),
sa.Column("ID", sa.String(), nullable=False),
sa.Column("ID_TYPE", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("ID", "ID_TYPE"),
)
op.create_table(
"stakeholders",
sa.Column("updated_at", sa.DateTime(), nullable=False),
sa.Column("HOLDER", sa.String(length=36), nullable=False),
sa.Column("FIRM", sa.String(length=36), nullable=False),
sa.Column("CAPITAL_PERCENT", sa.Float(), nullable=True),
sa.Column("NUM_STOCKS", sa.Integer(), nullable=True),
sa.Column("DATE", sa.Date(), nullable=True),
sa.Column("NOTES", sa.Text(), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint("HOLDER", "FIRM", "created_at"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("stakeholders")
op.drop_table("person")
op.drop_table("maya_stakeholder")
op.drop_table("firm")
# ### end Alembic commands ###
|
#-*- coding: utf-8 -*-
import sqlite3
import sys
import re
import mechanize
import urllib2
import cookielib
from bs4 import BeautifulSoup
import telepot
# prepare Telegram messaging via telepot
YOUR_ACCESS_TOKEN = ""
bot = telepot.Bot(YOUR_ACCESS_TOKEN)
#print bot.getMe()["username"]
#print bot.getMe()["first_name"]
#bot.sendMessage( 24060268, u"이것보게")
# read the bookstore book data from SQLite
reload(sys)
sys.setdefaultencoding('euc-kr')
AladinBook = []
AladinShop = []
con = sqlite3.connect(r"C:\Users\infomax\Documents\db\AladinChk.sqlite")
cursor = con.execute("select * from OffAladinBook where Value = 0")
for row in cursor:
print str(unicode(row[0])), str(row[1])
AladinBook.append( (str(unicode(row[0])), str(row[1])) )
cursor = con.execute("select * from OffAladinShop where Value = 0")
mydata = cursor.fetchall()
for row in mydata :
print str(unicode(row[0]))
AladinShop.append( ( str(unicode(row[0])), str(unicode(row[1])) ) )
# open a browser session with mechanize
cj = cookielib.CookieJar()
br = mechanize.Browser()
br.set_cookiejar(cj)
br.set_handle_robots(False)
for i in range(len( AladinBook ) ):
# &KeyTag=A2&
    # A2 Jongno, A8 Daehak-ro, D0 Suyu (used-bookstore branch codes)
br.open( "http://www.aladin.co.kr/search/wsearchresult.aspx?SearchTarget=UsedStore&SearchWord=" + AladinBook[i][1]+ "&x=0&y=0" )
#mobileurl = "http://www.aladin.co.kr/m/msearch.aspx?SearchTarget=UsedStore&SearchWord=" + AladinBook[i][0].replace(" ", "+")
soup = BeautifulSoup( br.response().read(), "html5lib" )
cols = soup.findAll( 'a', attrs={ 'class' : "usedshop_off_text3" } )
print AladinBook[i][0]
for col in cols:
print col.text
for shop in AladinShop :
if col.text.find(shop[0]) >= 0 :
mobileurl = "http://www.aladin.co.kr/m/msearch.aspx?SearchTarget=UsedStore&KeyTag=" + shop[1] + "&SearchWord=" + AladinBook[i][0].replace(" ", "+")
bot.sendMessage( 0, col.text + u"에 " + AladinBook[i][0] + u" 입고 되었습니다 \n" + mobileurl)
|
# -*- encoding = utf8 -*-
import pyglet
from pyglet.gl import *
class Viewer:
def __init__(self, width=400, height=400):
self.window = pyglet.window.Window(width=width, height=height)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
def render(self, geometories, is_save=False, path=None):
glClearColor(1,1,1,1)
self.window.clear()
self.window.switch_to()
self.window.dispatch_events()
for geometroy in geometories:
geometroy.render()
if is_save:
pyglet.image.get_buffer_manager().get_color_buffer().save(path)
else:
self.window.flip()
def draw_floor(self, x, y, height, width):
        # Block takes (lefttop, extent-along-x, extent-along-y), not a vertex list
        floor = Block([x, y], width, height)
floor.set_fill_color(1.0, 1.0, 1.0) # fill white
floor.set_line_color(0.0, 0.0, 0.0) # line black
floor.render()
def _draw_wall(self):
glColor4f(0.0, 0.0, 0.0, 1.0)
glBegin(GL_POLYGON)
# down
glVertex2f(0, 0)
glVertex2f(self.window.width, 0)
glVertex2f(self.window.width, 10)
glVertex2f(0, 10)
glEnd()
# up
glBegin(GL_POLYGON)
glVertex2f(0, self.window.height-10)
glVertex2f(self.window.width, self.window.height-10)
glVertex2f(self.window.width, self.window.height)
glVertex2f(0, self.window.height)
glEnd()
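# A minimal usage sketch (kept as comments so importing the module has no side
# effects; it assumes a display is available for the pyglet window):
#
#   viewer = Viewer(width=400, height=400)
#   block = Block([50, 50], 100, 80)   # lefttop, extent along x, extent along y
#   block.set_fill_color(0.2, 0.6, 1.0)
#   block.set_line_color(0, 0, 0)
#   while True:
#       viewer.render([block])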
class Geometory:
def __init__(self):
self._color = (0, 0, 0, 1.0)
def render(self):
        raise NotImplementedError
def set_color(self, r, g, b):
self._color = (r, g, b, 1)
class Block(Geometory):
def __init__(self, lefttop, height, width):
Geometory.__init__(self)
vertexs = [[lefttop[0], lefttop[1]], [lefttop[0]+height, lefttop[1]], \
[lefttop[0]+height, lefttop[1]+width], [lefttop[0], lefttop[1]+width]]
self.vertexs = vertexs
self._line_color = (0.0, 0.0, 0.0, 1.0)
self._fill_color = (1.0, 1.0, 1.0, 1.0)
def set_line_color(self, r, g, b):
if r <= 1 and g <= 1 and b <= 1:
self._line_color = (r, g, b, 1.0)
else:
self._line_color = (r/255.0, g/255.0, b/255.0, 1.0)
def set_fill_color(self, r, g, b):
if r <= 1 and g <= 1 and b <= 1:
self._fill_color = (r, g, b, 1.0)
else:
self._fill_color = (r/255.0, g/255.0, b/255.0, 1.0)
def render(self):
# fill
glColor4f(*self._fill_color)
glBegin(GL_POLYGON)
for vertex in self.vertexs:
glVertex2f(vertex[0], vertex[1])
glEnd()
# line
glColor4f(*self._line_color)
for i in range(len(self.vertexs)-1):
glBegin(GL_LINES)
glVertex2f(*self.vertexs[i])
glVertex2f(*self.vertexs[i+1])
glEnd()
glBegin(GL_LINES)
glVertex2f(*self.vertexs[i+1])
glVertex2f(*self.vertexs[0])
glEnd()
class Agent(Geometory):
def __init__(self, center, height, width):
Geometory.__init__(self)
vertexs = [[center[0]+width/3.0, center[1]+height/2.0], \
[center[0]-width/3.0, center[1]+height/2.0], \
[center[0]-2*width/3.0, center[1]], \
[center[0]-width/3.0, center[1]-height/2.0], \
[center[0]+width/3.0, center[1]-height/2.0], \
[center[0]+2*width/3.0, center[1]]]
self.vertexs = vertexs
self._fill_color = (1.0, 1.0, 1.0, 1.0)
def set_fill_color(self, r, g, b, alpha=1.0):
if r <= 1 and g <= 1 and b <= 1:
self._fill_color = (r, g, b, alpha)
else:
self._fill_color = (r/255.0, g/255.0, b/255.0, alpha)
def render(self):
# fill
glColor4f(*self._fill_color)
glBegin(GL_POLYGON)
for vertex in self.vertexs:
glVertex2f(vertex[0], vertex[1])
glEnd()
class Triangle(Geometory):
def __init__(self, center, height, width):
Geometory.__init__(self)
vertexs = [[center[0], center[1]], [center[0]-width/2, center[1]+height/2], \
[center[0]+width/2, center[1]+height/2]]
self.vertexs_up = vertexs
vertexs = [[center[0], center[1]], [center[0]-width/2, center[1]-height/2], \
[center[0]+width/2, center[1]-height/2]]
self.vertexs_down = vertexs
vertexs = [[center[0], center[1]], [center[0]-width/2, center[1]+height/2], \
[center[0]-width/2, center[1]-height/2]]
self.vertexs_left = vertexs
vertexs = [[center[0], center[1]], [center[0]+width/2, center[1]+height/2], \
[center[0]+width/2, center[1]-height/2]]
self.vertexs_right = vertexs
        self._fill_color = (1.0, 1.0, 1.0, 1.0)
        # default the four directional fills so render() works before the setters are called
        self._fill_color_up = self._fill_color_down = self._fill_color_left = self._fill_color_right = (1.0, 1.0, 1.0, 1.0)
def set_fill_color_up(self, r, g, b, alpha=1.0):
if r <= 1 and g <= 1 and b <= 1:
self._fill_color_up = (r, g, b, alpha)
else:
self._fill_color_up = (r/255.0, g/255.0, b/255.0, alpha)
def set_fill_color_down(self, r, g, b, alpha=1.0):
if r <= 1 and g <= 1 and b <= 1:
self._fill_color_down = (r, g, b, alpha)
else:
self._fill_color_down = (r/255.0, g/255.0, b/255.0, alpha)
def set_fill_color_left(self, r, g, b, alpha=1.0):
if r <= 1 and g <= 1 and b <= 1:
self._fill_color_left = (r, g, b, alpha)
else:
self._fill_color_left = (r/255.0, g/255.0, b/255.0, alpha)
def set_fill_color_right(self, r, g, b, alpha=1.0):
if r <= 1 and g <= 1 and b <= 1:
self._fill_color_right = (r, g, b, alpha)
else:
self._fill_color_right = (r/255.0, g/255.0, b/255.0, alpha)
def render(self):
# fill
glColor4f(*self._fill_color_up)
glBegin(GL_POLYGON)
for vertex in self.vertexs_up:
glVertex2f(vertex[0], vertex[1])
glEnd()
glColor4f(*self._fill_color_down)
glBegin(GL_POLYGON)
for vertex in self.vertexs_down:
glVertex2f(vertex[0], vertex[1])
glEnd()
glColor4f(*self._fill_color_left)
glBegin(GL_POLYGON)
for vertex in self.vertexs_left:
glVertex2f(vertex[0], vertex[1])
glEnd()
glColor4f(*self._fill_color_right)
glBegin(GL_POLYGON)
for vertex in self.vertexs_right:
glVertex2f(vertex[0], vertex[1])
glEnd() |
# -*- coding: utf-8 -*-
import random
import re
import os
import requests
import sys
import lxml
import time
from logger import logger
from bs4 import BeautifulSoup
from generateExcel import generateExcel
from agentAndProxies import hds
from agentAndProxies import GetIpProxy
from model.elementConstant import elementConstant
from model.cityConstant import cityConstant
#print ("Start : %s" % time.ctime())
#print ("End : %s" % time.ctime())
class spiderWorker:
    # constructor / initialisation
def __init__(self):
self.elementConstant = elementConstant()
self.cityConstant = cityConstant()
self.getIpProxy = GetIpProxy()
self.url = u''
self.infos = {}
self.proxyServer = ()
        # used for Excel generation from the collected data
self.generateExcel = generateExcel()
self.elementConstant = elementConstant()
    # 0) preparation
    # arguments: 1: url  2: totalCount  3: pageCount  4: xlsPath unique identifier  5: city
def prepare(self, url, totalCount, pageCount, xlsPathIdentifier, city, taskIndex=1):
        # '{0}' cannot be passed in as part of the argument, so turn 'pg' into 'pg{0}' here
self.url = str(url).replace('pg', 'pg{0}')
self.totalCount = int(totalCount)
self.pageCount = int(pageCount)
self.xlsPathIdentifier = str(xlsPathIdentifier)
self.city = str(city)
self.dateFolder = time.strftime("%Y%m", time.localtime())
self.taskIndex = taskIndex
        # logger initialisation
self.outputFolder = os.path.join(
os.getcwd(), u'output', self.city, self.dateFolder)
if not os.path.exists(self.outputFolder):
os.makedirs(self.outputFolder)
#self.logger = logger(os.path.join(self.outputFolder, 'worker_{0}.log'.format(self.xlsPathIdentifier)))
self.logger = open(os.path.join(self.outputFolder, 'worker_{0}.log'.format(
self.xlsPathIdentifier)), 'w', encoding='utf-8')
    # 0) preparation
    # newer convenience initialiser: just pass a json dict with the agreed keys
def prepareWithJson(self, jsonData, taskIndex=1):
# e.g.
# {
# "city":"beijing",
# "logFile":"worker_begin375_end390.log",
# "pageCount":91,
# "totalCount":2718,
# "url":"https://bj.lianjia.com/ershoufang/pgbp375ep390/",
# "xlsxFile":"HouseData_begin375_end390.xlsx"
# }
keyCity = u'city'
keyLogFile = u'logFile'
keyPageCount = u'pageCount'
keyTotalCount = u'totalCount'
keyUrl = u'url'
keyXlsxFile = u'xlsxFile'
        # initialisation
self.jsonData = jsonData
self.url = str(jsonData[keyUrl]).replace('pg', 'pg{0}')
self.totalCount = int(jsonData[keyTotalCount])
self.pageCount = int(jsonData[keyPageCount])
self.xlsxPath = jsonData[keyXlsxFile]
self.city = jsonData[keyCity]
self.dateFolder = time.strftime("%Y%m", time.localtime())
self.taskIndex = taskIndex
        # logger initialisation
self.outputFolder = os.path.join(
os.getcwd(), u'output', self.city, self.dateFolder)
if not os.path.exists(self.outputFolder):
os.makedirs(self.outputFolder)
#self.logger = logger(os.path.join(self.outputFolder, 'worker_{0}.log'.format(self.xlsPathIdentifier)))
self.logger = open(os.path.join(self.outputFolder,
jsonData[keyLogFile]), 'w', encoding='utf-8')
    # 1) start
def start(self):
assert self.url != '', u'[ERROR] 应该先调用 prepare 函数进行初始化再使用'
self.generateExcel.addSheetExcel(u'在售列表')
for i in self.generate_allurl(self.pageCount):
self.get_allurl(i)
# self.logger.log.info(i)
self.logger.write(i + '\n')
path = os.path.join(
self.outputFolder, self.xlsxPath)
self.generateExcel.saveExcel(path)
self.logger.write('写入完毕')
self.logger.flush()
self.logger.close()
    # 2) generate the listing-page links for the required number of pages
def generate_allurl(self, pageCount):
for url_next in range(1, pageCount + 1):
self.page = url_next
yield self.url.format(url_next)
    # 3) fetch every property card from each aggregated listing page
def get_allurl(self, generate_allurl):
geturl = self.requestUrlForRe(generate_allurl)
if geturl.status_code == 200:
# 提取 title 跳转地址 对应每套商品房 SKU
re_set = re.compile(
'<div class="item*?".*?<a.*?class="img.*?".*?href="(.*?)"')
re_get = re.findall(re_set, geturl.text)
for index in range(len(re_get)):
self.open_url(re_get[index], index)
# self.logger.log.info(re_get[index])
self.logger.write(re_get[index] + '\n')
# 增加休眠时间,防止服务器拒绝
time.sleep(3)
    # 4) scrape the detail data for each individual property (SKU)
def open_url(self, re_get, index):
res = self.requestUrlForRe(re_get)
if res.status_code == 200 and len(res.text) > 0:
soup = BeautifulSoup(res.text, 'lxml')
self.infos[u'网址'] = re_get
self.infos[u'标题'] = soup.select('.main')[0].text
self.infos[u'总价'] = soup.select('.total')[0].text
self.infos[u'每平方售价'] = soup.select('.unitPriceValue')[0].text
self.infos[u'户型'] = soup.select('.mainInfo')[0].text
self.infos[u'朝向'] = soup.select('.mainInfo')[1].text
self.infos[u'大小'] = soup.select('.mainInfo')[2].text
self.infos[u'楼层'] = soup.select('.subInfo')[0].text
self.infos[u'装修'] = soup.select('.subInfo')[1].text
self.infos[u'房子类型'] = soup.select('.subInfo')[2].text
if self.infos[u'房子类型'] == u'未知':
# 导出数据时,为了防止改成 number_format 错误,把字符串换成数值
self.infos[u'房子类型'] = u'-1'
self.infos[u'小区名称'] = soup.select('.info')[0].text
self.infos[u'区域'] = soup.select('.info > a')[0].text
# infos[u'地区'] = soup.select('.info > a')[1].text
self.infos[u'详细区域'] = soup.select('.info')[1].text
self.infos[u'链家编号'] = soup.select('.info')[3].text
self.infos[u'关注房源'] = soup.select('#favCount')[0].text + u"人关注"
self.infos[u'看过房源'] = soup.select('#cartCount')[0].text + u"人看过"
partent = re.compile(
'<li><span.*?class="label">(.*?)</span>(.*?)</li>')
result = re.findall(partent, res.text)
for item in result:
if item[0] != u"抵押信息" and item[0] != u"房本备件":
self.infos[item[0]] = item[1]
if item[0] == u'产权年限':
self.infos[u'产权年限'] = item[1]
# 挂牌时间等信息,格式有变化,重新添加正则表达式
partent = re.compile(
'<li>\s*<span.*?class=.*?>(.*?)</span>\s+<span>(.*?)</span>\s+</li>')
result = re.findall(partent, res.text)
for item in result:
# print unicode(item[0]),unicode(item[1])
if item[0] == u'挂牌时间':
self.infos[u'挂牌时间'] = item[1]
if item[0] == u'交易权属':
self.infos[u'交易权属'] = item[1]
if item[0] == u'上次交易':
self.infos[u'上次交易'] = item[1]
if item[0] == u'房屋用途':
self.infos[u'房屋用途'] = item[1]
if item[0] == u'房屋年限':
self.infos[u'房屋年限'] = item[1]
if item[0] == u'产权所属':
self.infos[u'产权所属'] = item[1]
if item[0] == u'房本备件':
self.infos[u'房本备件'] = item[1]
row = index + (self.page - 1) * 30
self.infos[u'序号'] = str(row + 1)
self.infos[u'状态'] = u'在售'
self.infos[u'城市'] = self.cityConstant.cityToChinese[self.city]
# self.logger.log.info('taskId: ' + str(self.taskIndex) + ' row: ' + str(row) + ' / {0} {1}%'.format(self.totalCount,round(float(row)/self.totalCount * 100, 2)))
print('pid: [' + str(os.getppid()) + '] taskId: ' + str(self.taskIndex) + ' row: ' + str(
row) + ' / {0} {1}%\n'.format(self.totalCount, round(float(row)/self.totalCount * 100, 2)))
self.logger.write('taskId: ' + str(self.taskIndex) + ' row: ' + str(row) +
' / {0} {1}%\n'.format(self.totalCount, round(float(row)/self.totalCount * 100, 2)))
self.logger.flush()
if row == 0:
for index_item in self.elementConstant.data_constant.keys():
self.generateExcel.writeExcelPositon(0, self.elementConstant.data_constant.get(index_item),
index_item)
self.wirte_source_data(1)
else:
row = row + 1
self.wirte_source_data(row)
return self.infos
    # unified request wrapper: rotates proxies and the User-Agent header to reduce server-side blocking
def requestUrlForRe(self, url):
try:
if len(self.proxyServer) == 0:
tempProxyServer = self.getIpProxy.get_random_ip()
else:
tempProxyServer = self.proxyServer
proxy_dict = {
tempProxyServer[0]: tempProxyServer[1]
}
tempUrl = requests.get(
url, headers=hds[random.randint(0, len(hds) - 1)], proxies=proxy_dict)
code = tempUrl.status_code
            # 'or' would accept every status code; only 2xx responses count as success
            if 200 <= code < 300:
self.proxyServer = tempProxyServer
return tempUrl
else:
self.proxyServer = self.getIpProxy.get_random_ip()
return self.requestUrlForRe(url)
except Exception as e:
self.proxyServer = self.getIpProxy.get_random_ip()
s = requests.session()
s.keep_alive = False
return self.requestUrlForRe(url)
    # write the source data into Excel: values are read from the infos dict and written cell by cell; the output format can be adjusted here
def wirte_source_data(self, row):
for itemKey in self.infos.keys():
item_valus = self.infos.get(itemKey)
if itemKey == u'详细区域':
temps_item_valus = item_valus.replace(
u'\xa0', u' ').split(u' ')
self.generateExcel.writeExcelPositon(
row, self.elementConstant.data_constant.get(u'所属下辖区'), temps_item_valus[0])
self.generateExcel.writeExcelPositon(
row, self.elementConstant.data_constant.get(u'所属商圈'), temps_item_valus[1])
self.generateExcel.writeExcelPositon(
row, self.elementConstant.data_constant.get(u'所属环线'), temps_item_valus[2])
else:
tempItemKey = self.elementConstant.unit_check_name(itemKey)
count = self.elementConstant.data_constant.get(tempItemKey)
# print itemKey, unicode(tempItemKey), self.elementConstant.data_constant.get(tempItemKey), item_valus
if tempItemKey != None and count != None:
# todo 检查使用标准,修改使用逻辑
if tempItemKey == u'链家编号':
item_valus = item_valus[0:len(item_valus) - 2]
elif tempItemKey == u'单价':
item_valus = item_valus[0:len(item_valus) - 4]
elif tempItemKey == u'建筑面积':
item_valus = item_valus[0:len(item_valus) - 1]
elif tempItemKey == u'建成时间':
if u'年' in item_valus:
item_valus = item_valus[0:item_valus.index(u'年')]
else:
# 有些城市没有建成时间,因为 beautiful soup 的关系,会解析为建筑类型。此时不要这个字段就好了
item_valus = str(-1)
elif tempItemKey == u'关注人数' or tempItemKey == u'看过房源':
item_valus = item_valus[0:len(item_valus) - 3]
elif tempItemKey == u'挂牌时间':
item_valus = item_valus.replace('-', '/')
elif tempItemKey == u'上次交易':
item_valus = item_valus.replace('-', '/')
self.generateExcel.writeExcelPositon(row,
self.elementConstant.data_constant.get(
tempItemKey),
item_valus)
if __name__ == "__main__":
    # need the script name plus 5 positional arguments (sys.argv[1]..sys.argv[5])
    if len(sys.argv) >= 6:
print(u'[Params] {0} {1} {2} {3} {4}'.format(
sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]))
worker = spiderWorker()
worker.prepare(sys.argv[1], sys.argv[2],
sys.argv[3], sys.argv[4], sys.argv[5])
worker.start()
else:
print(u'[ERROR] 参数不足')
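# Example invocation (hypothetical values; the positional arguments are
# url, totalCount, pageCount, xlsPathIdentifier and city, as read above):
#   python spiderWorker.py "https://bj.lianjia.com/ershoufang/pg1/" 2718 91 run01 beijing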
|
from datetime import datetime, timedelta
from earnmi.data.MarketImpl import Market2Impl
from earnmi.uitl.utils import utils
from vnpy.trader.object import BarData
def is_same_day(d1: datetime, d2: datetime) -> bool:
return d1.day == d2.day and d1.month == d2.month and d1.year == d2.year
def isPostMinitueBar(b1:BarData,b2:BarData)-> bool:
return b1.low_price >= b2.low_price and b1.high_price <= b2.high_price and b1.volume <=b2.volume
def basicTest():
market2 = Market2Impl()
code = "300004"
assert market2.isNotice(code) == False
market2.addNotice(code)
assert market2.isNotice(code) == True
key1 = market2.getNoticeData(code,"key1")
assert key1 is None
market2.putNoticeData(code,"key1","hello")
key1 = market2.getNoticeData(code,"key1")
assert key1 == "hello"
market2.removeNotice(code)
assert market2.isNotice(code) == False
try:
key1 = market2.getNoticeData(code,"key1")
assert False
except RuntimeError:
assert True
def realTimeTest():
market2 = Market2Impl()
code = "300004"
market2.addNotice(code)
has_data_day1 = datetime(year=2020, month=5, day=8,hour=1)
market2.setToday(has_data_day1)
bar = market2.getRealTime().getKBar(code)
assert bar is None
market2.setToday(datetime(year=2020, month=5, day=8,hour=9,minute=31,second=30))
bar = market2.getRealTime().getKBar(code)
assert not bar is None
bar = market2.getRealTime().getKBar(code,hour=9,minute=31,second=31)
assert bar is None
begin = datetime(year=2020, month=4, day=9,hour=1)
for i in range(50):
day = begin + timedelta(days=i)
market2.setToday(datetime(year=day.year, month=day.month, day=day.day, hour=9, minute=50, second=30))
bar = market2.getRealTime().getKBar(code)
todayIsTrade = not bar is None
if todayIsTrade:
print(f"realTimeTest:test in trad day : {day}")
"""
            Today is a trading day.
"""
day1 = datetime(year=day.year, month=day.month, day=day.day, hour=9, minute=31, second=30)
day2 = datetime(year=day.year, month=day.month, day=day.day, hour=10, minute=31, second=30)
day3 = datetime(year=day.year, month=day.month, day=day.day, hour=13, minute=50, second=30)
day4 = datetime(year=day.year, month=day.month, day=day.day, hour=15, minute=0, second=30)
market2.setToday(day1)
bar1 = market2.getRealTime().getKBar(code)
market2.setToday(day2)
bar2 = market2.getRealTime().getKBar(code)
market2.setToday(day3)
bar3 = market2.getRealTime().getKBar(code)
market2.setToday(day4)
bar4 = market2.getRealTime().getKBar(code)
assert is_same_day(bar1.datetime,bar2.datetime) and is_same_day(bar3.datetime,bar4.datetime) and is_same_day(bar2.datetime,bar3.datetime)
assert bar1.datetime < bar2.datetime and bar2.datetime < bar3.datetime and bar3.datetime < bar4.datetime
assert isPostMinitueBar(bar1,bar2) and isPostMinitueBar(bar2,bar3) and isPostMinitueBar(bar3,bar4) and isPostMinitueBar(bar2,bar4)
# no_data_day = datetime(year=2020, month=5, day=9)
#
# has_data_day2 = datetime(year=2020, month=5, day=11)
def historyTest():
market2 = Market2Impl()
market2.setToday(datetime.now())
    # fetch Shanghai (SSE) market data
code = "600000"
market2.addNotice(code)
bars = market2.getHistory().getKbars(code,20)
assert len(bars) == 20
    ## fetch index data
codes = ['000300']
for code in codes:
market2.addNotice(code)
assert len(market2.getHistory().getKbars(code,20)) == 20
code = "300004"
market2.addNotice(code)
todayListBar = market2.getHistory().getKbars(code, 50);
pre_bares = None
for todayBar in todayListBar:
today = datetime(year=todayBar.datetime.year,month=todayBar.datetime.month,day=todayBar.datetime.day,minute=1)
market2.setToday(today)
bars1 = market2.getHistory().getKbars(code, 100);
        ## the last bar must not include today:
assert not utils.is_same_day( bars1[-1].datetime,today)
assert len(bars1) == 100
pre_bar = None
for bar in bars1:
assert bar.datetime < today
if pre_bar:
assert pre_bar.datetime < bar.datetime
pre_bar = bar
bars2 = market2.getHistory().getKbarFrom(code,bars1[0].datetime)
for index in range(len(bars1)):
assert bars1[index].datetime == bars2[index].datetime
        # bar data from the previous trading day
if pre_bares:
for index in range(len(pre_bares) - 1):
assert pre_bares[index + 1].datetime == bars1[index].datetime
pre_bares = bars1
pass
basicTest()
historyTest()
realTimeTest()
|
import json
import os
import pathlib
import urllib.request
import pytest
from sunpy.tests import hash
hashfile = 'https://raw.githubusercontent.com/sunpy/sunpy/master/sunpy/tests/figure_hashes_py36.json'
hashfile = urllib.request.urlopen(hashfile)
hashes = json.load(hashfile)
figpath = pathlib.Path(os.path.abspath(__file__)) / '..' / '..' / 'figures'
figpath = figpath.resolve()
figure_paths = [x for x in figpath.iterdir() if x.suffix == '.png']
ids = [figure_path.name for figure_path in figure_paths]
@pytest.mark.parametrize('fig_path', figure_paths, ids=ids)
def test_hash(fig_path):
with open(fig_path, 'rb') as f:
fhash = hash._hash_file(f)
assert hashes[fig_path.stem] == fhash
def test_missing_figures():
stems = [p.stem for p in figure_paths]
missing = []
for key in hashes:
if key not in stems:
missing.append(key)
if len(missing):
missing = '\n'.join(missing)
raise RuntimeError(f'The following figure tests are missing an image:\n{missing}')
|
import collections
import string
class Solution(object):
def ladderLength(self, start, end, arr):
arr = set(arr) # avoid TLE
q = collections.deque([(start, 1)])
visted = set()
alpha = string.ascii_lowercase # 'abcd...z'
while q:
word, length = q.popleft()
if word == end:
return length
for i in range(len(word)):
for ch in alpha:
new_word = word[:i] + ch + word[i + 1:]
if new_word in arr and new_word not in visted:
q.append((new_word, length + 1))
visted.add(new_word)
return 0
"""
class Solution(object):
def ladderLength(self, beginWord, endWord, wordList):
def isoneletter(word1,word2):
count=0
n=len(word1)
for i in range(n):
if word1[i]!=word2[i]:
if count==0:
count=1
else:
return False
return True
if len(wordList)==0:
return
if endWord not in wordList:
return 0
wordList.append(beginWord)
numtoletter={}
lettertonum={}
count=0
for item in wordList:
numtoletter[count]=item
lettertonum[item]=count
count+=1
dp=[[count+1 for i in range(count)]for j in range(count)]
for i in range(count):
for j in range(count):
if i==j:
dp[i][j] = dp[j][i] = 0
else:
word1=numtoletter[i]
word2=numtoletter[j]
index=isoneletter(word1,word2)
if index==True:
dp[i][j]=dp[j][i]=1
for i in range(count):
for j in range(count):
for k in range(count):
if dp[i][j]>dp[i][k]+dp[k][j]:
dp[i][j]=dp[j][i]=dp[i][k]+dp[k][j]
index1=lettertonum[beginWord]
index2=lettertonum[endWord]
if dp[index1][index2]!=count+1:
return dp[index1][index2]+1
else:
return 0
"""
beginWord = "hit"
endWord = "cog"
wordList = ["hot","dot","dog","lot","log","cog"]
s=Solution()
print(s.ladderLength(beginWord,endWord,wordList)) |
# Homework 5
# For this homework, we will be doing some bug fixing.
# In this program, there will be many bugs and I will be providing some hints
# for you.
# IMPORTANT: the bugs will be labeled on the line above with a FIXME comment
# Do your best!
########### Section 1: printing some stuff
# this print line is fine!
print("hello world")
# this print line is also fine...
print("My favorite number is: " + str(1))
# FIXME: print can only take in one type (integer, string, real)!
# Should print "Hello, ma name is Bob"
print(Hello, my name is Bob)
# FIXME: non-strings cannot be added with a string
# Should print "hello! I am 13 years old"
print("hello! I am " + 13 + " years old")
# FIXME: you can have arithmetics in your print, but should be string
# Should print "1 + 1 = 2"
print("1 + 1 = " + (1 + 1))
########### Section 2: asking user for input.
# this user input is fine:
name = input("What is your name: ")
print("Oh.. hi "+ name)
# this user input is also fine
age = input("What is your age?")
print("wow! you are so old: " + int(age))
# FIXME: if you want to use the user's input, you need to save it
# Should print: "wow! I didn't know this ____ was your favorite color!"
input("What is your favorite color?")
print("wow! I didn't know this " + color + " was your favorite color!")
# FIXME: Whatever input the user type...is a string... even though he typed a number
x = input("give me a number!: ")
print("your number is: " + x)
########### Section 3: If True or False.
# this works!
if True:
print("I should be printed!")
# FIXME: make the below if/elif/else statement print out "HELLO!"
if False:
print("hi")
elif True:
print("hey")
elif True:
print("HELLO!")
else:
print("bye")
# FIXME: make the below if/elif/else statement print "I am great at coding"
x = 1
if x == 3:
print("I am great at cooking")
elif x > 1:
print("I am great at running")
elif not x == 1:
print("I am great at coding!")
########### Section 4: looping and looping and more looping
# this works! Should print: 0, 1, 2 each on its own line
for i in range(3):
print(i)
# this works! Should print a, b, c
for character in "abc":
print(character)
# FIXME: make this print 0, 1, 2, 3, 4
for i in range(3):
print(i)
# FIXME: make this print out everything in the list:
x = [1, 'a', 2, 3, 'b'] # this is a list
for i in fixme:
print(i)
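# One possible set of fixes for the FIXME items above (a sketch only, kept as
# comments so the exercise itself stays unchanged; it assumes the intended
# outputs stated in the comments):
#
#   print("Hello, my name is Bob")                      # quote the string
#   print("hello! I am " + str(13) + " years old")      # convert the number
#   print("1 + 1 = " + str(1 + 1))                      # convert the sum
#   color = input("What is your favorite color?")       # save the input
#   print("wow! I didn't know this " + color + " was your favorite color!")
#   x = int(input("give me a number!: "))               # inputs arrive as strings
#   print("your number is: " + str(x))
#   # Section 3: make the branch that prints the wanted text the one taken,
#   # e.g. change the first `elif True:` to `elif False:` so "HELLO!" prints,
#   # and change `elif not x == 1:` to `elif x == 1:` for "I am great at coding!"
#   # Section 4: use `range(5)` to print 0..4 and iterate over `x`, not `fixme`.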
|
#!/usr/bin/python3
import pynn.array as array
from pynn.element import Element
class Mixer(Element):
class _Context(Element._Context):
def __init__(self, size, accum, node):
Element._Context.__init__(self, node)
self.accum = accum
def newContext(self, factory):
return self._Context(self.size, factory.empty(self.size), self)
def __init__(self, size, inum, onum, **kwargs):
Element.__init__(self, [size]*inum, [size]*onum, **kwargs)
self.size = size
def _transmit(self, ctx):
        array.copy(ctx.accum, ctx.srcs[0])
for i in range(1, self.inum):
array.radd(ctx.accum, ctx.srcs[i])
for i in range(self.onum):
array.copy(ctx.dsts[i], ctx.accum)
def _backprop(self, ctx):
        array.copy(ctx.accum, ctx.dsts[0])
for i in range(1, self.onum):
array.radd(ctx.accum, ctx.dsts[i])
for i in range(self.inum):
array.copy(ctx.srcs[i], ctx.accum)
class Fork(Mixer):
def newContext(self, factory):
return Element._Context(self)
def __init__(self, size, **kwargs):
Mixer.__init__(self, size, 1, 2, **kwargs)
def _transmit(self, ctx):
array.copy(ctx.dsts[0], ctx.src)
array.copy(ctx.dsts[1], ctx.src)
def _backprop(self, ctx):
array.add(ctx.src, ctx.dsts[0], ctx.dsts[1])
class Join(Mixer):
def newContext(self, factory):
return Element._Context(self)
def __init__(self, size, **kwargs):
Mixer.__init__(self, size, 2, 1, **kwargs)
def _transmit(self, ctx):
array.add(ctx.dst, ctx.srcs[0], ctx.srcs[1])
def _backprop(self, ctx):
array.copy(ctx.srcs[0], ctx.dst)
array.copy(ctx.srcs[1], ctx.dst)
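# Behaviour summary (derived from the classes above): Mixer adds all of its inputs
# and copies the sum to every output, and mirrors that on backprop. Fork is the
# 1-in/2-out special case (copy forward, sum the two gradients backward) and Join
# is the 2-in/1-out special case (sum forward, copy the gradient to both inputs).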
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement # This isn't required in Python 2.6
import os
import subprocess
import sys
import hashlib
import re
from copy import deepcopy
import xml.etree.cElementTree as xml
def getText(nodelist):
"""Get text from an XML node"""
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
class Project:
""" Class project :
The class project must contains
_liste_default_yes : compilation flag which are defined by default.
_liste_default_not : compilation flag which are not defined by default.
_optionsComp : available compilation option
_binaries : available binaries
_config : configuration file template
"""
_name = 'Default'
_liste_default_yes = []
_liste_default_not = []
_optionsComp = {}
_binaries = {}
_config = u""
_config_name = u"config.in"
_env = os.environ
_env["LC_CTYPE"] = "en_US.UTF-8"
_env["LANG"] = "en_US.UTF-8"
_configFlags = {}
_parse_util = {}
_parse_count = {}
def __init__(self):
""" Initialisation of the class
Add entries to _optionsComp using _liste_default_yes and
_liste_default_not
Replace __OPT__ in config file by a list of __keywords__ using
_liste_default_yes and _liste_default_not keys as keywords.
"""
for key in self._liste_default_yes:
self._optionsComp[key] = {
u'database_id' : key,
u'default' : {
u'replace' : {u'__%s__' % (key) : u'-D%s' % (key)},
u'database_val' : u'DEFINED'},
False : {
u'replace' : {u'__%s__' % (key) : u''},
u'database_val' : u'UNDEFINED',
u'searchInName' : re.compile(u'_NO_%s' % key)}
}
for key in self._liste_default_not:
self._optionsComp[key] = {
u'database_id' : key,
u'default' : {
u'replace' : {u'__%s__' % (key) : u''},
u'database_val' : u'UNDEFINED'},
True : {
u'replace' : {u'__%s__' % (key) : u'-D%s' % (key)},
u'database_val' : u'DEFINED',
u'searchInName' : re.compile(u'_%s' % key)}
}
for key in self._liste_default_yes:
self._config = self._config.replace(u'__OPT__',
u'__%s__ __OPT__' % key)
for key in self._liste_default_not:
self._config = self._config.replace(u'__OPT__',
u'__%s__ __OPT__' % key)
self._config = self._config.replace(u'__OPT__', u'')
return
def _addTextChild(self, node, key, value, attributes={}):
""" Add a text child to an XML node.
The resulting XML will be :
<node_key>
....
<key attributes_key=attributes_val ...> value </key>
</node_key>
"""
child = xml.Element(key)
for key2 in attributes.keys():
child.attrib[key2] = attributes[key2]
child.text = value
node.append(child)
def getRevision(self):
""" Get revision from build directory
Check if __REVISION__ file exists.
If it exists the contained integer will be used as revision.
Else, will get SVN revision consulting the repository.
If repository contains .svn, we use svn log -l 1 and
search for the revision number in it.
Else, we use git svn log -n 1. Thus, **it only works with SVN
central repository...**
"""
rev_search = re.compile('r([0-9]+)')
revision = -1
if (os.path.isfile(os.path.join(self.branchdir, u'__REVISION__'))):
return int(open(os.path.join(self.branchdir,
u'__REVISION__'), 'r').read())
if (os.path.isdir(os.path.join(self.branchdir, u'.svn'))):
try:
process = subprocess.Popen([u'svn', u'log', u'-l', u'1'],
env=self._env,
cwd=self.branchdir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
svn_log = process.communicate()[0]
svn_log = svn_log.decode(u'utf8',
u'replace')
res = rev_search.search(svn_log)
if (res):
revision = int(res.group(1))
except:
revision = -1
return revision
else:
try:
process = subprocess.Popen([u'git', u'svn',
u'log', u'-n', u'1'],
env=self._env,
cwd=self.branchdir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
svn_log = process.communicate()[0]
svn_log = svn_log.decode(u'utf8', u'replace')
res = rev_search.search(svn_log)
if (res):
revision = int(res.group(1))
except:
revision = -1
return revision
return revision
def getDiff(self):
""" Get differences from build directory
Check for a __DIFF__ file in the directory and use it as diff if
it exists.
Else, if .svn directory exists we use svn diff and if not we use
git diff remotes/git-svn.
If an error occurs with SVN we return a u"Error on getting
diff", and if git doesn't works u"Could not get diff" is returned.
"""
svn_diff = u""
if (os.path.isfile(os.path.join(self.branchdir, u'__DIFF__'))):
return open(os.path.join(self.branchdir, u'__DIFF__'), 'r').read()
if (os.path.isdir(os.path.join(self.branchdir, u'.svn'))):
try:
process = subprocess.Popen([u'svn', u'diff'],
env=self._env,
cwd=self.branchdir,
stdout=subprocess.PIPE)
svn_diff = process.communicate()[0]
svn_diff = svn_diff.decode(u'utf8', u'replace')
except:
svn_diff = u"Error on getting diff"
else:
try:
process = subprocess.Popen([u'git',u'diff',
u'remotes/git-svn', u'.'],
env=self._env,
cwd=self.branchdir,
stdout=subprocess.PIPE)
svn_diff = process.communicate()[0]
print u"yes"
svn_diff = svn_diff.decode(u'utf8', u'replace')
except:
print u"non"
svn_diff = u"Could not get diff"
return svn_diff
def Configure(self, machine, options, compilations, ident):
"""Configure the project on the given machine
Replaces __keywords__ in config and build an XML file
representing the compilation."""
self.branchdir = machine[u'ROOT']+options[u"BRANCH"]
self.compilations = compilations
self.options = options
self.user = machine[u"username"]
self.resultdir = os.path.join(machine[u"result_dir"], ident)
# Create the xml document
self.doc_xml = xml.Element(u'TestCase')
config_xml = xml.Element(u"config")
self.doc_xml.append(config_xml)
self._addTextChild(config_xml, u"PROJECT", self._name)
self._addTextChild(config_xml, u"NAME", options[u"NAME"])
self._addTextChild(config_xml, u"MACHINE", machine[u"NAME"])
date = self._idToDate(ident)
self._addTextChild(config_xml, u"DATE", date)
self._addTextChild(config_xml, u"USER", machine[u"username"])
self.diff = self.getDiff()
self._addTextChild(config_xml, "DIFF", self.diff)
try:
self.diff = self.diff.decode(u'utf8', u'replace')
except:
self.diff = u"error in diff"
self.md5diff = hashlib.md5(self.diff).hexdigest()
self._addTextChild(config_xml, "MD5DIFF", self.md5diff)
self.install = os.path.join(self.resultdir,
self.user,
machine["NAME"],
self._name,
options["NAME"])
revision = str(self.getRevision()).decode(u'utf8', u'replace')
self._addTextChild(config_xml, u"REVISION", revision)
for key in self._optionsComp.keys():
if (options.has_key(key) and
self._optionsComp[key].has_key(options[key])):
opt_set = self._optionsComp[key][options[key]]
else:
opt_set = self._optionsComp[key][u'default']
replace_dict = opt_set[u'replace']
database_val = opt_set[u'database_val']
database_id = self._optionsComp[key][u'database_id']
for key2 in replace_dict.keys():
self._config = self._config.replace(key2,
replace_dict[key2])
self._addTextChild(config_xml, u"option",
database_val,
{u'key' : database_id})
if (options.has_key("INTEGER")):
if (options['INTEGER'] == "int32"):
if (machine.has_key(u'SCOTCH_INT32')):
self._config = self._config.replace(u'__SCOTCH_DIR__',
machine[u'SCOTCH_INT32'])
if (machine.has_key(u'METIS_INT32')):
self._config = self._config.replace(u'__METIS_DIR__',
machine[u'METIS_INT32'])
elif (options['INTEGER'] == "int64"):
if (machine.has_key(u'SCOTCH_INT64')):
self._config = self._config.replace(u'__SCOTCH_DIR__',
machine[u'SCOTCH_INT64'])
if (machine.has_key(u'METIS_INT64')):
self._config = self._config.replace(u'__METIS_DIR__',
machine[u'METIS_INT64'])
elif (options['INTEGER'] == "long"):
if (machine.has_key(u'SCOTCH_LONG')):
self._config = self._config.replace(u'__SCOTCH_DIR__',
machine[u'SCOTCH_LONG'])
if (machine.has_key(u'METIS_LONG')):
self._config = self._config.replace(u'__METIS_DIR__',
machine[u'METIS_LONG'])
else:
if (machine.has_key(u'SCOTCH_INT')):
self._config = self._config.replace(u'__SCOTCH_DIR__',
machine[u'SCOTCH_INT'])
if (machine.has_key(u'METIS_INT')):
self._config = self._config.replace(u'__METIS_DIR__',
machine[u'METIS_INT'])
else:
if (machine.has_key(u'SCOTCH_INT')):
self._config = self._config.replace(u'__SCOTCH_DIR__',
machine[u'SCOTCH_INT'])
if (machine.has_key(u'METIS_INT')):
self._config = self._config.replace(u'__METIS_DIR__',
machine[u'METIS_INT'])
for key in self._configFlags.keys():
machine_key = self._configFlags[key][u'machine_key']
if machine.has_key(machine_key) : tmp = machine[machine_key]
else : tmp = self._configFlags[key][u'default']
self._config = self._config.replace(key, tmp)
self.md5conf = hashlib.md5(self._config).hexdigest()
self._addTextChild(config_xml, u"MD5CONF", self.md5conf)
self._addTextChild(config_xml, u"CONFIG_FILE", self._config)
if (machine.has_key(u"HWLOC_HOME")):
defined = u"DEFINED"
self._config = self._config.replace(u"__HWLOC_HOME__",
machine[u'HWLOC_HOME'])
else:
defined = u"UNDEFINED"
self._addTextChild(config_xml, u"option", defined, {u'key' : u"HWLOC"})
return config_xml
def Build(self, machine):
"""Build the project on the given machine"""
# Copy config.in
f=open(os.path.join(self.branchdir, self._config_name), 'w')
f.write(self._config)
f.close()
search_warning = re.compile(u"warning\s*:",
flags=re.IGNORECASE|re.MULTILINE)
search_remark = re.compile(u"remark\s*:",
flags=re.IGNORECASE|re.MULTILINE)
search_error = re.compile(u"error\s*:",
flags=re.IGNORECASE|re.MULTILINE)
subprocess.Popen([u"mkdir", u"-p", self.install], env=self._env).wait()
subprocess.Popen([u"cp",
os.path.join(self.branchdir, self._config_name),
os.path.join(self.install, self._config_name)],
env=self._env).wait()
retcode = 0
for libname in self._libraries.keys():
make_xml = xml.Element(u"Build")
self.doc_xml.append(make_xml)
print u"building %s on %s with %s [%s]" %(libname, machine[u'NAME'],
self.options[u'NAME'],
machine[u'ROOT'])
self._addTextChild(make_xml, u"OBJECT_TYPE", u"library")
self._addTextChild(make_xml, u"OBJECT_NAME", libname)
commandes = self._libraries[libname]
log = ""
for commande in commandes:
commande = commande.replace(u'__MAKE__',
machine['MAKE'])
commande = commande.replace(u'__MAKE_J__',
machine['MAKE_J'])
if (retcode == 0):
log += u'## Running '+commande+u'\n'
make = subprocess.Popen(commande,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.branchdir,
shell=True, env=self._env)
log += make.communicate()[0].decode(u'utf8', u'replace')
retcode = make.returncode
if retcode < 0:
string = u"\"%s\" was terminated by signal %d"
                        string = string % (commande, -retcode)
print >>sys.stderr, string
log += string
elif not retcode == 0:
string = u"\"%s\" returned %d" % (commande, retcode)
print >>sys.stderr, string
log += string
warnings = 0
warnings += len(search_warning.findall(log))
warnings += len(search_remark.findall(log))
errors = 0
errors += len(search_error.findall(log))
if not retcode == 0:
errors = errors + 1
self._addTextChild(make_xml, u"WARNINGS",
unicode(str(warnings)))
self._addTextChild(make_xml, u"ERRORS", unicode(str(errors)))
self._addTextChild(make_xml, u"LOG", log)
self._addTextChild(make_xml, u"RETURN", unicode(str(retcode)))
for compilation in self.compilations:
print u"building %s on %s with %s [%s]" %(compilation,
machine[u'NAME'],
self.options[u'NAME'],
machine[u'ROOT'])
make_xml = xml.Element(u"Build")
self.doc_xml.append(make_xml)
self._addTextChild(make_xml, u"OBJECT_TYPE", u"binary")
self._addTextChild(make_xml, u"OBJECT_NAME", compilation)
cmd = [machine[u"MAKE"],
self._binaries[compilation][u'make_cmd']]
log = "## %s\n" % (" ".join(cmd))
build_dir = self._binaries[compilation]['build_dir']
make = subprocess.Popen(cmd,
env=self._env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=os.path.join(self.branchdir,
build_dir))
log += make.communicate()[0].decode(u'utf8', u'replace')
warnings = 0
warnings += len(search_warning.findall(log))
warnings += len(search_remark.findall(log))
errors = 0
errors += len(search_error.findall(log))
retcode = make.returncode
self._addTextChild(make_xml, u"WARNINGS",
unicode(str(warnings)))
if not retcode == 0:
errors = errors + 1
self._addTextChild(make_xml, u"ERRORS", unicode(str(errors)))
self._addTextChild(make_xml, u"LOG", log)
self._addTextChild(make_xml, u"RETURN", unicode(str(retcode)))
if retcode < 0:
string = u"\"%s\" was terminated by signal %d"
string = string % (u" ".join(cmd), -retcode)
print >>sys.stderr, string
elif retcode == 0:
binary_name = self._binaries[compilation][u'binary']
subprocess.Popen([u"cp",
os.path.join(self.branchdir,
binary_name),
self.install] ,
env= self._env).wait()
else:
string = u"\"%s\" returned %d"
string = string % (u' '.join(cmd), retcode)
print >>sys.stderr, string
filename = os.path.join(self.install, u"compilations.xml")
xml.ElementTree(self.doc_xml).write(filename)
def _runOne(self, binary, case, parameters,
machine, options, revision, ident):
raise NotImplementedError
def Run(self, machines, run_dict, ident):
"""Run tests for the project"""
launch =u""
self.doc_xml = xml.Element(u"Runs")
for machineconf in run_dict.keys():
machine = machines[machineconf]
print u"Running on %s [%s]" % (machine[u'NAME'], machine[u'ROOT'])
for run in run_dict[machineconf]:
binaries = run['binaries']
parameters_list = run['parameters']
cases = run['cases']
for options in run['compilations']:
self.user = machine["username"]
self.resultdir = os.path.join(machine["result_dir"],
ident)
self.branchdir = machine[u'ROOT']+options[u"BRANCH"]
self.diff = self.getDiff()
self.md5diff = hashlib.md5(self.diff).hexdigest()
revision = str(self.getRevision()).decode(u'utf8',
u'replace')
self.install = os.path.join(self.resultdir,
self.user,
machine["NAME"],
self._name,
options["NAME"])
m = hashlib.md5()
for line in open(os.path.join(self.install,
self._config_name), "r"):
m.update(line)
self.md5conf = m.hexdigest()
for parameters in parameters_list:
for case in cases:
for binary in binaries:
launch += self._runOne(binary, case, parameters,
machine, options,
revision, ident)
f=open(os.path.join(self.resultdir,
u'%s-launch.sh' % (self._name)), u'w')
f.write(launch)
f.close()
filename = os.path.join(self.resultdir,
u'%s-execution.xml' % (self._name))
xml.ElementTree(self.doc_xml).write(filename)
def Parse(self, xmlfile, ident, machine):
self.doc_xml = xml.parse(xmlfile).getroot()
self.resultdir = os.path.join(machine.resultdir, ident)
assert self.doc_xml.tag == u"Runs"
for run_xml in self.doc_xml.getiterator(u'Run'):
logfile = run_xml.find(u'logfile').text
for count_key in self._parse_count.keys():
self._parse_count[count_key][u"count"] = 0
try:
log = u""
for line in open(logfile, u'r'):
log += line
for count_key in self._parse_count.keys():
count_re = self._parse_count[count_key][u"parse_key"]
if (count_re.search(line)):
self._parse_count[count_key][u"count"] += 1
for parse_key in self._parse_util.keys():
parse_re = self._parse_util[parse_key][u"parse_key"]
parse_type = self._parse_util[parse_key][u"type"]
resultat = parse_re.search(line)
if (resultat):
child = xml.Element(u"OUTPUT")
child.attrib[u"key"] = parse_key
if (parse_type == u"memory"):
child.attrib[u"type"] = u"BIGINT"
mem = float(resultat.group(1))
if resultat.group(2) == u"Ko":
mem = mem*1024
if resultat.group(2) == u"Mo":
mem = mem*1024*1024
if resultat.group(2) == u"Go":
mem = mem*1024*1024*1024
text = str(int(mem))
else:
child.attrib[u"type"] = parse_type
text = resultat.group(1)
child.text = unicode(text)
run_xml.append(child)
for count_key in self._parse_count.keys():
child = xml.Element(u"OUTPUT")
child.attrib[u"key"] = count_key
child.attrib[u"type"] = u"INT"
count = str(self._parse_count[count_key][u"count"])
count = count.decode(u'utf8', u'ignore')
child.text = count
run_xml.append(child)
child = xml.Element(u"LOG")
log = log.decode(u'utf8', u'ignore')
child.text = log
run_xml.append(child)
except IOError:
child = xml.Element(u"LOG")
log = u"No log file"
child.text = log
run_xml.append(child)
self._checkError()
xml.ElementTree(self.doc_xml).write(xmlfile)
def _checkError(self):
"""Count number of errors or warnings in output log"""
raise NotImplementedError
def listBuildFlags(self):
''' Return the list of available flags'''
liste = {}
for flag in self._optionsComp.keys():
liste[flag] = []
for value in self._optionsComp[flag].keys():
if not (value == u'default' or
value == u'database_id'):
liste[flag].append(value)
return liste
def buildCompilationFromName(self, name, branch = u'/trunk'):
''' build a compilation dictionary from name '''
dictionary = {u'NAME' : name,
u'BRANCH' : branch}
for flag in self._optionsComp.keys():
opt = self._optionsComp[flag]
for value in opt.keys():
if not (value in [u'default', u'database_id']):
if opt[value].has_key(u'searchInName'):
if opt[value][u'searchInName'].search(name):
dictionary[flag] = value
return dictionary
def listCompilationNames(self):
''' Create a list of available keywords for compilation name '''
liste = []
for flag in self._optionsComp.keys():
liste2 = [flag]
opt = self._optionsComp[flag]
for value in opt.keys():
if not (value in [u'default', u'database_id']):
if opt[value].has_key(u'searchInName'):
liste2.append(opt[value][u'searchInName'].pattern)
liste.append(liste2)
return liste
def _idToDate(self, ident):
"""Transform ident string into date string.
String format :
ident : yyyymmddhhmmss
Date : yyyy-mm-dd hh:mm:ss
"""
year = ident[0:4]
month = ident[4:6]
day = ident[6:8]
hour = ident[8:10]
minut = ident[10:12]
sec = ident[12:14]
yearmonday = u"-".join([year,
month,
day])
hourminsec = u':'.join([hour,
minut,
sec])
return u' '.join([ yearmonday,
hourminsec ])
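# Worked example (hypothetical ident, added for clarity): _idToDate(u"20140317154205")
# returns u"2014-03-17 15:42:05" -- the 14-character timestamp is simply re-punctuated.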
|
from setuptools import setup, find_packages
version = '0.9dev'
setup(name='git-svn-helpers',
version=version,
description="Command-line tools to make git-svn simple",
long_description = (
open('README.rst').read()
+ '\n' +
'Change history\n'
'**************\n'
+ '\n' +
open('HISTORY.txt').read()
+ '\n' +
'Download\n'
'********\n'),
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Topic :: Software Development :: Version Control',
'Programming Language :: Python',
'License :: OSI Approved :: BSD License',
],
keywords='git svn',
author='Tom Lazar',
author_email='tom@tomster.org',
url='http://github.com/tomster/git-svn-helpers',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=False,
zip_safe=False,
test_suite='gitsvnhelpers.tests',
install_requires=[
# -*- Extra requirements: -*-
"jarn.mkrelease",
],
entry_points="""
# -*- Entry points: -*-
[console_scripts]
gitify=gitsvnhelpers.gitify:gitify
""",
)
|
import pygame
class ActiveInfoArea(object):
def __init__(self, info_area_type, resource_manager):
self._frame = -1
self._active = True
self._text = ActiveInfoArea._get_text_from_type(info_area_type)
font_white = resource_manager.get('font_white')
self._text_size = len(self._text) * font_white.glyph_width
self._text_x = 256
self._text_sprite = resource_manager.get('font_white').get(self._text, self._text_size)
self._title_sprites = [resource_manager.get('info_area_title' + str(index))
for index in xrange(0, 9)]
@staticmethod
def _get_text_from_type(info_area_type):
texts = dict(drill_wall='Use a drill to open the crate ',
teleporter='Use a pass to use the teleporter ')
return texts[info_area_type]
'''
Public methods
'''
def run(self):
self._frame += 1
if self._frame >= 8:
self._text_x -= 2
return self._active
def render(self, board):
if self._active:
pygame.draw.rect(board, (0, 0, 0), (0, 0, 256, 8), 0)
if self._frame < 7:
board.blit(self._title_sprites[self._frame], (0, 0))
else:
if self._text_x > self._text_size * -1:
board.blit(self._text_sprite, (self._text_x, 1))
else:
board.blit(self._title_sprites[8], (0, 0))
self._active = False
class InfoArea(object):
def __init__(self, position, size, info_area_type):
self._position = position[0] + 256, position[1] + 144
self._size = size
self._info_area_type = info_area_type
@property
def position(self):
return self._position
@property
def size(self):
return self._size
@property
def info_area_type(self):
return self._info_area_type
class InfoAreaBuilder(object):
@staticmethod
def build(info_areas):
results = []
for info_area in info_areas:
info_area_elements = info_area.split(' ')
x = int(info_area_elements[0])
y = int(info_area_elements[1])
w = int(info_area_elements[2])
h = int(info_area_elements[3])
info_area_type = info_area_elements[4]
results.append(InfoArea((x, y), (w, h), info_area_type))
return results
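# Example of the expected input format (hypothetical line, added for clarity):
# "3 5 16 8 drill_wall" is parsed into InfoArea((3, 5), (16, 8), 'drill_wall'),
# whose position is then shifted by (+256, +144) inside InfoArea.__init__.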
|
from __future__ import absolute_import, unicode_literals
import time
from celery import shared_task
MIN = 60
@shared_task(priority=0)
def task_01():
print("TASK 01 (priority=0) - Started")
time.sleep(30)
print("TASK 01 (priority=0) - Finished")
@shared_task(priority=5)
def task_02():
print("TASK 02 (priority=5) - Started")
time.sleep(1 * MIN)
print("TASK 02 (priority=5) - Finished")
@shared_task(priority=10)
def task_03():
print("TASK 03 (priority=10) - Started")
time.sleep(2 * MIN)
print("TASK 03 (priority=10) - Finished")
@shared_task(queue="beat", priority=0)
def periodic_task_01():
print("PERIODIC TASK 01 (priority=0) - Started")
time.sleep(1 * MIN)
print("PERIODIC TASK 01 (priority=0) - Finished")
@shared_task(queue="beat", priority=10)
def periodic_task_02():
print("PERIODIC TASK 02 (priority=10) - Started")
time.sleep(1 * MIN)
print("PERIODIC TASK 02 (priority=10) - Finished")
@shared_task(queue="transient")
def transient_task_01():
print("TRANSIENT TASK 01 (priority=10) - Started")
time.sleep(5 * MIN)
print("TRANSIENT TASK 01 (priority=10) - Finished")
|
'''
import tensorflow as tf
from numpy.random import RandomState
data_size = 128
batch_size = 8
loss_more = 1
loss_less = 10
STEP = 5000
x = tf.placeholder(tf.float32, shape = (None, 2), name = 'input-x')
y_ = tf.placeholder(tf.float32, shape = (None, 1), name = 'input-y')
w1 = tf.Variable(tf.random_normal([2, 1], stddev = 1, seed = 1))
y = tf.matmul(x, w1)
loss = tf.reduce_sum(tf.where(tf.greater(y, y_),
(y - y_) * loss_more,
(y_ - y) * loss_less))
train = tf.train.AdamOptimizer(0.001).minimize(loss)
rdm = RandomState(1)
X = rdm.rand(data_size, 2)
Y = [[x1 + x2 + rdm.rand() / 10. - 0.05] for (x1, x2) in X]
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
for i in range(STEP):
start = (i * batch_size) % data_size
end = min(start + batch_size, data_size)
sess.run(train, feed_dict = {x : X[start : end], y_ : Y[start : end]})
print(sess.run(w1))
'''
'''
# Five-layer neural network with L2 regularization
import tensorflow as tf
def get_weight(shape, lambda_):
weight = tf.Variable(tf.random_normal(shape), dtype=tf.float32)
tf.add_to_collection(
'losses', tf.contrib.layers.l2_regularizer(lambda_)(weight))
return weight
x = tf.placeholder(tf.float32, shape=(None, 2))
y_ = tf.placeholder(tf.float32, shape=(None, 1))
batch_size = 8
layer_dimension = [2, 8, 10, 20, 50]
n_layers = len(layer_dimension)
cur_layer = x
cur_dimension = layer_dimension[0]
for i in range(1, n_layers):
next_dimension = layer_dimension[i]
weight = get_weight([cur_dimension, next_dimension], 0.001)
bias = tf.Variable(tf.constant(0.1, shape=[next_dimension]))
cur_layer = tf.nn.relu(tf.matmul(cur_layer, weight) + bias)
cur_dimension = next_dimension
mse_loss = tf.reduce_sum(tf.square(cur_layer - y_))
regular = tf.add_n(tf.get_collection('losses'))
loss = mse_loss + regular
'''
# Exponential moving average model
import tensorflow as tf
v1 = tf.Variable(0.)
num_update = tf.Variable(0, trainable=False)
ema = tf.train.ExponentialMovingAverage(0.99, num_update)
maintain_op = ema.apply([v1])
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
print(sess.run([v1, ema.average(v1)]))
sess.run(tf.assign(v1, 5))
sess.run(maintain_op)
print(sess.run([v1, ema.average(v1)]))
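# Expected output, as I read the ExponentialMovingAverage API (worth verifying):
# with num_update = 0 the effective decay is min(0.99, (1 + 0) / (10 + 0)) = 0.1, so
# after assigning v1 = 5 the shadow value becomes 0.1 * 0 + 0.9 * 5 = 4.5; the two
# print calls should therefore show roughly [0.0, 0.0] and then [5.0, 4.5].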
|
from django.conf.urls import url,include
from . import views
from rest_framework import routers
from django.conf.urls.static import static
from django.conf import settings
router = routers.DefaultRouter()
router.register('users',views.UserViewSet)
router.register('posts',views.PostViewSet)
router.register('profile',views.ProfileViewSet)
urlpatterns = [
url(r'^$',views.index,name='index'),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^signup/$', views.signup, name='signup'),
url(r'^api/',include(router.urls)),
url(r'^(?P<username>\w+)/profile', views.user_profile, name='userprofile'),
url(r'^api-auth/',include('rest_framework.urls',namespace='rest_framework')),
url(r'^profile/(?P<username>\w+)/settings', views.edit_profile, name='edit'),
url(r'^profile/(?P<username>\w+)', views.profile, name='profile'),
url(r'^project/(?P<id>\d+)',views.project,name='project'),
url(r'^search/$',views.search_project,name='search'),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from emoji_cnn import EmojiCNN
K = 5
with tf.Session() as sess:
model = EmojiCNN(sess,
data=str(K),
batch_size=100,
name='baseline',
embedding=50,
kernel_widths=[3,4,57],
kernel_filters=[64,64,64])
model.run(epochs=5)
train_loss, valid_loss = model.train_loss, model.valid_loss
# generate training data
fig = plt.figure()
fig.suptitle('training and validation loss vs epoch')
plt.xlabel('epoch')
plt.ylabel('loss')
train_handle, = plt.plot(train_loss)
valid_handle, = plt.plot(valid_loss)
plt.legend([train_handle, valid_handle], ['train', 'validation'])
fig.savefig("article.png")
plt.close(fig)
# generate performance metrics
conf_mat = np.zeros((K,K), dtype=np.int32)
samples = model.loader.batch_count('test')
model.loader.reset_batch('test')
for i in xrange(samples):
data = model.loader.next_batch('test')
predicted = sess.run(model.prediction, feed_dict={model.input_words: data[0], model.keep_rate: 1.0})
true_vals = data[1]
for j in xrange(100):
conf_mat[predicted[j], true_vals[j]] += 1
fig, ax = plt.subplots()
ax.matshow(conf_mat, cmap=plt.cm.Blues)
for i in xrange(K):
for j in xrange(K):
ax.text(i, j, str(conf_mat[j,i]), va='center', ha='center')
fig.savefig('confusion.png')
plt.close(fig)
precision = np.zeros(K)
recall = np.zeros(K)
f1 = np.zeros(K)
for i in xrange(K):
precision[i] = conf_mat[i,i] / float(conf_mat[i,:].sum() + 1e-12)
recall[i] = conf_mat[i,i] / float(conf_mat[:,i].sum() + 1e-12)
f1[i] = 2 * precision[i] * recall[i] / (precision[i] + recall[i] + 1e-12)
print('Precision %s' % precision)
print('Recall %s' % recall)
print('F1 Score %s' % f1)
print('Avg F1 Score %2.6f' % (f1.sum() / K))
|
from django.db import models
# Create your models here.
class DebitCard(models.Model):
Card_ID = models.AutoField(primary_key=True)
Card_Number = models.fields.CharField(max_length=20, unique=True)
First_Name = models.fields.CharField(max_length=30)
Last_Name = models.fields.CharField(max_length=30)
Expiry_Date = models.fields.DateField()
Security_Number = models.fields.CharField(max_length=4)
class Meta:
db_table = "DebitCard"
verbose_name = "Card"
constraints = [
models.UniqueConstraint(fields=['Card_Number'], name='unique_Card_Number')
]
class Institute(models.Model):
Institute_ID = models.AutoField(primary_key=True)
Name = models.CharField(max_length=30, unique=True)
Address = models.CharField(max_length=60)
URL = models.CharField(max_length=40, unique=True)
Card_ID = models.ForeignKey(DebitCard, on_delete=models.CASCADE, verbose_name="Card")
Token = models.IntegerField(default=0)
class Meta:
db_table = "Institute"
constraints = [
models.UniqueConstraint(fields=['Name'], name='unique_Name'),
models.UniqueConstraint(fields=['URL'], name='unique_URL')
]
class InstituteOwner(models.Model):
InstituteOwner_ID = models.AutoField(primary_key=True)
CNIC = models.CharField(max_length=15, unique=True)
DOB = models.DateField()
First_Name = models.CharField(max_length=30)
Last_Name = models.CharField(max_length=30)
Contact_Number = models.CharField(max_length=11, unique=True)
Password = models.CharField(max_length=32)
Email = models.CharField(max_length=30, unique=True)
Institute_ID = models.ForeignKey(Institute, on_delete=models.CASCADE)
class Meta:
db_table = "InstituteOwner"
constraints = [
models.UniqueConstraint(fields=['CNIC'], name='unique_CNIC'),
models.UniqueConstraint(fields=['Contact_Number'], name='unique_Contact_Number'),
models.UniqueConstraint(fields=['Email'], name='unique_Email')
]
|
'''
You have n bags numbered from 0 to n - 1. You are given two 0-indexed integer arrays capacity and rocks.
The ith bag can hold a maximum of capacity[i] rocks and currently contains rocks[i] rocks.
You are also given an integer additionalRocks, the number of additional rocks you can place in any of the bags.
Return the maximum number of bags that could have full capacity after placing the additional rocks in some bags.
'''
from typing import List
class Solution:
'''greedy: smallest capacity left first
'''
def maximumBags(self, capacity: List[int], rocks: List[int], additionalRocks: int) -> int:
diff = [cap - rock for cap, rock in zip(capacity, rocks)]
diff.sort()
ans = 0
for d in diff:
if additionalRocks >= d:
additionalRocks -= d
ans += 1
else:
break
return ans
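# Hand-checked example (not part of the problem statement above):
# capacity = [2, 3, 4, 5], rocks = [1, 2, 4, 4], additionalRocks = 2
# remaining capacities are [1, 1, 0, 1] -> sorted [0, 1, 1, 1]; the two extra rocks
# fill the bags needing 0, 1 and 1 more, so maximumBags(...) returns 3.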
|
# Generated by Django 3.1.1 on 2020-09-15 09:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='camera',
name='carcass',
field=models.CharField(blank=True, max_length=20, null=True),
),
migrations.AddField(
model_name='camera',
name='size',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='camera',
name='type',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='camera',
name='video_resolution',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='camera',
name='weight',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='categories',
name='categoris',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='camera',
name='viewing_angle',
field=models.IntegerField(blank=True, null=True),
),
]
|
import random
import items
import money
class Outcome(object):
def __init__(self,
state,
msg,
add_item=False, # USAGE: items.item_name
remove_item=False, # USAGE: items.item_name
remove_all_items=False,
clover=False,
grow_stronger=False,
beg=False,
burn_place=False,
trash_place=False,
lose=False,
die=False,
fail=False,
get_money=False,
kill=False,
lock=False,
move=False,
move_to=False,
new_person=False,
win=False,
threat=False,
unthreat=False,
flirt=None, # USAGE: (person, int)
love_confessor=None, # USAGE: person
add_employer=None,
remove_employer=None,
topic=None,
funcs=(), # USAGE [action]
funcs_with_args=(), # USAGE [(action, arg)]
actions=(), # USAGE: [(Action action(), int weight)]
succeed=False,
lose_money=None, # USAGE: (int amount)
lose_all_money=False,
score=1, # each action gives you one point by default
):
self.state = state
self.add_item = add_item
self.remove_item = remove_item
self.remove_all_items = remove_all_items
self.grow_stronger = grow_stronger
self.clover = clover
self.beg = beg
self.burn_place = burn_place
self.trash_place = trash_place
self.character = state.character
self.lose = lose
self.die = die
self.fail = fail
self.succeed = succeed
self.get_money = get_money
self.kill = kill
self.lock = lock
self.move = move
self.move_to = move_to
if msg and self.character.trip:
msg = msg.split(" ")
for i in range(len(msg)):
msg[i] = msg[i].strip(".,\"\'!")
msg[i] = msg[i].lower()
random.shuffle(msg)
self.msg = msg[0][0].upper() + " ".join(msg)[1:] + "."
else:
self.msg = msg
self.new_person = new_person
self.win = win
self.threat = threat
self.unthreat = unthreat
self.flirt = flirt
self.love_confessor = love_confessor
self.topic = topic
self.add_employer = add_employer
self.remove_employer = remove_employer
self.funcs = funcs
self.funcs_with_args = funcs_with_args
self.actions = actions
self.lose_money = lose_money
self.lose_all_money = lose_all_money
self.score = score
def execute(self):
"""
NOTE: order of conditions must be logical (based on what should be
printed first)
"""
if self.msg:
print(self.msg)
if self.beg:
self.character.person.state["given money"] = True
if self.lock:
self.state.places.locked.add(self.character.place)
if self.burn_place:
self.burn_place.name = "the smoldering remains of " \
+ self.burn_place.name
self.state.places.burnable.remove(self.burn_place)
self.state.places.burned.add(self.burn_place)
if self.trash_place:
self.trash_place.name = "the trashed remains of " \
+ self.trash_place.name
self.state.places.trashed.add(self.trash_place)
if self.kill: # this needs to happen before move_to
if self.new_person: # use the new person
person = self.new_person
else:
person = self.character.person
person.alive = False
self.character.person = None
self.character.threatened = False
if self.move:
self.character.move(self.move)
if self.move_to:
self.character.move_to(self.move_to)
if self.win:
self.score += 100
self.character.score += self.score
if self.die:
if self.character.has_item(items.four_leaf_clover) and \
self.clover:
print("Or at least that's what you imagine would have "
"happened if you didn't have a four-leaf clover.")
else:
self.character.die()
if self.new_person is None or self.new_person:
if not self.kill:
# The person will already be dead if killed is true
self.character.person = self.new_person
if self.lose:
self.character.lose = True
if self.add_item:
self.character.add_item(self.add_item)
if self.remove_item:
self.character.remove_item(self.remove_item)
if self.remove_all_items:
self.character.remove_all_items()
if self.get_money:
self.character.get_money(self.get_money)
if self.win:
self.character.win()
if self.threat:
self.character.threatened = True
if self.unthreat:
self.character.threatened = False
if self.add_employer:
self.character.add_employer(self.add_employer)
if self.remove_employer:
self.character.remove_employer(self.remove_employer)
if self.flirt:
self.flirt[0].attracted += self.flirt[1]
if self.grow_stronger:
self.character.grow_stronger(self.grow_stronger)
for func in self.funcs:
func()
for tup in self.funcs_with_args:
tup[0](tup[1]) # tup[0] is the function
# tup[1] is the argument
if self.lose_money is not None:
self.character.lose_money(self.lose_money)
if self.lose_all_money:
self.character.lose_all_money()
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the powerSum function below.
def powerSum(X, N, num=1):
if pow(num, N) < X:
return powerSum(X, N, num+1) + powerSum(X-pow(num, N), N, num+1)
elif pow(num, N) == X:
return 1
else:
return 0
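# Hand-checked examples (added for clarity): powerSum(10, 2) == 1, since the only way
# to write 10 as a sum of squares of distinct naturals is 1**2 + 3**2, and
# powerSum(100, 2) == 3 (10**2, 6**2 + 8**2, and 1**2 + 3**2 + 4**2 + 5**2 + 7**2).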
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
X = int(input())
N = int(input())
result = powerSum(X, N)
fptr.write(str(result) + '\n')
fptr.close() |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from rasa_sdk import Action
from rasa_sdk.events import SlotSet
class MyCustomAction(Action):
def name(self):
return "my_custom_action"
def run(self, dispatcher, tracker, domain):
dispatcher.utter_template("utter_custom_template", tracker)
return [SlotSet("test", 4)]
|
import torch
from latent_rationale.snli.util import get_z_counts, get_n_correct
def evaluate(model, criterion, data_iter):
model.eval()
n_eval_correct, n_eval_total, eval_loss = 0, 0, 0
# kuma statistics
p2h_0, p2h_c, p2h_1 = 0, 0, 0
h2p_0, h2p_c, h2p_1 = 0, 0, 0
with torch.no_grad():
for eval_batch_idx, eval_batch in enumerate(data_iter):
answer = model(eval_batch)
n_eval_correct += int(get_n_correct(eval_batch, answer))
n_eval_total += int(eval_batch.batch_size)
eval_loss += criterion(answer, eval_batch.label).sum().item()
# statistics on p2h attention
if hasattr(model, "prem2hypo_att"):
z0, zc, z1 = get_z_counts(model.prem2hypo_att,
model.prem_mask, model.hypo_mask)
p2h_0 += z0
p2h_c += zc
p2h_1 += z1
# statistics on h2p attention
if hasattr(model, "hypo2prem_att"):
z0, zc, z1 = get_z_counts(model.hypo2prem_att,
model.hypo_mask, model.prem_mask)
h2p_0 += z0
h2p_c += zc
h2p_1 += z1
acc = 100. * n_eval_correct / n_eval_total
result = dict(
n_eval_correct=n_eval_correct, n_eval_total=n_eval_total,
acc=acc, loss=eval_loss)
if hasattr(model, "hypo2prem_att"):
total = h2p_0 + h2p_c + h2p_1
result["h2p_0"] = h2p_0 / total
result["h2p_c"] = h2p_c / total
result["h2p_1"] = h2p_1 / total
result["h2p_selected"] = 1 - h2p_0 / total
if hasattr(model, "prem2hypo_att"):
total = p2h_0 + p2h_c + p2h_1
result["p2h_0"] = p2h_0 / total
result["p2h_c"] = p2h_c / total
result["p2h_1"] = p2h_1 / total
result["p2h_selected"] = 1 - p2h_0 / total
return result
|
# Generated by Django 3.0.4 on 2020-04-24 11:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('user', '0002_auto_20200415_1340'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='first_name',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='profile',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='profile',
name='last_name',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='profile',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='user',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='user',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.CreateModel(
name='MobileNumber',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('phone_number', models.CharField(max_length=10, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.Profile')),
],
),
migrations.CreateModel(
name='EmailAddress',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('email_id', models.CharField(max_length=255, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.Profile')),
],
),
]
|
from critical_projects import INCLUDED_PLATFORMS
if __name__ == "__main__":
for platform in INCLUDED_PLATFORMS:
print(f"CREATE INDEX FOR (n:{platform}) ON (n.Name);")
|
#!/usr/bin/env python3
"""how to calculate a shape of matrix"""
def matrix_shape(matrix):
"""shape of matrix"""
x = [len(matrix)]
while isinstance(matrix[0], list):
x.append(len(matrix[0]))
matrix = matrix[0]
return x
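# Small usage sketch (hypothetical inputs, added for clarity):
# matrix_shape([[1, 2], [3, 4], [5, 6]]) -> [3, 2]
# matrix_shape([[[1, 2, 3, 4]]]) -> [1, 1, 4]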
|
# U05_EX15_ExamScores_BarChart.py
#
# Author: Will Baschab
# Course: Coding for OOP
# Section: A2
# Date: 19 Dec 2018
# IDE: PyCharm
#
# Assignment Info
# Exercise: 15
# Source: Python Programming
# Chapter: 05
#
# Program Description
# This program will take a text file as input with the number of students in the first
# line and the last name of the student followed by their score on the subsequent lines.
# It will return a graphical bar chart of the students' names and their scores out of 100.
#
#
# Algorithm (pseudocode)
"""
- import everything from graphics
- print introduction
- ask for file name as input and save into <name>
- set <file> to open(name, "r")
- set <student_count> to int(file.readline(1)[:-1])
- set variables <students> and <grades> to empty lists
- set lines to file.readlines()
- close file
- begin for loop for line in file.readline()
- students.append(line.split()[1])
- grades.append(line.split()[2])
- draw graph of scores
"""
from graphics import *
def main():
print("\nThis program will take a text file as input with the number of students in the first" +
"\nline and the last name of the student followed by their score on the subsequent lines." +
"\nIt will return a graphical bar chart of the student's names and their scores out of 100.\n")
filename = input('Enter name of file as well as .txt extension on end: ')
file = open(filename, "r")
studentcount = int(file.readline().strip())
students, scores = [], []
lines = file.readlines()
file.close()
for line in lines:
students.append(line.split()[0])
scores.append(int(line.split()[1]))
draw_graph(studentcount, students, scores)
def draw_graph(studentcount, students, scores):
"""
- This function contains and carries out the graphical component
of the program. It creates a new window with a height based on
the number of students in the list.
- set width to 480
- set height to max height coordinate by a multiplier of 20
- create new window with title "Exam Scores Bar Graph", and with a width of <width> and a height of <height>
- set window coords to (0, studentcount * 3 + 5, 24, 0)
- draw "Names:" at (6,1) and "Scores:" at (18, 1)
- for i in range(studentcount):
- name = Text(Point(6, i * 3.0 + 3.0), "{0}:".format(students[i]))
- name.draw(win)
- score = Rectangle(Point(12, i * 3 + 2), Point(scores[i]/10 + 12, i * 3 + 4))
- score.setFill("green")
- score.draw(win)
- exitbox = Text(Point(6, studentcount * 3 + 3), "Click to exit")
- exitbox.draw(win)
- win.getMouse()
- win.close()
"""
width = 480
height = (studentcount * 3 + 5) * 20.0
win = GraphWin("Exam Scores Bar Graph", width, height)
win.setCoords(0.0, studentcount * 3.0 + 5.0, 24, 0)
Text(Point(6, 1), "Names:").draw(win)
Text(Point(18, 1), "Scores:").draw(win)
for i in range(studentcount):
name = Text(Point(6, i * 3.0 + 3.0), "{0}:".format(students[i]))
name.draw(win)
score = Rectangle(Point(12, i * 3 + 2), Point(scores[i]/10 + 12, i * 3 + 4))
score.setFill("green")
score.draw(win)
exitbox = Text(Point(6, studentcount * 3 + 3), "Click to exit")
exitbox.draw(win)
win.getMouse()
win.close()
main()
|
import threading
import time
import unittest
from os import path
import apps
import core.config.config
import core.controller
from core.case.callbacks import WorkflowExecutionStart, WorkflowPaused, WorkflowResumed
from core.helpers import import_all_filters, import_all_flags
from tests import config
from tests.util.case_db_help import *
from tests.util.thread_control import modified_setup_worker_env
class TestZMQCommunication(unittest.TestCase):
@classmethod
def setUpClass(cls):
apps.cache_apps(config.test_apps_path)
core.config.config.load_app_apis(apps_path=config.test_apps_path)
core.config.config.flags = import_all_flags('tests.util.flagsfilters')
core.config.config.filters = import_all_filters('tests.util.flagsfilters')
core.config.config.load_flagfilter_apis(path=config.function_api_path)
core.config.config.num_processes = 2
def setUp(self):
self.controller = core.controller.controller
self.controller.workflows = {}
self.controller.load_playbooks(resource_collection=config.test_workflows_path)
self.id_tuple = ('simpleDataManipulationWorkflow', 'helloWorldWorkflow')
self.testWorkflow = self.controller.get_workflow(*self.id_tuple)
self.testWorkflow.set_execution_uid('some_uid')
self.start = datetime.utcnow()
self.controller.initialize_threading(worker_environment_setup=modified_setup_worker_env)
case_database.initialize()
def tearDown(self):
self.controller.workflows = None
case_database.case_db.tear_down()
case_subscription.clear_subscriptions()
@classmethod
def tearDownClass(cls):
apps.clear_cache()
'''Request and Result Socket Testing (Basic Workflow Execution)'''
def test_simple_workflow_execution(self):
workflow = self.controller.get_workflow('basicWorkflowTest', 'helloWorldWorkflow')
step_uids = [step.uid for step in workflow.steps.values() if step.name == 'start']
setup_subscriptions_for_step(workflow.uid, step_uids)
self.controller.execute_workflow('basicWorkflowTest', 'helloWorldWorkflow')
self.controller.shutdown_pool(1)
steps = []
for uid in step_uids:
steps.extend(executed_steps(uid, self.start, datetime.utcnow()))
self.assertEqual(len(steps), 1)
step = steps[0]
result = step['data']
self.assertDictEqual(result['result'], {'result': "REPEATING: Hello World", 'status': 'Success'})
def test_multi_action_workflow(self):
workflow = self.controller.get_workflow('multiactionWorkflowTest', 'multiactionWorkflow')
step_names = ['start', '1']
step_uids = [step.uid for step in workflow.steps.values() if step.name in step_names]
setup_subscriptions_for_step(workflow.uid, step_uids)
self.controller.execute_workflow('multiactionWorkflowTest', 'multiactionWorkflow')
self.controller.shutdown_pool(1)
steps = []
for uid in step_uids:
steps.extend(executed_steps(uid, self.start, datetime.utcnow()))
self.assertEqual(len(steps), 2)
expected_results = [{'result': {"message": "HELLO WORLD"}, 'status': 'Success'},
{'result': "REPEATING: Hello World", 'status': 'Success'}]
for result in [step['data']['result'] for step in steps]:
self.assertIn(result, expected_results)
def test_error_workflow(self):
workflow = self.controller.get_workflow('multistepError', 'multiactionErrorWorkflow')
step_names = ['start', '1', 'error']
step_uids = [step.uid for step in workflow.steps.values() if step.name in step_names]
setup_subscriptions_for_step(workflow.uid, step_uids)
self.controller.execute_workflow('multistepError', 'multiactionErrorWorkflow')
self.controller.shutdown_pool(1)
steps = []
for uid in step_uids:
steps.extend(executed_steps(uid, self.start, datetime.utcnow()))
self.assertEqual(len(steps), 2)
expected_results = [{'result': {"message": "HELLO WORLD"}, 'status': 'Success'},
{'status': 'Success', 'result': 'REPEATING: Hello World'}]
for result in [step['data']['result'] for step in steps]:
self.assertIn(result, expected_results)
def test_workflow_with_dataflow(self):
workflow = self.controller.get_workflow('dataflowTest', 'dataflowWorkflow')
step_names = ['start', '1', '2']
step_uids = [step.uid for step in workflow.steps.values() if step.name in step_names]
setup_subscriptions_for_step(workflow.uid, step_uids)
self.controller.execute_workflow('dataflowTest', 'dataflowWorkflow')
self.controller.shutdown_pool(1)
steps = []
for uid in step_uids:
steps.extend(executed_steps(uid, self.start, datetime.utcnow()))
self.assertEqual(len(steps), 3)
expected_results = [{'result': 6, 'status': 'Success'},
{'result': 6, 'status': 'Success'},
{'result': 15, 'status': 'Success'}]
for result in [step['data']['result'] for step in steps]:
self.assertIn(result, expected_results)
'''Communication Socket Testing'''
def test_pause_and_resume_workflow(self):
self.controller.load_playbook(resource=path.join(config.test_workflows_path, 'pauseWorkflowTest.playbook'))
uid = None
result = dict()
result['paused'] = False
result['resumed'] = False
@WorkflowPaused.connect
def workflow_paused_listener(sender, **kwargs):
result['paused'] = True
self.controller.resume_workflow(uid)
@WorkflowResumed.connect
def workflow_resumed_listener(sender, **kwargs):
result['resumed'] = True
def pause_resume_thread():
self.controller.pause_workflow(uid)
return
@WorkflowExecutionStart.connect
def step_1_about_to_begin_listener(sender, **kwargs):
threading.Thread(target=pause_resume_thread).start()
time.sleep(0)
uid = self.controller.execute_workflow('pauseWorkflowTest', 'pauseWorkflow')
self.controller.shutdown_pool(1)
self.assertTrue(result['paused'])
self.assertTrue(result['resumed'])
|
""" Welcome to the GTK+3 GUI interface for the Tadman package
manager! I'm really proud of how this is all turning out. But for
the time being, this interface will not be updated until some interest
is shown in having one.
NOTE: This does NOT work with the current version of Tadman. It is
here for historical and future purposes.
"""
# Dependencies
import gi
gi.require_version('Gtk', '3.0')
import gi.repository.Gtk as Gtk
class MainInterface(Gtk.Window):
""" This class is in charge of managing the GTK+ 3.0 based GUI for Tadman.
It currently consists of two main parts: the options list with toggles on
the left, and the 'information window' on the right. When closed, the
script outputs all selected options, which will be sent to the build system
being used.
"""
def __init__(self, in_dict, pack_name, pack_version, mode):
""" Initialize all of the internal and external parts of the GUI. """
self.mode = mode
self.options_dict = in_dict
self.options_list = []
for item in self.options_dict:
self.options_list.append(item)
self.toggle_list = []
self.info_list = []
dict_length = len(in_dict)
self.list_range = range(dict_length)
# Initialize GTK window and set a title
Gtk.Window.__init__(self, title='Tadman Package Manager')
# Set the perfect window dimensions, and make it non-resizable
self.set_default_size(750, 500)
self.set_resizable(False)
self.connect("delete-event", Gtk.main_quit)
# Initialize all containers going to be used
notebook_window = Gtk.Notebook()
main_box = Gtk.HBox()
left_box = Gtk.VBox()
right_box = Gtk.VBox()
scrolly = Gtk.ScrolledWindow()
button_box = Gtk.HBox()
self.list_model = Gtk.ListStore(str, bool)
self.tree_view = Gtk.TreeView(model=self.list_model, activate_on_single_click=True)
#self.tree_view.connect('cursor-changed', self.change_help_message)
# Initialize buttons and smaller things
run_button = Gtk.Button(label='Run')
cancel_button = Gtk.Button(label='Cancel')
self.help_message = Gtk.Label("Welcome to Tadman!")
self.package_name = Gtk.Entry()
self.package_version = Gtk.Entry()
#search_bar = Gtk.Entry()
cell = Gtk.CellRendererText()
toggle = Gtk.CellRendererToggle()
for title in self.options_dict:
# Add a line to the list_store for each option
self.list_model.append([title, False])
# Configure container(s)
scrolly.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
column_one = Gtk.TreeViewColumn('Name', cell, text=0)
column_two = Gtk.TreeViewColumn('Toggle', toggle, active=1)
column_two.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
column_two.set_fixed_width(25)
self.select = self.tree_view.get_selection()
self.select.connect("changed", self.tree_selection_changed)
# Note: Here are options for a search bar, but does not really work well
self.tree_view.set_enable_search(False)
#self.tree_view.set_search_column(0)
#self.tree_view.set_search_entry(search_bar)
# Configure button(s)
run_button.connect('clicked', self.run_was_pressed)
cancel_button.connect('clicked', self.cancel_was_pressed)
toggle.connect('toggled', self.cell_was_toggled)
self.help_message.set_line_wrap(True)
self.help_message.set_width_chars(30)
self.help_message.set_max_width_chars(30)
self.help_message.set_justify(Gtk.Justification.CENTER)
self.package_name.set_text(pack_name)
self.package_version.set_text(pack_version)
# Start placing containers in containers and widgets in containers.
self.add(notebook_window)
notebook_window.append_page(main_box, Gtk.Label('Config'))
main_box.pack_start(left_box, True, True, 5)
main_box.pack_start(right_box, True, False, 5)
#left_box.pack_start(search_bar, False, False, 2)
left_box.pack_start(scrolly, True, True, 0)
left_box.pack_end(button_box, False, False, 2)
button_box.pack_start(run_button, True, True, 4)
button_box.pack_start(cancel_button, True, True, 4)
self.tree_view.append_column(column_one)
self.tree_view.append_column(column_two)
scrolly.add(self.tree_view)
right_box.pack_start(self.package_name, False, False, 1)
right_box.pack_start(self.package_version, False, False, 1)
right_box.pack_start(Gtk.Label("Number of options: %d" % dict_length),
False, False, 1)
right_box.pack_start(self.help_message, False, False, 10)
def cell_was_toggled(self, widget, path):
""" Change the toggle switch's status when clicked. """
self.list_model[path][1] = not self.list_model[path][1]
option = int(path)
# If this is a CMake build, then the option must have 'ON' appended to
# the end of it
if self.list_model[path][1]:
self.toggle_list.append(option)
else:
# If already toggled and then un-toggled, remove item from list
self.toggle_list.remove(option)
def tree_selection_changed(self, selection):
""" This one is kinda confusing, but essentially it gets the value of
the current selection once it changes, and puts up its help message
on the right panel.
"""
model, treeiter = selection.get_selected()
if not treeiter is None:
# It must loop through all of the entries in order to set the help
# message, which I'm not very pleased with
if model[treeiter][0] in self.options_dict:
item = model[treeiter][0]
self.help_message.set_text(self.options_dict[item][1])
def run_was_pressed(self, widget):
""" When the run button is hit, the current options in the name entry
box and the toggle options selected are saved to a list, and the GUI
closes.
"""
Gtk.main_quit()
self.info_list.append(self.package_name.get_text())
self.info_list.append(self.package_version.get_text())
#self.info_list.append(self.toggle_list)
self.info_list.append(sorted(self.toggle_list, key=int))
def cancel_was_pressed(self, widget):
""" Similar to run_was_pressed, but sets all options to none, and
then closes the GUI.
"""
self.info_list = []
Gtk.main_quit()
def get_return_values(self):
return self.info_list
class InstallInterface(Gtk.Window):
def __init__(self, package_name):
self.install_choice = ()
Gtk.Window.__init__(self, title="Install")
self.set_default_size(300, 100)
self.set_resizable(False)
self.connect("delete-event", Gtk.main_quit)
main_box = Gtk.VBox()
button_box = Gtk.HBox()
install_message = "Would you like to install %s?" % package_name
intro_label = Gtk.Label(install_message)
install_button = Gtk.Button(label='Install')
cancel_button = Gtk.Button(label='Cancel')
install_button.connect('clicked', self.install_was_pressed)
cancel_button.connect('clicked', self.cancel_was_pressed)
self.add(main_box)
button_box.pack_start(install_button, True, True, 5)
button_box.pack_start(cancel_button, True, True, 5)
main_box.pack_start(intro_label, True, True, 0)
main_box.pack_start(button_box, True, True, 5)
def install_was_pressed(self, widget):
self.install_choice = True
Gtk.main_quit()
def cancel_was_pressed(self, widget):
self.install_choice = False
Gtk.main_quit()
def get_choice(self):
return self.install_choice
def run_main(mode, pack_name, pack_version, a_list):
""" A main function to run the entire GUI. Nothing all that special."""
window = MainInterface(a_list, pack_name, pack_version, mode)
window.show_all()
Gtk.main()
window.hide()
return window.get_return_values()
def run_install(package_name):
window = InstallInterface(package_name)
window.show_all()
Gtk.main()
return window.get_choice()
|
import cadquery as cq
# 1. Establishes a workplane that an object can be built on.
# 1a. Uses the named plane orientation "front" to define the workplane, meaning
# that the positive Z direction is "up", and the negative Z direction
# is "down".
# 2. Creates a 3D box that will have geometry based off it later.
result = cq.Workplane("front").box(3, 2, 0.5)
# 3. The lowest face in the X direction is selected with the <X selector.
# 4. A new workplane is created
# 4a.The workplane is offset from the object surface so that it is not touching
# the original box.
result = result.faces("<X").workplane(offset=0.75)
# 5. Creates a thin disc on the offset workplane that is floating near the box.
result = result.circle(1.0).extrude(0.5)
# Displays the result of this script
show_object(result)
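# Note (assumption about the execution environment): show_object() is injected by the
# CQ-editor / CadQuery GUI, not by the cadquery package itself. When running this file
# with a plain Python interpreter, the model could instead be written out, e.g. with
# the cadquery 2.x exporter (worth double-checking against the installed version):
# cq.exporters.export(result, "offset_disc.step")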
|
import json
import scrapy
class KanmaincSpider(scrapy.Spider):
name = 'kanmainc'
start_urls = ['https://kanmainc.com/collections/all']
def parse(self, response):
"""This function should extract and loop item urls.
@url https://kanmainc.com/collections/all
@returns items 0
@returns requests 41
@request https://kanmainc.com/collections/all?page=2
"""
item_links = response.css('.grid-uniform a.product-grid-item::attr(href)').getall()
yield from response.follow_all(item_links, self.parse_details)
next_page = response.xpath('//ul[@class="pagination-custom"]//a[@title="Next »"]/@href').get()
if next_page is not None:
yield response.follow(next_page, callback=self.parse)
def parse_details(self, response):
"""This function should extract data from 1 item page.
@url https://kanmainc.com/products/super-soft-quilted-blanket-qb-22-pink
@returns items 1
@partial {\
"VENDORID": 1045,\
"VENDOR": "KANMA", \
"ITEMNO": "QB22-BOBK-PIK-B", \
"UPC": "653552479925", \
"CATEGORY": "blanket", \
"DESCRIPTION": "super soft quilted blanket, QB-22 PINK", \
"DESCRIPTION2": "SUPER SOFT QUILTED BLANKET FILLING 100% POLYESTER SIZE:100*140CM", \
"IMAGE_URL": "https://cdn.shopify.com/s/files/1/2062/5399/products/QB-22PINK.jpg?v=1594801336", \
"COST": 21.99, \
"PAGE_TITLE": "super soft quilted blanket, QB-22 PINK – kanmainc", \
"PAGE_URL": "https://kanmainc.com/products/super-soft-quilted-blanket-qb-22-pink" \
}
"""
json_data = response.css('#ProductJson-product-template ::text').get()
product = json.loads(json_data)
for variant in product['variants']:
yield {
"VENDORID":1045,
"VENDOR":'KANMA',
"ITEMNO":variant['sku'],
"UPC":variant['barcode'],
"CATEGORY":product['type'],
"DESCRIPTION":variant['name'],
"IMAGE_URL":next((x['src'] for x in product['media']), None),
"COST":float(variant['price']) / 100.0,
"CASEPACK":None,
"PK_SIZE":None,
"DESCRIPTION2":product['description'],
"PAGE_TITLE":response.css('title::text').get(),
"PAGE_URL":response.request.url,
}
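# Note (assumption about the surrounding project): the docstrings above double as
# Scrapy contracts; in a standard Scrapy project they can be exercised with
# "scrapy check kanmainc". The built-in contracts cover @url and @returns, while
# @partial and @request would need custom contract classes to be registered.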
|
# prac20
import matplotlib.pyplot as plt
import networkx as nx
def graf():
input2 = open("./graph_input2-4.txt","r")
list2 = input2.readlines()
G = {}
for i in range(len(list2)):
list2[i] = list2[i].rstrip()
a, b = list2[i].split()
if a not in G:
G[a] = {b}
else:
G[a].add(b)
if b not in G:
G[b] = {a}
else:
G[b].add(a)
return G
def bfs(G, P, start, fired):
queue = [start]
fired.add(start)
while len(queue) != 0:
current = queue.pop(0)
for neighbour in G[current]:
if neighbour not in fired:
P.add_edge(current, neighbour)
fired.add(neighbour)
queue.append(neighbour)
P = nx.Graph()
G = graf()
fired = set()
zero = 'Апельсиновый'
bfs(G, P, zero, fired)
nx.draw(P)
plt.savefig("simple_path.png") # save as png
plt.show() # display
|
class subnet:
def __init__(self, ec2_service):
self.ec2_service = ec2_service
self.sub_name =''
self.sub_id = ''
self.sub_vpc = ''
self.sub_cidr = ''
self.route_table_id = ''
self.route_name = ''
self.route_table = ''
# subnet_id is list type
def describe_route(self, subnet_id):
response = self.ec2_service.describe_route_tables(
Filters=[{
'Name': 'association.subnet-id',
'Values': [subnet_id]
}]
)
id = ''
name = ''
if 'RouteTables' in response and 0 < len(response['RouteTables']) and 'RouteTableId' in response['RouteTables'][0]:
id = response['RouteTables'][0]['RouteTableId'] + ' | '
for tag in response['RouteTables'][0]['Tags']:
if tag['Key'] == 'Name':
name = tag['Value']
return id, name
# vpc_id should be list type
def describe_subnet(self, vpc_id):
response = self.ec2_service.describe_subnets(
Filters=[{
'Name': 'vpc-id',
'Values': vpc_id
}]
)
result = []
for vpc in vpc_id:
for sub in response['Subnets']:
if sub['VpcId'] == vpc:
try:
for name in sub['Tags']:
if name['Key'] == 'Name':
self.sub_name = name['Value']
self.sub_id = sub['SubnetId']
self.sub_vpc = vpc
self.sub_cidr = sub['CidrBlock']
self.route_table_id, self.route_name = self.describe_route(self.sub_id)
self.route_table = self.route_table_id + self.route_name
except KeyError:
self.sub_name = '-'
self.sub_id = sub['SubnetId']
self.sub_vpc = vpc
self.sub_cidr = sub['CidrBlock']
self.route_table_id, self.route_name = self.describe_route(self.sub_id)
self.route_table = self.route_table_id + self.route_name
result.append((self.sub_name, self.sub_id, self.sub_vpc, self.sub_cidr, self.route_table))
return result |
import unittest
import time
import datetime
import os
from ddt import ddt, file_data, data, unpack
from selenium import webdriver
from pages.csadmin_page import Csadmin_Page
from selenium.webdriver.common.action_chains import ActionChains
from poium import Page, PageElement, PageSelect
from testdata.read_excel import get_testdata
from testdata.mysql_client import mysql_client
from selenium.webdriver.common.keys import Keys
# Path to the Excel test-data file
excel_file_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + '//' + 'testdata' + '//' + 'signin_testdata_new.xlsx'
# Read data from the spreadsheet
# 1, 2 = read row 1 (1-based, row 2 excluded); 0, 4 = read the first 4 columns (0-based, column 4 excluded)
signinib_1_testdata = get_testdata(excel_file_dir, 'Sheet1', 1, 2, 0, 4)
# Row 1, columns 4 to 19
signinib_2_testdata = get_testdata(excel_file_dir, 'Sheet1', 1, 2, 4, 19)
# Row 1, columns 21 to 23
signinib_3_testdata = get_testdata(excel_file_dir, 'Sheet1', 1, 2, 21, 23)
@ddt
class Chujin_test(unittest.TestCase):
"""
测试 au2 后台出金
"""
@classmethod
def setUpClass(cls):
"""初始化"""
cls.dr = webdriver.Chrome()
cls.dr.maximize_window()
cls.page = Csadmin_Page(cls.dr)
# csadmin back-office login URL
cls.page.get('https://awstau-csadmin.aetos.me/index.php/Public/login')
# Timestamp, used when composing registered usernames
#cls.time_now = str(datetime.datetime.now().strftime('%m%d%H%M'))
# Get a database client object
cls.mysql = mysql_client()
# Query the most recently registered username from the database
cls.username = cls.mysql.get_data("SELECT email FROM t_user_account ORDER BY userId DESC limit 1;")[0][0]
def setUp(self):
pass
def login_csadmin(self, username, password):
"""登录 csadmin """
# 输入用户名
time.sleep(5)
self.page.username_input.clear()
self.page.username_input.send_keys(username)
# 输入密码
self.page.password_input.clear()
self.page.password_input.send_keys(password)
# 点击登录按钮
self.page.submit_button.click()
def test_step1(self):
"""
出金流程第一步
Cs Check
"""
self.login_csadmin('cs_test', 'aa1111')
# 点击出金管理
time.sleep(5)
self.page.chujinguanli_span.click()
# 点击 CS Check
time.sleep(3)
self.page.cs_check_a.click()
# 在 cs check 的 username 搜索框输入用户名
time.sleep(3)
self.page.cs_check_username_input.send_keys(self.username)
# 点击 submit 按钮
time.sleep(5)
self.page.cs_check_username_submit.click()
# checkstatus 选择 confirmed
time.sleep(3)
PageSelect(self.page.cs_check_checkstatus_select, value='2')
# 在 comment 输入 test
time.sleep(3)
self.page.cs_check_comment_input.send_keys('test')
# 点击 submit
time.sleep(2)
self.page.cs_check_action_submit.click()
time.sleep(2)
# 确认提交
self.page.accept_alert()
# 点击退出
time.sleep(5)
self.page.log_out.click()
def test_step2(self):
"""
出金流程第二步
Risk Check
"""
self.login_csadmin('risk_test', 'aa1111')
# 点击出金管理
time.sleep(5)
self.page.chujinguanli_span.click()
# 点击 risk check
time.sleep(3)
self.page.risk_check_a.click()
# 在搜索框输入用户名
time.sleep(2)
self.page.risk_check_username_input.send_keys(self.username)
# 点击 submit 按钮
time.sleep(3)
self.page.risk_check_username_submit.click()
# checkstatus 选择 confirmed
time.sleep(3)
PageSelect(self.page.risk_check_checkstatus_select, value='2')
# 在 comment 输入 test
time.sleep(3)
self.page.risk_check_comment_input.send_keys('test')
# 点击 submit
time.sleep(2)
self.page.risk_check_action_submit.click()
time.sleep(2)
# 确认提交
self.page.accept_alert()
# 点击退出
time.sleep(5)
self.page.log_out.click()
def test_step3_1(self):
"""
出金流程第三步
Settlement Check
(1)WD Standing Setup
"""
self.login_csadmin('stm_test', 'aa1111')
# 点击出金管理
time.sleep(5)
self.page.chujinguanli_span.click()
# 点击 WD Standing Setup
time.sleep(3)
self.page.wd_standing_setup_a.click()
# 在搜索框输入用户名
time.sleep(2)
self.page.wd_standing_setup_username_input.send_keys(self.username)
# 点击 submint
time.sleep(2)
self.page.wd_standing_setup_username_submit.click()
# 勾选搜索出来的数据
time.sleep(3)
self.page.wd_standing_setup_checkbox.click()
# 点击 process
time.sleep(3)
self.page.wd_standing_setup_process.click()
# 确认
time.sleep(1)
self.page.wd_standing_setup_alert.click()
# 点击退出
time.sleep(3)
self.page.log_out.click()
def test_step3_2(self):
"""
出金流程第三步
Settlement Check
(2)WD Allocation
"""
self.login_csadmin('stm_test', 'aa1111')
# 点击出金管理
time.sleep(5)
self.page.chujinguanli_span.click()
# 点击 wd alloction
time.sleep(5)
self.page.wd_alloction_a.click()
# 在搜索框输入用户名
time.sleep(3)
self.page.wd_alloction_username_input.send_keys(self.username)
# 点击 submit
time.sleep(3)
self.page.wd_alloction_username_submit.click()
# 选中搜索到的数据
time.sleep(3)
self.page.wd_alloction_item_div.click()
# 点击 allocate
time.sleep(2)
self.page.wd_alloction_allocate_span.click()
# 输入金额
time.sleep(5)
self.page.wd_allocions_allocate_anount_input.click()
time.sleep(2)
self.page.wd_allocions_allocate_anount_input.send_keys(Keys.BACK_SPACE)
time.sleep(2)
self.page.wd_allocions_allocate_anount_input.send_keys(50)
# # 输入 fee
# time.sleep(5)
# self.page.wd_allocions_fee_input.click()
# self.page.wd_allocions_fee_input.clear()
# self.page.wd_allocions_fee_input.send_keys(0)
# 点击 allocate 按钮
time.sleep(2)
self.page.wd_allocions_allocate_button.click()
# 点击退出按钮
time.sleep(3)
self.page.log_out.click()
def test_step4_1(self):
"""
出金流程第四步
Operation Check
(1) WD Alloc Review
"""
self.login_csadmin('opr_test', 'aa1111')
# 点击出金管理
time.sleep(5)
self.page.chujinguanli_span.click()
# 点击 WD Alloc Review
time.sleep(3)
self.page.wd_alloc_review_a.click()
# 在搜索框输入用户名
time.sleep(3)
self.page.wd_alloc_review_username_input.send_keys(self.username)
# 点击 submit
time.sleep(3)
self.page.wd_alloc_review_username_submit.click()
# 点击 review
time.sleep(3)
self.page.wd_alloc_review_review_a.click()
# 点击 confirm
time.sleep(3)
self.page.wd_alloc_review_confirm_button.click()
# 点击退出按钮
time.sleep(3)
self.page.log_out.click()
def test_step4_2(self):
"""
出金流程第四步
Operation Check
(2) Instruction Prepare
"""
self.login_csadmin('stm_test', 'aa1111')
# 点击出金管理
time.sleep(5)
self.page.chujinguanli_span.click()
def tearDown(self):
pass
@classmethod
def tearDownClass(cls):
"""
关闭浏览器
"""
time.sleep(5)
cls.dr.quit()
if __name__ == '__main__':
unittest.main() |
import AutoEncoder
import sys
import os
from os import listdir
import theano
import numpy as np
from os.path import isfile, join
import cv2
from utils import tile_raster_images
import pickle
import random
import time
from ConvImage import Conv
def getFiles(dir_path):
"""getFiles : gets the file in specified directory
dir_path: String type
dir_path: directory path where we get all files
"""
onlyfiles = [ f for f in listdir(dir_path) if isfile(join(dir_path, f)) ]
return onlyfiles
def getImmediateSubdirectories(dir):
"""
this function returns the immediate subdirectory list
eg:
dir
/subdirectory1
/subdirectory2
.
.
return ['subdirectory1',subdirectory2',...]
"""
return [name for name in os.listdir(dir) if os.path.isdir(os.path.join(dir, name))]
def create_image_patches(images, patch_size, stride=1):
image_patches = []
for img in images:
for i in xrange(0, img.shape[0] - patch_size[0], stride):
for j in xrange(0, img.shape[1] - patch_size[1], stride):
temp = []
for k in xrange(0, img.shape[2]):
temp.append(img[i:i+patch_size[0], j:j+patch_size[1], k].ravel())
image_patches.append(np.concatenate(temp))
return np.asarray(image_patches, dtype=theano.config.floatX)
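# Shape sanity check (hypothetical numbers, added for clarity): for one 64x64x3 image,
# a 7x7 patch size and stride 1, the loops yield (64 - 7) * (64 - 7) = 3249 patches,
# each flattened to 7 * 7 * 3 = 147 values, so the result has shape (3249, 147).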
def test(Weights, counter, ext, channel=1):
"""this is an utility that takes weights and plot there feature as image"""
tile_shape = (8, 8)
image_resize_shape = (10, 10)
img_shape = (window_size, window_size)
newimg = None
if channel == 1:
img = tile_raster_images(X=Weights.T, img_shape=img_shape, tile_shape=tile_shape, tile_spacing=(1, 1))
newimg = np.zeros((img.shape[0]*image_resize_shape[0], img.shape[1]*image_resize_shape[1]))
for i in xrange(img.shape[0]):
for j in xrange(img.shape[1]):
newimg[i*image_resize_shape[0]:(i+1)*image_resize_shape[0], j*image_resize_shape[1]:(j+1)*image_resize_shape[1]] = img[i][j] * np.ones(image_resize_shape)
cv2.imwrite('tmp/'+str(counter)+'_'+ext+'.jpg', newimg)
elif channel == 3:
tile = Weights.shape[0] / channel
i = 0
temp = (Weights.T[:, tile*i:(i+1)*tile], Weights.T[:, (i+1)*tile:(i+2)*tile], Weights.T[:, (i+2)*tile:tile*(i+3)])
img = tile_raster_images(X=temp, img_shape=img_shape, tile_shape=tile_shape, tile_spacing=(1, 1))
newimg = cv2.resize(img, (img.shape[0] * image_resize_shape[0],img.shape[1] * image_resize_shape[1]))
cv2.imwrite('tmp/'+str(counter)+'_'+ext+'.jpg', newimg)
else:
temp = []
Weights = Weights.reshape((window_size*window_size, 64, 64))
for k in xrange(Weights.shape[1]):
img = tile_raster_images(X=Weights[:,k, :].T, img_shape=img_shape, tile_shape=tile_shape, tile_spacing=(1, 1))
newimg = np.zeros((img.shape[0]*image_resize_shape[0], img.shape[1]*image_resize_shape[1]))
for i in xrange(img.shape[0]):
for j in xrange(img.shape[1]):
newimg[i*image_resize_shape[0]:(i+1)*image_resize_shape[0], j*image_resize_shape[1]:(j+1)*image_resize_shape[1]] = img[i][j] * np.ones(image_resize_shape)
temp.append(newimg)
result = np.mean(temp, axis=0)
cv2.imwrite('tmp/'+str(k)+'_'+str(counter)+'_'+ext+'.jpg', result)
window_size = 7
training_epochs = 50
batch_size = 50
file_batch_size = 10
stride = 1
# window_size = 11
# training_epochs = 50
# batch_size = 40
# file_batch_size = 5
# stride = 1
fimgList = getFiles(sys.argv[1])
number_of_file_batch = len(fimgList[0:200]) / file_batch_size
W = pickle.load(open('Weights.pickle_W1'))
cl = Conv()
aeL1 = AutoEncoder.AutoEncoder(number_of_inputLayer=window_size*window_size*64, number_of_hiddenLayer=64)
trainL1 = aeL1.fit(corruption_quantity=0.30, learning_rate=0.1, batch_size=batch_size, have_sparsity_penalty=False, type_cost='SQUAREMEAN', output_type_non_linearity='RELU')
for epoch in xrange(training_epochs):
random.shuffle(fimgList)
st = time.time()
c = []
print 'for epoch ', epoch
for batch_index_file in xrange(number_of_file_batch):
images = []
#print ' reading data in range(', batch_index_file * file_batch_size, ',', (batch_index_file+1) * file_batch_size, ')'
for fimg in fimgList[batch_index_file * file_batch_size:(batch_index_file+1) * file_batch_size]:
img = cv2.imread(sys.argv[1]+'/'+fimg)
img = cv2.resize(img, ((200*img.shape[1])/img.shape[0], 200)) / 255.0
img = cl.convolve(img, W, feature_shape=(7, 7, 3))
img = cl.max_pooling(img)
images.append(img)
data = create_image_patches(images, patch_size=(window_size, window_size), stride=stride)
aeL1.add_sample(data)
if data.shape[0] % batch_size == 0:
n_train_batches = data.shape[0]/batch_size
else:
n_train_batches = (data.shape[0]/batch_size) + 1
for batch_index in xrange(n_train_batches):
c.append(trainL1(batch_index))
fwWeights = open('Weights.pickle_W1_7x7_RELU_'+str(epoch), 'w')
print 'Training epoch %d, cost ' % epoch, np.mean(c), ' time consumed ', time.time() - st
test(aeL1.W.get_value(), epoch, 'W1_7x7_RELU_', channel=64)
W1 = aeL1.W.get_value()
print >> fwWeights, pickle.dumps(W1)
fwWeights.close() |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.exceptions import AirflowException
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
class ValidStateDep(BaseTIDep):
"""
Ensures that the task instance's state is in a given set of valid states.
:param valid_states: A list of valid states that a task instance can have to meet
this dependency.
:return: whether or not the task instance's state is valid
"""
NAME = "Task Instance State"
IGNORABLE = True
def __init__(self, valid_states):
super().__init__()
if not valid_states:
raise AirflowException("ValidStatesDep received an empty set of valid states.")
self._valid_states = valid_states
def __eq__(self, other):
return isinstance(self, type(other)) and self._valid_states == other._valid_states
def __hash__(self):
return hash((type(self), tuple(self._valid_states)))
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if dep_context.ignore_ti_state:
yield self._passing_status(reason="Context specified that state should be ignored.")
return
if ti.state in self._valid_states:
yield self._passing_status(reason=f"Task state {ti.state} was valid.")
return
yield self._failing_status(reason=f"Task is in the '{ti.state}' state.")
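# Usage sketch (the states below are illustrative, not mandated by this file):
# from airflow.utils.state import State
# dep = ValidStateDep({State.QUEUED, State.SCHEDULED})
# A task instance passes this dependency only when ti.state is in that set,
# unless dep_context.ignore_ti_state is set.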
|
import numpy as np
import matplotlib.pyplot as plt
import os
def plotCumulativeReward(train_episodes, training_rewards,path):
x = range(train_episodes)
plt.plot(x, training_rewards)
plt.xlabel('Episode')
plt.ylabel('Training total reward')
plt.title('Total rewards over all episodes in training')
plt.savefig(path)
plt.clf()
def plotCumulativeReward2(train_episodes, training_rewards,path,epsilons):
x = range(train_episodes)
fig=plt.figure()
ax=fig.add_subplot(111, label="1")
ax2=fig.add_subplot(111, label="2", frame_on=False)
ax.plot(x, epsilons, color="C1")
ax.set_ylabel("Epsilon", color="C1")
ax.tick_params(axis='x', colors="C1")
ax.tick_params(axis='y', colors="C1")
ax.yaxis.set_label_position('right')
ax.yaxis.tick_right()
ax2.plot(x, training_rewards, color="C0")
ax2.set_xlabel('Episode', color="C0")
ax2.set_ylabel('Training total reward', color="C0")
plt.title('Total rewards over all episodes in training')
plt.savefig(path)
plt.clf()
def plotValueFunction(value_map,path):
c = plt.imshow(value_map, cmap='hot', interpolation='nearest')
plt.colorbar(c)
plt.title('Value Function', fontweight ="bold")
plt.savefig(path)
plt.clf()
def plotDistributionGraph(x_axis,y_axis,x_title,y_title,main_title, path):
plt.figure(figsize = (10, 8))
plt.plot(x_axis , y_axis, 'bo', alpha = 0.5)
plt.xlabel(x_title, size = 22); plt.ylabel(y_title, size = 22); plt.title(main_title, size = 24)
plt.savefig(path)
plt.clf()
def plotDistributionHistogram(hyperParameterValues,x_title,y_title, main_title, path):
plt.figure(figsize = (8, 6))
plt.hist(hyperParameterValues, bins = 50, edgecolor = 'k')
plt.title(main_title)
plt.xlabel(x_title)
plt.ylabel(y_title)
asdf = os.getcwd()
directory_contents = os.listdir(asdf)
plt.savefig(path)
plt.clf()
|
# -*- coding: utf-8 -*-
import sys
import math
import urllib.request
import os
from lxml import etree
import requests
import json
from bs4 import BeautifulSoup
import xlsxwriter
def read_html(url):
try:
file1 = urllib.request.urlopen(url, timeout=5)
data = file1.read()
return data
except:
try:
file1 = urllib.request.urlopen(url, timeout=5)
data = file1.read()
return data
except:
print('read_html error:'+str(url))
return None
def scrapy(url):
data = read_html(url)
if data is not None:
html = etree.HTML(data)
# Thread links
link = html.xpath("//a[@class='j_th_tit ']/@href")
# Number of posts
post_num = int(html.xpath('//span[@class="red_text"]')[0].text)
pages = math.ceil(post_num/50)
return link, pages
else:
print('get link error:'+str(url))
return None, None
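# Note: the forum list shows 50 threads per page, hence pages = ceil(post_num / 50);
# individual thread pages are fetched later via the "?pn=" query parameter.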
def get_url(url):
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0',
'Connection': 'close'}
try:
data1 = requests.get(url, headers=headers, timeout=(3, 7))
return data1
except:
try:
data1 = requests.get(url, headers=headers, timeout=(3, 7))
return data1
except:
print('get url error:'+str(url))
return None
def get_page_num(soup, max_page):
try:
limit = soup.find_all('span', class_='red')
page_t = limit[1].get_text()
page = min(int(page_t), int(max_page))
return page
except:
return None
def test1(url, file, max_page, count, page_now, page_total):
data1 = get_url(url)
if data1 is not None:
content = data1.text
soup = BeautifulSoup(content, 'html.parser')
excel_name = str(count)
if soup.title.string != "贴吧404":
page = get_page_num(soup, max_page)
if page is None:
print('Cannot get page:'+str(url))
else:
if os.path.exists(file):
wb1 = xlsxwriter.Workbook(file + '/' + excel_name + '.xlsx')
else:
os.makedirs(file)
wb1 = xlsxwriter.Workbook(file + '/' + excel_name + '.xlsx')
ws = wb1.add_worksheet(file)
rnb = 0
# For each page of the thread
for k in range(int(page)):
url1 = url + "?pn=" + str(k + 1)
print(url1+' total:'+str(int(page))+' now '+str(page_now)+' in '+str(page_total)+' pages')
data1 = get_url(url1)
if data1 is not None:
content = data1.text
soup = BeautifulSoup(content, 'html.parser')
# For each floor (reply) on the page
for post in soup.find_all('div', class_='l_post'):
# Find the reply content
try:
rep = post.find('div', class_='d_post_content')
postcontent = rep.get_text()
except:
print('Reply content not found')
postcontent = None
# Find the post time
try:
te = post.find_all('span', class_="tail-info")
posttime = te[-2].get_text() + te[-1].get_text()
except:
try:
jsonObject = json.loads(post.attrs['data-field'])
floornum = jsonObject['content']['post_no']
floortime = jsonObject['content']['date']
posttime = str(floornum)+'楼'+str(floortime)
except:
print('Post time not found')
posttime = None
# Find the author
try:
# jsonObject = json.loads(post.attrs['data-field'])
# postname = jsonObject['author']['user_name']
pp = post.find('a', class_='p_author_name')
postname = pp.get_text()
except:
print('Author not found')
postname = None
if (postcontent is not None) and (posttime is not None) and (postname is not None):
ws.write(0, 3 * rnb, postcontent)
ws.write(0, 3 * rnb + 1, posttime)
ws.write(0, 3 * rnb + 2, postname)
rnb += 1
else:
print('post floor {} error:{}'.format(rnb+1, str(url)))
else:
print('post page error:'+str(url))
wb1.close()
if __name__ == "__main__":
if len(sys.argv) == 4:
name = sys.argv[1]
file = sys.argv[2]
max_page = sys.argv[3]
elif len(sys.argv) == 3:
name = sys.argv[1]
file = sys.argv[2]
max_page = 10000
else:
name = '柯南'
file = 'kenan'
max_page = 100
x = urllib.request.quote(name)
href, page_num = scrapy("https://tieba.baidu.com/f?kw="+x)
max_page_num = min(page_num, 200)
count = 1
for j in range(0, max_page_num):
x = urllib.request.quote(name)
href, page_s = scrapy("https://tieba.baidu.com/f?kw="+x+"&pn="+str(j*50))
if href is not None:
# print('----post pages {}/{}----'.format(j+1, max_page_num))
for i in href:
path = "https://tieba.baidu.com"+i
test1(path, file, max_page, count, j+1, max_page_num)
count = count + 1
else:
print('href error')
|
#!/usr/bin/python
###########################################################
#
# This python script is used for mysql database backup
# using mysqldump and tar utility.
#
# Written by : MmrDev
# Tested with : Python 2.7.15 & Python 3.5
#
##########################################################
import os
import time
from datetime import date
class File:
def __init__(self, address):
self.address = address
def listFiles(self):
return os.listdir(self.address)
def removeWeeklyBackups(self, files, today):
# Delete .sql backups made 8 days ago (the day-of-month is parsed from the last characters of the file name).
today = int(today)
for key in files:
item = key[-6:]
backUp_day = int(item[:2]) + 8
if today == backUp_day and key[-3:] == 'sql':
os.remove('/opt/dbBackUp/' + key)
print(key + ' removed')
def main():
address = '/opt/dbBackUp'
today = format(date.today())[-2:]
file = File(address)
files = file.listFiles()
print('list of backUps : ')
print(files)
print('_______________\n')
file.removeWeeklyBackups(files, today)
if __name__ == '__main__':
main()
|
from django.core.management.base import BaseCommand
from openhumans.models import OpenHumansMember
from main.models import DataSourceMember
from django.conf import settings
from datauploader.tasks import process_github
# import vcr
class Command(BaseCommand):
help = 'Import existing users from legacy project'
def add_arguments(self, parser):
parser.add_argument('--infile', type=str,
help='CSV with project_member_id & refresh_token')
parser.add_argument('--delimiter', type=str,
help='CSV delimiter')
# @vcr.use_cassette('import_users.yaml', decode_compressed_response=True)
# record_mode='none')
def handle(self, *args, **options):
for line in open(options['infile']):
line = line.strip().split(options['delimiter'])
oh_id = line[0]
oh_refresh_token = line[1]
github_refresh_token = line[2]
if len(OpenHumansMember.objects.filter(
oh_id=oh_id)) == 0:
oh_member = OpenHumansMember.create(
oh_id=oh_id,
access_token="mock",
refresh_token=oh_refresh_token,
expires_in=-3600)
oh_member.save()
oh_member._refresh_tokens(client_id=settings.OPENHUMANS_CLIENT_ID,
client_secret=settings.OPENHUMANS_CLIENT_SECRET)
oh_member = OpenHumansMember.objects.get(oh_id=oh_id)
github_member = DataSourceMember(
access_token="mock",
refresh_token=github_refresh_token,
token_expires=DataSourceMember.get_expiration(
-3600)
)
github_member.user = oh_member
github_member._refresh_tokens(
client_id=settings.GITHUB_CLIENT_ID,
client_secret=settings.GITHUB_CLIENT_SECRET
)
process_github.delay(oh_member.oh_id)
process_github(oh_member.oh_id)
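# Invocation sketch (the command name follows this file's module name; the CSV path and delimiter are assumptions):
# python manage.py import_users --infile legacy_members.csv --delimiter ","
# Each row is expected to hold: project_member_id, OH refresh_token, GitHub refresh_token.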
|
from firebase import firebase
class FirebaseManager:
firebase = firebase.FirebaseApplication("https://qless-74979.firebaseio.com", None)
def get_queues(self):
return self.firebase.get('queues', '')
def get_walk_in_queue(self):
return self.firebase.get('queues/walk_in', '')
def get_doctor_queue(self, doctor_name):
path = "queues/" + doctor_name
return self.firebase.get(path, '')
def get_now_paging(self):
return self.firebase.get("now_paging", '')
def get_patients_seen(self):
return self.firebase.get("patients_seen", '')
def check_in_scheduled_user(self, doctor_name, user_index, current_time, predicted_start_time_min, predicted_start_time_max):
path = "queues/" + doctor_name + "/" + str(user_index)
self.firebase.put(path, "is_checked_in", True)
self.firebase.put(path, "check_in_time", current_time)
self.firebase.put(path, "predicted_start_time_min", predicted_start_time_min)
self.firebase.put(path, "predicted_start_time_max", predicted_start_time_max)
def update_queue(self, doctor_name, data):
path = "queues/"
self.firebase.put(path, doctor_name, data)
def update_now_paging(self, data):
self.firebase.put('', "now_paging", data)
def update_seen_users(self, data):
self.firebase.put('', "patients_seen", data)
def update_users(self, data):
self.firebase.put('', "users", data)
def add_walk_in_user(self, index, user_id, real_name, name, current_time, predicted_start_time_min, predicted_start_time_max):
data = {
index: {
'id': user_id,
'check_in_time': current_time,
'real_name': real_name,
'name': name,
'predicted_start_time_min': predicted_start_time_min,
'predicted_start_time_max': predicted_start_time_max
}
}
self.firebase.patch('queues/walk_in', data)
def add_paging_user(self, index, data):
self.firebase.patch("now_paging", {index: data})
def add_seen_user(self, index, data):
self.firebase.patch("patients_seen", {index: data})
def get_users(self):
return self.firebase.get('users', '')
def add_user(self, user_id, name):
users = self.get_users()
if users is None:
users = []
# first check if user is existing
index = 0
for user in users:
if user.get('id') == user_id:
path = "users/" + str(index)
self.firebase.put(path, "id", user_id)
self.firebase.put(path, "name", name)
return
index = index + 1
# otherwise append new user
new_user = {
len(users): {
"id": user_id,
"name": name
}
}
self.firebase.patch("users", new_user)
|
import numpy as np
import load_data as ld
import functions as fun
training = ld.get_data('Training')
testing = ld.get_data('Testing')
biz_test = ld.get_data('Dissimilar')
weights = fun.fit(training)
for i in range(testing.shape[0]):
print('initial', testing[i])
test_output = fun.predict(weights,testing[i],100)
print('target', training[i])
print('output', test_output)
print('-------------')
results = fun.number_of_attractors(weights)
print('attractors',np.unique(results,axis=0))
print('training', training)
print("number of attractors ", np.unique(results,axis=0).shape[0])
print('initial bizarre', biz_test)
biz_out=fun.predict(weights,biz_test,100)
print('final bizarre', biz_out) |
# Author: Daniel Eynis
# ML HW1: Perceptrons
import perceptron
import numpy as np
p = perceptron.Perceptron(learn_rate=0.1)
w_m, t_a_h, tr_a_h, c_m = p.learn()
f = open("data.txt", "w")
f.write(np.array_str(c_m) + "\n")
f.write("Test acc: " + str(t_a_h) + "\n")
f.write("Train acc: " + str(tr_a_h) + "\n")
f.close()
|
from unittest.mock import patch, create_autospec
from web.companies.services import DnbServiceClient
from web.core.notify import NotifyService
from web.grant_management.flows import GrantManagementFlow
from web.grant_management.models import GrantManagementProcess
from web.grant_management.tests.helpers import GrantManagementFlowTestHelper
from web.tests.factories.grant_applications import CompletedGrantApplicationFactory
from web.tests.factories.users import UserFactory
from web.tests.helpers import BaseTestCase
@patch.object(
DnbServiceClient, 'get_company',
return_value={
'primary_name': 'company-1',
'is_employees_number_estimated': True,
'employee_number': 1,
'annual_sales': 100,
}
)
@patch('web.grant_management.flows.NotifyService')
class TestGrantManagementFlow(GrantManagementFlowTestHelper, BaseTestCase):
def setUp(self):
super().setUp()
self.user = UserFactory(is_superuser=True)
self.ga = CompletedGrantApplicationFactory()
def test_is_start_of_process(self, *mocks):
self.assertEqual(GrantManagementFlow.start.task_type, 'START')
def test_start_flow_sends_email_notification(self, *mocks):
notify_service = create_autospec(NotifyService)
mocks[0].return_value = notify_service
GrantManagementFlow.start.run(grant_application=self.ga)
notify_service.send_application_submitted_email.assert_called_once_with(
email_address=self.ga.applicant_email,
applicant_full_name=self.ga.applicant_full_name,
application_id=self.ga.id_str,
)
def test_grant_management_happy_path(self, *mocks):
notify_service = create_autospec(NotifyService)
mocks[0].return_value = notify_service
self.client.force_login(self.user)
# start flow and step through to end of flow
ga_process = self._start_process_and_step_through_until()
# Grant approved email should have been sent
notify_service.send_application_approved_email.assert_called()
notify_service.send_application_rejected_email.assert_not_called()
# All tasks should be completed
self.assertFalse(ga_process.active_tasks().exists())
# Process should be marked as finished
self.assertIsNotNone(ga_process.finished)
def test_grant_management_rejection(self, *mocks):
notify_service = create_autospec(NotifyService)
mocks[0].return_value = notify_service
self.client.force_login(self.user)
# start flow
ga_process = self._start_process_and_step_through_until('decision')
# Reject applicant
next_task = ga_process.active_tasks().first() # Next task should be the decision task
self.assertEqual(next_task.flow_task.name, 'decision')
self._assign_task(ga_process, next_task)
self._complete_task(
ga_process, next_task, data={'decision': GrantManagementProcess.Decision.REJECTED}
)
# Rejection email should have been sent
notify_service.send_application_rejected_email.assert_called()
notify_service.send_application_approved_email.assert_not_called()
# All tasks should be completed
self.assertFalse(ga_process.active_tasks().exists())
# Process should be marked as finished
self.assertIsNotNone(ga_process.finished)
def test_grant_management_decision_cannot_be_empty(self, *mocks):
self.client.force_login(self.user)
ga_process = self._start_process_and_step_through_until('decision')
# Next task should be the decision task
next_task = ga_process.active_tasks().first()
self.assertEqual(next_task.flow_task.name, 'decision')
# Try to reject applicant
self._assign_task(ga_process, next_task)
response, _ = self._complete_task(ga_process, next_task, make_asserts=False)
self.assertFormError(response, 'form', 'decision', 'This field is required.')
# Task should not be completed
self.assertIsNone(next_task.finished)
|
from __future__ import print_function
import time
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import adversarial as ad
from sklearn.utils import shuffle
def loss(g, Y, mean=True, add_other_losses=True):
"""Cross-entropy loss between labels and output of linear activation function"""
out = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y, logits=g)
if mean:
out = tf.reduce_mean(out)
if add_other_losses:
tf.add_to_collection('losses', out)
return tf.add_n(tf.get_collection('losses'))
return out
def acc(g, Y):
"""Accuracy"""
correct_prediction = tf.equal(Y, tf.argmax(g, 1))
return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def graph_builder_wrapper(arch,
num_classes=10,
adv='erm',
eps=0.3,
save_dir=None,
wd=0,
update_collection=None,
beta=1.,
save_histograms=False,
num_channels=3,
max_save=200,
training=False,
loss=loss,
order=2,
opt='momentum'):
"""Wrapper for building graph and accessing all relevant ops/placeholders"""
assert isinstance(adv, str)
input_data = tf.placeholder(tf.float32, shape=[None, 28, 28, num_channels], name='in_data')
input_labels = tf.placeholder(tf.int64, shape=[None], name='in_labels')
fc_out = arch(input_data, num_classes=num_classes, wd=wd, training=training,
beta=beta, update_collection=update_collection)
# Loss and optimizer (with adversarial training options)
learning_rate = tf.Variable(0.01, name='learning_rate', trainable=False)
if adv in ['wrm', 'fgm', 'pgm']:
if adv == 'wrm':
adv_x = ad.wrm(input_data, fc_out, eps=eps, order=order, model=arch, k=15,
num_classes=num_classes, graph_beta=beta, training=training)
elif adv == 'fgm':
adv_x = ad.fgm(input_data, fc_out, eps=eps, order=order, training=training)
elif adv == 'pgm':
adv_x = ad.pgm(input_data, fc_out, eps=eps, order=order, model=arch, k=15,
num_classes=num_classes, graph_beta=beta, training=training)
fc_out_adv = arch(adv_x, num_classes=num_classes, wd=wd,
beta=beta, update_collection=update_collection, reuse=True, training=training)
else:
fc_out_adv = fc_out
total_loss = loss(fc_out_adv, input_labels)
total_acc = acc(fc_out_adv, input_labels)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
if num_channels == 1 or opt == 'adam': # For MNIST dataset
opt_step = tf.train.AdamOptimizer(0.001).minimize(total_loss)
else:
opt_step = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(total_loss)
# Output dictionary to useful tf ops in the graph
graph = dict(
input_data = input_data,
input_labels = input_labels,
total_loss = total_loss,
total_acc = total_acc,
fc_out = fc_out,
fc_out_adv = fc_out_adv,
opt_step = opt_step,
learning_rate = learning_rate
)
# Saving weights and useful information to tensorboard
if save_dir is not None:
saver = tf.train.Saver(max_to_keep=max_save)
graph['saver'] = saver
if not os.path.isdir(save_dir):
tf.summary.scalar('loss', total_loss)
tf.summary.scalar('accuracy', total_acc)
# Add histograms for trainable variables (really slows down training though)
if save_histograms:
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Merge all the summaries and write them out to save_dir
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(save_dir, 'train'))
graph_writer = tf.summary.FileWriter(os.path.join(save_dir, 'graph'), graph=tf.get_default_graph())
valid_writer = tf.summary.FileWriter(os.path.join(save_dir, 'validation'))
graph['merged'] = merged
graph['train_writer'] = train_writer
graph['graph_writer'] = graph_writer
graph['valid_writer'] = valid_writer
return graph
def train(Xtr, Ytr, graph, save_dir,
val_set=None,
lr_initial=0.01,
seed=0,
num_epochs=100,
batch_size=100,
write_every=1,
save_every=None,
verbose=True,
load_epoch=-1,
early_stop_acc=None,
early_stop_acc_num=10,
gpu_prop=0.2,
shuffle_data=True):
"""Train the graph"""
np.random.seed(seed)
tf.set_random_seed(seed)
if save_every is None:
if num_epochs > 100:
save_every = num_epochs/100
else:
save_every = 1
start = time.time()
training_losses, training_accs = [], []
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=gpu_prop))) as sess:
sess.run(tf.global_variables_initializer())
if load_epoch > -1:
if verbose:
print('Continuing training starting at epoch %s+1'%(load_epoch))
if save_dir is not None:
restore_weights_file = os.path.join(save_dir, 'checkpoints', 'epoch%s'%(load_epoch))
if 'saver' in graph:
graph['saver'].restore(sess, restore_weights_file)
else:
if save_dir is not None and not os.path.exists(os.path.join(save_dir, 'checkpoints')):
os.mkdir(os.path.join(save_dir, 'checkpoints'))
if 'saver' in graph and save_dir is not None:
graph['saver'].save(sess, os.path.join(save_dir, 'checkpoints', 'epoch0'))
for epoch in range(load_epoch+2, load_epoch+num_epochs+2):
lr = lr_initial*0.95**(epoch/390.) # initial lr * decay rate ^(step/decay_steps)
sess.run(graph['learning_rate'].assign(lr))
t = time.time()
training_loss = 0.
training_acc = 0.
steps = 0
if shuffle_data:
Xtr_, Ytr_ = shuffle(Xtr, Ytr)
else:
Xtr_, Ytr_ = Xtr, Ytr
if len(Xtr_)%batch_size == 0:
end = len(Xtr_)
else:
end = len(Xtr_)-batch_size
for i in range(0, end, batch_size):
x, y = Xtr_[i:i+batch_size], Ytr_[i:i+batch_size]
feed_dict = {graph['input_data']: x, graph['input_labels']: y}
training_loss_, training_acc_, _ = \
sess.run([graph['total_loss'], graph['total_acc'], graph['opt_step']],
feed_dict=feed_dict)
training_loss += training_loss_
training_acc += training_acc_
steps += 1
if verbose:
print('\rEpoch %s/%s (%.3f s), batch %s/%s (%.3f s): loss %.3f, acc %.3f'
%(epoch, load_epoch+num_epochs+1, time.time()-start, steps,
len(Xtr_)/batch_size, time.time()-t, training_loss_, training_acc_),
end='')
if 'saver' in graph and epoch%write_every == 0: # writing to tensorboard
summary = sess.run(graph['merged'], feed_dict=feed_dict)
graph['train_writer'].add_summary(summary, epoch)
if val_set is not None: # make sure to keep the val_set small
feed_dict = {graph['input_data']: val_set['X'],
graph['input_labels']: val_set['Y']}
summary = sess.run(graph['merged'], feed_dict=feed_dict)
graph['valid_writer'].add_summary(summary, epoch)
if 'saver' in graph and save_dir is not None and epoch%save_every == 0:
graph['saver'].save(sess, os.path.join(save_dir, 'checkpoints', 'epoch%s'%(epoch)))
training_losses.append(training_loss/float(steps))
training_accs.append(training_acc/float(steps))
if early_stop_acc is not None and np.mean(training_accs[-early_stop_acc_num:]) >= early_stop_acc:
if verbose:
print('\rMean acc >= %s for last %s epochs. Stopping training after epoch %s/%s.'
%(early_stop_acc, early_stop_acc_num, epoch, load_epoch+num_epochs+1), end='')
break
if verbose: print('\nDONE: Trained for %s epochs.'%(epoch))
if 'saver' in graph and save_dir is not None and not os.path.exists(os.path.join(save_dir, 'checkpoints', 'epoch%s'%(epoch))):
graph['saver'].save(sess, os.path.join(save_dir, 'checkpoints', 'epoch%s'%(epoch)))
return training_losses, training_accs
def build_graph_and_train(Xtr, Ytr, save_dir, arch,
num_classes=10,
num_channels=3,
adv=None,
eps=0.3,
wd=0,
gpu_id=0,
verbose=True,
beta=1.,
order=2,
opt='momentum',
get_train_time=False,
**kwargs):
"""Build tensorflow graph and train"""
tf.reset_default_graph()
if verbose: start = time.time()
with tf.device("/gpu:%s"%(gpu_id)):
if save_dir is None or not os.path.exists(save_dir) or 'checkpoints' not in os.listdir(save_dir):
graph = graph_builder_wrapper(arch, adv=adv, eps=eps,
num_classes=num_classes, save_dir=save_dir,
wd=wd, beta=beta, num_channels=num_channels,
order=order, training=True, opt=opt)
if get_train_time:
start = time.time()
tr_losses, tr_accs = train(Xtr, Ytr, graph, save_dir, **kwargs)
if get_train_time:
train_time = time.time()-start
else:
graph = graph_builder_wrapper(arch, num_classes=num_classes, save_dir=save_dir,
wd=wd, beta=beta, num_channels=num_channels,
order=order, update_collection='_', opt=opt)
if verbose:
print('Model already exists.. loading trained model..')
# Use the caller's gpu_prop if given; otherwise fall back to the 0.2 default used elsewhere in this file.
gpu_prop = kwargs.get('gpu_prop', 0.2)
if save_dir is None:
train_acc = np.nan
if verbose:
print('save_dir set to None.. returning NaN since weights not saved')
else:
Ytrhat = predict_labels(Xtr, graph, save_dir, gpu_prop=gpu_prop)
train_acc = np.sum(Ytrhat == Ytr)/float(len(Ytr))
if verbose:
print('Train acc: %.2f (%.1f s elapsed)'%(train_acc, time.time()-start))
if get_train_time:
return train_acc, train_time
return train_acc
def predict_labels_in_sess(X, graph, sess, batch_size=100):
"""Predict labels within a session"""
labels = np.zeros(len(X))
for i in range(0, len(X), batch_size):
g_ = sess.run(graph['fc_out'], feed_dict = {graph['input_data']:X[i:i+batch_size]})
labels[i:i+batch_size] = np.argmax(g_, 1)
return labels
def latest_epoch(save_dir):
"""Grabs int corresponding to last epoch of weights saved in save_dir"""
return max([int(f.split('epoch')[1].split('.')[0])
for f in os.listdir(os.path.join(save_dir, 'checkpoints')) if 'epoch' in f])
def predict_labels(X, graph, load_dir,
batch_size=100,
load_epoch=None,
gpu_prop=0.2):
"""Use trained model to predict"""
# Load from checkpoint corresponding to latest epoch if none given
if load_epoch is None:
load_epoch = latest_epoch(load_dir)
else:
load_epoch = np.min((latest_epoch(load_dir), load_epoch))
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=gpu_prop))) as sess:
graph['saver'].restore(sess, os.path.join(load_dir, 'checkpoints', 'epoch%s'%(load_epoch)))
return predict_labels_in_sess(X, graph, sess, batch_size=batch_size)
def build_graph_and_predict(X, load_dir, arch,
Y=None,
num_classes=10,
gpu_id=0,
beta=1.,
num_channels=3,
load_epoch=None,
gpu_prop=0.2,
order=2,
opt='momentum'):
"""Build a tensorflow graph and predict labels"""
tf.reset_default_graph()
with tf.device("/gpu:%s"%(gpu_id)):
graph = graph_builder_wrapper(arch, num_classes=num_classes, save_dir=load_dir,
order=order, beta=beta, opt=opt,
update_collection='_', num_channels=num_channels)
Yhat = predict_labels(X, graph, load_dir, load_epoch=load_epoch, gpu_prop=gpu_prop)
if Y is None:
return Yhat
return np.sum(Yhat == Y)/float(len(Y))
def build_graph_and_get_acc(X, Y, arch, adv='erm', eps=0.3, save_dir=None, beta=1., order=2,
batch_size=100, gpu_prop=0.2, load_epoch=None, num_channels=3, opt='momentum'):
"""Build a tensorflow graph and gets accuracy"""
tf.reset_default_graph()
graph = graph_builder_wrapper(arch, adv=adv, eps=eps, save_dir=save_dir,
update_collection='_', beta=beta, opt=opt,
order=order, num_channels=num_channels)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=gpu_prop))) as sess:
load_file = tf.train.latest_checkpoint(os.path.join(save_dir, 'checkpoints'))
if load_epoch is not None:
load_file = load_file.replace(load_file.split('epoch')[1], str(load_epoch))
graph['saver'].restore(sess, load_file)
num_correct = 0
num_total_samples = 0
for i in range(0, len(X), batch_size):
x, y = X[i:i+batch_size], Y[i:i+batch_size]
num_batch_samples = len(x)
feed_dict = {graph['input_data']: x, graph['input_labels']: y}
num_correct += sess.run(graph['total_acc'], feed_dict=feed_dict)*num_batch_samples
num_total_samples += num_batch_samples
return num_correct/num_total_samples
def recover_curve(X, Y, load_dir,
num_classes=10,
gpu_id=0,
verbose=True,
keyword='epoch'):
"""Evaluate performance on a dataset during training"""
list_epochs = np.unique([int(f.split(keyword)[1].split('.')[0]) \
for f in os.listdir(os.path.join(load_dir, 'checkpoints')) if keyword in f])
accs = np.zeros(len(list_epochs))
if verbose: start = time.time()
for i, epoch in enumerate(list_epochs):
accs[i] = build_graph_and_predict(X, load_dir,
Y=Y,
num_classes=num_classes,
gpu_id=gpu_id,
load_epoch=epoch)
if verbose:
print('\rRecovered accuracy for %s %s/%s: %.2f (%.2f s elapsed)'
%(keyword, i+1, len(list_epochs), accs[i], time.time()-start), end='')
if verbose:
print('')
return accs
def recover_train_and_test_curves(Xtr, Ytr, Xtt, Ytt, load_dir,
num_classes=10,
gpu_id=0,
verbose=True):
"""Recover training and test curves"""
train_accs = recover_curve(Xtr, Ytr, load_dir,
num_classes=num_classes,
gpu_id=gpu_id,
verbose=verbose)
test_accs = recover_curve(Xtt, Ytt, load_dir,
num_classes=num_classes,
gpu_id=gpu_id,
verbose=verbose)
return train_accs,test_accs
def get_embedding_in_sess(X, graph, sess, batch_size=100):
"""Gets embedding (last layer output) within a session"""
num_classes = graph['fc_out_adv'].shape.as_list()[1]
embedding = np.zeros((len(X), num_classes))
for i in range(0, len(X), batch_size):
embedding_ = sess.run(graph['fc_out_adv'], feed_dict = {graph['input_data']:X[i:i+batch_size]})
embedding[i:i+batch_size] = embedding_
return embedding
def get_embedding(X, load_dir, arch, num_classes=10, num_channels=3, beta=1.,
adv='erm', eps=0.3, order=2,
batch_size=100, sn_fc=False, load_epoch=None, gpu_prop=0.2):
"""recovers the representation of the data at the layer before the softmax layer
Use sn_fc to indicate that last layer (should be named 'fc/weights:0') needs to be
spectrally normalized.
"""
tf.reset_default_graph()
graph = graph_builder_wrapper(arch, num_classes=num_classes, num_channels=num_channels,
save_dir=load_dir, beta=beta, update_collection='_',
order=order, adv=adv, eps=eps)
if load_epoch is None:
load_epoch = latest_epoch(load_dir)
else:
load_epoch = np.min((latest_epoch(load_dir), load_epoch))
if sn_fc:
assert 'fc/weights:0' in [v.name for v in tf.global_variables()]
W_fc_tensor = [v for v in tf.global_variables() if v.name == 'fc/weights:0'][0]
b_fc_tensor = [v for v in tf.global_variables() if v.name == 'fc/bias:0'][0]
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=gpu_prop))) as sess:
graph['saver'].restore(sess, os.path.join(load_dir, 'checkpoints', 'epoch%s'%(load_epoch)))
# spectral normalization on last layer (fully connected)
if sn_fc:
W_fc, b_fc = sess.run([W_fc_tensor, b_fc_tensor])
sigma = np.linalg.svd(W_fc.T)[1][0]
sess.run([W_fc_tensor.assign(W_fc/sigma), b_fc_tensor.assign(b_fc/sigma)])
return get_embedding_in_sess(X, graph, sess, batch_size=batch_size)
def get_grads_wrt_samples(X, Y, load_dir, arch, num_classes=10, num_channels=3, beta=1.,
batch_size=100, load_epoch=None, gpu_prop=0.2):
"""Computes gradients with respect to samples"""
if load_epoch is None:
load_epoch = latest_epoch(load_dir)
else:
load_epoch = np.min((latest_epoch(load_dir), load_epoch))
tf.reset_default_graph()
graph = graph_builder_wrapper(arch, num_classes=num_classes, num_channels=num_channels,
save_dir=load_dir, beta=beta, update_collection='_')
grad, = tf.gradients(graph['total_loss'], graph['input_data'])
g = np.zeros(np.shape(X))
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=gpu_prop))) as sess:
graph['saver'].restore(sess, os.path.join(load_dir, 'checkpoints', 'epoch%s'%(load_epoch)))
for i in range(0, len(X), batch_size):
g_ = sess.run(grad, feed_dict={graph['input_data']: X[i:i+batch_size],
graph['input_labels']: Y[i:i+batch_size]})
g[i:i+batch_size] = g_
return g
def check_weights_svs(load_dir, arch, num_classes=10, n=2, load_epoch=None, beta=1.):
"""Check singular value of all weights"""
tf.reset_default_graph()
graph = graph_builder_wrapper(arch, num_classes=num_classes, save_dir=load_dir,
update_collection='_', beta=beta)
if load_epoch is None:
load_epoch = latest_epoch(load_dir)
else:
load_epoch = np.min((latest_epoch(load_dir), load_epoch))
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
# Grab all weights
graph['saver'].restore(sess, os.path.join(load_dir, 'checkpoints', 'epoch%s'%(load_epoch)))
for tfvar in tf.get_collection('w_after_sn'):
if 'weights' in tfvar.name:
W = tfvar.eval(session=sess)
print('%30s with shape %15s and top %s sv(s): %s' \
%(tfvar.name, np.shape(W), n,
', '.join(['%.2f'%(i) for i in np.linalg.svd(W.reshape(-1, np.shape(W)[-1]))[1][:n]])))
def print_total_number_of_trainable_params():
"""prints total number of trainable parameters according to default graph"""
total_parameters = 0
for variable in tf.trainable_variables():
# shape is an array of tf.Dimension
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
print(total_parameters)
def extract_curve_tensorboard(tb_log_file, curve='loss'):
"""Given the name of a tensorboard event file, returns the desired curve"""
values = []
for e in tf.train.summary_iterator(tb_log_file):
for v in e.summary.value:
if v.tag == curve:
values.append(v.simple_value)
return np.array(values)
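# Usage sketch (the event-file path is an assumption):
# losses = extract_curve_tensorboard('save_dir/train/events.out.tfevents.12345', curve='loss')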
def extract_train_valid_tensorboard(load_dir, curve='accuracy', show_plot=False, only_final_value=False):
"""For a particular model, grab the tfevents training and validation curves"""
# get train
event_file = sorted(os.listdir(os.path.join(load_dir, 'train')))[0]
tb_log_file = os.path.join(load_dir, 'train', event_file)
train_values = extract_curve_tensorboard(tb_log_file, curve=curve)
# get validation
event_file = sorted(os.listdir(os.path.join(load_dir, 'validation')))[0]
tb_log_file = os.path.join(load_dir, 'validation', event_file)
valid_values = extract_curve_tensorboard(tb_log_file, curve=curve)
if show_plot:
plt.figure()
plt.plot(train_values, label='training %s'%(curve))
plt.plot(valid_values, label='validation %s'%(curve))
plt.grid()
plt.legend()
plt.xlabel('epoch')
plt.ylabel(curve)
plt.show()
if only_final_value:
return train_values[-1], valid_values[-1]
return train_values, valid_values
def plot_stacked_hist(v0, v1, labels=None, bins=20):
"""Plots two histograms on top of one another"""
if labels is None:
labels = ['0', '1']
bins = np.histogram(np.hstack((v0, v1)), bins=bins)[1]
data = [v0, v1]
plt.hist(data, bins, label=labels, alpha=0.8, color=['r','g'],
normed=True, edgecolor='none')
plt.legend()
def get_margins(X, Y, load_dir, arch, sn_fc=True, beta=1.):
"""Compute margins for X (margin = last layer difference between true label and
highest value that's not the true label)
"""
num_classes = len(np.unique(Y))
embeddings = get_embedding(X, load_dir, arch, num_classes=num_classes, beta=beta, sn_fc=sn_fc)
# embeddings = np.exp(embeddings)
# embeddings /= np.sum(embeddings, 1).reshape(-1, 1)
margins = np.zeros(len(embeddings))
print('Sanity check: accuracy is %.5f.'
%(np.sum(np.argmax(embeddings, 1) == Y)/float(len(Y))))
for i in range(len(embeddings)):
if Y[i] == 0:
margins[i] = np.max(embeddings[i][1:])
elif Y[i] == len(embeddings[0])-1:
margins[i] = np.max(embeddings[i][:-1])
else:
margins[i] = np.max([np.max(embeddings[i][:int(Y[i])]),
np.max(embeddings[i][int(Y[i])+1:])])
return margins
def get_weights(load_dir, arch, num_classes=10, beta=1., num_channels=3,
load_epoch=None, verbose=False, gpu_prop=0.2):
"""Grab all weights from graph (also works for spectrally-normalized models)"""
if load_epoch is None:
load_epoch = latest_epoch(load_dir)
else:
load_epoch = np.min((latest_epoch(load_dir), load_epoch))
tf.reset_default_graph()
graph = graph_builder_wrapper(arch, save_dir=load_dir, num_classes=num_classes, beta=beta,
update_collection='_', num_channels=num_channels)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=gpu_prop))) as sess:
graph['saver'].restore(sess, os.path.join(load_dir, 'checkpoints', 'epoch%s'%(load_epoch)))
d = {v.name:sess.run(v) for v in tf.trainable_variables()}
for v in tf.get_collection('w_after_sn'):
key = v.name.split('_SN')[0]+':0'
d[key] = sess.run(v)
if verbose:
dim = d[key].shape[-1]
print('%30s with shape %15s and top 2 sv(s): %s' \
%(key, np.shape(d[key]),
', '.join(['%.2f'%(i) for i in np.linalg.svd(d[key].reshape(-1, dim))[1][:2]])))
return d
def l2_norm(input_x, epsilon=1e-12):
"""normalize input to unit norm"""
input_x_norm = input_x/(tf.reduce_sum(input_x**2)**0.5 + epsilon)
return input_x_norm
def power_iteration_tf(W, Ip=20, seed=0):
"""Power method for computing top singular value of a matrix W
NOTE: resets tensorflow graph
"""
def power_iteration(u, w_mat, Ip):
u_ = u
for _ in range(Ip):
v_ = l2_norm(tf.matmul(u_, tf.transpose(w_mat)))
u_ = l2_norm(tf.matmul(v_, w_mat))
return u_, v_
tf.reset_default_graph()
if seed is not None:
tf.set_random_seed(seed)
u = tf.get_variable('u', shape=[1, W.shape[-1]],
initializer=tf.truncated_normal_initializer(), trainable=False)
w_mat = tf.Variable(W)
u_hat, v_hat = power_iteration(u, w_mat, Ip)
sigma = tf.matmul(tf.matmul(v_hat, w_mat), tf.transpose(u_hat))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
return sess.run(sigma).reshape(-1)
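# Quick sanity check (a sketch; the matrix here is illustrative):
# W = np.random.randn(64, 32).astype(np.float32)
# print(power_iteration_tf(W, Ip=50))            # approximate top singular value
# print(np.linalg.svd(W, compute_uv=False)[0])   # exact top singular value for comparison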
def power_iteration_conv_tf(W, length=28, width=28, stride=1, Ip=20, seed=0, padding='SAME'):
"""Power method for computing top singular value of a convolution operation using W.
NOTE: resets tensorflow graph
Also, note that if you set stride to 1 when the network is trained with stride = 2,
the output will be twice as large as expected
"""
u_dims = [1, length, width, W.shape[-2]]
def power_iteration_conv(u, w_mat, Ip):
u_ = u
for _ in range(Ip):
v_ = l2_norm(tf.nn.conv2d(u_, w_mat, strides=[1, stride, stride, 1], padding=padding))
u_ = l2_norm(tf.nn.conv2d_transpose(v_, w_mat, u_dims,
strides=[1, stride, stride, 1], padding=padding))
return u_, v_
tf.reset_default_graph()
if seed is not None:
tf.set_random_seed(seed)
# Initialize u (our "eigenimage")
u = tf.get_variable('u', shape=u_dims,
initializer=tf.truncated_normal_initializer(), trainable=False)
w_mat = tf.Variable(W)
u_hat, v_hat = power_iteration_conv(u, w_mat, Ip)
z = tf.nn.conv2d(u_hat, w_mat, strides=[1, stride, stride, 1], padding=padding)
sigma = tf.reduce_sum(tf.multiply(z, v_hat))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
return sess.run(sigma).reshape(-1)
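# Call sketch (kernel shape and image size are illustrative): a 3x3 kernel with 3 input
# and 16 output channels, applied to 28x28 inputs with stride 1.
# W_conv = np.random.randn(3, 3, 3, 16).astype(np.float32)
# print(power_iteration_conv_tf(W_conv, length=28, width=28, stride=1))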
def get_overall_sn(load_dir, arch, num_classes=10, verbose=True, return_snorms=False,
num_channels=3, seed=0, load_epoch=None, beta=1., gpu_prop=0.2):
"""Gets the overall spectral norm of a network with specified weights"""
d = get_weights(load_dir, arch, num_classes=num_classes, gpu_prop=gpu_prop,
num_channels=num_channels, load_epoch=load_epoch, beta=beta)
s_norms = {}
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=gpu_prop))) as sess:
conv_ops_dict = {'/'.join(i.name.split('/')[:-1]): {'stride':int(i.get_attr('strides')[1]),
'padding':i.get_attr('padding'),
'length':i.inputs[0].get_shape().as_list()[1],
'width':i.inputs[0].get_shape().as_list()[2],
'seed':seed}
for i in sess.graph.get_operations()
if 'Conv2D' in i.name and 'gradients' not in i.name}
for i in sorted(d.keys()):
if 'weights' in i:
if 'conv' in i:
key = '/'.join(i.split('/')[:-1])
s_norms[i] = power_iteration_conv_tf(d[i], **conv_ops_dict[key])[0]
else:
s_norms[i] = power_iteration_tf(d[i], seed=seed)[0]
if verbose:
print('%20s with spectral norm %.4f'%(i, s_norms[i]))
if return_snorms:
return s_norms
return np.prod(list(s_norms.values()))
from IPython.display import clear_output, Image, display, HTML
def strip_consts(graph_def, max_const_size=32):
"""Strip large constant values from graph_def."""
strip_def = tf.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add()
n.MergeFrom(n0)
if n.op == 'Const':
tensor = n.attr['value'].tensor
size = len(tensor.tensor_content)
if size > max_const_size:
tensor.tensor_content = "<stripped %d bytes>"%size
return strip_def
def show_graph(graph_def, max_const_size=32):
"""Visualize TensorFlow graph."""
if hasattr(graph_def, 'as_graph_def'):
graph_def = graph_def.as_graph_def()
strip_def = strip_consts(graph_def, max_const_size=max_const_size)
code = """
<script>
function load() {{
document.getElementById("{id}").pbtxt = {data};
}}
</script>
<link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
<div style="height:600px">
<tf-graph-basic id="{id}"></tf-graph-basic>
</div>
""".format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
iframe = """
<iframe seamless style="width:1200px;height:620px;border:0" srcdoc="{}"></iframe>
""".format(code.replace('"', '"'))
display(HTML(iframe)) |
# Generated by Django 3.1 on 2021-04-24 13:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tour', '0005_player_booked_sent_off'),
]
operations = [
migrations.RemoveField(
model_name='player',
name='team_id',
),
migrations.AddField(
model_name='player',
name='team_id',
field=models.ManyToManyField(related_name='players', to='tour.kfupm_team'),
),
]
|
# Calculate tips
subtotal, gratuity = input("Please enter subtotal and gratuity: ").split()
subtotal, gratuity = float(subtotal), float(gratuity)
gratuity = (subtotal * gratuity) / 100
total = subtotal + gratuity
print("The gratuity is", format(gratuity, ".2f"), "and the total is", format(total, ".2f"))
|
from apps.settings.common import env
DATABASES = {
'default': env.db(default='postgres://django:django@db:5432/django'),
}
|
import os, sys
import json
import logging
import re
from uuid import uuid4, UUID
from datetime import datetime
from base64 import urlsafe_b64encode, urlsafe_b64decode
from harvest.drivers.cognito_driver import CognitoDriver
from harvest.controllers.role_controller import RoleController
COGNITO_USER_POOL_ID="ap-northeast-1_KrEPljcrG"
class ProjectUserController():
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
logger.setLevel(logging.DEBUG)
def __init__(self, host, port):
self.role = RoleController(host, port)
def set_user_id(self, user_id):
self.user_id = str(user_id)
self.role.set_user_id(str(user_id))
def set_project_id(self, project_id):
if len(project_id) == 22:
project_id = str(UUID(bytes=urlsafe_b64decode(project_id + "==")))
self.project_id = str(project_id)
self.role.set_project_id(str(project_id))
def list(self):
if not self.project_id:
raise ValueError
ret = []
cognito = CognitoDriver(COGNITO_USER_POOL_ID)
users = self.role.list_users()
self.logger.info("specfied project users: " + str(users))
for user in users:
detail = cognito.show_user(user["user_id"])[0]["Attributes"]
self.logger.info("fetched cognito user data: " + str(detail))
detail = { item["Name"]: item["Value"] for item in detail }
self.logger.info("proccesing cognito user data: " + str(detail))
if not "status" in user:
user.update({"status": "active"})
ret.append(
{
"user_id": user["user_id"],
"username": detail["preferred_username"],
"email": detail["email"],
"role": user["role"],
"status": user["status"]
}
)
self.logger.info("users list: " + str(detail))
return ret
def show(self, user_id):
ret = ""
detail = {}
cognito = CognitoDriver(COGNITO_USER_POOL_ID)
user = self.role.show(user_id)
if not user:
# TODO: raise a custom exception
raise ValueError
detail = cognito.show_user(user_id)[0]["Attributes"]
self.logger.info("fetched cognito user data: " + str(detail))
detail = { item["Name"]: item["Value"] for item in detail }
self.logger.info("proccesing cognito user data: " + str(detail))
if not detail:
# TODO: raise a custom exception
raise ValueError
ret = {
"user_id": user_id,
"username": detail["preferred_username"],
"email": detail["email"],
"role": user["role"]
}
self.logger.info("user detail info: " + str(ret))
return ret
def join(self, user_id=None):
if not self.project_id:
raise ValueError
if not user_id:
user_id = self.user_id
return self.role.create(
"worker",
user_id = user_id,
project_id = self.project_id,
status = "request"
)
def accept(self, user_id=None):
if not self.project_id:
raise ValueError
if not user_id:
user_id = self.user_id
return self.role.update_status(
status = "active",
user_id = user_id
)
def reject(self, user_id=None):
if not self.user_id or not self.project_id:
raise ValueError
if not user_id:
user_id = self.user_id
return self.role.update_status(
status = "reject",
user_id = user_id
)
def delete(self, user_id=None):
if not self.user_id or not self.project_id:
raise ValueError
if not user_id:
user_id = self.user_id
return self.role.delete(user_id)
def update_role(self, role, user_id=None):
if not self.project_id:
raise ValueError
if not user_id:
user_id = self.user_id
return self.role.update(role, user_id)
|
"""
Basic module to handle launching the relevant tests from within the script.
"""
import unittest
import os
def run_tests(test_subdir):
if '*' in test_subdir or not test_subdir:
test_subdir = '' # Run all tests in directory.
print("Running %s tests" % (test_subdir if test_subdir else 'all'))
base_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
tests = unittest.TestLoader().discover('redditdownloader/tests/'+test_subdir, top_level_dir=base_dir)
stats = unittest.TextTestRunner(verbosity=1, buffer=True).run(tests)
return len(stats.failures)
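# Usage sketch (the test subdirectory name is an assumption):
# failures = run_tests('integration')
# if failures:
#     exit(1)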
|
def move_zeros(array):
# Move all zeros to the end while preserving the order of the other elements.
# Booleans are not treated as zeros; sorted() is stable, so relative order is kept.
# Example: move_zeros([False, 1, 0, 1, 2, 0, 1, 3, "a"]) -> [False, 1, 1, 2, 1, 3, "a", 0, 0]
array = sorted(array, key=lambda x: x == 0 and not isinstance(x, bool))
return array |
from collections import namedtuple
import itertools
FindAndReplaceData = namedtuple('FindAndReplaceData', ['orig', 'sub'])
remove_stop_pattern = r'\b(a |an |the |in |of |)*\b{0}\b( scan| image)*\b'
remove_stop = [FindAndReplaceData(remove_stop_pattern.format(device), device) for device in ['ct', 'mri', 'mra', 'cta']]
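# e.g. remove_stop_pattern.format('ct') yields r'\b(a |an |the |in |of |)*\bct\b( scan| image)*\b',
# so phrases like "a ct scan" are normalized to just "ct".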
find_and_replace_collection = [FindAndReplaceData(r'magnetic resonance imaging', 'mr'),
FindAndReplaceData(r'magnetic resonance angiography', 'mr'),
FindAndReplaceData(r'mri', 'mr'),
]+remove_stop
imaging_devices = ['ct', 'mr', 'us', 'xr', 'angiogram', 'mammograph']
diagnosis = ['fracture',
'cyst',
'cerebral',
'acute',
'syndrome',
'cell',
'disease',
'artery',
'carcinoma',
'malformation',
'tumor',
'pulmonary',
'lymphoma',
'venous',
'abscess',
'meningioma',
'aortic',
'sclerosis',
'astrocytoma',
'spinal',
'infarction',
'renal',
'multiple',
'glioblastoma',
'multiforme',
'aneurysm',
'thrombosis',
'intracranial',
'arteriovenous',
'posterior',
'adenocarcinoma',
'bone',
'dural',
'secondary',
'schwannoma',
'nerve',
'cancer',
'diffuse',
'carotid',
'sinus',
'central',
'metastatic',
'cerebellar',
'appendicitis',
'kidney',
'hernia',
'epidermoid',
'infarct',
'lung',
'orbital',
'glioma',
'histiocytosis',
'vein',
'dysplasia',
'arachnoid',
'subclavian',
'hemangioma',
'cavernous',
'cord',
'breast',
'epidural',
'dermoid',
'tear',
'osteomyelitis',
'lipoma',
'anterior',
'dissection',
'hemorrhage',
'esophageal',
'embolism',
'ependymoma',
'hematoma',
'adenoma',
'disseminated',
'encephalomyelitis',
'tuberous',
'medulloblastoma',
'b-cell',
'injury',
'arch']
locations = ['lung, mediastinum, pleura',
'skull and contents',
'genitourinary',
'spine and contents',
'musculoskeletal',
'heart and great vessels',
'vascular and lymphatic',
'gastrointestinal',
'face, sinuses, and neck',
'breast']
planes = ['axial',
'longitudinal',
'coronal',
'lateral',
'ap',
'sagittal',
'mammo - mlo',
'pa',
'mammo - cc',
'transverse',
'mammo - mag cc',
'frontal',
'oblique',
'3d reconstruction',
'decubitus']
all_tags = list(itertools.chain(imaging_devices, diagnosis, locations, planes))
models_folder = "C:\\Users\\Public\\Documents\\Data\\2018\\models"
dbg_file_csv_train = 'C:\\Users\\Public\\Documents\\Data\\2018\\VQAMed2018Train\\VQAMed2018Train-QA.csv'
dbg_file_xls_train = 'C:\\Users\\Public\\Documents\\Data\\2018\\VQAMed2018Train\\VQAMed2018Train-QA_post_pre_process_intermediate.xlsx' # "'C:\\\\Users\\\\avitu\\\\Documents\\\\GitHub\\\\VQA-MED\\\\VQA-MED\\\\Cognitive-LUIS-Windows-master\\\\Sample\\\\VQA.Python\\\\dumped_data\\\\vqa_data.xlsx'
dbg_file_xls_processed_train = 'C:\\Users\\Public\\Documents\\Data\\2018\\VQAMed2018Train\\VQAMed2018Train-QA_post_pre_process.xlsx'
train_embedding_path = 'C:\\Users\\Public\\Documents\\Data\\2018\\VQAMed2018Train\\VQAMed2018Train-images\\embbeded_images.hdf'
images_path_train = 'C:\\Users\\Public\\Documents\\Data\\2018\\VQAMed2018Train\\VQAMed2018Train-images'
dbg_file_csv_validation = 'C:\\Users\\Public\\Documents\\Data\\2018\\VQAMed2018Valid\\VQAMed2018Valid-QA.csv'
dbg_file_xls_validation = 'C:\\Users\\Public\\Documents\\Data\\2018\\VQAMed2018Valid\\VQAMed2018Valid-QA_post_pre_process_intermediate.xlsx'
dbg_file_xls_processed_validation = 'C:\\Users\\Public\\Documents\\Data\\2018\\VQAMed2018Valid\\VQAMed2018Valid-QA_post_pre_process.xlsx'
validation_embedding_path = 'C:\\Users\\Public\\Documents\\Data\\2018\\VQAMed2018Valid\\VQAMed2018Valid-images\\embbeded_images.hdf'
images_path_validation = 'C:\\Users\\Public\\Documents\\Data\\2018\\VQAMed2018Valid\\VQAMed2018Valid-images'
dbg_file_csv_test = 'C:\\Users\\Public\\Documents\\Data\\2018\\VQAMed2018Test\\VQAMed2018Test-QA.csv'
dbg_file_xls_test = 'C:\\Users\\Public\\Documents\\Data\\2018\\VQAMed2018Test\\VQAMed2018Test-QA_post_pre_process_intermediate.xlsx'
dbg_file_xls_processed_test = 'C:\\Users\\Public\\Documents\\Data\\2018\\VQAMed2018Test\\VQAMed2018Test-QA_post_pre_process.xlsx'
test_embedding_path = 'C:\\Users\\Public\\Documents\\Data\\2018\\VQAMed2018Test\\VQAMed2018Test-images\\embbeded_images.hdf'
images_path_test = 'C:\\Users\\Public\\Documents\\Data\\2018\\VQAMed2018Test\\VQAMed2018Test-images'
DataLocations = namedtuple('DataLocations', ['data_tag', 'raw_csv', 'raw_xls', 'processed_xls', 'images_path'])
train_data = DataLocations('train', dbg_file_csv_train, dbg_file_xls_train, dbg_file_xls_processed_train,
images_path_train)
validation_data = DataLocations('validation', dbg_file_csv_validation, dbg_file_xls_validation,
dbg_file_xls_processed_validation, images_path_validation)
test_data = DataLocations('test', dbg_file_csv_test, dbg_file_xls_test, dbg_file_xls_processed_test, images_path_test)
|
import random
import pygame
import Global_vars
def make_platforms(height, platform_level):
# Append randomly sized platforms until the platform budget in Global_vars is used up.
# 'height' (baseline y) and 'platform_level' (output list) were undefined in the original
# snippet, so they are taken as parameters here.
i = 0
x = 0
width = []
while Global_vars.platforms > 0:
width.append(random.randrange(Global_vars.SCREEN_WIDTH - 200, Global_vars.SCREEN_WIDTH + 400))
gap = random.randrange(120, Global_vars.player_jump_length)
if len(width) > 2:
x += width[i] + gap
else:
x = width[i] + gap
y = random.randrange(height, height + 50)
if x <= width[i]:
x += Global_vars.player_jump_length
platform_level.append([width[-1], height, x, y])
Global_vars.platforms -= 1
i += 1
return platform_level
|
#%%
import os
import pickle
import time
from pathlib import Path
import colorcet as cc
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from graspologic.plot import pairplot
from sparse_decomposition import SparseComponentAnalysis
from sparse_decomposition.utils import calculate_explained_variance_ratio
from sparse_new_basis.data import load_scRNAseq
from sparse_new_basis.plot import savefig, set_theme
from sparse_new_basis.R import setup_R, sma_R_epca
import networkx as nx
from graspologic.utils import get_lcc, pass_to_ranks, to_laplace
set_theme()
epca = setup_R()
def sma_R(*args, **kwargs):
return sma_R_epca(epca, *args, **kwargs)
fig_dir = Path("sparse_new_basis/results/maggot_1.0")
def stashfig(name, *args, **kwargs):
savefig(fig_dir, name, *args, **kwargs)
#%%
data_dir = Path("sparse_new_basis/data/maggot")
g = nx.read_weighted_edgelist(
data_dir / "G.edgelist", create_using=nx.DiGraph, nodetype=int
)
meta = pd.read_csv(data_dir / "meta_data.csv", index_col=0)
adj = nx.to_numpy_array(g, nodelist=meta.index)
adj, inds = get_lcc(adj, return_inds=True)
meta = meta.iloc[inds]
hemisphere = "left"
if hemisphere == "left":
meta["inds"] = np.arange(len(meta))
meta = meta[meta["left"]]
inds = meta["inds"]
adj = adj[np.ix_(inds, inds)]
# TODO just try with one hemisphere
#%%
preprocessing = "ptr"
if preprocessing == "ptr":
adj_to_embed = pass_to_ranks(adj)
elif preprocessing == "sqrt":
pass # TODO
else:
adj_to_embed = adj
lap_to_embed = to_laplace(adj_to_embed, form="R-DAD")
#%%
currtime = time.time()
n_components = 20
gamma = 0.5 * np.sqrt(len(lap_to_embed) * n_components)
Z, B, Y, info = sma_R(
lap_to_embed, k=n_components, gamma=gamma, epsilon=1e-5, return_all=True
)
print(f"{time.time() - currtime} elapsed to run SMA")
#%%
# from sparse_new_basis.plot import CLASS_COLOR_DICT
from src.visualization import CLASS_COLOR_DICT
from graspologic.plot import pairplot
left_latent = Z
print("Plotting pairplot...")
currtime = time.time()
columns = [f"Dimension {i+1}" for i in range(left_latent.shape[1])]
plot_df = pd.DataFrame(data=left_latent, columns=columns, index=meta.index)
plot_df = pd.concat((plot_df, meta), axis=1)
pg = sns.PairGrid(
data=plot_df,
hue="merge_class",
palette=CLASS_COLOR_DICT,
vars=columns[:6],
corner=True,
)
pg.map_lower(sns.scatterplot, s=10, linewidth=0, alpha=0.7)
pg.set(xticks=[], yticks=[])
stashfig("left_sma_R")
print(f"{time.time() - currtime} elapsed to plot pairplot")
#%%
right_latent = Y
print("Plotting pairplot...")
currtime = time.time()
columns = [f"Dimension {i+1}" for i in range(left_latent.shape[1])]
plot_df = pd.DataFrame(data=right_latent, columns=columns, index=meta.index)
plot_df = pd.concat((plot_df, meta), axis=1)
pg = sns.PairGrid(
data=plot_df,
hue="merge_class",
palette=CLASS_COLOR_DICT,
vars=columns[:6],
corner=True,
)
pg.map_lower(sns.scatterplot, s=10, linewidth=0, alpha=0.7)
pg.set(xticks=[], yticks=[])
stashfig("right_sma_R")
print(f"{time.time() - currtime} elapsed to plot pairplot")
#%%
embedding = Z
hue = "merge_class"
palette = CLASS_COLOR_DICT
p_nonzeros = []
all_component_neurons = []
for i, dim in enumerate(embedding.T[:10]):
dim = dim.copy()
# this just makes the biggest entries in abs value positive
if dim[np.argmax(np.abs(dim))] < 0:
dim = -dim
sort_inds = np.argsort(dim)
plot_df = pd.DataFrame()
plot_df["dim"] = dim[sort_inds]
plot_df["ind"] = range(len(plot_df))
plot_df["labels"] = meta[hue].values[sort_inds]
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
sns.scatterplot(
x="ind",
y="dim",
hue="labels",
data=plot_df,
ax=ax,
palette=palette,
legend=False,
s=15,
alpha=0.8,
linewidth=0,
)
nonzero_inds = np.nonzero(dim)[0]
p_nonzero = len(nonzero_inds) / len(dim)
p_nonzeros.append(p_nonzero)
zero_inds = np.nonzero(dim[sort_inds] == 0)[0]
min_cutoff = min(zero_inds)
max_cutoff = max(zero_inds)
# min_cutoff = max(np.nonzero(dim[sort_inds][: len(dim) // 2])[0])
# max_cutoff = min(np.nonzero(dim[sort_inds][len(dim) // 2 :])[0])
line_kws = dict(color="grey", linewidth=1, linestyle="--")
ax.axhline(0, **line_kws)
ax.axvline(min_cutoff, **line_kws)
ax.axvline(max_cutoff, **line_kws)
ax.set(
xticks=[],
yticks=[],
ylabel=f"Component {i+1}",
xlabel="Index (sorted)",
title=r"$p$ nonzero = " + f"{p_nonzero:.2}",
)
component_neurons = meta.iloc[nonzero_inds].index
all_component_neurons.append(component_neurons)
stashfig(f"Z_component_{i}")
#%%
from src.pymaid import start_instance
from src.visualization import plot_3view
def make_figure_axes():
fig = plt.figure(figsize=(15, 5))
# for the morphology plotting
margin = 0.01
# gap = 0.02
n_col = 3
morpho_gs = plt.GridSpec(
1,
3,
figure=fig,
wspace=0,
hspace=0,
left=margin,
right=margin + 3 / n_col,
top=1 - margin,
bottom=margin,
)
morpho_axs = np.empty((1, 3), dtype="O")
i = 0
for j in range(3):
ax = fig.add_subplot(morpho_gs[i, j], projection="3d")
morpho_axs[i, j] = ax
ax.axis("off")
return fig, morpho_axs
skeleton_color_dict = dict(
zip(meta.index, np.vectorize(CLASS_COLOR_DICT.get)(meta["merge_class"]))
)
start_instance()
start = 5
for i, component_neurons in enumerate(all_component_neurons[start:10]):
i += start
print(i)
fig, axs = make_figure_axes()
plot_3view(
list(component_neurons), axs[0, :], palette=skeleton_color_dict, row_title="",
)
stashfig(f"Z_component_{i+1}_morphology")
|
import threading
import adns
import logging
import Queue
# rr = adns.rr.A
def worker(q, c, f, n):
print q.qsize()
with n:
if q.empty():
print "Nothing in the queue"
f.set()
else:
host = q.get()
# print host
c.submit(host, adns.rr.A)
n.notify()
class WorkerProcess(threading.Thread):
def __init__(self, params):
threading.Thread.__init__(self)
self._start_flag = params["start"]
self._jobs = params["jobs"]
print self.name, ":queue length:", self._jobs.qsize()
self._adns = params["adns"]
self._new_job = params["notify"]
self._finished = threading.Event()
def run(self):
self._start_flag.wait()
while not self._finished.is_set():
worker(self._jobs, self._adns, self._finished, self._new_job)
def main():
# logger = threading.log_to_stderr()
# logger.setLevel(logging.INFO)
f = open("/home/edward/Projects/test_python_adns/top-1m.csv", "r")
urls = [line.split(',')[1].strip() for line in f.readlines()]
f.close()
num = 500
# Put urls into queue
urls = urls[:num]
q = Queue.Queue()
count = 0
for each in urls:
q.put(each)
count += 1
print count
print q.qsize()
# other initialization
resolved_list = []
start_flag = threading.Event()
adns_state = adns.init()
new_job = threading.Condition()
# build process pools
opts = {"start": start_flag, "jobs": q, "adns": adns_state, "notify": new_job}
pool = [WorkerProcess(opts) for i in xrange(1)]
for each in pool:
each.start()
start_flag.set()
while True:
with new_job:
# TODO: Figure out why adns library crashed even with a lock
# It is not successfully passed into threads
for query in adns_state.completed():
answer = query.check()
resolved_list.append(answer)
print answer
new_job.wait()
if len(resolved_list) == count:
break
for each in pool:
each.join()
if __name__ == "__main__":
main()
|
#!/usr/bin/python3
# This script builds decision trees for the moons (make_moons) dataset in Chapter 6, exercises 7 and 8.
# exercise7
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=10000, noise=0.4, random_state=42)
from sklearn.model_selection import train_test_split, GridSearchCV
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42)
print(X_train.shape)
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, f1_score, classification_report
dtree = DecisionTreeClassifier(random_state=42)
max_leaf_nodes_list = list(range(2, 11))
parameters = {"max_leaf_nodes": max_leaf_nodes_list}
gc = GridSearchCV(dtree, param_grid=parameters, cv=10)
gc.fit(X_train, y_train)
# the best params
print(gc.best_params_)
# find out the best_dtree_model
best_dtree_model = gc.best_estimator_
y_pred = best_dtree_model.predict(X_test)
print(accuracy_score(y_test, y_pred))
# exercise8
from sklearn.model_selection import ShuffleSplit
n_trees = 1000
n_instances = 100
mini_sets = []
rs = ShuffleSplit(n_splits=n_trees, test_size=len(
X_train) - n_instances, random_state=42)
for mini_train_index, mini_test_index in rs.split(X_train):
X_mini_train = X_train[mini_train_index]
y_mini_train = y_train[mini_train_index]
mini_sets.append((X_mini_train, y_mini_train))
import numpy as np
from sklearn.base import clone
forest = [clone(gc.best_estimator_) for _ in range(n_trees)]
accuracy_scores = []
for tree, (X_mini_train, y_mini_train) in zip(forest, mini_sets):
tree.fit(X_mini_train, y_mini_train)
y_pred = tree.predict(X_test)
accuracy_scores.append(accuracy_score(y_test, y_pred))
print(np.mean(accuracy_scores))
# Build an (n_trees, n_test) array: each row holds one tree's predictions for every test instance.
Y_pred = np.empty([n_trees, len(X_test)], dtype=np.uint8)
for tree_index, tree in enumerate(forest):
    Y_pred[tree_index] = tree.predict(X_test)
from scipy.stats import mode
y_pred_majority_votes, n_votes = mode(Y_pred, axis=0)
y_pred_majority_votes = y_pred_majority_votes[0]
print(accuracy_score(y_test, y_pred_majority_votes))
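# For comparison (added, not part of the exercise): scikit-learn's BaggingClassifier wraps the
# same idea of training many trees on small random subsets. A hedged sketch; note that newer
# scikit-learn versions (>=1.2) name the first keyword `estimator` rather than `base_estimator`,
# so it is passed positionally here.
from sklearn.ensemble import BaggingClassifier
bag_clf = BaggingClassifier(
    gc.best_estimator_,       # the tuned tree as the base estimator
    n_estimators=n_trees,     # 1000 trees
    max_samples=n_instances,  # 100 training instances per tree
    bootstrap=False,          # sample without replacement, like ShuffleSplit above
    random_state=42,
)
bag_clf.fit(X_train, y_train)
print(accuracy_score(y_test, bag_clf.predict(X_test)))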
|
"""Add roles, users tables
Revision ID: d2a98f6fa6e6
Revises: 1b7bd3b61539
Create Date: 2020-02-09 17:55:38.749448
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'd2a98f6fa6e6'
down_revision = '1b7bd3b61539'
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name'),
)
op.create_table(
'users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.Text(), nullable=True),
sa.Column('password', sa.Text(), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('confirmed_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
)
op.create_table(
'user_roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('role_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(('role_id',), ['roles.id'], ),
sa.ForeignKeyConstraint(('user_id',), ['users.id'], ),
sa.PrimaryKeyConstraint('id'),
)
def downgrade():
op.drop_table('user_roles')
op.drop_table('users')
op.drop_table('roles')
|
"""Given a string s and a list of words words, where each word is the same length,
find all starting indices of substrings in s that is a concatenation of every word
in words exactly once.
For example, given s = "dogcatcatcodecatdog" and words = ["cat", "dog"],
return [0, 13], since "dogcat" starts at index 0 and "catdog" starts at index 13.
Given s = "barfoobazbitbyte" and words = ["dog", "cat"],
return [] since there are no substrings composed of "dog" and "cat" in s.
The order of the indices does not matter."""
def permute(A: set):
if len(A)==1:
return [list(A)]
permutations = []
for x in A:
for y in permute(A-{x}):
permutations.append([x]+y)
return permutations
def concat_substr(s, words):
ind = []
perms = permute(set(words))
for perm in perms:
sub = "".join(perm)
if s.find(sub) >= 0:
ind.append(s.find(sub))
else:
return []
return ind
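def concat_substr_window(s, words):
    """Hedged alternative sketch (added; not the original solution above): a word-by-word
    sliding window that reports every starting index and also handles repeated words,
    which the permutation-based approach above does not attempt.
    e.g. concat_substr_window("dogcatcatcodecatdog", ["cat", "dog"]) -> [0, 13]"""
    from collections import Counter
    if not s or not words:
        return []
    k, n = len(words[0]), len(words)
    total = k * n
    need = Counter(words)
    result = []
    for start in range(len(s) - total + 1):
        seen = Counter()
        for j in range(start, start + total, k):
            word = s[j:j + k]
            if word not in need:
                break
            seen[word] += 1
            if seen[word] > need[word]:
                break
        else:
            # every word matched exactly the required number of times
            result.append(start)
    return result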
if __name__ == "__main__":
assert concat_substr("dogcatcatcodecatdog", ["cat", "dog"]) == [0, 13]
assert concat_substr("barfoobazbitbyte", ["cat", "dog"]) == []
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
def q1():
# 1
data = pd.read_csv('./specs/marks_question1.csv')
# print(data)
axe = plt.subplot(1, 1, 1, facecolor='white')
x = np.arange(0, 12, 1)
y1 = data.get('midterm')
y2 = data.get('final')
axe.set_title('a')
axe.plot(x, y1)
axe.plot(x, y2)
# plt.show()
    # 2
    # Reshape the two mark series into column vectors for scikit-learn.
    a = np.array(y1).reshape(-1, 1)
    b = np.array(y2).reshape(-1, 1)
    model = LinearRegression()
    model.fit(a, b)
print("final = " + str(model.coef_) + ' * midterm + ' + str(model.intercept_))
print(model.predict([[86]]))
q1()
|
from django.core.management.base import BaseCommand, CommandError
import logging
import urllib2
import json
from .basic import *
logger = logging.getLogger('django')
token_file = "/root/wechat_server/files/token_file.txt"
wechat_token = "mytoken"
class Command(BaseCommand):
def handle(self, *args, **options):
logger.info("Start to send message ...")
wechat = WechatBasic(token=wechat_token)
ACCESS_TOKEN = wechat.get_access_token()
data = {"touser":"orJ1ruFQPF98rVxIFsP4HDpqt-V4", "msgtype":"text", "text": { "content":"Hello World" }}
data_json = json.dumps(data)
url = "https://api.weixin.qq.com/cgi-bin/message/custom/send?access_token=" + ACCESS_TOKEN
logger.info(url)
request = urllib2.Request(url, data_json, {'Content-Type': 'application/json'})
f = urllib2.urlopen(request)
response = f.read()
logger.info(response)
        f.close()
|
# encoding:utf-8
import time
import readwrite_operate as rwo
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from Queue import Queue
# Global variables (required by the program logic) ---------------------------------------------
time_list = [] # accumulates the runtime of each core stage of the algorithm
remove_list = [] # records nodes removed during optimization so they can be added back later
fileinfo = {} # graph file name
# Global variables (kept global to save computation time) ---------------------------------------------
dist = {} # shortest-path lookup table for the graph; global to avoid recomputing it
G = nx.Graph() # the graph; stored globally after reading the file to avoid re-reading it
def BC_SP(graph):
'''
提取自BC中的最短路径计算代码
计算图中任意两个节点对间的最短路径
:param graph:
:return:list: dict:最短路径表,字典嵌套字典,字典值是每个节点到所有节点的最短路径
'''
dist_temp = {}
for s in graph.nodes(): # 依次将节点作为源节点
s_dist = dict.fromkeys(graph, None) # 初始化最短路径为‘None’
s_dist[s] = 0 # 源节点的距离初始化为0
Q = Queue() # 用于广度优先搜索的队列
Q.put(s) # 将源节点加入到队列中
while not Q.empty(): # 如果队列Q不为空
v = Q.get() # 从队列Q中取出一个节点v
for w in graph.neighbors(v): # 取出v的所有邻接点
if s_dist[w] == None: # 如果s到w还没计算过,没计算过的用‘None’表示
s_dist[w] = s_dist[v] + 1 # 则初始化d(s,w)=d(s,v)+1
Q.put(w) # 取出w放入队列Q中
dist_temp[s] = s_dist
return dist_temp
def Vertex_SP(dict):
'''
计算单源节点的最短路径和
:param dict:该节点到其他节点的最短路径(字典值)
:return:最短路径和(int)
'''
sum_temp = 0
for i in dict:
if dict[i] != None:
sum_temp += dict[i]
return sum_temp
def Network_SP(dist):
'''
计算图最短路径和
:param dist: 最短路径表
:return: 图的最短路径和(int)
'''
sum_dist = 0
for key in dist:
sum_single = Vertex_SP(dist[key])
sum_dist += sum_single
return sum_dist
def getSPIG_NCC_VAR(graph, node):
'''
计算删除节点前后的最短路径增加值
备注:增加值=删后值-删前值 为计算方便,上述公式
等价于 增加值 = 删后值 - (删前值 - 该节点为源节点的最短路径)
等价于 增加值 = 删后值 + 该节点为源节点的最短路径
:param graph:图
:param node:删除的节点
:return:dict:单个的节点三属性字典
'''
M = nx.Graph() #复制图,并用复制后的图进行删点操作,避免后续需要重复读图
M.add_edges_from(graph.edges())
node_info = {}.fromkeys(['NCC','VAR','SPIG']) #NCC(number_connected_components):子连通分量个数.VAR(variance):方差])
start_time = time.time()
M.remove_node(node) #删除对应节点
NCC_value = nx.number_connected_components(M)
if (NCC_value == 1):
SPG_before_instead = Vertex_SP(dist[node]) # 计算删前SPG代替值 该节点为源节点的最短路径
dist_after_remove = BC_SP(M) # 计算删后的SPG值
SPG_after = Network_SP(dist_after_remove)
SPIG_value = SPG_after + SPG_before_instead #SDG增加值的替代值
node_info.update(NCC=1, VAR=0 , SPIG=SPIG_value)
elif (NCC_value > 1): #产生多个子连通分量时,就记录子连通分量个数NCC和各个子连通分量节点数的方差VAR
NN = []
a = nx.connected_components(M)
for i in a:
NN.append(len(i)) #将各个子连通分量节点数加入到数组NN中便于计算方差
VAR_value = np.var(NN) #计算节点数方差
node_info.update(NCC=NCC_value, VAR=VAR_value, SPIG=0)
end_time = time.time()
run_time = end_time - start_time
time_list.append(run_time)
return node_info
def optimize_graph(graph):
'''
图优化
删除图中度为1的节点,以及度为2且其两个邻接点间有边的节点
:param graph: 待处理图
:return: graph:处理后的图
'''
s_time = time.time()
for i in range(0, graph.number_of_nodes()):
if graph.degree(i) == 1:
graph.remove_node(i)
remove_list.append(i)
e_time = time.time()
r_time = e_time - s_time
time_list.append(r_time)
return graph
def SNV_valuefill(dic):
'''
补全SNV_list中被优化节点数据'0'
:param dic: SNV_dic
:return: dic:补全的SNV_dic
'''
for key in remove_list: dic[key] = 0
return dic
def norm_merge_3att(dict):
'''
将节点属性中的三个值归一化,这里的归一化是排除0的
:param dict: 节点属性字典
:return: dict:归一化的节点属性字典
'''
min_ncc = 999999999999999
min_var = 999999999999999
min_spig = 999999999999999
max_ncc = 0
max_var = 0
max_spig = 0
norm_merge = {}
for key in dict:
node_ncc = dict[key]['NCC']
if max_ncc < node_ncc: max_ncc = node_ncc
if min_ncc > node_ncc and node_ncc != 0: min_ncc = node_ncc
if dict[key]['VAR'] != 0: #VAR是负相关,取其倒数参与归一化,该操作是为排除0不可以做除数的影响
node_var = 1/float(dict[key]['VAR'])
if max_var < node_var: max_var = node_var
if min_var > node_var and node_var != 0: min_var = node_var
node_spig = dict[key]['SPIG']
if max_spig < node_spig: max_spig = node_spig
if min_spig > node_spig and node_spig != 0: min_spig = node_spig
min_spig = min_spig - 1 #这个需要注意:最小值再下探1,主要是为了避免SPIG最小的节点与度为1的节点混在一起
min_ncc = min_ncc - 1
min_var = min_var - 1
ncc_gap = max_ncc - min_ncc
var_gap = max_var - min_var
spig_gap = max_spig - min_spig
for key in dict:
temp = {}.fromkeys(['NCC','VAR','SPIG'])
node_ncc = dict[key]['NCC']
if dict[key]['VAR']!=0: #VAR是负相关,取其导数参与归一化,该操作是为排除0不可以做除数的影响
node_var = 1/float(dict[key]['VAR'])
else:node_var = 0
node_spig = dict[key]['SPIG']
if node_ncc!=0:
temp['NCC'] = (float(node_ncc) - min_ncc) / ncc_gap
else:temp['NCC'] = 0.0
if node_var!=0:
temp['VAR'] = (float(node_var) - min_var) / var_gap
else:temp['VAR'] = 0.0
if node_spig!=0:
temp['SPIG'] = (float(node_spig) - min_spig) / spig_gap
else:temp['SPIG'] = 0.0
norm_merge[key] = 0.2 * temp['NCC'] + 0.8 * (temp['VAR'] + temp['SPIG'])
return norm_merge
def file(name):
path = '../dataset/'+ name +'.csv'
fileinfo['name'] = name + '_SNV-BC.csv' #将SPIG最终文件名存储到全局变量fileinfo中便于后期调用
return path
def BC_filtering(graph):
filtering_list = []
C = nx.centrality.betweenness_centrality(graph, normalized=False)
sorted_key_list = sorted(C, key=lambda x:C[x]) # 顺序排列,从小到大排列
# sorted_key_list = sorted(C, key=lambda x: C[x], reverse=True) # 倒序排列,从大到小
sorted_dict = map(lambda x: {x: C[x]}, sorted_key_list)
if graph.number_of_nodes() <2000 :
n = graph.number_of_nodes() - 50 #最低节点数不低于50
else:
n = int( graph.number_of_nodes() * (1-0.025)) #超过2000个节点的网络,取前2.5%的节点
for i in range(0, n):
s = int(sorted_dict[i].keys()[0])
remove_list.append(s)
return filtering_list
if __name__ == '__main__':
G = rwo.read_graph_csv(file('coauthor'))
G = optimize_graph(G) #优化图
BC_filtering(G) #BC过滤
print remove_list #优化、BC过滤后删除的节点存为全局变量,便于后续寻找节点
dist = BC_SP(G) #计算优化后的最短路径表,并作为全局常量,避免后续重复计算
node_3att = {}
for key in dist: #遍历计算各个节点三属性值
if key not in remove_list:
node_3att[key] = getSPIG_NCC_VAR(G, key)
else:continue
node_att = norm_merge_3att(node_3att) #归一化、合并节点三属性值
rwo.save_nodeatt(node_3att,'att.csv')
SNV_list = SNV_valuefill(node_att) #补缺删除的节点属性值为0
rwo.save_SNV(SNV_list, fileinfo['name']) #SPIG值保存到文件
run = sum(time_list)
print run
|
"""
246. Strobogrammatic Number
A strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).
Write a function to determine if a number is strobogrammatic. The number is represented as a string.
For example, the numbers "69", "88", and "818" are all strobogrammatic.
"""
def isStrobogrammatic(num):
"""
:type num: str
:rtype: bool
"""
l = len(num)
if l == 1:
if num[0] == "0" or num[0] == "1" or num[0] == "8":
return True
else:
return False
for i in range(0, (l // 2) + 1):
if num[i] == num[l - 1 - i]:
# print ("#1 ", num[i], " - ", num[l - 1 - i])
if num[i] == "0" or num[i] == "1" or num[i] == "8":
continue
else:
return False
else:
# print ("#2 ", num[i], " - ", num[l - 1 - i])
if (num[i] == "9" and num[l - 1 - i] == "6") or (num[i] == "6" and num[l - 1 - i] == "9"):
continue
else:
return False
return True
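# A more compact variant (added for illustration, not part of the original solution):
# map each digit to its 180-degree rotation and compare the string pairwise from both ends.
def isStrobogrammaticPairs(num):
    pairs = {"0": "0", "1": "1", "8": "8", "6": "9", "9": "6"}
    i, j = 0, len(num) - 1
    while i <= j:
        if num[i] not in pairs or pairs[num[i]] != num[j]:
            return False
        i += 1
        j -= 1
    return True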
s = "96801866069810896"
print(isStrobogrammatic(s))
s = "659"
print(isStrobogrammatic(s))
s = "1"
print(isStrobogrammatic(s))
s = "11"
print(isStrobogrammatic(s))
s = "2"
print(isStrobogrammatic(s))
s = "88"
print(isStrobogrammatic(s))
s = "818"
print(isStrobogrammatic(s))
s = "181"
print(isStrobogrammatic(s))
s = "182"
print(isStrobogrammatic(s))
|
#!/usr/bin/python
#-*- coding:utf-8 -*-
import sys
from gensim.models import word2vec
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) # used to check how far processing has progressed
sentences = word2vec.Text8Corpus('data/wakati_bocchan.txt')
model = word2vec.Word2Vec(sentences, size=200)
def s(posi, nega=[], n=5):
cnt = 1
result = model.most_similar(positive = posi, negative = nega, topn = n)
    print 'rank', 'word', 'similarity'
for r in result:
print cnt, r[0], r[1]
cnt += 1
|
from kafka import KafkaProducer
producer = KafkaProducer(bootstrap_servers=['localhost:9092'], api_version=(0, 10))
csvFilePath = 'stat.csv'
data = []
with open(csvFilePath, "rb") as csvfile:
data = csvfile.readlines()
for rec in data:
print("sending record",rec)
producer.send('csvdata', rec) #bytes sent
print("sent record")
|
'''
SERVO CONTROL SAMPLE THROUGH THE USE OF 4 BUTTONS (2 for each servo, on pins 15, 4, 22 and 23)
The servos are controlled through PWM-enabled pins 5 and 21.
'''
import machine
import time
#button declarations:
button0 = machine.Pin(15, machine.Pin.IN, machine.Pin.PULL_UP)
button1 = machine.Pin(4, machine.Pin.IN, machine.Pin.PULL_UP)
button2 = machine.Pin(22, machine.Pin.IN, machine.Pin.PULL_UP)
button3 = machine.Pin(23, machine.Pin.IN, machine.Pin.PULL_UP)
button_list = [button0, button1, button2, button3]
#servo declarations:
servo0 = machine.PWM(machine.Pin(5), freq=50)
servo1 = machine.PWM(machine.Pin(21), freq=50)
servo_list = [servo0, servo1]
#Increments or decreases the value of both servo duties depending on which button was pressed
def servo_control(button_up0, button_down0, button_up1, button_down1, duty0, duty1):
if is_pressed(button_up0) and duty0 < 115:
duty0 += 1
if is_pressed(button_down0) and duty0 > 40:
duty0 -= 1
if is_pressed(button_up1) and duty1 < 115:
duty1 += 1
if is_pressed(button_down1) and duty1 > 40:
duty1 -= 1
return duty0, duty1
#checks if button is pressed, returns True if button is pressed, else return False
def is_pressed(button):
if button.value() == 0:
return True
else:
return False
#Outputs the status of buttons
def check_buttons(button_list):
for button in button_list:
if is_pressed(button):
            print('button on ' + str(button) + ' was pressed')
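#Hedged helper (added, not in the original sample): maps an angle in degrees to a PWM duty value,
#assuming the 40..115 duty range used above spans roughly 0..180 degrees at 50 Hz on this board.
def angle_to_duty(angle):
    angle = max(0, min(180, angle))
    return int(40 + (115 - 40) * angle / 180)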
#Main program loop
def loop():
#initial value for the servos' positions
duty0 = 77
duty1 = 77
while True: #This is the actual loop
check_buttons(button_list) #This can be commented
duty0, duty1 = servo_control(button0, button1, button2, button3, duty0, duty1)
servo0.duty(duty0)
servo1.duty(duty1)
        time.sleep(0.03) #This can be modified to increase/decrease the speed at which the servos rotate
#call to loop
loop() |
import time
import unittest
from vika import Vika
from . import TEST_API_BASE, TEST_API_TOKEN, TEST_TABLE
class TestUpdateRecords(unittest.TestCase):
def setUp(self):
vika = Vika(TEST_API_TOKEN)
vika.set_api_base(TEST_API_BASE)
self.dst = vika.datasheet(TEST_TABLE)
def test_record_update(self):
        # Update a single field
record = self.dst.records.get(title="无人生还")
record.title = "无人生还2"
self.assertEqual(record.title, "无人生还2")
time.sleep(1)
        # Update multiple fields
record = self.dst.records.get(title="无人生还2")
r = record.update({
"title": '无人生还3',
"comment": '真好看'
})
self.assertEqual(r.title, "无人生还3")
self.assertEqual(r.comment, "真好看")
time.sleep(1)
        # Update multiple records
self.dst.records.filter(
title="无人生还3").update(title="无人生还4")
record = self.dst.records.get(title="无人生还4")
self.assertEqual(record.title, "无人生还4")
time.sleep(1)
def tearDown(self):
self.dst.records.filter(title="无人生还4").update(title="无人生还")
if __name__ == '__main__':
unittest.main()
|
from django.urls import path, include
from django.conf.urls import url
from . import views
urlpatterns = [
path('study/', views.study, name='uploader-uploaderStudy'),
path('studyInfo', views.studyInfo, name='uploader-uploaderStudyInfo'),
path('info/', views.info, name='uploader-uploaderInfo'),
path('extraInfo/', views.extraInfo, name='uploader-uploaderExtraInfo'),
path('finalPrompt/', views.finalPrompt, name='uploader-uploaderFinalPrompt'),
path('uploading/', views.upload, name='uploader-uploading'),
path('error/', views.error, name='uploader-uploaderError'),
path('success/', views.success, name='uploader-uploaderSuccess')
] |
#!/usr/bin/env python
import re
import sys
rgx_num = re.compile('^(\d+)$')
rgx_val = re.compile('^(\S+) -> (\S+)$')
rgx_and = re.compile('^(\S+) AND (\S+) -> (\S+)$')
rgx_or = re.compile('^(\S+) OR (\S+) -> (\S+)$')
rgx_lsh = re.compile('^(\S+) LSHIFT (\d+) -> (\S+)$')
rgx_rsh = re.compile('^(\S+) RSHIFT (\d+) -> (\S+)$')
rgx_not = re.compile('^NOT (\S+) -> (\S+)$')
class Instr(object):
def __init__(self, dependson, attribute=None):
self.dep = dependson
self.attr = int(attribute) if attribute is not None else None
self.value = None
def __repr__(self):
return str(self.getvalue())
class NumValue(Instr):
def getvalue(self):
return self.attr
class Value(Instr):
def getvalue(self):
if self.value is None:
self.value = self.dep[0].getvalue()
return self.value
class And(Instr):
def getvalue(self):
if self.value is None:
self.value = self.dep[0].getvalue() & self.dep[1].getvalue()
return self.value
class Or(Instr):
def getvalue(self):
if self.value is None:
self.value = self.dep[0].getvalue() | self.dep[1].getvalue()
return self.value
class LShift(Instr):
def getvalue(self):
if self.value is None:
self.value = self.dep[0].getvalue() << self.attr
return self.value
class RShift(Instr):
def getvalue(self):
if self.value is None:
self.value = self.dep[0].getvalue() >> self.attr
return self.value
class Not(Instr):
def getvalue(self):
if self.value is None:
self.value = (~self.dep[0].getvalue()) & (2**16-1)
return self.value
wires = {}
head = None
with open(sys.argv[1]) as f:
for line in f.readlines():
instr = None
match = rgx_val.match(line.strip())
if match:
prov = match.group(2)
instr = Value([match.group(1)])
wires[prov] = instr
match = rgx_and.match(line.strip())
if match:
prov = match.group(3)
instr = And([match.group(1), match.group(2)])
wires[prov] = instr
match = rgx_or.match(line.strip())
if match:
prov = match.group(3)
instr = Or([match.group(1), match.group(2)])
wires[prov] = instr
match = rgx_lsh.match(line.strip())
if match:
prov = match.group(3)
instr = LShift([match.group(1)], match.group(2))
wires[prov] = instr
match = rgx_rsh.match(line.strip())
if match:
prov = match.group(3)
instr = RShift([match.group(1)], match.group(2))
wires[prov] = instr
match = rgx_not.match(line.strip())
if match:
prov = match.group(2)
instr = Not([match.group(1)])
wires[prov] = instr
#print "Adding provider for wire %s" % prov
wires['b'] = Value(['16076'])
for k,v in wires.items():
for idx,d in enumerate(v.dep):
match = rgx_num.match(d)
if match:
v.dep[idx] = NumValue([], match.group(1))
else:
v.dep[idx] = wires[d]
print "The values are: %s" % wires
print "The value of 'a' is: %s" % wires['a']
|
from game import Game
# first take input
print("Please enter two sizes line by line")
x = int(input())
y = int(input())
tot = x * y
print("Please enter bomb amount. Must be less than %d." % (tot))
num_bombs = int(input())
# generate the game
g = Game(x, y, num_bombs)
g.print_board()
result = 0
while result == 0:
print("Please enter coordinates line by line.")
print("Marking/Unmarking? Answer y/n.")
mark = input()
print("Row then column")
x = int(input())
y = int(input())
if mark == 'n':
result = g.make_move(x, y)
else:
g.mark_bomb(x, y)
if result == -1:
print("BOMB DETONATED!")
g.print_board()
solved = False
if g.check_win() == False:
print("Haven't found all the bombs yet.")
else:
print("You win!")
break
g.print_board()
|
# 01_fruits.py
# Image classification with a CNN
# Dataset: images crawled from Baidu image-search results
# Contents: 1036 fruit images
# in 5 classes (apple 288, banana 275, grape 216,
# orange 276, pear 251)
################## Data preprocessing ##################
import os
name_dict = {"apple": 0, "banana": 1, "grape": 2,
"orange": 3, "pear": 4}
data_root_path = "data/fruits/" # 数据集所在目录
# 测试集、训练集文件路径
test_file_path = data_root_path + "test.txt"
train_file_path = data_root_path + "train.txt"
name_data_list = {} # 记录每个类别有那些图片
def save_name_data_list(path, # 图像路径
name): # 类别名称
if name not in name_data_list: # 字典中没有该类别
img_list = [] # 创建空列表
img_list.append(path) # 将图片存入列表
name_data_list[name] = img_list # 存入字典
else: # 字典中已经存在该类别
name_data_list[name].append(path)
# 遍历数据集中的每个子目录,取出图像样本路径
# 并写入name_data_list字典
dirs = os.listdir(data_root_path)
for d in dirs:
full_path = data_root_path + d # 子目录完整路径
# print(full_path)
if os.path.isdir(full_path): # 是一个目录
imgs = os.listdir(full_path) # 列出所有文件
for img in imgs:
img_full_path = full_path + "/" + img
save_name_data_list(img_full_path,
d) # 目录名称即类别名称
else: # 文件
pass
# 遍历name_data_list字典,划分测试集、训练集
with open(test_file_path, "w") as f:
pass
with open(train_file_path, "w") as f:
pass
# 遍历字典
for name, img_list in name_data_list.items():
i = 0
num = len(img_list) # 获取每个列别图片数量
print("%s: %d张" % (name, num))
for img in img_list:
line = "%s\t%d\n" % (img, name_dict[name])
if i % 10 == 0: # 划分到测试集合
with open(test_file_path, "a") as f:
f.write(line)
else: # 划分到训练集
with open(train_file_path, "a") as f:
f.write(line)
i += 1
print("数据预处理完成.")
############### Model definition / training ##################
import paddle
import paddle.fluid as fluid
import numpy
import sys
import os
from multiprocessing import cpu_count
import time
import matplotlib.pyplot as plt
def train_mapper(sample):
"""
根据传入样本路径、类别,读取图像数据
:param sample: 一行文本样本, 元组(文件路径,类别)
:return: 返回图像数据、类别
"""
img, label = sample # img为路径, lable为类别
if not os.path.exists(img):
print(img, "文件不存在")
# 读取文件内容
img = paddle.dataset.image.load_image(img)
# 将图像设置为固定大小
img = paddle.dataset.image.simple_transform(
im=img, # 原始图像
resize_size=100, # 图像缩放大小
crop_size=100, # 裁剪图像大小
is_color=True, # 彩色图像
is_train=True) # 训练模型(做随机裁剪)
# 归一化处理,将每个像素值转换为0~1之间
img = img.astype("float32") / 255.0
return img, label
# 从训练集中读取数据
def train_r(train_list, buffred_size=1024):
def reader():
with open(train_list, "r") as f:
lines = f.readlines()
for line in lines:
# 去除空格和换行符
line = line.strip().replace("\n", "")
img_path, lab = line.split("\t")
yield img_path, int(lab)
return paddle.reader.xmap_readers(
train_mapper, # 接收reader读取的数据二次处理
reader, # 原始读取器
cpu_count(), # 线程数量
buffred_size) # 缓冲区大小
# 定义reader
BATCH_SIZE = 32 # 批次大小
trainer_reader = train_r(train_list=train_file_path)
random_train_reader = paddle.reader.shuffle(
reader=trainer_reader,
buf_size=1300) # 随机读取器
batch_train_reader = paddle.batch(
random_train_reader,
batch_size=BATCH_SIZE)
# 占位符
image = fluid.layers.data(name="image",
shape=[3, 100, 100],
dtype="float32")
label = fluid.layers.data(name="label",
shape=[1],
dtype="int64")
def create_CNN(image, type_size):
"""
搭建卷积神经网络
:param image: 图像数据(经过归一化处理)
:param type_size:类别数量
:return: 一组分类概率
"""
# 第一组 conv/pool/dropout
conv_pool_1 = fluid.nets.simple_img_conv_pool(
input=image, # 输入图像数据
filter_size=3, # 卷积核大小
num_filters=32, # 卷积核数量
pool_size=2, # 2*2区域做池化
pool_stride=2, # 池化步长
act="relu") # 激活函数
drop = fluid.layers.dropout(x=conv_pool_1,
dropout_prob=0.5)
# 第二组 conv/pool/dropout
conv_pool_2 = fluid.nets.simple_img_conv_pool(
input=drop, # 前一个dropout输出作为输入
filter_size=3, # 卷积核大小
num_filters=64, # 卷积核数量
pool_size=2, # 2*2区域做池化
pool_stride=2, # 池化步长
act="relu") # 激活函数
drop = fluid.layers.dropout(x=conv_pool_2,
dropout_prob=0.5)
# 第三组 conv/pool/dropout
conv_pool_3 = fluid.nets.simple_img_conv_pool(
input=drop, # 前一个dropout输出作为输入
filter_size=3, # 卷积核大小
num_filters=64, # 卷积核数量
pool_size=2, # 2*2区域做池化
pool_stride=2, # 池化步长
act="relu") # 激活函数
drop = fluid.layers.dropout(x=conv_pool_3,
dropout_prob=0.5)
# fc
fc = fluid.layers.fc(input=drop,
size=512, # 神经元数量
act="relu")
# dropout
drop = fluid.layers.dropout(x=fc,
dropout_prob=0.5)
# 输出层(使用softmax作为激活函数的fc)
predict = fluid.layers.fc(input=drop,
size=type_size,
act="softmax")
return predict
# 创建VGG模型
def vgg_bn_drop(image, type_size):
def conv_block(ipt, num_filter, groups, dropouts):
return fluid.nets.img_conv_group(
input=ipt, # 输入图像, 格式[N,C,H,W]
pool_stride=2,#池化步长
pool_size=2, #池化区域大小
conv_num_filter=[num_filter] * groups,
conv_filter_size=3, #卷积核大小
conv_act="relu",#激活函数
conv_with_batchnorm=True,#是否采用BN
pool_type="max")#池化类型
conv1 = conv_block(image, 64, 2, [0.0, 0.0])
conv2 = conv_block(conv1, 128, 2, [0.0, 0.0])
conv3 = conv_block(conv2, 256, 3, [0.0, 0.0, 0.0])
conv4 = conv_block(conv3, 512, 3, [0.0, 0.0, 0.0])
conv5 = conv_block(conv4, 512, 3, [0.0, 0.0, 0.0])
drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)
fc1 = fluid.layers.fc(input=drop,
size=512,
act=None)
bn = fluid.layers.batch_norm(input=fc1,
act="relu")#批量归一化
drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.0)
fc2 = fluid.layers.fc(input=drop2,
size=512,
act=None)
predict = fluid.layers.fc(input=fc2,
size=type_size,
act="softmax")
return predict
# 调用函数,创建模型
# predict = create_CNN(image=image, type_size=5)
predict = vgg_bn_drop(image=image, type_size=5)
# 损失函数
cost = fluid.layers.cross_entropy(
input=predict,
label=label)
avg_cost = fluid.layers.mean(cost)
# 准确率
accuracy = fluid.layers.accuracy(input=predict,
label=label)
# 优化器
optimizer = fluid.optimizer.Adam(
learning_rate=0.001)
optimizer.minimize(avg_cost) # 优化目标函数
# 执行器
place = fluid.CUDAPlace(0) # GPU训练
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# feeder
feeder = fluid.DataFeeder(
feed_list=[image, label],
place=place)
costs = [] # 记录损失函数值
accs = [] # 记录准确度
times = 0
batchs = [] # 迭代次数
# 开始训练
for pass_id in range(5):
train_cost = 0 # 临时变量,记录损失值
train_acc = 0
times += 1
for batch_id, data in enumerate(batch_train_reader()):
train_cost, train_acc = exe.run(
program=fluid.default_main_program(),
feed=feeder.feed(data), # 喂入参数
fetch_list=[avg_cost, accuracy])
# 打印损失值、准确率
if batch_id % 20 == 0:
print("pass_id:%d, batch_id:%d, cost:%f, acc:%f"
% (pass_id, batch_id,
train_cost[0], train_acc[0]))
accs.append(train_acc[0])
costs.append(train_cost[0])
batchs.append(times)
# 保存模型
model_save_dir = "./model/fruits/"
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
fluid.io.save_inference_model(
dirname=model_save_dir, #保存路径
feeded_var_names=["image"],#预测时传入参数
target_vars=[predict],#预测结果
executor=exe)#执行器
print("模型保存成功:", model_save_dir)
# 训练过程可视化
plt.title("training", fontsize=24)
plt.xlabel("iter", fontsize=20)
plt.ylabel("cost/acc", fontsize=20)
plt.plot(batchs, costs, color='red', label="Training Cost")
plt.plot(batchs, accs, color='green', label="Training Acc")
plt.legend()
plt.grid()
plt.savefig("train.png")
plt.show()
#################### Prediction #####################
from PIL import Image
# 加载图像数据
def load_img(path): # path为图像路径
img = paddle.dataset.image.load_and_transform(
path, 100, 100, False).astype("float32")
img = img / 255.0 # 归一化
return img
# 定义执行器
place = fluid.CPUPlace()
infer_exe = fluid.Executor(place) #用于预测的执行器
infer_imgs = [] # 存放待预测的图像数据
test_img = "apple_1.png" # 待测试的图像
infer_imgs.append(load_img(test_img))#将图像数据存入待预测列表
infer_imgs = numpy.array(infer_imgs)#将列表转换为数组
# 加载模型
infer_program, feed_target_names, fetch_targets = \
fluid.io.load_inference_model(model_save_dir,
infer_exe)
# 执行预测
results = infer_exe.run(infer_program,
feed={feed_target_names[0]:infer_imgs},
fetch_list=fetch_targets)
# print(results)
result = numpy.argmax(results[0][0])
for k, v in name_dict.items():
if result == v:
print("预测结果:", k)
# 显示待预测的图像
img = Image.open(test_img)
plt.imshow(img)
plt.show()
|
from nltk import Tree
def rec(t):
if(type(t)==Tree or type(t)==list):
for x in t: rec(x)
else: return t
def rec_p(t):
if(type(t)==Tree or type(t)==list):
for x in t: rec_p(x)
else: print(t)
|
from cmc.worker import WorkerCircularQueue, Worker
class TestWorkerCircularQueue:
def test_wcq_init(self):
k = WorkerCircularQueue()
assert len(k) == 0
def test_wcq_len(self):
wq = WorkerCircularQueue()
for i in range(10):
wq.add(Worker())
assert len(wq) == 10
def test_wcq_next_add(self):
wq = WorkerCircularQueue()
for i in range(10):
wq.add(Worker())
last = wq.next()
for _ in range(1000):
t = wq.next()
assert len(wq) == 10
assert last != t
last = t
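# Hedged reference sketch (added; not the real cmc.worker implementation): a minimal circular
# queue that would satisfy the tests above, included only to illustrate the expected behaviour.
class _SketchWorkerCircularQueue:
    def __init__(self):
        self._items = []
        self._pos = 0
    def __len__(self):
        return len(self._items)
    def add(self, item):
        self._items.append(item)
    def next(self):
        # Cycle through items in insertion order, wrapping around at the end.
        item = self._items[self._pos % len(self._items)]
        self._pos += 1
        return item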
|
s=int(input("enter the first interval"))
a=int(input("enter the second interval"))
for i in range(s,a+1):
if i%2!=0:
print(i)
|
import config
import telebot
from telebot import types
from bot import utils
from bot.call_types import CallTypes
from bot.states import States
from backend.models import BotUser, ShopCard
from backend.templates import Messages, Keys
def start_command_handler(bot: telebot.TeleBot, message):
chat_id = message.chat.id
keyboard = types.InlineKeyboardMarkup()
uz_language_button = utils.make_inline_button(
text=Keys.LANGUAGE.get(BotUser.Lang.UZ),
CallType=CallTypes.Language,
lang=BotUser.Lang.UZ,
)
ru_language_button = utils.make_inline_button(
text=Keys.LANGUAGE.get(BotUser.Lang.RU),
CallType=CallTypes.Language,
lang=BotUser.Lang.RU,
)
en_language_button = utils.make_inline_button(
text=Keys.LANGUAGE.get(BotUser.Lang.EN),
CallType=CallTypes.Language,
lang=BotUser.Lang.EN,
)
keyboard.add(uz_language_button)
keyboard.add(ru_language_button)
keyboard.add(en_language_button)
text = Messages.START_COMMAND_HANDLER.text
bot.send_message(chat_id, text,
reply_markup=keyboard)
def language_call_handler(bot: telebot.TeleBot, call):
call_type = CallTypes.parse_data(call.data)
lang = call_type.lang
chat_id = call.message.chat.id
user, success = BotUser.objects.get_or_create(chat_id=chat_id)
user.lang = lang
user.save()
ShopCard.shop_cards.get_or_create(user=user)
if success:
registration_start_handler(bot, call.message)
else:
menu_command_handler(bot, call.message)
def registration_start_handler(bot: telebot.TeleBot, message):
chat_id = message.chat.id
user = BotUser.objects.get(chat_id=chat_id)
lang = user.lang
user.bot_state = States.SEND_CONTACT
user.save()
text = Messages.REGISTRATION_INFO.get(lang)
contact_button = types.KeyboardButton(
text=Keys.SEND_CONTACT.get(lang),
request_contact=True
)
keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
keyboard.add(contact_button)
bot.send_message(chat_id, text,
reply_markup=keyboard)
def menu_command_handler(bot: telebot.TeleBot, message):
chat_id = message.chat.id
user = BotUser.objects.get(chat_id=chat_id)
lang = user.lang
products_button = utils.make_inline_button(
text=Keys.PRODUCTS.get(lang),
CallType=CallTypes.Products,
)
shop_card_button = utils.make_inline_button(
text=Keys.SHOP_CARD.get(lang),
CallType=CallTypes.ShopCard,
)
orders_button = utils.make_inline_button(
text=Keys.ORDERS.get(lang),
CallType=CallTypes.Orders,
page=1,
)
profile_button = utils.make_inline_button(
text=Keys.PROFILE.get(lang),
CallType=CallTypes.Profile,
)
info_button = utils.make_inline_button(
text=Keys.INFO.get(lang),
CallType=CallTypes.Info,
)
menu_keyboard = types.InlineKeyboardMarkup()
menu_keyboard.add(products_button, shop_card_button)
menu_keyboard.add(orders_button, profile_button)
menu_keyboard.add(info_button)
if user.type == BotUser.Type.ADMIN:
admin_button = utils.make_inline_button(
text=Keys.ADMIN.get(lang),
CallType=CallTypes.Admin,
)
menu_keyboard.add(admin_button)
text = Messages.MENU.get(lang)
if hasattr(message, 'edited'):
bot.edit_message_text(
chat_id=chat_id,
text=text,
message_id=message.id,
reply_markup=menu_keyboard,
)
else:
bot.send_message(chat_id, text,
reply_markup=menu_keyboard)
def back_call_handler(bot: telebot.TeleBot, call):
call.message.edited = True
menu_command_handler(bot, call.message)
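# Hedged sketch (added, not part of the original module): one way these handlers could be wired
# to a TeleBot instance, since they all take (bot, message/call) rather than the single argument
# pyTelegramBotAPI passes to callbacks. The token attribute and the filter lambda are
# placeholders; the real project presumably routes callback queries through CallTypes.parse_data.
#   from functools import partial
#   bot = telebot.TeleBot(config.TOKEN)  # assumes the token lives in config
#   bot.register_message_handler(partial(start_command_handler, bot), commands=['start'])
#   bot.register_message_handler(partial(menu_command_handler, bot), commands=['menu'])
#   bot.register_callback_query_handler(partial(language_call_handler, bot),
#                                       func=lambda call: call.data.startswith('Language'))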
|
# Generated by Django 2.0 on 2018-01-06 16:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('song_load', '0003_song_image_url'),
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('album_id', models.CharField(max_length=200)),
],
),
migrations.RemoveField(
model_name='song',
name='image_url',
),
]
|
#!/usr/bin/env python
#import vonvif
#import onvif_wrap
from vonvif import request_cmd
__all__ =["vonvif"]
|
# coding: utf-8
# _Author:_ __Jesús Casado__ <br> _Revised:_ __16/07/2018__ <br>
#
# __Introduction__<br>
# This code contains the functions written to separate a hydrograph into its two components: quick flow (surface runoff) and slow flow (baseflow).
#
# __Functions__<br>
# Event:
# - `extract_event`: clips the discharge data frame to the given dates, computes the slope and curvature, and optionally plots their evolution over time.
#
# Recession curves:
# - `k_recession`: computes *k*, the recession constant, of a discharge recession event.
# - `Qt`: computes baseflow from the recession law, given the discharge at some time and *k*.
#
# Hydrograph separation:
# - `key_points2`: finds the start, peak, inflexion and end points of each surface-runoff event in a hydrograph.
# - `mml`: separates the hydrograph with the local-minima method.
# - `mlr`: separates the hydrograph with the straight-line method.
# - `mbf`: separates the hydrograph with the fixed-base method.
# - `mpv`: separates the hydrograph with the variable-slope method.
#
# __Things to fix__ <br>
#
# __Index__<br>
#
# __[1. Event](#1.-Event)__<br>
#
# __[2. Recession curves](#2.-Recession-curves)__<br>
#
# __[3. Hydrograph separation](#3.-Hydrograph-separation)__<br>
# [3.1. Key points](#3.1.-Key-points)<br>
# [3.2. Separation methods](#3.2.-Separation-methods)<br>
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
plt.style.use('seaborn-whitegrid')
import pandas as pd
import datetime
from calendar import monthrange
from math import ceil, floor, log, exp
import os
import itertools,operator
import sys
sys._enablelegacywindowsfsencoding() # read files whose names contain special characters
#from scipy.interpolate import griddata
# ## 1. Event
# In[4]:
def extract_event(data, start=None, end=None, fillna=False, smoothing=None, window=12, alpha=0.2, max_freq=50,
plot=True):
"""Sobre una serie de datos de caudal de cualquier resolución, se extraen la serie correspondiente a las fechas indicadas, se suaviza la serie mediante una media móvil, y se calculan tres nuevos campos sobre la serie suavizada: 'dQ' es la derivada del caudal con respecto al tiempo (la pendiente del hidrograma), 'm' es el signo de la pendiente (-1:decreciente, 0:nulo, 1:creciente), y 'd2Q' es la segunda derivada del caudal respecto al tiempo (la curvatura del hidrograma).
Parámetros:
-----------
data: data frame. Las filas representan pasos temporales y tendrá al menos un campo llamado 'Q' con la serie de caudal
start: datetime. Inicio de la serie a extraer. Si es 'None' no se recorta.
end: datetime. Fin de la serie a extraer. Si es 'None' no se recorta.
fillna: boolean. Si se quieren rellenar los huecos mediante interpolación lineal
smoothing: boolean. Si se quiere suavizar la serie temporal. Por defecto 'None', no se suaviza la serie. Los métodos disponibles son la media móvil ('ma'), exponencial ('ex') o transformada de Fourier ('ft'). Dependiendo de qué método se escoja, deberá definirse otro parámetro: 'window', 'alpha' o 'max_freq'
respectivamente
window: integer. Ancho de la ventan móvil. Por defecto se ponen 12 para hacer la media horaria a partir de
datos cincuminutales
alpha: float. Define la fuerza que se le da al dato original frente al suavizado. Debe estar entre 0 y 1; si es
1 no hay suavizado; si es 0 se obtiene una serie constante con el valor inicial de la serie original
max_freq: integer. Máxima frecuencia de la descomposición de Fourier que se utilizará en la inversión de la
transformada para suavizar la serie. Cuanto menor sea este parámetro mayor será el suavizado
plot: boolean. Si se quieren plotear las series de caudal, pendiente, signo de pendiente y curvatura
Salidas:
--------
flood: data frame. Corte de 'data' con los tres nuevos campos: 'dQ', 'm' ,'d2Q'
Si 'plot=True' se muestra una figura con cuatro gráficos de las series de caudal, pendiente, signo de la pendiente y
curvatura.
"""
if start and end:
# Extraer la serie para el evento
flood = data.loc[start:end,:].copy()
else:
flood = data.copy()
start, end = flood.index[0], flood.index[-1]
# Corregir huecos en la serie mediante interpolación lineal
if fillna == True:
# Pasos temporales
At = (flood.index[1] - flood.index[0]).total_seconds()
delta = datetime.timedelta(seconds=At)
# 'Data frame' auxiliar con los huecos en la serie
aux = flood.loc[np.isnan(flood.Q),:]
# Definir el inicio ('d1') y fin ('d2') de cada hueco
for i, d1 in enumerate(aux.index):
for j, d2 in enumerate(aux.index[i+1:]):
if int((d2 - d1).total_seconds() / 3600) != j + 1:
d2 = aux.index[j]
break
# Paso previo ('st') y posterior ('en') al hueco
st, en = d1 - datetime.timedelta(seconds=At), d2 + datetime.timedelta(seconds=At)
tt = (en - st).total_seconds() / At
# Caudal observado en 'st' y 'en'
Qst, Qen = flood.Q[st], flood.Q[en]
# Interpolación lineal
for t, d in enumerate(data[d1:d2].index):
flood.Q[d] = Qst + (Qen - Qst) * (t + 1) / tt
# Suavizado de la serie
# ---------------------
if smoothing == 'ma':
flood = moving_average_smoothing(flood, window=window, plot=False)
serie = 'Qma'
elif smoothing == 'ex':
flood = exponential_smoothing(flood, alpha=alpha, plot=False)
serie = 'Qex'
elif smoothing == 'ft':
flood = fourier_smoothing(flood, max_freq=max_freq, plot=False)
serie = 'Qft'
else:
serie = 'Q'
# Calcular la derivada del caudal
flood['dQ'] = np.nan
for i in flood.index[1:]:
flood.loc[i, 'dQ'] = (flood[serie][i] - flood[serie][i-1])
# Calcular la derivada del caudal
flood['d2Q'] = np.nan
for i in flood.index[2:]:
flood.loc[i, 'd2Q'] = (flood.dQ[i] - flood.dQ[i-1])
# Signo de la pendiente
flood['m'] = np.nan
flood.loc[flood.dQ < 0, 'm'] = -1
flood.loc[flood.dQ == 0, 'm'] = 0
flood.loc[flood.dQ > 0, 'm'] = 1
if plot == True:
# Visualizar
fig, ax = plt.subplots(nrows=4, figsize=(18,10))
# Caudal
if smoothing != None:
ax[0].plot(flood.Q, linewidth=1, c='steelblue', label='Qo')
ymax = ceil(flood.Q[start:end].max() / 10) * 10
ax[0].plot(flood[serie], '--k', linewidth=1, label=serie)
else:
ax[0].plot(flood.Q, linewidth=1, c='steelblue', label='Q')
ymax = ceil(flood.Q[start:end].max() / 10) * 10
ax[0].set(xlim=(start, end), ylim=(0,ymax))
ax[0].set_ylabel('Q (m³/s)', fontsize=13)
ax[0].legend(fontsize=12)
# Derivada del caudal
ax[1].plot(flood.dQ, linewidth=1, c='orange', label='dQ')
ymax = ceil(flood.loc[start:end, 'dQ'].max() / 1) * 1
ymin = floor(flood.loc[start:end, 'dQ'].min() / 1) * 1
ax[1].set(xlim=(start, end), ylim=(ymin,ymax))
ax[1].set_ylabel('dQ/dt (m³/s²)', fontsize=13)
# Signo de la pendiente
ax[2].plot(flood.m, linewidth=1, c='red', label='dQ')
ax[2].set(xlim=(start, end))#, ylim=(ymin,ymax))
ax[2].set_ylabel('signo (-)', fontsize=13);
# Segunda derivada del caudal
ax[3].plot(flood.d2Q, linewidth=1, c='green', label='dQ')
ymax = ceil(flood.loc[start:end, 'd2Q'].max() / 0.25) * 0.25
ymin = floor(flood.loc[start:end, 'd2Q'].min() / 0.25) * 0.25
ax[3].set(xlim=(start, end), ylim=(ymin,ymax))
ax[3].set_ylabel('d²Q/dt² (m³/s³)', fontsize=13);
return flood
# ## 2. Recession curves
# In[2]:
def k_recession(data, tf, to=None, timesteps=96):
"""Se calcula la constante de decaimiento de la recesión de caudales.
Parámetros:
-----------
data: series. La serie de caudal a analizar
tf: datetime. Final (o punto posterior al inicio) de la curva de recesión.
to: datetime. Inicio de la curva de recesión. Opcional, si no se incluye, el 'to' se calcula como el momento 'timesteps' previos a 'tf'
timesteps: integer. Número de pasos temporales entre 'to' y 'tf'. Sólo se usa si no se especifica 'to'
Salidas:
--------
k: float. Constante de decaimiento (s)
"""
# Incremento temporal en segundos
idxf = data.index.get_loc(tf)
tf = data.index[idxf]
if to == None: # si no se introduce directamente 'to'
to = tf - timesteps
else:
idxo = data.index.get_loc(to)
to = data.index[idxo]
At = tf - to
At = At.total_seconds()
# Caudal de inicio y fin
Qo, Q = data[to], data[tf]
if Qo <= Q:
print('ERROR: caudal final superior al inicial')
return
# Constante de decaimiento exponencial
k = At / log(Qo / Q)
if k <= 0:
k = np.nan
return k
# In[4]:
def Qt(Qo, to, t, k):
"""Esta función genera el caudal en un tiempo 't' mediante la curva maestra de recesión del caudal base.
Parámetros:
-----------
Qo: float. Valor de caudal base al inicio de la curva de recesión (m³/s).
to: datetime. Fecha y hora correspondiente a 'Qo'.
t: datetime. Fecha y hora a la que se quiere calcular el caudal.
k: constante de decaimiento exponencial (s).
Salidas:
--------
Q: float. Valor de caudal base en el momento 't' (m³/s)."""
# Calcular el incremento temporal en segundos
At = t - to
At = At.total_seconds()
# Calcular el caudal en el tiempo t
Q = Qo * exp(-At / k)
return Q
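# Quick illustrative check of the recession law implemented above (added; values chosen for
# clarity, not taken from the original data): with Qo = 10 m³/s halving over one day,
# k = 86400 / log(2) s, and Qt() returns 5 m³/s one day after 'to':
#   to = datetime.datetime(2018, 1, 1); t = to + datetime.timedelta(days=1)
#   Qt(10, to, t, 86400 / log(2))  # -> 5.0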
# ## 3. Hydrograph separation
# ### 3.1. Key points
# In[ ]:
def encontrar_ml(data, A, tipo=1, m=1.5):
"""Econtrar los mínimos locales de una serie (en principio de caudal).
Entradas:
---------
data: series. Serie temporal (en principio de caudal)
A: float. Área de la cuenca hidrográfica de la estación de aforo (km²)
tipo: integer. Método empleado para la búsqueda de los mínimos locales: 1, método de los mínimos locales; 2, del tipo 1 se le eliminan los mínimos locales con un caudal superior a 'm' veces el caudal mínimo; 3, al tipo 2 se le añaden mínimos intermedios; 4, al tipo 1 se le eliminan mínimos locales que superan 'm' veces el caudal medio entre los mínimos locales adyacentes
m: float. Factor multiplicador del caudal mínimo utilizado en los tipos 2 y 3 para reducir el número de mínimos locales
Salidas:
--------
valles: list. Fechas en las que acontece un mínimo local en la serie"""
# Calcular el número de días a observar antes y después
N = int(round(0.8 * A**0.2))
# Iniciar la serie de mínimos locales con el primer intervalo (sea o no mínimo local)
valles1 = [data.index[0]]
# Encontrar mínimos locales en la serie de caudal
for t in data.index[N:-N]:
if data[t] == data[t-N:t+N].min():
valles1.append(t)
# Añadir el último punto de la serie
valles1.append(data.index[-1])
# TIPO 1
# ------
if tipo == 1:
return valles1
# TIPO 2|3
# --------
elif (tipo == 2) or (tipo == 3):
# reducir el número de mínimos locales
valles2 = [valles1[0]]
for i, valle in enumerate(valles1[1:-1]):
if data[valle] <= m * data.min():
valles2.append(valle)
valles2.append(valles1[-1])
# Calcular el caudal base mediante 'mml'
Qslow, Qquick = mml(data, valles2, plot=False)
# TIPO 2
# ------
if tipo == 2:
return valles2
# TIPO 3
# ------
else:
# agregar mínimos sobre 'valles2'
valles_add = []
for i in range(len(valles2) - 1): # intervalos de 'valles2'
# buscar mínimos locales (de 'valles') dentro del intervalo
l1, l2 = valles2[i], valles2[i+1]
laux = []
for valle in valles1:
if (valle > l1) and (valle < l2):
laux.append(valle)
# seleccionar mínimos a añadir
if len(laux) == 0:
continue
else:
for j in range(1, len(laux) - 1):
a0, a1, a2 = laux[j-1], laux[j], laux[j+1]
# añadir el punto 'a1' si su caudal real es menor que la recta de interpolación desagregada
if (data[a1] <= Qslow[a1]):
valles_add.append(a1)
continue
# añadir mínimos dentro del intervalo
if (data[a1] < data[a0]) and (data[a1] < data[a2]):
valles_add.append(a1)
# unir los nuevos mínimos a los mínimos reducidos
valles3 = valles2 + valles_add
valles3.sort()
return valles3
if tipo == 4:
valles4 = valles1.copy()
i = 1
while i < len(valles4) - 1:
v0, v1, v2 = valles4[i-1], valles4[i], valles4[i+1]
if data[v1] > m * np.mean((data[v0], data[v2])):
valles4.remove(v1)
else:
i +=1
return valles4
# In[5]:
def key_points(data, serie='Q', k=1e6, dQ_threshold=0.25, d2Q_threshold=0.1, window=8, plot=True):
"""Identifica los valles (inicio de un evento de escorrentía superficial) y los picos (caudal máximo de dicho evento) en una serie de caudal. Se considera valle al primer intervalo temporal con una pendiente del hidrograma superior a 'dQ_threshold' o una curvatura superior a 'd2Q_threshold'. Se considera un intervalo como pico si la pendiente en los 'window' intervalos anteriores es positiva o nula y en los 'window' intervalos posteriores es negativa.
Parámetros:
-----------
data: data frame. Las filas representan pasos temporales y habrá al menos cuatro campos: 'serie' contiene los datos de caudal (m³/s), 'dQ' contiene la pendiente del hidrograma(m³/s²), 'm' contiene el signo de la pendiente del hidrograma (-), y 'd2Q' con la curvatura del hidrograma (m³/s³).
serie: string. Nombre del campo que contiene la serie de caudal
dQ_threshold: float. Valor mínimo de la pendiente del hidrograma a partir del cual se considera que empieza el evento de escorrentía directa.
d2Q_threshold: float. Valor mínimo de la curvatura del hidrograma a partirl del cual se ocnsidera que empieza el evento de escorrentía directa.
window: integer. Número de pasos temporales que se tendrán en cuenta para encontrar los valles y los picos
plot:
Salidas:
--------
valleys: lista. Fechas en las que aparecen los valles.
peaks: lista. Fechas en las que ocurren los picos"""
# Listas donde se guardan las fechas de los puntos clave
valleys, peaks, recess, inflex = [], [], [], []
# intervalo temporal en segundos
At = (data.index[1] - data.index[0]).total_seconds()
# Identificar valles y picos, es decir, eventos de escorrentía directa
# --------------------------------------------------------------------
# Condición de los valles
mask = (data.dQ > dQ_threshold) | (data.d2Q > d2Q_threshold)
i = 0
while i <= data.shape[0] - window:
# Inicio del hidrograma de escorrentía directa
tv = data.iloc[i:].loc[mask].first_valid_index()
i = data.index.get_loc(tv)
if (data.m[i:i+window] >= 0).all(): # al menos 'window' intervalos tienen pendiente creciente
valleys.append(tv)
# Pico del hidrograma de escorrentía directa
for i in range(i, data.shape[0]):
if (data.m[i+1-window:i+1] >= 0).all() & (data.m[i+1:i+1+window] == -1).all():
tp = data.index[i]
Qp = data.Q[tp]
if Qp < data.loc[tv:tp+window, 'Q'].max():
tp = data.loc[tv:tp+window, 'Q'].idxmax()
peaks.append(tp)
#print('tp =', tp)
break
i += 1
# Identificar puntos de recesión e inflexión
# ------------------------------------------
for i, (tv, tp) in enumerate(zip(valleys, peaks)):
# 'tr' es el punto final del flujo rápido. Es el primer intervalo a partir del cual la pendiente de la curva
# ln(Q()/t es menor que -1/k en al menos 'w' intervalos posteriores
w = 8
# Punto final sobre el que buscar el receso
if i < len(valleys) - 1:
tf = valleys[i+1]
else:
tf = data.index[-w]
# Encontrar el 'tr' preliminar
for tr in data[tp:tf].index:
if (log(data.Q[tr+w]) - log(data.Q[tr])) / (w * At) <= -k**-1:
break
# Calcular la curva de recesión retroactivamente desde 'tr'
Qr = data.loc[tr, 'Q']
data['Qrec'] = np.nan
for t in data[tp:tr].index:
data.loc[t, 'Qrec'] = Qt(Qr, tr, t, k)
# establecer un nuevo 'tr' si se cruzan el caudal y la curva de recesión
tr_aux = data.loc[data.Qrec > data.Q, :].first_valid_index()
if tr_aux != None:
tr = tr_aux
del tr_aux
# Guardar 'tr' en la lista
recess.append(tr)
# 'ti' es el punto de inflexión de la curva descendente.
# se hace una media móvil sobre la segunda derivada del caudal para suavizar la curva
data['d2Qma'] = np.nan
for t in data[tp:tr].index:
data.loc[t, 'd2Qma'] = data.d2Q[t-3:t+3].mean()
# el punto de inflexión es el del primer valor positivo o nulo
ti = data[data['d2Qma'] >= 0].first_valid_index() # ARREGLAR ES EL PASO ANTERIOR!!!!
if ti == None:
ti = tp + (tr - tp) / 2
# Guardar 'ti' en la lista
inflex.append(ti)
# Visualizar
if plot == True:
print('Nº de valles:', len(valleys), '\tNº de picos:', len(peaks), '\t\tNº de inflexiones:', len(inflex),
'\t\tNº de recesiones:', len(recess))
plt.figure(figsize=(18,3))
plt.fill_between(data.index, data.Q, alpha=0.3, label='hidrograma')
plt.scatter(valleys, data.loc[valleys, 'Q'], color='green', s=10, label='valles')
plt.scatter(peaks, data.loc[peaks, 'Q'], color='maroon', s=10, label='picos')
plt.scatter(recess, data.loc[recess, 'Q'], marker='x', color='black', s=10, label='inflexión')
plt.scatter(inflex, data.loc[inflex, 'Q'], marker='X', color='grey', s=10, label='recesión')
plt.ylabel('Q (m³/s)', fontsize=13)
ymax = ceil(data.Q.max() / 10) * 10
plt.xlim((data.index[0], data.index[-1]))
plt.ylim((0,ymax))
plt.legend(fontsize=12);
# Crear el diccionario de salida
key_points = {'valley': valleys, 'peak': peaks, 'recession': recess, 'inflexion': inflex}
return key_points
# In[7]:
def key_points2(data, serie='Q', k=1e6, dQ_threshold=0.1, d2Q_threshold=1, multiple=None, fraction=4, window=8,
plot=True):
"""Identifica los valles (inicio de un evento de escorrentía superficial) y los picos (caudal máximo de dicho evento) en una serie de caudal. Se considera valle al primer intervalo temporal con una pendiente del hidrograma superior a 'dQ_threshold' o una curvatura superior a 'd2Q_threshold'. Se considera un intervalo como pico si la pendiente en los 'window' intervalos anteriores es positiva o nula y en los 'window' intervalos posteriores es negativa.
Parámetros:
-----------
data: data frame. Las filas representan pasos temporales y habrá al menos cuatro campos: 'serie' contiene
los datos de caudal (m³/s), 'dQ' contiene la pendiente del hidrograma(m³/s²), 'm' contiene el signo
de la pendiente del hidrograma (-), y 'd2Q' con la curvatura del hidrograma (m³/s³).
serie: string. Nombre del campo que contiene la serie de caudal
k: float. Constante de recesión de caudal (s)
dQ_threshold: float. Valor mínimo de la pendiente del hidrograma a partir del cual se considera que empieza el evento de
escorrentía directa.
d2Q_threshold: float. Valor mínimo de la curvatura del hidrograma a partirl del cual se ocnsidera que empieza el evento
de escorrentía directa.
multiple: float. Tanto por uno aplicado sobre el caudal en un valle para que otro punto pueda ser considerado valle
fraction: integer. Fracción utilizada para aceptar un punto como inicio de la recesión. Un punto no se aceptará como
tal si su caudal es superior a 'fraction' entre el pico y el valle.
window: integer. Número de pasos temporales que se tendrán en cuenta para encontrar los valles y los picos
plot: boolean. Si se quiere mostrar el hidrograma con los puntos
Salidas:
--------
key_points: dictionary. Contiene cuatro listas con los puntos clave del hidrograma de escorrentía directa:
'valley' contiene la lista de los puntos de inicio de cada evento
'peak' contiene la lista con los picos de cada evento
'recession' contiene la lista con los puntos finales de cada evento
'inflexion0 contiene la lista con los puntos de inflexión de la rama descendente de cada evento'"""
# Listas donde se guardan las fechas de los puntos clave
valleys, peaks, recess, inflex = [], [], [], []
# Campos que hace falta crear
data['Qrec'], data['d2Qma'] = np.nan, np.nan
# se hace una media móvil sobre la segunda derivada del caudal para suavizar la curva
data.d2Qma = data.d2Q.rolling(window=window).mean()
# intervalo temporal en segundos
At = (data.index[1] - data.index[0]).total_seconds()
delta = datetime.timedelta(seconds=At)
if At == 300:
freq = '5min'
elif At == 3600:
freq = 'H'
elif At == 86400:
freq = 'D'
# Identificar los inicios del hidrograma de escorrentía directa
# -------------------------------------------------------------
# Puntos que cumplen las condiciones de primera y segunda derivada
mask = ((data.dQ > dQ_threshold) | (data.d2Q > d2Q_threshold))
date = data.index[0]
aux1 = []
while date <= data.index[-1] - window:
# Inicio del hidrograma de escorrentía directa
if ((data.dQ[date] > dQ_threshold) | (data.d2Q[date] > d2Q_threshold)):
aux1.append(date)
date += window
else:
date += 1
valleys = aux1
# Of the points above, keep those whose slope is positive or zero over the 'window' following steps
aux2 = []
for valley in valleys:
dates = pd.date_range(valley, periods=window, freq=freq)
if (data.m[dates] >= 0).all():
aux2.append(valley)
aux2.append(data.index[-1])
valleys = aux2
# Of the remaining points, drop those whose slope is not below the threshold during the 'window' preceding steps
aux3 = []
for valley in valleys:
dates = pd.date_range(valley - window * delta, periods=window, freq=freq)
if (abs(data.dQ[dates]) < dQ_threshold).all():
aux3.append(valley)
valleys = aux3
# Of the remaining points, drop those without a peak between them and the previous valley
for v1, v2 in list(zip(valleys[:-1], valleys[1:])):
if data[serie][v1:v2].max() <= max(data[serie][v1], data[serie][v2]):
valleys.remove(v2)
#valleys = aux2
# Of the remaining points, drop those whose discharge exceeds 'multiple' times that of the previous valley
if multiple is not None:
aux4 = [valleys[0]]
for i, v1 in enumerate(valleys[:-1]):
if v1 < aux4[-1]: # skip v1 if it has already been discarded
continue
for v2 in valleys[i+1:]:
if data[serie][v2] < data[serie][v1] * multiple:
aux4.append(v2)
break
valleys = aux4
# Identify the peaks of the direct-runoff hydrograph
# ---------------------------------------------------
aux = valleys + [data.index[-1]]
for i, (v1, v2) in enumerate(zip(aux[:-2], aux[1:])):
peaks.append(data[serie][v1:v2].idxmax())
# Drop the last entry of 'valleys', which is simply the end point of the series
valleys = valleys[:-1]
# Identify recession and inflexion points
# ---------------------------------------
for i, tp in enumerate(peaks):
# Find the last peak of the surface-runoff hydrograph
# (i.e. the date index of every peak)
#if i + 1 < len(valleys):
#tv1, tv2 = valleys[i], valleys[i+1]
#else:
#tv1, tv2 = valleys[i], data.index[-1]
#idx = np.argwhere(np.diff(np.sign(data.dQ[tv1:tv2]), n=1) != 0).reshape(-1)
#idx = data[tv1:tv2].index[idx]
# límite de caudal para encontrar picos
#Qlim = np.percentile(data.loc[idx, 'Q'], 75) # percentil 75 de los picos
#Qlim = data.loc[idx, 'Q'].mean() # media de los picos
#tp = data.loc[idx,:].loc[data.Q > Qlim, :].index[-1]
# End point over which to look for the recession
if i + 1 < len(valleys):
tf = valleys[i+1]
elif i + 1 == len(valleys):
tf = data.index[-1]
# Preliminary 'tr'
tr = tp
# 'tr' is the end point of the quick flow: the first interval from which the slope of the
# ln(Q)/t curve stays below -1/k for at least 'w' subsequent intervals
#for tr in data[tp:tf].index:
#if (log(data.Q[tr+window]) - log(data.Q[tr])) / (window * At) <= -k**-1:
#break
while data[serie][tr] > (data[serie][tf] + (data[serie][tp] - data[serie][tf]) / fraction):
# Compute the recession curve backwards from 'tf'
Qf = data[serie][tf]
for t in data[tp:tf].index:
data.loc[t, 'Qrec'] = Qt(Qf, tf, t, k)
# set 'tr' at the intersection of the hydrograph and the recession curve
try:
# Intersection points between the hydrograph and the recession curve
idx = np.argwhere(np.diff(np.sign(data[serie][tp:tf] - data.Qrec[tp:tf])) != 0).reshape(-1)
# 'tr' is taken as the second-to-last intersection point
tr = data[tp:tf].index[idx[-2]]
#tr = data.loc[tp:tf,:].loc[data.Q <= data.Qrec].index[-2]#.first_valid_index()
tf = tr + ((tf - tr) / 2).round(freq)
except Exception:
tf = tp + ((tf - tp) / 2).round(freq)
# Store 'tr' in the list
recess.append(tr)
# 'ti' is the inflexion point of the falling limb:
# the first time step with a positive or zero value of 'd2Qma'
try:
# Inflexion points where the curvature turns from - to + between 'tp' and 'tr'
idx = np.argwhere(np.diff(np.sign(data.d2Qma[tp:tr]), n=1) > 0).reshape(-1)
# 'ti' is taken as the last of those points
ti = data[tp:tr].index[idx[-1]]
#ti = data.loc[tp:tr,:].loc[data['d2Qma'] >= 0].index[0] # FIX: it should be the previous step!
except Exception:
ti = tp + (tr - tp) / 2
# Store 'ti' in the list
inflex.append(ti)
# Plot
# ----
if plot == True:
print('Nº de valles:', len(valleys), '\tNº de picos:', len(peaks), '\t\tNº de inflexiones:', len(inflex),
'\t\tNº de recesiones:', len(recess))
plt.figure(figsize=(18,3))
plt.fill_between(data.index, data[serie], alpha=0.3, label=serie)
plt.plot(data.Qrec, '--k', linewidth=1)
plt.scatter(valleys, data.loc[valleys, serie], color='green', s=10, label='valles')
plt.scatter(peaks, data.loc[peaks, serie], color='maroon', s=10, label='picos')
plt.scatter(recess, data.loc[recess, serie], marker='x', color='black', s=10, label='recesión')
plt.scatter(inflex, data.loc[inflex, serie], marker='X', color='grey', s=10, label='inflexión')
plt.ylabel('Q (m³/s)', fontsize=13)
ymax = ceil(data[serie].max() / 10) * 10
plt.xlim((data.index[0], data.index[-1]))
plt.ylim((0,ymax))
plt.legend(fontsize=12);
# Build the output dictionary
key_points = {'valley': valleys, 'peak': peaks, 'recession': recess, 'inflexion': inflex}
return key_points
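# In[ ]:
# key_points2 (and the mbf/mpv separation methods further down) rely on a recession function Qt() that is defined
# elsewhere in this notebook. A plausible sketch is given here, assuming a simple exponential master recession
# curve with constant k in seconds; the notebook's actual definition may differ.
import numpy as np

def Qt(Q0, t0, t, k):
    """Sketch of the assumed recession function: Q(t) = Q0 * exp(-(t - t0) / k).
    Called with t < t0 it projects the recession curve backwards in time, as key_points2 does from 'tf'."""
    dt = (t - t0).total_seconds()  # elapsed time from t0 to t (s); negative when projecting backwards
    return Q0 * np.exp(-dt / k)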
# In[ ]:
def key_points3(data, k, dQ_threshold=0.1, d2Q_threshold=1, window=8, plot=True):
"""Identifica los valles (inicio de un evento de escorrentía superficial) y los picos (caudal máximo de dicho evento)
en una serie de caudal. Se considera valle al primer intervalo temporal con una pendiente del hidrograma superior a
'dQ_threshold' o una curvatura superior a 'd2Q_threshold'. Se considera un intervalo como pico si la pendiente en los
'window' intervalos anteriores es positiva o nula y en los 'window' intervalos posteriores es negativa.
Parámetros:
-----------
data: data frame. Las filas representan pasos temporales y habrá al menos cuatro campos: 'Q' con los datos de
caudal (m³/s), 'dQ' con la pendiente del hidrograma(m³/s²), 'm' con el signo de la pendiente del hidrograma
(-), y 'd2Q' con la curvatura del hidrograma (m³/s³)
dQ_threshold: float. Valor mínimo de la pendiente del hidrograma a partir del cual se considera que empieza el evento de
escorrentía directa.
d2Q_threshold: float. Valor mínimo de la curvatura del hidrograma a partirl del cual se ocnsidera que empieza el evento
de escorrentía directa.
window: integer. Número de pasos temporales que se tendrán en cuenta para encontrar los valles y los picos
Salidas:
--------
valleys: lista. Fechas en las que aparecen los valles.
peaks: lista. Fechas en las que ocurren los picos"""
# Lists that will hold the dates of the key points
valleys, peaks, recess, inflex = [], [], [], []
# Extra fields that need to be created
data['Qrec'], data['d2Qma'] = np.nan, np.nan
# time step in seconds
At = (data.index[1] - data.index[0]).total_seconds()
delta = datetime.timedelta(seconds=At)
if At == 300:
freq = '5min'
elif At == 3600:
freq = 'H'
elif At == 86400:
freq = 'D'
# Identify the starts of the direct-runoff hydrograph
# ---------------------------------------------------
aux = []
for date in data.index[window:-window]:
#print(date-window, date-1, date, date+1, date+window)
if (data.Q[date] <= data.Q[date - window * delta:date - delta]).all() & (data.Q[date] <= data.Q[date + delta:date + window * delta]).all():
#print(data.Q[date-window:date-1].min(), data.Q[date], data.Q[date+1:date+window].min())
aux.append(date)
# Of the points above, drop those without a peak between them and the previous valley
for v1, v2 in zip(aux[:-1], aux[1:]):
if data.Q[v1:v2].max() <= max(data.Q[v1], data.Q[v2]):
aux.remove(v1)
valleys = aux
# Plot
# ----
if plot == True:
print('Nº de valles:', len(valleys), '\tNº de picos:', len(peaks), '\t\tNº de inflexiones:', len(inflex),
'\t\tNº de recesiones:', len(recess))
plt.figure(figsize=(18,3))
plt.fill_between(data.index, data.Q, alpha=0.3, label='hidrograma')
#plt.plot(data.Qrec, '--k', linewidth=1)
plt.scatter(valleys, data.loc[valleys, 'Q'], color='green', s=10, label='valles')
#plt.scatter(peaks, data.loc[peaks, 'Q'], color='maroon', s=10, label='picos')
#plt.scatter(recess, data.loc[recess, 'Q'], marker='x', color='black', s=10, label='recesión')
#plt.scatter(inflex, data.loc[inflex, 'Q'], marker='X', color='grey', s=10, label='inflexión')
plt.ylabel('Q (m³/s)', fontsize=13)
ymax = ceil(data.Q.max() / 10) * 10
plt.xlim((data.index[0], data.index[-1]))
plt.ylim((0,ymax))
plt.legend(fontsize=12);
# Build the output dictionary
key_points = {'valley': valleys, 'peak': peaks, 'recession': recess, 'inflexion': inflex}
return key_points
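# In[ ]:
# Minimal usage sketch of the key-point detection on a synthetic hourly hydrograph. The data and the call to
# key_points3 are illustrative only; a real series would be loaded from a gauging record.
import numpy as np
import pandas as pd

idx = pd.date_range('2001-01-01', periods=500, freq='H')
Q = np.full(len(idx), 2.0)                                   # constant baseflow of 2 m³/s
Q[100:160] += np.interp(range(60), [0, 20, 59], [0, 30, 0])  # first triangular runoff event
Q[300:380] += np.interp(range(80), [0, 25, 79], [0, 50, 0])  # second triangular runoff event
data = pd.DataFrame({'Q': Q}, index=idx)

kp = key_points3(data, k=1e6, window=8, plot=False)          # only the 'valley' list is filled in
print('valleys found:', len(kp['valley']))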
# ### 3.2. Hydrograph separation methods
# In[1]:
def mml_old(data, A, plot=True, plot_dot=False, xlim=None, title=None, label=None):
"""Desagrega la serie de caudal aforado en río en una serie de caudal rápido (o escorrentía superficial) y una serie de
caudal lento (o caudal base). Para ello utiliza el método de los mínimos locales.
Este método identifica primeramente los mínimos locales como los puntos con caudal mínimo en una ventana centrada de
ancho 2N+1. El caudal base es la interpolación lineal entre los mínimos locales. La escorrentía superficial es la dife-
rencia entre el caudal aforado y el caudal base.
Parámetros:
-----------
data: series. Serie de caudal
A: float. Área de la cuenca hidrográfica de la estación de aforo (km²)
plot: boolean. Si se quiere mostrar el hidrograma desagregado
plot_dot: boolena. Si se quieren mostrar en el hidrograma los mínimos locales
xlim: list. Fechas de inicio y fin de la gráfica del hidrograma
title: string. Título del gráfico. None por defecto.
Salidas:
--------
data: data frame. El 'data frame' de entrada con dos nuevas columnas: 'Qslow' y 'Qquick'
Si plot == True, se mostrará el hidrograma desagregado"""
Qslow = pd.Series(index=data.index)
Qquick = pd.Series(index=data.index)
# Number of time steps (days for a daily series) to inspect before and after each point
N = int(round(0.8 * A**0.2))
# List of the time steps that represent a local minimum
# Start the list of local minima with the first time step (whether or not it is a local minimum)
lows = [data.index[0]]
for i in range(N, len(data) - N):
if data.iloc[i] == data.iloc[i - N:i + N + 1].min():
lows.append(data.index[i])
# Add the last point of the series
lows.append(data.index[-1])
# Compute the baseflow series
for i in range(len(lows) - 1):
v1, v2 = lows[i], lows[i+1]
Q1, Q2 = data[v1], data[v2]
for t in data[v1:v2].index:
Qslow[t] = min(Q1 + (Q2 - Q1) * (t - v1) / (v2 - v1), data[t])
# Compute the quick-flow series
Qquick = data - Qslow
if plot == True:
# Plot
plt.figure(figsize=(18,3))
plt.plot(data, '-k', linewidth=0.5, label=label)
if plot_dot == True:
plt.scatter(lows, data[lows], color='black', s=8, label='mín. local')
plt.fill_between(data.index, Qslow, alpha=0.3, label='slow flow')
plt.fill_between(data.index, Qquick + Qslow, Qslow, alpha=0.3, label='quick flow')
if xlim == None:
plt.xlim((data.index[0], data.index[-1]))
ymax = ceil(data.max() / 10) * 10
else:
plt.xlim(xlim)
ymax = ceil(data.loc[xlim[0]:xlim[1]].max() / 10) * 10
plt.ylim((0,ymax))
plt.ylabel('Q (m³/s)', fontsize=13)
plt.title(title if title is not None else 'Método de los mínimos locales', fontsize=13)
plt.legend(fontsize=13);
return lows, Qslow, Qquick
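# In[ ]:
# Usage sketch of the local-minimum separation (mml_old) on a synthetic daily record. The catchment area and the
# random flow series are illustrative assumptions, not data from the original notebook.
import numpy as np
import pandas as pd

idx = pd.date_range('2000-10-01', periods=365, freq='D')
rng = np.random.default_rng(0)
flow = pd.Series(5 + 3 * np.sin(np.linspace(0, 2 * np.pi, len(idx)))   # seasonal baseflow
                 + rng.gamma(0.3, 15, len(idx)),                       # random storm peaks
                 index=idx)

lows, Qslow, Qquick = mml_old(flow, A=250, plot=False)   # hypothetical 250 km² catchment
print('local minima:', len(lows),
      '| mean baseflow:', round(Qslow.astype(float).mean(), 2), 'm³/s')  # cast in case the Series has no dtype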
# In[4]:
def mml(data, valles, factor=1, plot=False, plot_dot=False, xlim=None, title=None, label=None):
"""Desagrega la serie de caudal total en una serie de caudal rápido (escorrentía superficial + interflujo) y una serie de caudal lento (ocaudal base).
Se utiliza el método de los mínimos locales. Este método identifica primeramente los mínimos locales como los puntos con caudal mínimo en una ventana centrada de ancho 2N+1. El caudal base es la interpolación lineal entre los mínimos locales. La escorrentía superficial es la diferencia entre el caudal aforado y el caudal base.
Parámetros:
-----------
data: series. Serie de caudal total
valles: list. Fechas en las que acontece un mínimo local en la serie
plot: boolean. Si se quiere mostrar el hidrograma desagregado
plot_dot: boolena. Si se quieren mostrar en el hidrograma los mínimos locales
xlim: list. Fechas de inicio y fin de la gráfica del hidrograma
title: string. Título del gráfico. None por defecto.
Salidas:
--------
Qslow: series. Serie de caudal lento
Qquick: series. Serie de caudal rápido
Si plot == True, se mostrará el hidrograma desagregado"""
Qslow = pd.Series(index=data.index)
Qquick = pd.Series(index=data.index)
# Compute the baseflow series
for i in range(len(valles) - 1):
v1, v2 = valles[i], valles[i+1]
Q1, Q2 = data[v1] * factor, data[v2] * factor
for t in data[v1:v2].index:
Qslow[t] = min(Q1 + (Q2 - Q1) * (t - v1) / (v2 - v1), data[t])
# Compute the quick-flow series
Qquick = data - Qslow
if plot == True:
# Plot
plt.figure(figsize=(18,3))
plt.plot(data, '-k', linewidth=0.5, label=label)
if plot_dot == True:
plt.scatter(valles, data[valles], color='black', s=8,
label='mín. local')
plt.fill_between(data.index, Qslow, color='steelblue', alpha=0.25,
label='slow flow')
plt.fill_between(data.index, Qquick + Qslow, Qslow, color='steelblue',
alpha=0.5, label='quick flow')
if xlim == None:
plt.xlim((data.index[0], data.index[-1]))
ymax = ceil(data.max() / 10) * 10
else:
plt.xlim(xlim)
ymax = ceil(data.loc[xlim[0]:xlim[1]].max() / 10) * 10
plt.ylim((0,ymax))
plt.ylabel('Q (m³/s)', fontsize=13)
plt.title(title if title is not None else 'Método de los mínimos locales', fontsize=13)
plt.legend(fontsize=13);
return Qslow, Qquick
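# In[ ]:
# Once a record has been separated, a common single-number summary is the baseflow index (BFI): the fraction of
# the total flow volume that travels as slow flow. With a uniform time step the volume ratio reduces to a ratio of
# sums. This helper is an illustrative addition, not part of the original notebook.
def baseflow_index(Qslow, Qtotal):
    """Baseflow index: slow-flow volume divided by total flow volume (0-1)."""
    return float(Qslow.sum()) / float(Qtotal.sum())

# e.g., using the series from the mml_old sketch above:
# bfi = baseflow_index(Qslow, flow)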
# In[12]:
def mlr(data, valley, A, recession=None, plot=True, xlim=None):
"""Se desagrega el hidrograma de un evento de inundación en caudal rápido y lento por medio del método de la línea
recta.
Parámetros:
-----------
data: series. Serie temporal con el caudal aforado
valley: list of datetime. Valor del índice de 'data' para los puntos de inicio del hidrograma de escorrentía
superficial.
A: integer. Área de la cuenca vertiente al punto (km²)
recession: list of datetime. Valor del índice de 'data' para los puntos finales del hidrograma de escorrentía
superficial.
plot: boolean. Si se quiere mostrar el hidrograma desagregado
xlim: list. Dos valores de las fechas inicial y final del hidrograma a mostrar. Sólo si plot==True
Salidas:
--------
Qslow: series. Serie temporal de caudal lento
Qquick: series. Serie temporal de caudal rápido"""
# Create the slow- and quick-flow series
Qslow = pd.Series(index=data.index)
Qquick = pd.Series(index=data.index)
for i, tv in enumerate(valley): # 'tv' is the step at which the quick flow starts
if recession is not None: # if the end of the direct-runoff hydrograph is supplied
tr = recession[i]
Qv, Qr = data[tv], data[tr]
# Compute the slow-flow series
# Between the valley and the recession point
for t in data[tv:tr].index[1:]:
Qint = Qv + (t - tv) / (tr - tv) * (Qr - Qv)
Qslow[t] = min(Qint, data[t])
# Between the recession point and the next valley (local-minimum method)
if i + 1 < len(valley):
tv2 = valley[i+1]
else:
tv2 = data.index[-1]
lows_aux, Qslow_aux, Qquick_aux = mml_old(data[tr:tv2].copy(), A, plot=False)
Qslow[tr:tv2] = Qslow_aux[tr:tv2]
else:
# 'tv2' is the next valley or the end of the series
if i < len(valley) - 1:
tv2 = valley[i+1]
else:
tv2 = data.index[-1]
# step at which the baseflow recession resumes
mask = (data.index > tv) & (data <= data[tv])
tr = data[mask].first_valid_index()
if tr is None: # | (tr > tv2):
tr = tv2
# compute the slow-flow series
Qslow[tv:tr] = data[tv]
# compute the quick-flow series
Qquick = data - Qslow
Qquick[Qquick < 0] = np.nan
if plot == True:
# Plot
plt.figure(figsize=(18,3))
plt.plot(data, '-k', linewidth=0.5, label='total flow')
plt.fill_between(data.index, Qslow, alpha=0.3, label='slow flow')
plt.fill_between(data.index, Qquick + Qslow, Qslow, alpha=0.3, label='quick flow')
if xlim == None:
plt.xlim((data.index[0], data.index[-1]))
else:
plt.xlim(xlim)
ymax = ceil(data.max() / 10) * 10
plt.ylim((0,ymax))
plt.ylabel('Q (m³/s)', fontsize=13)
plt.title('Método de la línea recta', fontsize=13)
plt.legend(fontsize=13);
return Qslow, Qquick
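# In[ ]:
# Usage sketch of the straight-line separation (mlr), reusing the synthetic daily record and the local minima from
# the mml_old sketch above. The interior minima are passed as 'valley' and no recession points are supplied, so
# the method extends a horizontal line from each valley until the flow drops back to its starting level.
Qslow_sl, Qquick_sl = mlr(flow, valley=lows[1:-1], A=250, plot=False)
print('time steps with a straight-line baseflow estimate:', int(Qslow_sl.count()))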
# In[16]:
def mbf_old(data, valley, peak, k, A, base=None, recession=None, plot=True, xlim=None):
"""Se desagrega el hidrograma de un evento de inundación en caudal rápido y lento por medio del método de la base fija.
Parámetros:
-----------
data: series. Serie temporal de caudal aforado
valley: integer. Fila de 'data' en la que se inicia el hidrograma de escorrentía superficial
peak: integer. Fila de 'data' en la que ocurre el pico del hidrograma
k: float. Constante de deacimiento de la curva de recesión (s)
A: integer. Área de la cuenca vertiente al punto (km²)
serie: string. Nombre del campo que contiene la serie a desagregar
base: integer. Número de pasos temporales entre el pico la desaparición del flujo rápido
recession: list of datetime. Valor del índice de 'data' para los puntos finales del hidrograma de escorrentía
superficial.
plot: boolean. Si se quiere mostrar el hidrograma desagregado
xlim: list. Dos valores de las fechas inicial y final del hidrograma a mostrar. Sólo si plot==True
Salidas:
--------
Qslow: series. Serie temporal de caudal lento
Qquick: series. Serie temporal de caudal rápido"""
# Create the slow- and quick-flow series (copy the input so the original series is not overwritten)
Qslow = data.copy()
Qquick = np.nan
for i, (tv, tp) in enumerate(zip(valley, peak)):
# Key points of the hydrograph
# 'tv' is the step at which the quick flow starts
# 'tp' is the step at which the peak discharge occurs
# 'tr' is the step at which the quick flow disappears
if recession is None:
tr = data.index[data.index.get_loc(tp) + base] # 'base' time steps after the peak
else:
tr = recession[i]
print(tv, tp, tr)
# COMPUTE THE BASEFLOW SERIES
# between the start and the peak
for t in data[tv:tp].index:
Q = Qt(data[tv], tv, t, k)
Qslow[t] = min(Q, data[t])
# entre el pico y el fin
#for t in data[tp:tr].index[1:]:
#Q = Qslow[tp] + (data[tr] - Qslow[tp]) * (t - tp) / (tr - tp)
#Qslow[t] = min(Q, data[t])
# Entre el fin y el siguiente inicio
#if i + 1 < len(valley):
#tv2 = valley[i+1]
#else:
#tv2 = data.index[-1]
#if tr > tv2:
#tr = tv2
#l, Qslow[tr:tv2], quick = mml(data[tr:tv2], A, plot=False)
##Qslow[tr:tv2] = slow[tr:tv2]
# compute the quick-flow series
Qquick = data - Qslow
Qquick[Qquick < 0] = np.nan
if plot == True:
# Plot
plt.figure(figsize=(18,3))
plt.plot(data, '-k', linewidth=0.5, label='total flow')
plt.fill_between(data.index, Qslow, alpha=0.3, label='slow flow')
plt.fill_between(data.index, Qquick + Qslow, Qslow, alpha=0.3, label='quick flow')
if xlim == None:
plt.xlim((data.index[0], data.index[-1]))
else:
plt.xlim(xlim)
ymax = ceil(data.max() / 10) * 10
plt.ylim((0,ymax))
plt.ylabel('Q (m³/s)', fontsize=13)
plt.title('Método de la base fija', fontsize=13)
plt.legend(fontsize=13);
return Qslow, Qquick
# In[3]:
def mbf(data, valles, k, factor=1, plot=False, plot_dot=False, xlim=None, title=None):
""""""
# Create the slow- and quick-flow series
Qslow = pd.Series(data=None, index=data.index)
Qquick = pd.Series(data=None, index=data.index)
# Find the peak between each pair of consecutive valleys
picos = []
for l1, l2 in zip(valles[:-1], valles[1:]):
picos.append(data[l1:l2].idxmax())
# COMPUTE THE BASEFLOW SERIES
for i, (tv, tp) in enumerate(zip(valles, picos)):
# Key points of the hydrograph
# 'tv': step at which the quick flow starts
# 'tp': step at which the peak discharge occurs
# 'tr': step at which the quick flow disappears
tr = valles[i+1]
# Qslow between the peak and the end of the event (recession projected backwards from 'tr')
for t in data[tp:tr].index:
Q = Qt(data[tr] * factor, tr, t, k)
Qslow[t] = min(Q, data[t])
# Qslow between the start of the event and the peak (linear interpolation)
for t in data[tv:tp].index:
Q = data[tv] * factor + (t - tv) * (Qslow[tp] - data[tv] * factor) / (tp - tv)
Qslow[t] = min(Q, data[t])
# COMPUTE THE QUICK-FLOW SERIES
Qquick = data - Qslow
Qquick[Qquick < 0] = np.nan
if plot == True:
# Plot
plt.figure(figsize=(18,3))
plt.plot(data, '-k', linewidth=0.5, label='aforado')
if plot_dot == True:
plt.scatter(valles, data[valles], color='black', s=8, label='mín. local')
plt.fill_between(data.index, Qslow, color='steelblue', alpha=0.25, label='lento')
plt.fill_between(data.index, Qquick + Qslow, Qslow, color='steelblue', alpha=0.5, label='rápido')
if xlim == None:
plt.xlim((data.index[0], data.index[-1]))
ymax = ceil(data.max() / 10) * 10
else:
plt.xlim(xlim)
ymax = ceil(data.loc[xlim[0]:xlim[1]].max() / 10) * 10
plt.ylim((0,ymax))
plt.ylabel('Q (m³/s)', fontsize=13)
plt.title(title if title is not None else 'Método de la base fija', fontsize=13)
plt.legend(fontsize=13);
return Qslow, Qquick
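# In[ ]:
# Usage sketch of the fixed-base separation (mbf), again on the synthetic daily record and its interior local
# minima. The recession constant k (in seconds) is an illustrative value, and the call relies on the Qt()
# recession helper sketched earlier (or on the notebook's own definition).
Qslow_fb, Qquick_fb = mbf(flow, valles=lows[1:-1], k=5 * 86400, factor=1, plot=False)
print('time steps with a fixed-base baseflow estimate:', int(Qslow_fb.count()))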
# In[14]:
def mpv(data, valley, peak, inflexion, recession, k, A, plot=True, xlim=None, w=8):
"""Se desagrega el hidrograma de un evento de inundación en caudal rápido y lento por medio del método de la pendiente
variable.
Parámetros:
-----------
data: series. Serie temporal de caudal aforado
valley: integer. Fila de 'data' en la que se inicia el hidrograma de escorrentía superficial
peak: integer. Fila de 'data' en la que ocurre el pico del hidrograma
inflexion: list of datetime. Valor del índice de 'data' para los puntos de inflexión del hidrograma de
escorrentía superficial.
recession: list of datetime. Valor del índice de 'data' para los puntos finales del hidrograma de escorrentía
superficial.
k: float. Constante de deacimiento de la curva de recesión (s)
A: integer. Área de la cuenca vertiente al punto (km²)
serie: string. Nombre del campo que contiene la serie a desagregar
plot: boolean. Si se quiere mostrar el hidrograma desagregado
xlim: list. Dos valores de las fechas inicial y final del hidrograma a mostrar. Sólo si plot==True
w: integer. Nº de intervalos en los que se debe cumplir la regla de la curva de recesión.
Salidas:
--------
Qslow: series. Serie temporal de caudal lento
Qquick: series. Serie temporal de caudal rápido"""
# Create the slow- and quick-flow series (copy the input so the original series is not overwritten)
Qslow = data.copy()
Qquick = np.nan
At = (data.index[1] - data.index[0]).total_seconds() # time step in seconds
# Build the baseflow series
# -------------------------
for i, (tv, tp, ti, tr) in enumerate(zip(valley, peak, inflexion, recession)):
# Discharge at the key points
Qv = data[tv]
Qp = Qt(Qv, tv, tp, k)
Qr = data[tr]
Qi = Qt(Qr, tr, ti, k)
# Baseflow series
for t in data[tv:tp].index: # between the start and the peak
Q = Qt(Qv, tv, t, k)
Qslow[t] = min(Q, data[t])
for t in data[tp:ti].index[1:]: # between the peak and the inflexion point
Q = Qp + (Qi - Qp) * (t - tp) / (ti - tp)
Qslow[t] = min(Q, data[t])
for t in data[ti:tr].index: # between the inflexion point and the end
Q = Qt(Qi, ti, t, k)
Qslow[t] = min(Q, data[t])
# Between the end of the event and the next start (local-minimum method)
if i + 1 < len(valley):
tv2 = valley[i+1]
else:
tv2 = data.index[-1]
lows_aux, Qslow_aux, Qquick_aux = mml_old(data[tr:tv2].copy(), A, plot=False)
Qslow[tr:tv2] = Qslow_aux[tr:tv2]
# Build the quick-flow series
# ---------------------------
Qquick = data - Qslow
Qquick[Qquick < 0] = np.nan
if plot == True:
# Plot
plt.figure(figsize=(18,3))
plt.plot(data, '-k', linewidth=0.5, label='total flow')
plt.fill_between(data.index, Qslow, alpha=0.3, label='slow flow')
plt.fill_between(data.index, Qquick + Qslow, Qslow, alpha=0.3, label='quick flow')
if xlim == None:
plt.xlim((data.index[0], data.index[-1]))
else:
plt.xlim(xlim)
ymax = ceil(data.max() / 10) * 10
plt.ylim((0,ymax))
plt.ylabel('Q (m³/s)', fontsize=13)
plt.title('Método de la pendiente variable', fontsize=13)
plt.legend(fontsize=13);
return Qslow, Qquick
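# In[ ]:
# The separation functions above all return (Qslow, Qquick) Series on the same index, so methods can be compared
# directly on their slow-flow component. A minimal comparison sketch reusing the series from the earlier usage
# examples (straight-line and fixed-base) and the illustrative baseflow_index helper defined above.
import pandas as pd

comparison = pd.DataFrame({'straight line': Qslow_sl, 'fixed base': Qslow_fb}).astype(float)
print(comparison.describe().loc[['mean', 'max']])
print('BFI (fixed base):', round(baseflow_index(comparison['fixed base'].fillna(0), flow), 2))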
|