import requests, json
# a Python dictionary that will be turned into a JSON object
resourceParams = {
'restype_id': 'http://www.knora.org/ontology/anything#ThingPicture',
'properties': {
},
'label': "Zuerich",
'project_id': 'http://data.knora.org/projects/anything'
}
# the name of the file to be submitted
filename = "/Users/system/Desktop/logo.jpg"
# a tuple containing the file's name, its binary content, and its MIME type
file = {'file': (filename, open(filename, 'rb'), "image/jpeg")} # use name "file"
# do a POST request providing both the JSON and the binaries
r = requests.post("http://130.60.24.65:3333/v1/resources",
data={'json': json.dumps(resourceParams)}, # use name "json"
files=file,
auth=('anything.user02@example.org', 'test'))
#r.raise_for_status()
print (r.text)
|
#~ f_in = open('B-small-practice.in')
#~ f_out = open('B-small-practice.out', 'w')
#f_in = open('B-large-practice.in')
#f_out = open('B-large-practice.out', 'w')
## The number of test cases
t = int(raw_input()) # read a line with a single integer
for i in xrange(1, t + 1):
#~ n, m = [int(s) for s in raw_input().split(" ")] # read a list of integers, 2 in this case
    pancakes = raw_input()
flips = 0
for j in range(1, len(pancakes)):
if (pancakes[j] != pancakes[j-1]):
flips += 1
if pancakes[-1] == '-':
flips += 1
print "Case #{}: {}".format(i, flips)
# check out .format's specification for more formatting options
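# For reference, a minimal Python 3 port of the same flip-counting logic
# (a sketch assuming the identical input format; raw_input/xrange become
# input/range and print becomes a function):
#~ t = int(input())
#~ for i in range(1, t + 1):
#~     pancakes = input()
#~     flips = sum(1 for j in range(1, len(pancakes)) if pancakes[j] != pancakes[j - 1])
#~     if pancakes[-1] == '-':
#~         flips += 1
#~     print("Case #{}: {}".format(i, flips))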
|
from django.shortcuts import render,redirect
from django.views import generic
from django.urls import reverse_lazy
import datetime
from django.http import HttpResponse, JsonResponse
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.decorators import login_required, permission_required
import json
from django.db.models import Sum
from .models import Proveedor, ComprasEnc, ComprasDet
from cmp.forms import ProveedorForm, ComprasEncForm
from bases.views import SinPrivilegios
from inv.models import Producto
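# Note: `SinPrivilegios` (imported above from bases.views) is assumed here to be
# a small mixin combining Django's LoginRequiredMixin and
# PermissionRequiredMixin, since the views below only set `permission_required`.
# A minimal sketch of that assumption:
#
#   class SinPrivilegios(LoginRequiredMixin, PermissionRequiredMixin):
#       login_url = 'bases:login'
#       raise_exception = False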
class ProveedorView(SinPrivilegios, generic.ListView):
permission_required = "cmp.view_proveedor"
model = Proveedor
template_name = "cmp/proveedor_list.html"
context_object_name = 'obj'
class ProveedorNew(SinPrivilegios, generic.CreateView):
permission_required = "cmp.add_proveedor"
model = Proveedor
template_name="cmp/proveedor_form.html"
context_object_name = 'obj'
form_class=ProveedorForm
success_url=reverse_lazy("cmp:proveedor_list")
def form_valid(self, form):
form.instance.uc = self.request.user
print(self.request.user.id)
return super().form_valid(form)
class ProveedorEdit(SinPrivilegios, generic.UpdateView):
permission_required = "cmp.add_proveedor"
model=Proveedor
template_name="cmp/proveedor_form.html"
context_object_name='obj'
form_class=ProveedorForm
success_url=reverse_lazy("cmp:proveedor_list")
def form_valid(self, form):
form.instance.um=self.request.user.id
print(self.request.user.id)
return super().form_valid(form)
@login_required(login_url='/login/')
@permission_required('cmp.change_proveedor', login_url='bases:sin_privilegios')
def proveedorInactivar(request,id):
template_name='cmp/inactivar_prv.html'
contexto={}
prv = Proveedor.objects.filter(pk=id).first()
if not prv:
return HttpResponse('Proveedor no existe '+ str(id))
if request.method == 'GET':
contexto={'obj':prv}
if request.method == 'POST':
prv.estado=False
prv.save()
contexto={'obj':'OK'}
return HttpResponse('Proveedor Inactivado')
return render(request,template_name,contexto)
class ComprasView(SinPrivilegios, generic.ListView):
permission_required = "cmp.view_compras"
model = ComprasEnc
template_name = "cmp/compras_list.html"
context_object_name = 'obj'
@login_required(login_url='/login/')
@permission_required('cmp.view_comprasenc', login_url='bases:sin_privilegios')
def compras(request,compra_id=None):
template_name="cmp/compras.html"
prod=Producto.objects.filter(estado=True)
form_compras={}
contexto={}
if request.method=='GET':
form_compras=ComprasEncForm()
enc=ComprasEnc.objects.filter(pk=compra_id).first()
if enc:
det=ComprasDet.objects.filter(compra=enc)
fecha_compra=datetime.date.isoformat(enc.fecha_compra)
fecha_factura=datetime.date.isoformat(enc.fecha_factura)
e={
'fecha_compra':fecha_compra,
'proveedor':enc.proveedor,
'observacion':enc.observacion,
'no_factura':enc.no_factura,
'fecha_factura':fecha_factura,
'sub_total':enc.sub_total,
'descuento':enc.descuento,
'total':enc.total
}
form_compras=ComprasEncForm(e)
else:
det=None
contexto={'productos':prod,'encabezado':enc,'detalle':det,'form_enc':form_compras}
if request.method=='POST':
fecha_compra=request.POST.get("fecha_compra")
observacion=request.POST.get("observacion")
no_factura=request.POST.get("no_factura")
fecha_factura=request.POST.get("fecha_factura")
proveedor=request.POST.get("proveedor")
sub_total=0
descuento=0
total=0
if not compra_id:
prov=Proveedor.objects.get(pk=proveedor)
enc=ComprasEnc(
fecha_compra=fecha_compra,
observacion=observacion,
no_factura=no_factura,
fecha_factura=fecha_factura,
proveedor=prov,
uc=request.user
)
if enc:
enc.save()
compra_id=enc.id
else:
enc=ComprasEnc.objects.filter(pk=compra_id).first()
if enc:
enc.fecha_compra=fecha_compra
enc.observacion=observacion
enc.no_factura=no_factura
enc.fecha_factura=fecha_factura
enc.um=request.user.id
enc.save()
if not compra_id:
return redirect("cmp:compras_list")
producto=request.POST.get("id_id_producto")
cantidad=request.POST.get("id_cantidad_detalle")
precio=request.POST.get("id_precio_detalle")
sub_total_detalle=request.POST.get("id_subtotal_detalle")
descuento_detalle=request.POST.get("id_descuento_detalle")
total_detalle=request.POST.get("id_total_detalle")
prod=Producto.objects.get(pk=producto)
det=ComprasDet(
compra=enc,
producto=prod,
cantidad=cantidad,
precio_prv=precio,
descuento=descuento_detalle,
costo=0,
uc=request.user
)
if det:
det.save()
sub_total=ComprasDet.objects.filter(compra=compra_id).aggregate(Sum('sub_total'))
descuento=ComprasDet.objects.filter(compra=compra_id).aggregate(Sum('descuento'))
enc.sub_total=sub_total["sub_total__sum"]
enc.descuento=descuento["descuento__sum"]
enc.save()
return redirect("cmp:compras_edit",compra_id=compra_id)
return render(request, template_name, contexto)
class CompraDetDelete(SinPrivilegios, generic.DeleteView):
permission_required="cmp.delete_comprasdet"
model=ComprasDet
template_name="cmp/compras_det_del.html"
context_object_name='obj'
def get_success_url(self):
compra_id=self.kwargs['compra_id']
        return reverse_lazy('cmp:compras_edit',kwargs={'compra_id':compra_id})
|
#!/usr/bin/python
import sys
import os
class TodoCommandParser(object):
def __init__(self, commandLineArgs):
# split the arguments by space and skip the first (command name)
A = commandLineArgs.split()
A = A[1:]
self.command = ''
self.arg1 = ''
self.arg2 = ''
if len(A) < 1:
return
A[0] = A[0].lower()
if self.__matchSingleArgCommand(A[0]):
self.arg1 = ' '.join(A[1:])
elif 'move'.startswith(A[0]):
self.command = 'move'
self.arg1 = int(A[1])
self.arg2 = ' '.join(A[2:])
else:
raise Exception('Not an acceptable command: {0:s}'.format(A[0]))
def __matchSingleArgCommand(self, s):
matchedSingleArg = False
for command in ['add', 'complete', 'list']:
if command.startswith(s):
self.command = command
matchedSingleArg = True
break
return matchedSingleArg
class TodoList(object):
def __init__(self):
self.__todoList = []
self.read()
def read(self):
pass
def write(self):
pass
def add(self, arg1='', arg2=''):
self.__todoList.append(arg1)
def get(self, k):
return self.__todoList[k]
def find(self, s):
s = s.lower()
lowerTodo = map(lambda x : x.lower(), self.__todoList)
for k, item in enumerate(lowerTodo):
if s in item:
return k
return -1
def complete(self, arg1='', arg2=''):
k = -1
if type(arg1) == int:
k = int(arg1)
else:
k = self.find(arg1)
self.__todoList.pop(k)
def list_all(self, arg1='', arg2=''):
return '\n'.join(self.__todoList)
def move(self, arg1='', arg2=''):
k = -1
if type(arg2) == int:
k = int(arg2)
else:
k = self.find(arg2)
s = self.__todoList.pop(k)
self.__todoList.insert(arg1, s)
class FileTodoList(TodoList):
def __init__(self, path='.todo'):
self.path = path
super(FileTodoList, self).__init__()
def read(self):
# create the file if it doesn't exist
if not os.path.isfile(self.path):
self.write()
with open(self.path, 'r') as f:
for item in f.read().split('\n'):
if len(item.strip()) > 0:
self.add(item)
def write(self):
with open(self.path, 'w') as f:
f.write(self.list_all())
class TodoRouter(object):
def __init__(self, todoList):
self.todo = todoList
def route(self, cmd):
routes = {
'add': self.todo.add,
'complete': self.todo.complete,
'list': self.todo.list_all,
'move': self.todo.move,
}
if cmd not in routes:
return None
return routes[cmd]
def process(self, commandLineInput):
parser = TodoCommandParser(commandLineInput)
f = self.route(parser.command)
if f is None:
return
return f(parser.arg1, parser.arg2)
if __name__ == '__main__':
CommandString = ' '.join(sys.argv)
FileTodo = FileTodoList(path = os.path.expanduser('~/.TODO'))
Router = TodoRouter(FileTodo)
result = Router.process(CommandString)
if result != None:
print result
FileTodo.write()
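# Example invocations (a sketch; assumes the script is saved as todo.py and
# uses the prefix matching implemented above, so 'a', 'co', 'li' and 'm' work too):
#   python todo.py add buy milk   # appends "buy milk" to ~/.TODO
#   python todo.py list           # prints every item
#   python todo.py complete milk  # removes the first item containing "milk"
#   python todo.py move 0 milk    # moves that item to position 0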
|
#!/usr/bin/env python
__VERSION__ = '1.0'
import immlib
import argparse
import immutils
import getopt
import pelib
import pefile
from immutils import *
imm = immlib.Debugger()
"""
Functions
"""
def CheckIntersectionJMP(inst):
imm.log("trying to locate intersection jmp")
return "Done"
def CheckPushAdd(inst):
imm.log("checking push Add")
def CheckWxorX(inst):
imm.log("checking WxorX")
def getEPsection(pe,rva):
"""returns the section that contains the entry point, -1 if none"""
for i,s in enumerate(pe.sections):
if s.contains_rva(rva):
break
else: return -1
return i
"""load PE, return pe object, it's entry point, imagebase, VA of the section of the entry point, its physical size"""
def loadPE():
try:
name = imm.getDebuggedName()
module = imm.getModule(name)
if not module:
raise Exception, "Couldn't find %s .." % name
except Exception,e:
imm.log('module %s not found'%(name))
return False
start = module.getBaseAddress()
size = module.getSize()
data = imm.readMemory(start, size)
pe = pefile.PE(data=data)
oep = pe.OPTIONAL_HEADER.AddressOfEntryPoint
ib = pe.OPTIONAL_HEADER.ImageBase
section = pe.sections[getEPsection(pe,oep)]
start, size = section.VirtualAddress, section.SizeOfRawData
return pe, oep, ib, start, size
def displaySections(pe):
    ib = pe.OPTIONAL_HEADER.ImageBase  # image base is needed to log virtual addresses
    for i,section in enumerate(pe.sections):
        imm.log("%s %s %s %s"%(i, hex(section.VirtualAddress+ib), hex(section.Misc_VirtualSize), hex(section.SizeOfRawData )))
"""
Main
"""
def usage():
imm.log("!findpacker -t --techniques Comma separed list of techbiques for find OEP:intersectionJMP,pushadd,WxorX all to use the whole set",focus=1)
def main(args):
"""arguments error handling"""
imm.log("dsadsa")
pe,oep,ib,start,size = loadPE()
for i,section in enumerate(pe.sections):
imm.log("%s %s %s %s"%(i, hex(section.VirtualAddress+ib), hex(section.Misc_VirtualSize), hex(section.SizeOfRawData )))
if not args:
usage()
return "No args"
try:
opts, argo = getopt.getopt(args,"t:")
except getopt.GetoptError:
usage()
return "Bad argument %s" % args[0]
techniques = {
"intersectionJMP" : CheckIntersectionJMP,
"pushadd" : CheckPushAdd,
"WxorX" : CheckWxorX
}
"""Parse the chosen techniques"""
for option,ar in opts:
if option == "-t":
chosenTech = ar.split(",")
"""Set the function that has to been executed for each instruction"""
toExecuteTech = []
for tec in chosenTech:
if(techniques.get(tec) is not None):
imm.log("Activating technique "+tec)
toExecuteTech.append(techniques.get(tec))
else:
imm.log("Technique "+tec+" not found")
for execTech in toExecuteTech:
execTech("test")
return "Done"
|
import perceptron
from point import *
from tkinter import *
canvas_width = 700
canvas_height = 700
brain = perceptron.Perceptron(2)
points = []
point_graphique = []
nb_point = 1000
i = 0
while i < nb_point:
new_point = Point(canvas_width,canvas_height)
points.append(new_point)
i += 1
fenetre = Tk()
canvas = Canvas(fenetre,width = canvas_width,height = canvas_height)
for p in points:
if p.reponse == 1:
objet = canvas.create_oval(p.position[0],p.position[1],p.position[0] + 25,p.position[1] + 25,width=2)
else:
objet = canvas.create_oval(p.position[0],p.position[1],p.position[0] + 25,p.position[1] + 25,width=10)
point_graphique.append(objet)
canvas.create_line(0,0,canvas_width,canvas_height,width=2)
y = 0
for g in points:
    if brain.guess(g) == g.reponse:
canvas.itemconfigure(point_graphique[y],fill="green")
else:
canvas.itemconfigure(point_graphique[y],fill="red")
y += 1
def tst(fd):
q = 0
print("training")
for p in points:
brain.train(p)
q = 0
error = 0
for h in points:
if brain.guess(h) == h.reponse:
canvas.itemconfigure(point_graphique[q],fill="green")
else:
canvas.itemconfigure(point_graphique[q],fill="red")
error += 1
q += 1
canvas.pack()
print("Erreur restante:",error)
""" Version 1
for p in points:
brain.train(p)
if brain.guess(p) == p.reponse:
canvas.itemconfigure(point_graphique[q],fill="green")
else:
canvas.itemconfigure(point_graphique[q],fill="red")
error += 1
q += 1
canvas.pack()
"""
fenetre.bind("<Button-1>", tst)
canvas.pack()
fenetre.mainloop()
|
#from pyimagesearch.io import HDF5DatasetWriter
import numpy as np
import argparse
from imutils import paths
import cv2
import os
import imutils
import random
def rotate_bound(image, angle):
# grab the dimensions of the image and then determine the
# center
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image
return cv2.warpAffine(image, M, (nW, nH))
def encode_utf8_string(text, length, dic, null_char_id):
char_ids_padded = [null_char_id]*length
char_ids_unpadded = [null_char_id]*len(text)
for i in range(len(text)):
hash_id = dic[text[i]]
char_ids_padded[i] = hash_id
char_ids_unpadded[i] = hash_id
return char_ids_padded, char_ids_unpadded
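# Worked example with a hypothetical dictionary: for dic = {'a': 1, 'b': 2},
# length = 5 and null_char_id = 0, encode_utf8_string('ab', 5, dic, 0)
# returns ([1, 2, 0, 0, 0], [1, 2]).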
width = 300
height = 32
k1 = 300/32
imagePaths = list(paths.list_images('/home/hansama/Documents/crnn/test/'))
print(imagePaths)
random.shuffle(imagePaths)
for i, imagePath in enumerate(imagePaths):
image = cv2.imread(imagePath, cv2.IMREAD_GRAYSCALE)
k2 = image.shape[1]/image.shape[0]
if k2 > k1:
resized = imutils.resize(image, width = 300)
zeros = np.zeros((32 - resized.shape[0], 300))
results = np.concatenate((resized, zeros), axis=0)
else:
resized = imutils.resize(image, height = 32)
zeros = np.zeros((32, 300 - resized.shape[1]))
results = np.concatenate((resized, zeros), axis=1)
results = imutils.rotate_bound(results, 90)
cv2.imwrite(str(i)+'.jpg', results)
#get the dictionary
# dic = {}
# dic[" "] = 0
# with open('dic.txt', encoding="utf-8") as dict_file:
# for i, line in enumerate(dict_file):
# if i == 0:
# continue
# (key, value) = line.strip().split('\t')
# dic[value] = int(key)
# dict_file.close()
# #convert label
# label = imagePath.split('.')[0] + '.txt'
# with open(label, 'r') as f:
# for line in f:
# char_ids_padded, char_ids_unpadded = encode_utf8_string(
# text=line,
# dic=dic,
# length=37,
# null_char_id=214)
# f.close()
# print(results.shape)
# print("!!!!!!!!!",char_ids_padded)
|
def remove(duplicate):
list_ = []
for num in duplicate:
if num not in list_:
list_.append(num)
return list_
duplicate = [2, 4, 10, 20, 5, 2, 20, 4]
print(remove(duplicate))
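# An equivalent order-preserving one-liner (assuming Python 3.7+, where dicts
# keep insertion order): list(dict.fromkeys(duplicate)) also yields
# [2, 4, 10, 20, 5].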
|
import numpy as np
import matplotlib.pyplot as plt
import copy
#Define sigmoid activation function
def sigmoid(x, derivative=False):
#activation function and derivative
#x: input
#derivative: boolean. If True will return the derivative
f = 1 / (1 + np.exp(-x))
#derivative
if derivative == True:
ds = (f * (1 - f))
return ds
return f
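#e.g. sigmoid(0) returns 0.5, and sigmoid(0, derivative=True) returns 0.25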
#Inputs and targeted outputs
inputs=[[-0.2,0.1,0.3,-0.4,1],[0.6,-0.1,0.7,-0.5,1],[0.8,0.1,-0.6,0,1]]
targets =[[0.4,0.6,0.5,0.7],[0.7,0.1,0.2,0.1],[0.1,0.3,0.2,0.9]]
#define weights of the input layer, 4 inputs + bias
weight_inputs=np.random.uniform(low=-0.5, high=0.5, size=(5,5) )
#define weights of the hidden layer, 5 hidden neurons + bias
weight_output=np.random.uniform(low=-0.5, high=0.5, size=(6,4) )
### start training ###
loss = []
for epoch in range(500):
predictions = []
for i in range(0,len(inputs)):
### Feed Forward ###
#weight sum from inputs layer
hidden_inputs=np.dot(inputs[i],weight_inputs)
#activation in the hidden layer
hidden_sig=sigmoid(hidden_inputs)
#add bias in the hidden layer
hidden_sig=np.append(hidden_sig,1)
#weight sum from hidden layer
output=np.dot(hidden_sig,weight_output)
        #activation in the output layer
predicted=sigmoid(output)
#store predicted output
predictions.append(predicted)
        ### Back Propagation ###
#calculate delta from output layer to hidden layer; lr=0.5
error = targets[i]-predicted
derivative_output = sigmoid(output,derivative=(True))
deltaK = error * derivative_output
#bias added
delta_weight_output=0.5*(np.dot(hidden_sig.reshape((6,1)),deltaK.reshape((1,4))))
#add momentum; mc=0.9
if epoch>1:
delta_weight_output +=0.9 * delta_weight_output_old
#store delta weight from previous iteration
delta_weight_output_old = copy.deepcopy(delta_weight_output)
#calculate delta from hidden layer to input layer
errorJ=np.dot(weight_output,deltaK.reshape(4,1))
derivative_outputJ=sigmoid(hidden_inputs,derivative=(True))
#exclude bias
errorJ=errorJ[:-1]
deltaJ= np.asarray(errorJ) * np.reshape(derivative_outputJ,(5,1))
inputs_array = np.asarray(inputs[i])
#add momentum
delta_weight=0.5*(np.dot(np.reshape(inputs_array, (5,1)),deltaJ.transpose()))
if epoch>1:
delta_weight +=0.9 * delta_weight_old
delta_weight_old = copy.deepcopy(delta_weight)
#update weight for the hidden layer
weight_output += delta_weight_output
#update weight for the input layer
weight_inputs += delta_weight
    ### calculate the mean squared error (called RMS here, though no square root is taken) ###
predictions=np.asarray(predictions)
sum_error = sum(sum((targets-predictions)**2))
rms = sum_error /(len(targets) * len(targets[0]))
loss.append(rms)
### Feed Forward Testing ###
hidden_inputs=np.dot(inputs[0],weight_inputs)
#activation in the hidden layer
hidden_sig=sigmoid(hidden_inputs)
#weight sum, add bias
hidden_sig=np.append(hidden_sig,1)
output=np.dot(hidden_sig,weight_output)
#predicted output
predicted=sigmoid(output)
print(predicted)
### plot loss function ###
plt.plot(np.arange(len(loss)),loss)
plt.show()
|
from getpass import getpass
from test_object import Test
class AccountsTest(Test):
username = None
password = None
def __init__(self):
self.username = raw_input("Username: ")
self.password = getpass()
def test_myaccount(self):
try:
self.driver.get("http://myaccount.arch.tamu.edu/accounts/login")
except Exception as e:
print "Error: Could not load myaccount login page!"
print e
try:
username_element = self.driver.find_element_by_id("id_username")
password_element = self.driver.find_element_by_id("id_password")
except Exception as e:
print "Error: Could not locate the username and password fields."
print e
else:
username_element.send_keys(self.username)
password_element.send_keys(self.password)
password_element.submit()
if "Login" in self.driver.title:
print "Warning: Incorrect username or password."
elif "My Profile" in self.driver.title:
self.test_site()
else:
print "Error: Could not load My Profile page."
print "Page: %s" % self.driver.title
return
def test_admin(self):
try:
self.driver.get("http://www.arch.tamu.edu/admin")
except Exception as e:
print "Error: Could not load http://www.arch.tamu.edu/admin!"
print e
try:
username_element = self.driver.find_element_by_id("id_username")
password_element = self.driver.find_element_by_id("id_password")
except Exception as e:
print "Error: Could not locate the username and password fields."
else:
username_element.send_keys(self.username)
password_element.send_keys(self.password)
password_element.submit()
if "Log in" in self.driver.title:
print "Warning: Incorrect username or password."
elif "Site administration" in self.driver.title:
return
else:
print "Error: Could not load the Site Administration page."
return
def change_user(self):
self.username = raw_input("Username: ")
self.password = getpass()
|
# like = open('likeCounter.txt','r').read()
# like = int(like)
# like += 1
# storeLike = str(like)
# openFile = open('likecounter.txt','w')
# openFile.write(storeLike)
# openFile.close()
# like = open('likeCounter.txt','r').read()
# print(like)
#function to increase the like counter...
#1
def likeMe(like):
like += 1
return like
#2
def disLikeMe(like):
like -= 1
return like
#3
def resetLikeCounter(like):
like = 0
return like
#main
print("Press 1 to like,\n")
print("Press 2 to dislike\n")
print("Press 3 to reset likes\n")
print("Press 4 to Display likes\n")
print("Press 0 to Exit\n")
option = int(input("Your Choice here: "))
if option == 1 :
like = open('./likeCounter.txt','r').read()
like = int(like)
like = likeMe(like)
storeLike = str(like)
    openFile = open('./likeCounter.txt','w')  # must match the capitalisation used when reading
openFile.write(storeLike)
openFile.close()
elif option == 2 :
like = open('./likeCounter.txt','r').read()
like = int(like)
like = disLikeMe(like)
storeLike = str(like)
    openFile = open('./likeCounter.txt','w')
openFile.write(storeLike)
openFile.close()
elif option == 3 :
like = open('./likeCounter.txt','r').read()
like = int(like)
like = resetLikeCounter(like)
storeLike = str(like)
    openFile = open('./likeCounter.txt','w')
openFile.write(storeLike)
openFile.close()
elif option == 4:
readlike = open('./likeCounter.txt','r').read()
print("Likes : ",readlike)
#end
|
from pig import Dice, Player, Game, ComputerPlayer
dice = Dice()
player1 = Player()
player2 = ComputerPlayer()
def test_dice_exists():
new_dice = Dice()
assert type(new_dice) == Dice
def test_dice_equality():
dice1 = Dice()
dice2 = Dice()
assert dice1 == dice2
def test_dice_will_roll_within_params():
for _ in range(15):
assert dice.roll() in range(1,7)
def test_player_exists():
new_player = Player()
assert type(new_player) == Player
assert new_player.score == 0
def test_score_add():
new_player = Player()
new_cpu = ComputerPlayer()
new_player.add_points(20)
new_cpu.add_points(25)
assert new_player.score == 20
assert new_cpu.score == 25
def test_computer_exists():
new_computer = ComputerPlayer()
assert type(new_computer) == ComputerPlayer
assert new_computer.score == 0
def test_computer_decide_die_roll():
new_computer = ComputerPlayer()
assert new_computer.decide_roll_die(20) == False
assert new_computer.decide_roll_die(5) == True
def test_get_first_player():
new_game = Game()
    assert new_game.choose_first_player() in (new_game.player1, new_game.cpu)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Book(models.Model):
title = models.CharField(max_length=200)
author = models.CharField(max_length=200)
price = models.CharField(max_length=20)
image = models.URLField(max_length=200)
description = models.CharField(max_length=500)
isbn = models.CharField(max_length=20,null=True)
#publisher = models.CharField(max_length=50)
category = models.CharField(max_length=50,null=True)
#class User(models.Model):
# name = models.CharField(max_length=250)
# emailid = models.CharField(max_length=100)
# password = models.CharField(max_length=100)
|
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A wrapper tff.aggregator for distributed DP with secure aggregation."""
import collections
import math
from typing import Optional
import warnings
import numpy as np
import tensorflow as tf
import tensorflow_privacy as tfp
from tensorflow_federated.python.aggregators import concat
from tensorflow_federated.python.aggregators import differential_privacy
from tensorflow_federated.python.aggregators import discretization
from tensorflow_federated.python.aggregators import factory
from tensorflow_federated.python.aggregators import modular_clipping
from tensorflow_federated.python.aggregators import quantile_estimation
from tensorflow_federated.python.aggregators import robust
from tensorflow_federated.python.aggregators import rotation
from tensorflow_federated.python.aggregators import secure
from tensorflow_federated.python.core.impl.federated_context import federated_computation
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.tensorflow_context import tensorflow_computation
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.impl.types import type_analysis
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.templates import aggregation_process
from tensorflow_federated.python.core.templates import measured_process
# Supported DP mechanisms.
DP_MECHANISMS = [
# Distributed Discrete Gaussian (https://arxiv.org/abs/2102.06387).
'distributed_dgauss',
# Distributed Skellam (https://arxiv.org/abs/2110.04995).
'distributed_skellam',
]
# Supported random rotation operations.
ROTATION_TYPES = [
'hd', # Randomized Fast Walsh-Hadamard Transform.
'dft', # Randomized Discrete Fourier Transform.
]
# The maximum possible scaling factor before rounding is applied. This is needed
# because when the number of clients per round is small (<10) and/or the noise
# multiplier is very small (say < 0.001) and/or the number of bits is large (say
# > 18), the scale factor computed by `_heuristic_scale_factor(...)` becomes
# very large. For example, for a noise multiplier = 0, number of bits = 20, and
# 10 clients per round, the scaling factor is on the order of 1e8. Such a large
# scaling factor leads to overflows when computing the inflated and scaled l1/l2
# norm bounds using int32 bit representations. In practice, however, such very
# large scaling factors do not offer any added value (in minimizing rounding errors
# and/or minimizing the inflation of the l1/l2 norms upon rounding). Capping the
# scaling factor to 1e6 avoids these overflow issues without compromising
# utility.
# TODO: b/223427213 - Adapt the bitwidth whenever the scale is too high.
MAX_SCALE_FACTOR = 1e6
class DistributedDpSumFactory(factory.UnweightedAggregationFactory):
"""An `UnweightedAggregationFactory` for distributed DP with SecAgg.
The created `tff.templates.AggregationProcess` serves as a wrapper that
encapsulates several component tff.aggregators into an implementation of
a distributed differential privacy (DP) algorithm with secure aggregation.
Distributed DP algorithms aim to "distribute trust" away from the central
server by allowing clients to add their own noise for differential privacy
  while achieving privacy/utility comparable to the central DP model.
Currently, two specific distributed DP algorithms are supported: the
distributed discrete Gaussian mechanism (Algorithms 1 and 2 in
https://arxiv.org/abs/2102.06387) and the distributed
Skellam mechanism (Algorithm 1 in https://arxiv.org/abs/2110.04995).
This wrapper aggregator manages the nesting/composition of the components,
  coordinates their states, surfaces relevant states/metrics, and integrates
auto-tuning algorithms to update relevant states/hyperparameters.
This aggregator accepts tensors or structures of tensors with integer or
floating dtypes, and keeps the value type/structure after aggregation.
TODO: b/221465205 - Add exact privacy accounting for DDP using new accounting
APIs.
To obtain concrete (epsilon, delta) guarantees, one could use the analysis
tools provided in tensorflow_privacy on the metrics generated in each round.
"""
def __init__(
self,
noise_multiplier: float,
expected_clients_per_round: int,
bits: int,
l2_clip: float,
modclip_prob: float = 1e-4,
beta: float = 0.0,
mechanism: str = 'distributed_skellam',
rotation_type: str = 'dft',
auto_l2_clip: bool = False,
auto_l2_target_quantile: float = 0.5,
auto_l2_lr: float = 0.2,
auto_l2_clip_count_stddev: Optional[float] = None,
):
"""Initializes the `DistributedDpSumFactory`.
Note that the `create` method of this factory needs to be executed in TF
eager mode. Please see the note in the docstring of the private function
`_clip_prob_to_num_stddevs` below.
Args:
noise_multiplier: A float specifying the noise multiplier (central noise
stddev / L2 clip norm) for model updates. Note that this is with respect
to the initial L2 clip norm, and the quantization procedure as part of
        the DDP algorithm may inflate the L2 sensitivity. The specified noise
will be split into `expected_clients_per_round` noise shares to be added
locally on the clients. A value of 1.0 or higher may be needed for
strong privacy. Must be nonnegative. A value of 0.0 means no noise will
be added.
expected_clients_per_round: An integer specifying the expected number of
clients to participate in this round. This number dictates how much
noise is added locally. In particular, local noise stddev = central
noise stddev / `expected_clients_per_round`. Must be a positive integer.
bits: A positive integer specifying the communication bit-width B (where
2^B will be the field size for SecAgg operations). Note that this is for
the noisy quantized aggregate at the server and thus should account for
the number of clients. Must be in the inclusive range [1, 22], and
should be at least as large as log_2(expected_clients_per_round).
l2_clip: A float specifying the value of the L2 clipping norm. Must be
positive. If `auto_l2_clip` is set to True, a reasonable default is 0.1.
modclip_prob: (Optional) A float in the exclusive range (0, 1) specifying
the target probability for modular wrapping due to SecAgg's modulo
        operations. Defaults to 0.01% (roughly 3.9 standard deviations of the
mean assuming roughly normally distributed aggregates at the server).
beta: (Optional) The conditional randomized rounding bias. Must be a float
in the range [0, 1). If `beta > 0`, we recommend using `exp(-0.5)` as it
has been shown to be a good choice in
https://arxiv.org/pdf/2102.06387.pdf. The larger the value, the less
post-rounding L2 sensitivity inflation. Defaults to 0, indicating that
(unconditional) randomized rounding is used. Please see Sections 4 of
https://arxiv.org/pdf/2102.06387.pdf for a detailed explanation of
conditional randomized rounding. However, note that in some cases
(e.g. when using TF Lite), a beta greater than 0 (e.g. exp(-0.5)) can
cause instabilities leading to infinite loops. We recommend keeping
the default.
mechanism: (Optional) The distributed DP mechanism to use. Possible
options are 'distributed_dgauss' (distributed discrete Gaussian
mechanism) or 'distributed_skellam' (distributed Skellam mechanism; the
default).
rotation_type: (Optional) The rotation operation used to spread out input
values across vector dimensions. Possible options are 'hd' (randomized
Hadamard transform) or 'dft' (discrete Fourier transform; the default).
auto_l2_clip: (Optional) A bool indicating whether to adaptively adjust
the L2 norm clipping (i.e., `l2_clip`) after each round. Note that this
involves private quantile estimation which would result in a larger
effective `noise_multiplier` for the actual client values. The algorithm
used is based on https://arxiv.org/pdf/1905.03871.pdf with the geometric
update method. Defaults to `False`.
auto_l2_target_quantile: (Optional) A float in the inclusive range [0, 1]
indicating the target quantile to which the L2 clipping norm should
adapt. A value of 0.8 means a clipping norm should be chosen such that
80% of the client values have norm below it. Defaults to 0.5. Ignored if
`auto_l2_clip` is `False`.
auto_l2_lr: (Optional) A float specifying the learning rate for the
        adaptive L2 clipping process. Defaults to 0.2. Ignored if `auto_l2_clip`
is `False`.
auto_l2_clip_count_stddev: (Optional) The stddev of the noise added to the
clipped counts in the adaptive clipping algorithm. If None, defaults to
`0.05 * expected_clients_per_round` (unless `noise_multiplier` is 0, in
which case it is also 0). Ignored if `auto_l2_clip` is `False`.
Raises:
TypeError: If arguments have the wrong type(s).
ValueError: If arguments have invalid value(s).
"""
_check_nonnegative(noise_multiplier, 'noise_multiplier')
_check_positive(expected_clients_per_round, 'expected_clients_per_round')
_check_integer(expected_clients_per_round, 'expected_clients_per_round')
# While larger bits are possible, much of the DDP implementation relies on
# float32 which only represents ints <= 2^24. We cap at bits <= 22.
_check_in_range(bits, 'bits', 1, 22, True, True)
_check_integer(bits, 'bits')
_check_positive(l2_clip, 'l2_clip')
_check_in_range(modclip_prob, 'modclip_prob', 0, 1, False, False)
_check_in_range(beta, 'beta', 0.0, 1.0, True, False)
_check_str(mechanism, 'mechanism', DP_MECHANISMS)
_check_str(rotation_type, 'rotation_type', ROTATION_TYPES)
_check_bool(auto_l2_clip, 'auto_l2_clip')
if beta > 0.0:
warnings.warn(
f'The selected beta {beta} is greater than zero. This indicates that'
' conditional randomized rounding is used. Instabilities have been'
' observed with low-precision arithmetic or when running with'
' TFLite. Consider setting beta to 0 if you would like to avoid'
' those issues.'
)
if auto_l2_clip:
_check_in_range(
auto_l2_target_quantile, 'auto_l2_target_quantile', 0, 1, True, True
)
_check_positive(auto_l2_lr, 'auto_l2_lr')
if auto_l2_clip_count_stddev is not None:
_check_nonnegative(
auto_l2_clip_count_stddev, 'auto_l2_clip_count_stddev'
)
self._initial_l2_clip = l2_clip
self._noise_multiplier = noise_multiplier
self._num_clients = expected_clients_per_round
self._bits = bits
self._modclip_prob = modclip_prob
self._k_stddevs = _clip_prob_to_num_stddevs(modclip_prob)
self._beta = beta
self._mechanism = mechanism
self._rotation_type = rotation_type
self._auto_l2_clip = auto_l2_clip
# Value range checks based on the client count and the clip probability.
if bits < math.log2(expected_clients_per_round):
raise ValueError(
'bits should be >= log2(expected_clients_per_round). '
f'Found 2^b = 2^{bits} < {expected_clients_per_round}.'
)
if 2 ** (2 * bits) < expected_clients_per_round * self._k_stddevs**2:
raise ValueError(
f'The selected bit-width ({bits}) is too small for the '
'given parameters (expected_clients_per_round = '
f'{expected_clients_per_round}, modclip_prob = '
f'{modclip_prob}). You must decrease the '
'`expected_clients_per_round`, increase `bits`, or '
'increase `modclip_prob`.'
)
if auto_l2_clip:
self._l2_clip, self._value_noise_mult = self._build_auto_l2_clip_process(
auto_l2_target_quantile, auto_l2_lr, auto_l2_clip_count_stddev
)
else:
self._l2_clip = self._initial_l2_clip
self._value_noise_mult = self._noise_multiplier
def _build_auto_l2_clip_process(
self, target_quantile, learning_rate, clip_count_stddev
):
"""Builds a `tff.templates.EstimationProcess` for adaptive L2 clipping.
Specifically, we use the private quantile estimation algorithm described in
https://arxiv.org/abs/1905.03871 for choosing the adaptive L2 clip norm.
The default noise level for the procedure follows the paper and the
implementation of `tff.aggregators.DifferentiallyPrivateFactory`.
Note that for consistency with the use of secure aggregation for the client
values, the binary flags as part of the quantile estimation procedure
indicating whether client L2 norms are below the current estimate are also
securely aggregated.
Args:
target_quantile: See `auto_l2_target_quantile` at __init__ docstring.
learning_rate: See `auto_l2_lr` at __init__ docstring.
clip_count_stddev: See `auto_l2_clip_count_stddev` at __init__ docstring.
Returns:
The `EstimationProcess` for adaptive L2 clipping and the required noise
multiplier for the record aggregation.
"""
value_noise_mult, clip_count_stddev = (
differential_privacy.adaptive_clip_noise_params(
self._noise_multiplier, self._num_clients, clip_count_stddev
)
)
estimator_query = tfp.QuantileEstimatorQuery(
initial_estimate=self._initial_l2_clip,
target_quantile=target_quantile,
learning_rate=learning_rate,
below_estimate_stddev=clip_count_stddev,
expected_num_records=self._num_clients,
geometric_update=True,
)
# Note also that according to https://arxiv.org/abs/1905.03871, the binary
# flags for quantile estimation are shifted from [0, 1] to [-0.5, 0.5], so
# we set the SecAgg input bounds accordingly.
estimator_process = quantile_estimation.PrivateQuantileEstimationProcess(
quantile_estimator_query=estimator_query,
record_aggregation_factory=secure.SecureSumFactory(
upper_bound_threshold=0.5, lower_bound_threshold=-0.5
),
)
return estimator_process, value_noise_mult
def _build_aggregation_factory(self):
central_stddev = self._value_noise_mult * self._initial_l2_clip
local_stddev = central_stddev / math.sqrt(self._num_clients)
# Ensure dim is at least 1 only for computing DDP parameters.
self._client_dim = max(1, self._client_dim)
if self._rotation_type == 'hd':
# Hadamard transform requires dimension to be powers of 2.
self._padded_dim = 2 ** math.ceil(math.log2(self._client_dim))
rotation_factory = rotation.HadamardTransformFactory
else:
# DFT pads at most 1 zero.
self._padded_dim = math.ceil(self._client_dim / 2.0) * 2
rotation_factory = rotation.DiscreteFourierTransformFactory
scale = _heuristic_scale_factor(
local_stddev,
self._initial_l2_clip,
self._bits,
self._num_clients,
self._padded_dim,
self._k_stddevs,
).numpy()
# Very large scales could lead to overflows and are not as helpful for
# utility. See comment above for more details.
scale = min(scale, MAX_SCALE_FACTOR)
if scale <= 1:
warnings.warn(
          f'The selected scale_factor {scale} <= 1. This may lead to '
          'substantial quantization errors. Consider increasing '
          f'the bit-width (currently {self._bits}) or decreasing the '
          'expected number of clients per round (currently '
          f'{self._num_clients}).'
)
# The procedure for obtaining inflated L2 bound assumes eager TF execution
# and can be rewritten with NumPy if needed.
inflated_l2 = discretization.inflated_l2_norm_bound(
l2_norm_bound=self._initial_l2_clip,
gamma=1.0 / scale,
beta=self._beta,
dim=self._padded_dim,
).numpy()
# Add small leeway on norm bounds to gracefully allow numerical errors.
# Specifically, the norm thresholds are computed directly from the specified
# parameters in Python and will be checked right before noising; on the
# other hand, the actual norm of the record (to be measured at noising time)
# can possibly be (negligibly) higher due to the float32 arithmetic after
# the conditional rounding (thus failing the check). While we have mitigated
# this by sharing the computation for the inflated norm bound from
# quantization, adding a leeway makes the execution more robust (it does not
# need to abort should any precision issues happen) while not affecting the
# correctness if privacy accounting is done based on the norm bounds at the
# DPQuery/DPFactory (which incorporates the leeway).
scaled_inflated_l2 = (inflated_l2 + 1e-5) * scale
# Since values are scaled and rounded to integers, we have L1 <= L2^2
    # on top of the general bound L1 <= sqrt(d) * L2.
scaled_l1 = math.ceil(
scaled_inflated_l2
* min(math.sqrt(self._padded_dim), scaled_inflated_l2)
)
# Build nested aggregation factory.
# 1. Secure Aggregation. In particular, we have 4 modular clips from
# nesting two modular clip aggregators:
# #1. outer-client: clips to [-2^(b-1), 2^(b-1)]
# Bounds the client values (with limited effect as scaling was
# chosen such that `num_clients` is taken into account).
# #2. inner-client: clips to [0, 2^b]
# Similar to applying a two's complement to the values such that
# frequent values (post-rotation) are now near 0 (representing small
    # positives) and 2^b (small negatives). 0 also always maps to 0, and
# we do not require another explicit value range shift from
# [-2^(b-1), 2^(b-1)] to [0, 2^b] to make sure that values are
# compatible with SecAgg's mod m = 2^b. This can be reverted at #4.
# #3. inner-server: clips to [0, 2^b]
# Ensures the aggregated value range does not grow by log_2(n).
# NOTE: If underlying SecAgg is implemented using the new
# `tff.federated_secure_modular_sum()` operator with the same
# modular clipping range, then this would correspond to a no-op.
# #4. outer-server: clips to [-2^(b-1), 2^(b-1)]
# Keeps aggregated values centered near 0 out of the logical SecAgg
# black box for outer aggregators.
# Note that the scaling factor and the bit-width are chosen such that
# the number of clients to aggregate is taken into account.
nested_factory = secure.SecureSumFactory(
upper_bound_threshold=2**self._bits - 1, lower_bound_threshold=0
)
nested_factory = modular_clipping.ModularClippingSumFactory(
clip_range_lower=0,
clip_range_upper=2**self._bits,
inner_agg_factory=nested_factory,
)
nested_factory = modular_clipping.ModularClippingSumFactory(
clip_range_lower=-(2 ** (self._bits - 1)),
clip_range_upper=2 ** (self._bits - 1),
inner_agg_factory=nested_factory,
)
# 2. DP operations. DP params are in the scaled domain (post-quantization).
if self._mechanism == 'distributed_dgauss':
dp_query = tfp.DistributedDiscreteGaussianSumQuery(
l2_norm_bound=scaled_inflated_l2, local_stddev=local_stddev * scale
)
else:
dp_query = tfp.DistributedSkellamSumQuery(
l1_norm_bound=scaled_l1,
l2_norm_bound=scaled_inflated_l2,
local_stddev=local_stddev * scale,
)
nested_factory = differential_privacy.DifferentiallyPrivateFactory(
query=dp_query, record_aggregation_factory=nested_factory
)
# 3. Discretization operations. This appropriately quantizes the inputs.
nested_factory = discretization.DiscretizationFactory(
inner_agg_factory=nested_factory,
scale_factor=scale,
stochastic=True,
beta=self._beta,
prior_norm_bound=self._initial_l2_clip,
)
# 4. L2 clip, possibly adaptively with a `tff.templates.EstimationProcess`.
nested_factory = robust.clipping_factory(
clipping_norm=self._l2_clip,
inner_agg_factory=nested_factory,
clipped_count_sum_factory=secure.SecureSumFactory(
upper_bound_threshold=1, lower_bound_threshold=0
),
)
# 5. Flattening to improve quantization and reduce modular wrapping.
nested_factory = rotation_factory(inner_agg_factory=nested_factory)
# 6. Concat the input structure into a single vector.
nested_factory = concat.concat_factory(inner_agg_factory=nested_factory)
return nested_factory
def _unpack_state(self, agg_state):
# Note: `agg_state` has a nested structure similar to the composed
# aggregator. Please print it to figure out how to correctly unpack the
# needed states. This is especially needed when you add, remove, or change
# any of the core composed aggregators.
# TODO: b/222162205 - Simplify how we compose states of nested aggregators.
rotation_state = agg_state # Concat has no states.
l2_clip_state, _ = rotation_state
discrete_state = l2_clip_state['inner_agg']
dp_state = discrete_state['inner_agg_process']
return l2_clip_state, discrete_state, dp_state
def _unpack_measurements(self, agg_measurements):
rotate_metrics = agg_measurements # Concat has no measurements.
l2_clip_metrics = rotate_metrics[self._rotation_type]
discrete_metrics = l2_clip_metrics['clipping']
dp_metrics = discrete_metrics['discretize']
return l2_clip_metrics, discrete_metrics, dp_metrics
def _autotune_component_states(self, agg_state):
"""Updates the nested aggregator state in-place.
This procedure makes the following assumptions: (1) this wrapper aggregator
has knowledge about the states of the component aggregators and their
Python containers, and can thus make in-place modifications directly; (2)
this aggregator has knowledge about the state of the `DPQuery` objects
(types and members) that are used by the `DifferentiallyPrivateFactory`, and
can thus update the members directly. Both assumptions should be revisited.
Args:
agg_state: The state of this aggregator, which is a nested object
containing the states of the component aggregators.
Returns:
The updated agg_state.
"""
@tensorflow_computation.tf_computation
def _update_scale(agg_state, new_l2_clip):
_, discrete_state, _ = self._unpack_state(agg_state)
new_central_stddev = new_l2_clip * self._value_noise_mult
new_local_stddev = new_central_stddev / math.sqrt(self._num_clients)
new_scale = _heuristic_scale_factor(
new_local_stddev,
new_l2_clip,
self._bits,
self._num_clients,
self._padded_dim,
self._k_stddevs,
)
# Very large scales could lead to overflows and are not as helpful for
# utility. See comment above for more details.
new_scale = tf.math.minimum(
new_scale, tf.constant(MAX_SCALE_FACTOR, dtype=tf.float64)
)
discrete_state['scale_factor'] = tf.cast(new_scale, tf.float32)
return agg_state
@tensorflow_computation.tf_computation
def _update_dp_params(agg_state, new_l2_clip):
_, discrete_state, dp_state = self._unpack_state(agg_state)
new_scale = discrete_state['scale_factor']
new_inflated_l2 = discretization.inflated_l2_norm_bound(
l2_norm_bound=new_l2_clip,
gamma=1.0 / new_scale,
beta=self._beta,
dim=self._padded_dim,
)
# Similarly include a norm bound leeway. See inline comment in
# `_build_aggregation_factory()` for more details.
new_scaled_inflated_l2 = (new_inflated_l2 + 1e-5) * new_scale
l1_fac = tf.minimum(math.sqrt(self._padded_dim), new_scaled_inflated_l2)
new_scaled_l1 = tf.math.ceil(new_scaled_inflated_l2 * l1_fac)
new_scaled_l1 = tf.cast(new_scaled_l1, tf.int32)
# Recompute noise stddevs.
new_central_stddev = new_l2_clip * self._value_noise_mult
new_local_stddev = new_central_stddev / math.sqrt(self._num_clients)
# Update DP params: norm bounds (uninflated/inflated) and local stddev.
dp_query_state = dp_state.query_state
if self._mechanism == 'distributed_dgauss':
new_dp_query_state = dp_query_state._replace(
l2_norm_bound=new_scaled_inflated_l2,
local_stddev=new_local_stddev * new_scale,
)
else:
new_dp_query_state = dp_query_state._replace(
l1_norm_bound=new_scaled_l1,
l2_norm_bound=new_scaled_inflated_l2,
local_stddev=new_local_stddev * new_scale,
)
new_dp_state = differential_privacy.DPAggregatorState(
new_dp_query_state,
dp_state.agg_state,
dp_state.dp_event,
dp_state.is_init_state,
)
discrete_state['inner_agg_process'] = new_dp_state
discrete_state['prior_norm_bound'] = new_l2_clip
return agg_state
l2_clip_state, _, _ = self._unpack_state(agg_state)
# NOTE(b/170893510): Explicitly declaring Union[float, EstimationProcess]
# for _l2_clip or doing isinstance() check still triggers attribute-error.
new_l2_clip = self._l2_clip.report(l2_clip_state['clipping_norm']) # pytype: disable=attribute-error
agg_state = intrinsics.federated_map(
_update_scale, (agg_state, new_l2_clip)
)
agg_state = intrinsics.federated_map(
_update_dp_params, (agg_state, new_l2_clip)
)
return agg_state
def _derive_measurements(self, agg_state, agg_measurements):
_, discrete_state, dp_state = self._unpack_state(agg_state)
l2_clip_metrics, _, dp_metrics = self._unpack_measurements(agg_measurements)
dp_query_state, _, _, _ = dp_state
actual_num_clients = intrinsics.federated_secure_sum_bitwidth(
intrinsics.federated_value(1, placements.CLIENTS), bitwidth=1
)
padded_dim = intrinsics.federated_value(
int(self._padded_dim), placements.SERVER
)
measurements = collections.OrderedDict(
l2_clip=l2_clip_metrics['clipping_norm'],
scale_factor=discrete_state['scale_factor'],
scaled_inflated_l2=dp_query_state.l2_norm_bound,
scaled_local_stddev=dp_query_state.local_stddev,
actual_num_clients=actual_num_clients,
padded_dim=padded_dim,
dp_query_metrics=dp_metrics['dp_query_metrics'],
)
return intrinsics.federated_zip(measurements)
def create(self, value_type):
# Checks value_type and compute client data dimension.
if isinstance(
value_type, computation_types.StructWithPythonType
) and type_analysis.is_structure_of_tensors(value_type):
num_elements_struct = type_conversions.structure_from_tensor_type_tree(
lambda x: x.shape.num_elements(), value_type
)
self._client_dim = sum(tf.nest.flatten(num_elements_struct))
elif isinstance(value_type, computation_types.TensorType):
self._client_dim = value_type.shape.num_elements()
else:
raise TypeError(
'Expected `value_type` to be `TensorType` or '
'`StructWithPythonType` containing only `TensorType`. '
f'Found type: {repr(value_type)}'
)
# Checks that all values are integers or floats.
if not (
type_analysis.is_structure_of_floats(value_type)
or type_analysis.is_structure_of_integers(value_type)
):
raise TypeError(
'Component dtypes of `value_type` must all be integers '
f'or floats. Found {repr(value_type)}.'
)
ddp_agg_process = self._build_aggregation_factory().create(value_type)
init_fn = ddp_agg_process.initialize
@federated_computation.federated_computation(
init_fn.type_signature.result, computation_types.at_clients(value_type)
)
def next_fn(state, value):
agg_output = ddp_agg_process.next(state, value)
new_measurements = self._derive_measurements(
agg_output.state, agg_output.measurements
)
new_state = agg_output.state
if self._auto_l2_clip:
new_state = self._autotune_component_states(agg_output.state)
return measured_process.MeasuredProcessOutput(
state=new_state,
result=agg_output.result,
measurements=new_measurements,
)
return aggregation_process.AggregationProcess(init_fn, next_fn)
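# A minimal usage sketch (illustrative values, not from the original source):
#   factory_ = DistributedDpSumFactory(
#       noise_multiplier=0.5, expected_clients_per_round=100, bits=20,
#       l2_clip=0.1, mechanism='distributed_skellam', rotation_type='dft')
#   process = factory_.create(computation_types.TensorType(tf.float32, [10]))
#   state = process.initialize()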
def _clip_prob_to_num_stddevs(clip_prob):
"""Computes the number of stddevs for the target clipping probability.
This function assumes (approximately) normal distributions. It is implemented
using TensorFlow to avoid depending on SciPy's `stats.norm.ppf` and it thus
assumes eager TF execution. This can be replaced with the `statistics` package
from Python >= 3.8.
Args:
clip_prob: A float for clipping probability in the exclusive range (0, 1).
Returns:
The number of standard deviations corresponding to the clip prob.
"""
return math.sqrt(2) * tf.math.erfcinv(clip_prob).numpy()
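# For example, the default modclip_prob of 1e-4 maps to roughly 3.89 standard
# deviations, consistent with the "roughly 3.9" figure in the __init__
# docstring above.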
def _heuristic_scale_factor(
local_stddev, l2_clip, bits, num_clients, dim, k_stddevs, rho=1.0
):
"""Selects a scaling factor by assuming subgaussian aggregates.
Selects scale_factor = 1 / gamma such that k stddevs of the noisy, quantized,
aggregated client values are bounded within the bit-width. The aggregate at
the server is assumed to follow a subgaussian distribution. Note that the
DDP algorithm is correct for any reasonable scaling factor, thus even if the
subgaussian assumption does not hold (e.g. in the case of distributed Skellam
which has sub-exponential tails), this function still provides a useful
heuristic. See Section 4.2 and 4.4 of https://arxiv.org/pdf/2102.06387.pdf
for more details.
Specifically, the implementation is solving for gamma using the following
expression:
2^b = 2k * sqrt(rho / dim * (cn)^2 + (gamma^2 / 4 + sigma^2) * n) / gamma.
Args:
local_stddev: The local noise standard deviation.
l2_clip: The initial L2 clip norm. See the __init__ docstring.
bits: The bit-width. See the __init__ docstring.
num_clients: The expected number of clients. See the __init__ docstring.
dim: The dimension of the client vector that includes any necessary padding.
k_stddevs: The number of standard deviations of the noisy and quantized
aggregate values to bound within the bit-width.
rho: (Optional) The subgaussian flatness parameter of the random orthogonal
transform as part of the DDP procedure. See Section 4.2 of the above paper
for more details.
Returns:
The selected scaling factor in tf.float64.
"""
bits = tf.cast(bits, tf.float64)
c = tf.cast(l2_clip, tf.float64)
dim = tf.cast(dim, tf.float64)
k_stddevs = tf.cast(k_stddevs, tf.float64)
n = tf.cast(num_clients, tf.float64)
sigma = tf.cast(local_stddev, tf.float64)
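  # Solving the docstring expression for gamma: multiply both sides by gamma,
  # square them, and collect the gamma^2 terms to obtain
  #   gamma = 2k * sqrt(rho / dim * (cn)^2 + n * sigma^2) / sqrt(2^(2b) - n * k^2),
  # so scale_factor = 1 / gamma is exactly numer / denom below.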
numer = tf.sqrt(2.0 ** (2.0 * bits) - n * k_stddevs**2)
denom = (
2.0 * k_stddevs * tf.sqrt(rho / dim * c**2 * n**2 + n * sigma**2)
)
scale_factor = numer / denom
return scale_factor
def _check_scalar(value, label):
is_bool = isinstance(value, bool)
is_py_scalar = isinstance(value, (int, float))
is_np_scalar = np.isscalar(value)
if is_bool or not (is_py_scalar or is_np_scalar):
raise TypeError(f'{label} must be a scalar. Found {repr(value)}.')
def _check_positive(value, label):
_check_scalar(value, label)
if value <= 0:
raise ValueError(f'{label} must be positive. Found {repr(value)}.')
def _check_nonnegative(value, label):
_check_scalar(value, label)
if value < 0:
raise ValueError(f'{label} must be nonnegative. Found {repr(value)}.')
def _check_integer(value, label):
_check_scalar(value, label)
if not isinstance(value, (int, np.integer)):
raise TypeError(f'{label} must be an integer. Found {repr(value)}.')
def _check_bool(value, label):
if not isinstance(value, bool):
raise TypeError(f'{label} must be a bool. Found {repr(value)}.')
def _check_str(value, label, options):
error_msg = f'`{label}` must be a string and one of {options}. Found {value}.'
if not isinstance(value, str):
raise TypeError(error_msg)
if value not in options:
raise ValueError(error_msg)
def _check_in_range(value, label, left, right, left_inclusive, right_inclusive):
"""Checks that a scalar value is in specified range."""
_check_scalar(value, label)
_check_bool(left_inclusive, 'left_inclusive')
_check_bool(right_inclusive, 'right_inclusive')
if left > right:
raise ValueError(f'left must be smaller than right; found {left}, {right}.')
left_cond = value >= left if left_inclusive else value > left
right_cond = value <= right if right_inclusive else value < right
if not left_cond or not right_cond:
raise ValueError(
f'{label} should be between {left} and {right} (with '
f'left_inclusive={left_inclusive} and right_inclusive='
f'{right_inclusive}). Found {value}.'
)
|
from interfaces.expr import Expr, UnaryOp, BinOp
from parsing.visitor import Visitor
def expr_size(e:Expr) -> int:
""" Counts the number of binary and unary operations. """
class Walker(Visitor):
def __init__(self):
self.size = 0
def visit_binary_op(self, binary_op:BinOp):
self.size += 1
return super().visit_binary_op(binary_op)
def visit_unary_op(self, unary_op:UnaryOp):
self.size += 1
return super().visit_unary_op(unary_op)
w = Walker()
w.dispatch(e)
return w.size
|
# The program totals the cost of items, applying a 5% discount to any item whose price exceeds 1000
price = float(input('Enter the item price: '))
cost = 0
while price >= 0:
if price > 1000:
cost = cost + (price - 0.05 * price)
else:
cost = cost + price
    price = float(input('Enter the item price: '))
# A zero or negative price is the stop signal
print(cost)
|
import logging
from lncrawl.templates.novelmtl import NovelMTLTemplate
logger = logging.getLogger(__name__)
class WuxiaNHCrawler(NovelMTLTemplate):
base_url = "https://www.wuxianovelhub.com/"
|
import math  # for sqrt
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
## Settings ##
DATA_LOCATION = "Iris.csv"
DEPENDENT = "Species"
SKIP_PLOT_GENERATION = True
IGNORE = ["Id", DEPENDENT]
TRAINING_PERCENT = 0.8 # These should sum to 1.0
TESTING_PERCENT = 0.2 # These should sum to 1.0
if abs(TRAINING_PERCENT + TESTING_PERCENT - 1.0) > 0.01:
raise Exception("Bad data splitting values")
### Global Variables ###
labels_g = None # list of data labels
data_g = {} # dictionary of data as [attribute][list of data]
range_g = {} # dictionary of range for each attribute as [attribute][min,max]
standard_g = {} # standardized data
correlation_g = None # correlation matrix for heat map
training_g = {} # training section of data [default 80%]
testing_g = {} # testing section [default 20%]
datapointCount_g = 0 # how many data points are in the data?
dependentValues = {} # convert dependent values to integers - keep track of that conversion
dependentProbability_g = {} # how likely each dependent value is
### Functions ###
def Min(a, b):
if a is None:
return b
if b is None:
return a
return min(a,b) #treat None as a special case - not absolute minimum
def Max(a, b):
if a is None:
return b
if b is None:
return a
return max(a,b)
def Clamp(value, mn, mx):
if value >= mx:
return mx
if value <= mn:
        return mn
    return value # value is now clamped to the range [mn, mx]
def Standardize(name, value):
mn = range_g[name][0]
mx = range_g[name][1]
#standardize a given value for a given name
return (value - mn) / (mx - mn) #standardize all data [0,1]
def StandardDeviation():
deviation = {} #[name][standard deviation]
for key in data_g:
count = len(data_g[key])
sum = 0.0
mean = (range_g[key][0] + range_g[key][1]) / 2
for value in data_g[key]:
sum += (value - mean) ** 2
deviation[key] = math.sqrt(sum / count)
return deviation #internal - build standard deviation for heatmap
def CalcDeviation(key, value):
    # rough per-attribute spread used by Evaluate: sqrt(value / N)
    count = len(data_g[key])
    return math.sqrt(value / count)
def BuildCorrelation():
zValues = {}
deviation = StandardDeviation()
correlation_g = {}
# generate z values
for key in data_g:
zValues[key] = []
mean = (range_g[key][0] + range_g[key][1]) / 2
for value in data_g[key]:
zValues[key].append((value - mean) / deviation[key])
# generate r values
rValues = {}
count = 0
for key in zValues:
sum = 0
i = 0
for value in zValues[key]:
sum += value * zValues[DEPENDENT][i]
i += 1
count = len(data_g[key])
rValues[key] = Clamp(sum / (count - 1), -1.0, 1.0)
for keyB in zValues:
correlation_g[keyB] = {}
for keyA in zValues:
sum = 0
i = 0
for value in zValues[keyA]:
sum += value * zValues[keyB][i]
i += 1
correlation_g[keyB][keyA] = Clamp(sum / (count - 1), -1.0, 1.0) #internal - build correlation mat for heatmap
def GetMean(attribute, result):
    # mean of the attribute over testing rows belonging to class `result`
    sum = 0.0
    i = 0
    total = 0
    for value in testing_g[attribute]:
        if result == testing_g[DEPENDENT][i]:
            sum += value
            total += 1
        i += 1
    return sum / total if total else 0.0
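# Evaluate below follows the naive Bayes decision rule: for each class, take the
# product of per-attribute Gaussian densities N(x; mean, stdev) and return the
# class with the largest product (class priors are not factored in here).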
def Evaluate(datapoint):
gauss ={}
results={}
for result in dependentValues:
for key in testing_g:
if key not in IGNORE:
mean = GetMean(key, dependentValues[result])
x = datapoint[key]
stdev = CalcDeviation(key, x)
exponent = math.exp(-0.5 * ((x - mean) / stdev)**2)
gauss[key] =(1 / (math.sqrt(2 * math.pi) * stdev)) * exponent
for key in gauss:
if dependentValues[result] not in results:
results[dependentValues[result]] = 1
value = gauss[key]
if value != 0:
results[dependentValues[result]] *= value
highest = 0
highestKey = 0
for key in results:
if results[key] > highest:
highest = results[key]
highestKey = key
return highestKey
def GetDepName(value):
for key in dependentValues:
if dependentValues[key] == value:
return key
def GetPortionOfData(percent, start):
portion = {}
start = int(start)
end = int(percent * datapointCount_g) + start
for key in data_g:
i = 0
portion[key] = []
for value in data_g[key]:
if i >= start:
portion[key].append(value)
i+=1
if i >= end:
break
return portion #split data into training, validation, testing
def BuildDataPoint(dataset, i):
test = {}
for key in dataset:
test[key] = dataset[key][i]
return test #build a datapoint[i]
def PrintAccuracy(isReal):
testingLen = len(testing_g[DEPENDENT])
    if not isReal:
        for key in standard_g:
            for i in range(len(testing_g[key])):
                testing_g[key][i] = standard_g[key][i]
accuracy = 0.0
correct = 0
for i in range(0, testingLen):
datapoint = BuildDataPoint(testing_g, i)
# Built in library accuracy
evalValue = 0
        if isReal:
            row = []  # feature vector for the sklearn model (avoid shadowing list)
            for key in datapoint:
                if key not in IGNORE:
                    row.append(datapoint[key])
            evalValue = model.predict([row])[0]
else:
evalValue = Evaluate(datapoint)
correctValue = testing_g[DEPENDENT][i]
if evalValue == correctValue : correct+=1
accuracy = correct / testingLen
    if isReal:
        print("Evaluating built-in library:")
    else:
        print("Evaluating hand-built functions:")
print("Correct:",correct," total:",testingLen)
print("Accuracy:",round(100*accuracy,2), "% on Testing set\n") #print accuracy data
return accuracy
### Load the data ###
print("Loading data...\n")
dataFile = open(DATA_LOCATION, "r")
rawData = dataFile.readlines()
for row in rawData:
tokList = row.split("\n")[0].split(",")
if labels_g is None:
labels_g = tokList
else:
i = 0
for label in labels_g:
if label not in data_g:
data_g[label] = []
range_g[label] = (None, None) #(Min, Max)
try:
asf = float(tokList[i])
data_g[label].append(asf)
range_g[label] = (Min(asf, range_g[label][0]), Max(asf, range_g[label][1]))
except:
if tokList[i] not in dependentValues:
dependentValues[tokList[i]] = len(dependentValues)
data_g[label].append(dependentValues[tokList[i]])
range_g[label] = (Min(dependentValues[tokList[i]], range_g[label][0]), Max(dependentValues[tokList[i]], range_g[label][1]))
i = i+1
### Check for missing values ###
for key in data_g:
length = len(data_g[key])
if datapointCount_g != 0 and datapointCount_g != length:
        print("ERROR!\n" + key + " has " + str(length) + ' values! Everyone else has ' + str(datapointCount_g))
raise Exception("Missing Value")
datapointCount_g = length
print("Loading file complete - no missing values")
### Standardize data ###
for key in data_g:
if key not in IGNORE:
standard_g[key] = []
for value in data_g[key]:
standard_g[key].append(Standardize(key, value))
print("All values standardized [0,1]")
### Plot Heatmaps ###
StandardDeviation()
if SKIP_PLOT_GENERATION == False:
print("Creating heatPlot...\n")
#correlation_g = BuildCorrelation()
plt.figure(figsize=(16,16))
df = pd.read_csv(DATA_LOCATION)
corr = df.corr()
heatmap = sns.heatmap(corr, vmin=-1, vmax=1, center=0, cmap="coolwarm")
figure = heatmap.get_figure()
figure.savefig("heat.png")
figure.clf()
else:
print("Plot generation omitted")
### split data ###
training_g = GetPortionOfData(TRAINING_PERCENT,0)
testing_g = GetPortionOfData(TESTING_PERCENT, TRAINING_PERCENT * datapointCount_g)
### prior probabilities P(Y)###
for value in data_g[DEPENDENT]:
if value not in dependentProbability_g:
dependentProbability_g[value] = 0
dependentProbability_g[value]+=1
for key in dependentProbability_g:
dependentProbability_g[key]/=datapointCount_g
### Real Evaluation ###
model = GaussianNB()
features =[]
index = int(TRAINING_PERCENT * datapointCount_g)
for i in range(0, index):
    row = []  # avoid shadowing the list builtin
    for key in data_g:
        if key not in IGNORE:
            row.append(data_g[key][i])
    features.append(tuple(row))
label = data_g[DEPENDENT][:index]
model.fit(features,label)
PrintAccuracy(True)
PrintAccuracy(False)
### Evaluate ###
inp = None
while(inp != "n"):
print("Do you want to enter data to make a prediction? [Y]es No")
inp = input()
inp = inp.upper()
if inp == "N":
break
string = "\n"
for label in labels_g:
if label not in IGNORE:
string += label + " "
print(string)
    print("enter a comma-separated list of continuous data values")
toks = input().split("\n")[0].split(",")
if(len(toks) == len(labels_g)-len(IGNORE)):
datapoint = {}
i = 0
for label in labels_g:
if label not in IGNORE:
denom = range_g[label][1] - range_g[label][0]
try:
datapoint[label] = (float(toks[i]) - range_g[label][0]) / denom
except:
datapoint[label] = (ord(toks[i]) - range_g[label][0]) / denom
i+=1
print("Result: ", GetDepName(Evaluate(datapoint)))
else:
print("Mismatching data point, you are probably missing a column or a comma")
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'modificacionDeStockHerramientas.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(842, 663)
self.he_input_buscar = QtWidgets.QLineEdit(Form)
self.he_input_buscar.setGeometry(QtCore.QRect(310, 80, 251, 31))
self.he_input_buscar.setStyleSheet("border:none;\n"
"background-color: rgb(255, 255, 255);\n"
"font: 11pt \"MS Shell Dlg 2\";")
self.he_input_buscar.setText("")
self.he_input_buscar.setObjectName("he_input_buscar")
self.he_btn_buscar = QtWidgets.QPushButton(Form)
self.he_btn_buscar.setGeometry(QtCore.QRect(570, 80, 131, 31))
self.he_btn_buscar.setStyleSheet("background-color: rgb(199, 199, 199);\n"
"color:black;\n"
"font-size:10pt;\n"
"border:none;")
self.he_btn_buscar.setObjectName("he_btn_buscar")
self.label = QtWidgets.QLabel(Form)
self.label.setGeometry(QtCore.QRect(120, 80, 171, 31))
self.label.setStyleSheet("font: 11pt \"MS Shell Dlg 2\";\n"
"text-align:center;")
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(Form)
self.label_2.setGeometry(QtCore.QRect(10, 10, 831, 41))
self.label_2.setStyleSheet("font-size:20px;\n"
"")
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.he_input_3 = QtWidgets.QLineEdit(Form)
self.he_input_3.setEnabled(False)
self.he_input_3.setGeometry(QtCore.QRect(330, 420, 251, 31))
self.he_input_3.setStyleSheet("border:none;\n"
"background-color: rgb(255, 255, 255);\n"
"font: 11pt \"MS Shell Dlg 2\";\n"
"color: rgb(0, 0, 0);")
self.he_input_3.setText("")
self.he_input_3.setObjectName("he_input_3")
self.he_input_2 = QtWidgets.QLineEdit(Form)
self.he_input_2.setEnabled(False)
self.he_input_2.setGeometry(QtCore.QRect(330, 370, 251, 31))
self.he_input_2.setStyleSheet("border:none;\n"
"background-color: rgb(255, 255, 255);\n"
"font: 11pt \"MS Shell Dlg 2\";\n"
"color: rgb(0, 0, 0);")
self.he_input_2.setText("")
self.he_input_2.setObjectName("he_input_2")
self.he_input_1 = QtWidgets.QLineEdit(Form)
self.he_input_1.setEnabled(False)
self.he_input_1.setGeometry(QtCore.QRect(330, 320, 251, 31))
self.he_input_1.setStyleSheet("border:none;\n"
"background-color: rgb(255, 255, 255);\n"
"font: 11pt \"MS Shell Dlg 2\";\n"
"color: rgb(0, 0, 0);")
self.he_input_1.setObjectName("he_input_1")
self.label_3 = QtWidgets.QLabel(Form)
self.label_3.setGeometry(QtCore.QRect(200, 320, 111, 31))
self.label_3.setStyleSheet("font: 11pt \"MS Shell Dlg 2\";\n"
"text-align:center;")
self.label_3.setObjectName("label_3")
self.label_5 = QtWidgets.QLabel(Form)
self.label_5.setGeometry(QtCore.QRect(200, 420, 111, 31))
self.label_5.setStyleSheet("font: 11pt \"MS Shell Dlg 2\";\n"
"text-align:center;")
self.label_5.setObjectName("label_5")
self.label_4 = QtWidgets.QLabel(Form)
self.label_4.setGeometry(QtCore.QRect(200, 370, 121, 31))
self.label_4.setStyleSheet("font: 11pt \"MS Shell Dlg 2\";\n"
"text-align:center;")
self.label_4.setObjectName("label_4")
self.he_btn_confirmar = QtWidgets.QPushButton(Form)
self.he_btn_confirmar.setGeometry(QtCore.QRect(270, 590, 131, 31))
self.he_btn_confirmar.setStyleSheet("background-color: rgb(99, 206, 104);\n"
"color:white;\n"
"font-size:10pt;\n"
"border:none;")
self.he_btn_confirmar.setObjectName("he_btn_confirmar")
self.he_btn_cancelar = QtWidgets.QPushButton(Form)
self.he_btn_cancelar.setGeometry(QtCore.QRect(430, 590, 131, 31))
self.he_btn_cancelar.setStyleSheet("color:white;\n"
"font-size:10pt;\n"
"border:none;\n"
"background-color:#ff4e4e;")
self.he_btn_cancelar.setObjectName("he_btn_cancelar")
self.label_6 = QtWidgets.QLabel(Form)
self.label_6.setGeometry(QtCore.QRect(200, 470, 111, 31))
self.label_6.setStyleSheet("font: 11pt \"MS Shell Dlg 2\";\n"
"text-align:center;")
self.label_6.setObjectName("label_6")
self.he_input_4 = QtWidgets.QTextEdit(Form)
self.he_input_4.setEnabled(False)
self.he_input_4.setGeometry(QtCore.QRect(330, 470, 251, 31))
self.he_input_4.setStyleSheet("border:none;\n"
"background-color: rgb(255, 255, 255);\n"
"font: 11pt \"MS Shell Dlg 2\";\n"
"color: rgb(0, 0, 0);")
self.he_input_4.setObjectName("he_input_4")
self.he_tabla = QtWidgets.QTreeWidget(Form)
self.he_tabla.setGeometry(QtCore.QRect(10, 120, 821, 181))
self.he_tabla.setObjectName("he_tabla")
font = QtGui.QFont()
font.setPointSize(12)
self.he_tabla.headerItem().setFont(0, font)
font = QtGui.QFont()
font.setPointSize(11)
self.he_tabla.headerItem().setFont(1, font)
font = QtGui.QFont()
font.setPointSize(11)
self.he_tabla.headerItem().setFont(2, font)
font = QtGui.QFont()
font.setPointSize(11)
self.he_tabla.headerItem().setFont(3, font)
font = QtGui.QFont()
font.setPointSize(11)
self.he_tabla.headerItem().setFont(4, font)
font = QtGui.QFont()
font.setPointSize(12)
self.he_tabla.headerItem().setFont(5, font)
font = QtGui.QFont()
font.setPointSize(11)
self.he_tabla.headerItem().setFont(6, font)
self.label_7 = QtWidgets.QLabel(Form)
self.label_7.setGeometry(QtCore.QRect(200, 520, 111, 31))
self.label_7.setStyleSheet("font: 11pt \"MS Shell Dlg 2\";\n"
"text-align:center;")
self.label_7.setObjectName("label_7")
self.he_input_5 = QtWidgets.QLineEdit(Form)
self.he_input_5.setGeometry(QtCore.QRect(330, 520, 251, 31))
self.he_input_5.setStyleSheet("border:none;\n"
"background-color: rgb(255, 255, 255);\n"
"font: 11pt \"MS Shell Dlg 2\";")
self.he_input_5.setText("")
self.he_input_5.setObjectName("he_input_5")
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Modificación de Stock"))
self.he_btn_buscar.setText(_translate("Form", "Buscar"))
self.label.setText(_translate("Form", "Ingrese código del articulo"))
self.label_2.setText(_translate("Form", "Modificacion de Stock (Herramientas)"))
self.he_input_1.setText(_translate("Form", "123"))
self.label_3.setText(_translate("Form", "Cantidad actual"))
self.label_5.setText(_translate("Form", "Cantidad minima"))
self.label_4.setText(_translate("Form", "Cantidad maxima"))
self.he_btn_confirmar.setText(_translate("Form", "Confirmar"))
self.he_btn_cancelar.setText(_translate("Form", "Cancelar"))
self.label_6.setText(_translate("Form", "Descripción"))
self.he_tabla.headerItem().setText(0, _translate("Form", "Codigo"))
self.he_tabla.headerItem().setText(1, _translate("Form", "Descripcion"))
self.he_tabla.headerItem().setText(2, _translate("Form", "Cantidad de Stock disponible"))
self.he_tabla.headerItem().setText(3, _translate("Form", "Estado"))
self.he_tabla.headerItem().setText(4, _translate("Form", "Stock utilizado en los ultimos 30 dias"))
self.he_tabla.headerItem().setText(5, _translate("Form", "Stock Minimo permitido"))
self.he_tabla.headerItem().setText(6, _translate("Form", "Stock Maximo permitido"))
self.label_7.setText(_translate("Form", "Ingreso"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Form = QtWidgets.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
|
from instapy_cli import client
from postObject import post
import json
class Agent:
def __init__(self, username: str, password: str):
self.username = username
self.password = password
def upload_to_instagram(self, post):
with client(self.username, self.password) as cli:
cli.upload(post.path, post.caption)
if __name__ == "__main__":
with open('login.config') as json_data:
data = json.load(json_data)
username = data['username']
password = data['password']
        print('logging in as', username)  # avoid printing the password
instpost = post('pics/AAGxnz2x.jpg', 'test upload', True)
print('post:', instpost)
        uploader = Agent(username, password)
print('attempt upload')
uploader.upload_to_instagram(instpost)
print('finished upload')
|
from Reader import reader, make_arrays
from visualization import plot_graphics
from scr.algorithms import correlation_function, normalization
if __name__ == '__main__':
data = reader("../data/21022518.txt")
arr1, arr2, arr3, arr4, arr5, arr6, arr7, arr8, arr9, arr10, arr11, arr12 = make_arrays(data)
    plot_graphics(arr9, arr12, 'plasma_pos', 't, ms', 'plasma_pos')
    plot_graphics(arr9, arr8, 'neutron_glob14', 't, ms', 'neutron_glob14')
    plot_graphics(arr9, arr10, 'neutron_glob12', 't, ms', 'neutron_glob12')
tau, corr = correlation_function(arr8, arr12)
plot_graphics(tau, corr, 'corr_func', 'tau', 'corr_14')
corr = normalization(corr)
plot_graphics(tau, corr, 'norm_corr_func', 'tau', 'n_corr_14')
tau, corr = correlation_function(arr10, arr12)
plot_graphics(tau, corr, 'corr_func', 'tau', 'corr_12')
corr = normalization(corr)
plot_graphics(tau, corr, 'norm_corr_func', 'tau', 'n_corr_12')
tau, corr = correlation_function(arr8, arr10)
|
# Generated by Django 2.0.3 on 2018-03-28 12:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0012_auto_20180328_1523'),
]
operations = [
migrations.AlterField(
model_name='project',
name='status',
field=models.IntegerField(choices=[(1, 'Unknown'), (2, 'Mobilization'), (3, 'Active'), (4, 'Rectification'), (5, 'Suspended'), (6, 'Terminated'), (7, 'Closed')], default=1, verbose_name='Project Status'),
),
]
|
#MAIN PROGRAM FILE
import numpy as np
from itertools import combinations
#finds the intersection between two lists
def intersectscore(i,j):
return len(set(i).intersection(j))
def v_combiner(pd,id,combinations_of_pairs):
array_of_intersect_score=[]
no_of_pic=id["N"]
    pic_array = [i for i in range(no_of_pic)]  # note: currently unused in this function
for i in combinations_of_pairs:
array_of_intersect_score.append(intersectscore(pd[str(i[0])],pd[str(i[1])]))
return sum(array_of_intersect_score)
def choose_best_v(arr):
combination_array=[]
for j in arr:
combination_array.append(v_combiner(pd,id,j))
return combination_array.index(max(combination_array))
def interest_factor_func(index_1, index_2):
""" CREATES INTEREST FACTOR FROM THE INDEXES (WHICH MAY BE A TUPLE) OF THE TWO SLIDE CANDIDATES """
# find tags_1 and tags_2
if type(index_1) == int:
tags_1 = inputData[str(index_1)][2:]
else:
tags_1 = inputData[str(index_1[0])][2:] + inputData[str(index_1[1])][2:]
if type(index_2) == int:
tags_2 = inputData[str(index_2)][2:]
else:
tags_2 = inputData[str(index_2[0])][2:] + inputData[str(index_2[1])][2:]
    # common tags: start from the multiset of all tags and strip one copy of
    # each distinct tag, leaving exactly the tags that occur in both slides
    union = set(tags_1 + tags_2)
    intersect = tags_1 + tags_2
    for tag in union:
        intersect.remove(tag)
    interest_factor_1 = intersect  # tags common to both slides
    interest_factor_2 = tags_1     # becomes tags only in slide 1
    interest_factor_3 = tags_2     # becomes tags only in slide 2
    for tag in intersect:
        interest_factor_2.remove(tag)
        interest_factor_3.remove(tag)
    return min([len(interest_factor_1), len(interest_factor_2), len(interest_factor_3)])
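# Worked example (hypothetical tags): slide 1 has [a, b, c] and slide 2 has
# [b, c, d]; common = [b, c] (2), only-in-1 = [a] (1), only-in-2 = [d] (1),
# so the interest factor is min(2, 1, 1) = 1.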
def find_common_tags(index, INDEXES): # INDEXES represents every available slide that can occur =>> a tuple represents a pair of pictures for a vertical picture
""" TAKES LIST OF INDEXES OF PHOTOS (INC TUPLES REPRESENTING VERTICAL SLIDES)
AND THE CURRENT TAGS AND RETURNS A LIST OF INDEXES WHICH HAVE A TAG IN COMMON """
global inputData
INDEXES_with_common_tags = []
# tags_current is a list of the tags of the current slide
if type(index) == int:
tags_current = inputData[str(index)][2:]
else:
tags_current = inputData[str(index[0])][2:] + inputData[str(index[1])][2:]
for tag in tags_current: # for each tag of the current slide
        for candidate in INDEXES:  # and for each candidate slide (renamed to avoid shadowing the parameter)
            if type(candidate) == int:  # a single horizontal photo
                if tag in inputData[str(candidate)][2:]:  # test if tag in this indexed photo
                    INDEXES_with_common_tags.append(candidate)
            else:  # a tuple: a vertical-photo pair
                for sub_index in candidate:  # for each photo in the pair
                    if tag in inputData[str(sub_index)][2:]:  # keys of inputData are strings
                        INDEXES_with_common_tags.append(candidate)
return INDEXES_with_common_tags
def produce_referencable_edge_weights_array():
""" FROM inputData IT PRODUCES A LARGE ARRAY OF THE WEIGHTS OF THE EDGES BETWEEN THE POSSIBLE SLIDES IT CAN GO FROM AND TO! """
global inputData
N = np.zeros([len(inputData), len(inputData)])
for f, index_from in enumerate(inputData):
for t, index_to in enumerate(inputData):
N[f, t] = interest_factor_func(index_from, index_to)
return N
def slide_combiner_3(slide_id_list):
# a list of slide ids for output
output = [0]
# start with slide 0
i = 0
# Iterate through each slide from i=0 and find the most compatible subsequent slide.
    while len(slide_id_list) > 1:
        max_score = -1
        max_score_id = None
        # find compatible slides
        compatible_slides = find_common_tags(i, slide_id_list)
        # remove compatible slides that are already in output
        compatible_slides = [x for x in compatible_slides if x not in output]
        # Iterate through each compatible slide to find the best one
        for j in range(len(compatible_slides)):
            score = interest_factor_func(i, compatible_slides[j])
            # Update max_score_id for the best slide
            if score > max_score:
                max_score = score
                max_score_id = compatible_slides[j]
        if max_score_id is None:
            # no remaining slide shares a tag with the current one; stop here
            break
        output.append(max_score_id)
        try:
            slide_id_list.remove(max_score_id)
        except ValueError:
            pass
        i = max_score_id
return output
def output_file(output):
with open("output.txt", 'w') as fileout:
fileout.write("{}\n".format(len(output)))
for i in output:
if type(i) == list:
fileout.write("{} {}\n".format(i[0], i[1]))
else:
fileout.write("{}\n".format(i))
fileout.close()
#declare empty dictionary to hold input data
inputData = {}
#open the input file for reading
filename = "C:\\Users\\Theodore\\Documents\\Coding Projects\\Google-Hash-Code-2019\\theo\\a_example.txt"
inputFile = open(filename,"r")
inputData["N"] = int(inputFile.readline())
#get input data for each pictures
for i in range(inputData["N"]):
picData = inputFile.readline().rstrip('\n')
picDataList = picData.split(" ")
inputData[str(i)]=picDataList
#create empty arrays to hold id's of horizontally and vertically oriented pictures
picturesH = []
picturesV = []
for pictureNo in range(inputData["N"]):
if inputData[str(pictureNo)][0] == 'H':
picturesH.append(pictureNo)
else:
picturesV.append(pictureNo)
#create new dictionary to hold list of tags
pictureTags={}
for pictureNo in range(inputData["N"]):
pictureTags[str(pictureNo)] = inputData[str(pictureNo)][2:]
#kisekis function
# Get all combinations of [1, 2, 3]
# and length 2
#combinations_of_pairs = list(combinations(pic_array, 2))
slider_input_list = picturesH
outputIDs = slide_combiner_3(picturesH)
output_file(outputIDs)
|
from django.core.management.base import BaseCommand
from django.contrib.gis.geos import Polygon
from django.db import transaction
from api.models import Region
class Command(BaseCommand):
help = "update regions with bbox. To run, python manage.py update-region-bbox"
@transaction.atomic
def handle(self, *args, **options):
regions = [
{
'id': 0,
'bbox': (
-32.99047851563364,
-47.08120743260383,
67.90795898435852,
41.72638107989897
)
},
{
'id': 1,
'bbox': (
-128.9670410156471,
-57.359018263521115,
-5.217041015634948,
67.04395625933893
)
},
{
'id': 2,
'bbox': (
40.44219970775032,
-60.71287461544427,
201.6028442389869,
54.2178334601123
)
},
{
'id': 3,
'bbox': (
-27.66226811933268,
16.635693975568614,
49.79104043156599,
63.5750514523651
)
},
{
                'id': 4,
'bbox': (
-29.826416015646174,
10.082644243860557,
72.20141321303817,
52.44608914954304
)
}
]
        bbox_by_id = {r['id']: r['bbox'] for r in regions}
        results = Region.objects.all()
        for region in results:
            bbox = Polygon.from_bbox(bbox_by_id[region.name.value])
            region.bbox = bbox
            region.save()
import os, sys, time
from psychopy import visual, core, data, logging
from .task_base import Task
from ..shared import config
STIMULI_DURATION = 4
BASELINE_BEGIN = 5
BASELINE_END = 5
ISI = 1
IMAGES_FOLDER = "/home/basile/data/projects/task_stimuli/BOLD5000_Stimuli/Scene_Stimuli/Presented_Stimuli/ImageNet"
STIMULI_SIZE = (400, 400)
quadrant_id_to_pos = [(-200, 100), (200, 100), (-200, -100), (200, -100)]
class ImagePosition(Task):
    DEFAULT_INSTRUCTION = """You will be presented with a set of items in different quadrants of the screen.
Try to remember the items and their location on the screen."""
def __init__(self, items_list, *args, **kwargs):
super().__init__(**kwargs)
# TODO: image lists as params, subjects ....
self.item_list = data.importConditions(items_list)
def _instructions(self, exp_win, ctl_win):
screen_text = visual.TextStim(
exp_win,
text=self.instruction,
alignText="center",
color="white",
wrapWidth=config.WRAP_WIDTH,
)
for frameN in range(config.FRAME_RATE * config.INSTRUCTION_DURATION):
screen_text.draw(exp_win)
if ctl_win:
screen_text.draw(ctl_win)
yield ()
def _run(self, exp_win, ctl_win):
trials = data.TrialHandler(self.item_list, 1, method="sequential")
img = visual.ImageStim(exp_win, size=STIMULI_SIZE, units="pix")
exp_win.logOnFlip(
level=logging.EXP, msg="memory: task starting at %f" % time.time()
)
for frameN in range(config.FRAME_RATE * BASELINE_BEGIN):
yield ()
for trial in trials:
image_path = trial["image_path"]
img.image = image_path
img.pos = quadrant_id_to_pos[trial["quadrant"]]
exp_win.logOnFlip(
level=logging.EXP,
msg="memory: display %s in quadrant %d"
% (image_path, trial["quadrant"]),
)
for frameN in range(config.FRAME_RATE * STIMULI_DURATION):
img.draw(exp_win)
if ctl_win:
img.draw(ctl_win)
yield ()
exp_win.logOnFlip(level=logging.EXP, msg="memory: rest")
for frameN in range(config.FRAME_RATE * ISI):
yield ()
for frameN in range(config.FRAME_RATE * BASELINE_END):
yield ()
|
text = "www.tutorialspoint.com"
print("Min character: " + min(text))
text = "TUTORIALSPOINT"
print("Min character: " + min(text))
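# Note: min() on a string compares characters by Unicode code point, so the
# first call prints '.' (punctuation sorts before letters) and the second 'A'.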
|
import gym
import DQN_model
#from tensegrityEnvironment import *
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.deepq.policies import LnMlpPolicy, MlpPolicy
from stable_baselines import DQN
# Instantiate and wrap the env
#env = DummyVecEnv([lambda: tensegrityEnvironment])
env = gym.make('tensegrity-v0')
env = DummyVecEnv([lambda: env])
# Define and Train the agent
model = DQN('LnMlpPolicy', env, verbose=1) # , prioritized_replay=True
model.learn(total_timesteps=100000) # 25000
model.save("agentTraj_2")
del model # remove to demonstrate saving and loading
model = DQN.load("agentTraj_2")
obs = env.reset()
while True:
action, _states = model.predict(obs)
obs, rewards, dones, info = env.step(action)
env.render() |
"""
Core functions to perform clustering
"""
#######################################################################
## Imports
#######################################################################
import numpy as np
import numexpr as ne
import datetime
#######################################################################
## Learning functions
#######################################################################
def NaiveBayesClustering(X, k, Eps=0.01, verbose=False, use_parallel=False):
"""
Performs clustering of the dataset
@param X: the dataset
@param k: the number of clusters
@param Eps: The stopping criterion for EM
    @param verbose: whether or not to show the error
    """
    # First, estimate the model parameters with the spectral method (ASVTD)
    M, omega = ASVTD(X, k)
    # Then plug those parameters into EM as the initialization
if not use_parallel:
M, omega, assignment = EM(X,M,omega, Eps, verbose)
else:
M, omega, assignment = EM_parallel(X,M,omega, Eps, verbose)
#From EM obtains the clustering
CL = np.argmax(assignment, 1)
return M, omega, CL
def ASVTD(X, k):
"""
Learn an approximate pair M, omega
@param X: the dataset
@param k: the number of clusters
"""
N, n = X.shape
E = np.sum(X, 0) / N
u,s,v = np.linalg.svd(np.transpose(X).dot(X) / N)
u = u[:,:k].dot((np.diag(np.sqrt(s[:k]))))
pu = np.linalg.pinv(u)
Z = pu.dot(X.T)
HMin = 0
H = []
M = np.zeros([n, k])
for i in range(0, n):
Y = X[:, i].reshape((N, 1))
H.append((Z*Y.T).dot(Z.T)/N)
h, s, v = np.linalg.svd(H[i])
if np.min(-np.diff(s)) > HMin:
HMin = np.min(-np.diff(s))
O = h
for i in range(0, n):
s = np.diag(np.transpose(O).dot(H[i]).dot(O))
M[i, :] = s
    x = np.linalg.lstsq(M, E, rcond=None)
omega = x[0] ** 2
omega = omega / sum(omega)
return M, omega
def numexpr_app(X, a, b):
XT = X.T
return ne.evaluate('log(XT * b + a)').sum(0)
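# For binary x, the expression x * b + a with a = 1 - mu and b = 2 * mu - 1
# evaluates to mu when x == 1 and to 1 - mu when x == 0, so the sum of logs
# above is the Bernoulli log-likelihood of each sample under center mu.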
def numexpr_app2(X, mu):
if len(mu)>0:
log2 = ne.evaluate('log(1-mu)')
log1 = ne.evaluate('log(mu) - log2')
else:
log2 = np.log(1-mu)
log1 = np.log(mu) - log2
return (X.T * log1).sum(0) + (log2).sum(0)
def EM(X, M, omega, Eps=0.001, verbose=False):
"""
Implementation of EM to learn a NBM with binary variables
@param X: the dataset
@param M: the centers of the mixture
@param omega: the mixing weights
@param Eps: the stopping criterion
    @param verbose: whether or not to show the error
"""
n,k = M.shape
N,n = X.shape
it = 1
wM = M.copy()
womega = omega.copy()
womega[womega<0] = 0.000001
womega = womega/womega.sum()
omega_old = womega.copy()+1
while np.sum(np.abs(womega-omega_old)) > Eps:
assignments = np.zeros((N, k))
for i in range(k):
mu = wM[:,i].reshape(n, 1)
mu[mu <= 0.00000001] = 0.00000001
mu[mu > 1] = 0.99999
a = 1 - mu
b = (2 * mu - 1)
assignments[:, i] = numexpr_app(X, a, b)+ np.log(womega[i])
assignments -= np.max(assignments, 1).reshape(len(assignments), 1)
assignments = np.exp(assignments)
assignments /= np.sum(assignments,1).reshape(N,1)
omega_old = womega.copy()
womega = np.sum(assignments,0)/np.sum(assignments)
if verbose:
print(np.sum(np.abs(womega-omega_old)))
print(womega)
wM = X.T.dot(assignments)/np.sum(assignments,0)
it+=1
return wM,womega,assignments
def EM_parallel(X, M, omega, Eps=0.001, verbose=False):
"""
Implementation of EM to learn a NBM with binary variables
@param X: the dataset
@param M: the centers of the mixture
@param omega: the mixing weights
@param Eps: the stopping criterion
    @param verbose: whether or not to show the error
"""
n, k = M.shape
N, n = X.shape
it = 1
wM = M.copy()
womega = omega.copy()
womega[womega < 0] = 0.000001
womega = womega / womega.sum()
omega_old = womega.copy() + 1
XT = X.T.reshape((n, N, 1))
while np.sum(np.abs(womega - omega_old)) > Eps:
wM[wM <= 0.00001] = 0.00001
wM[wM >= 0.99999] = 0.99999
log2 = ne.evaluate('log(1 - wM)').reshape((n, 1, k))
log1 = ne.evaluate('log(wM)').reshape((n, 1, k)) - log2
assignments = (XT * log1).sum(0) + (log2).sum(0)
assignments -= np.max(assignments, 1).reshape(len(assignments), 1)
assignments = np.exp(assignments)
assignments /= np.sum(assignments, 1).reshape(N, 1)
omega_old = womega.copy()
womega = np.sum(assignments, 0) / np.sum(assignments)
if verbose:
print(np.sum(np.abs(womega - omega_old)))
print(womega)
wM = X.T.dot(assignments) / np.sum(assignments, 0)
it += 1
return wM, womega, assignments
def EM_process_cluster(X, i, wM, womega):
n, d = X.shape
mu = wM[:, i].reshape(d, 1)
mu[mu <= 0.00000001] = 0.00000001
mu[mu > 1] = 0.99999
a = 1 - mu
b = (2 * mu - 1)
return (i,numexpr_app(X, a, b) + np.log(womega[i]))
|
import requests,openpyxl
# Send a request
def api_func(url_api,data_api):
header = {'X-Lemonban-Media-Type':'lemonban.v2','Content-Type':'application/json'}
response=requests.post(url=url_api,json=data_api,headers=header)
result=response.json()
return result
# Write a result back to the spreadsheet
def write_result(filename,sheetname,final_result,row,column):
wb = openpyxl.load_workbook(filename)
sheet = wb[sheetname]
sheet.cell(row=row,column=column).value = final_result
wb.save(filename)
# Read the spreadsheet
def read_data(filename,sheetname):
wb = openpyxl.load_workbook(filename)
sheet = wb[sheetname]
    # Get the number of rows
max_row = sheet.max_row
cases = []
for i in range(2,max_row+1):
case = dict(
case_id = sheet.cell(row=i,column=1).value,
url = sheet.cell(row=i,column=5).value,
data = sheet.cell(row=i,column=6).value,
expect_result = sheet.cell(row=i,column=7).value)
cases.append(case)
return cases
def execute_func(filename, sheetname):
    cases = read_data(filename, sheetname)  # load all test cases
    for case in cases:  # iterate over each test case
        case_id = case.get('case_id')  # pull out id, url, data and expected result
        url = case.get('url')
        data = case['data']
        data1 = eval(data)  # cells store dict literals, so eval them into dicts
        expect = case.get('expect_result')
        expect1 = eval(expect)
        expect_msg = expect1.get('msg')
        real_result = api_func(url_api=url, data_api=data1)  # send the request with the case data
        real_msg = real_result.get('msg')  # actual response message
        print("Expected result: {}".format(expect_msg))
        print("Actual result: {}".format(real_msg))
        if real_msg == expect_msg:
            print("Test case {} passed".format(case_id))
            final_result = "Passed"
        else:
            print("Test case {} failed".format(case_id))
            final_result = "Failed"
        print("*" * 20)
        write_result(filename, sheetname, final_result, case_id + 1, 8)
execute_func('test_case_api.xlsx', 'login')
|
# Example of python script to use on mongo-converter
def parser_field(field, row=None, configuration=None,
mongo_column=None,
oracleConnection=None,
mongoClient=None,
context=None,
operator=None):
print("Handle field", field, ' of column', mongo_column)
# skip operator
# if op:
# op.skip_column = True
ans = '%s changed' % field
    # Store a field in the context so it can be
    # retrieved on the next row
    context['my_reusable_obj'] = 'XXXX'
# Return value to store
return ans
# vim: ts=4 sw=4 expandtab |
from django.contrib import admin
from .models import CarouselImage, Product
# Register your models here.
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
list_display = ('name', 'manufacturer', 'sku',
'tag_list', 'has_sizes', 'price', 'rating')
search_fields = ('name', 'manufacturer')
ordering = ('name', 'manufacturer')
# Show the product tags
def get_queryset(self, request):
return super().get_queryset(request).prefetch_related('tags')
def tag_list(self, obj):
return u", ".join(o.name for o in obj.tags.all())
@admin.register(CarouselImage)
class CarouselImageAdmin(admin.ModelAdmin):
list_display = ('product_sku', 'image_url')
def product_sku(self, obj):
return obj.product.sku
|
from django.test import TestCase
from django.apps import apps
from app.users.apps import UsersConfig
from django.contrib.auth.models import User
class TaskTest(TestCase):
# def setUp(self):
# admin = User.objects.create_user("admin")
# Task.objects.create(title="Task 1", description="Example task", creator=admin)
def test_apps(self):
self.assertEqual(UsersConfig.name, "users")
self.assertEqual(apps.get_app_config("users").name, "app.users")
|
"""Utility functions.
:class EntryList: Various CLI tools relevant to displaying valid query entries.
"""
from data.dataset import valid_entries
class EntryList:
"""Command-line tools that assist with identifying valid entries.
These tools do things like:
- List all valid entries in sorted order, by different sorting key.
- Allow the user to index into available 'query-able' entries without
having to explicitly type them out, therefore enabling faster query
construction.
Full usability is described in the documentation.
"""
__slots__ = ['keys', 'key_list']
def __init__(self):
self.keys = valid_entries.keys()
self.key_list = [key for key in self.keys]
def sort(self, sort_key):
"""Sort valid entries.
:param sort_key: Specifies how to sort entries.
:type sort_key: str
:return: Sorted entries.
:rtype: dict
All entries follow this syntax:
'#year #make #model'
To sort entries by make, split is called on each entry via lambda
function, resulting in:
        s = ['#year', '#make', '#model']
Therefore, a slice on 's' removing index 0 is the sort key.
The returned dict 'result' passes the sorted entries as values to
keys in range(0, len(entries)). This allows the CLI entry indexing
utility described in the class docstring.
"""
result = {}
if sort_key == 'year':
self.key_list.sort()
elif sort_key == 'make':
self.key_list.sort(key=lambda s: s.split()[1:])
for index, entry_key in enumerate(self.key_list):
result[index] = entry_key
return result
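# A minimal usage sketch (assuming data.dataset.valid_entries maps
# '#year #make #model' strings to entry data, as the docstrings describe):
#   entries = EntryList()
#   by_make = entries.sort('make')
#   # e.g. {0: '2012 Audi A4', 1: '2015 BMW M3', ...}  (values are hypothetical)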
|
from collections import defaultdict
with open ('/Users/anthonynguyen/Desktop/Advent-Of-Code-2019/Day 6 - Universal Orbit Map/Orbits.txt') as file:
#with open ('/Users/anthonynguyen/Desktop/Advent-Of-Code-2019/Day 6 - Universal Orbit Map/Test2.txt') as file:
Orbits = file.read().splitlines()
def build_tuple_list(orbits_list):
Orbits =[]
#create list of tuples of each paired orbits
for orbit_pair in orbits_list:
Orbits.append(tuple(orbit_pair.split(')')))
return Orbits
orbits_tuple_list = build_tuple_list(Orbits)
def build_tree(orbit_list):
#implements default value of list
tree = defaultdict(list)
for pair in orbit_list:
center, orbiter = pair
tree[center].append(orbiter)
return tree
orbit_tree = build_tree(orbits_tuple_list)
def find_all_path(tree, start_node, path=None, total_path=None):
    # guard against shared mutable default arguments
    if path is None:
        path = []
    if total_path is None:
        total_path = []
    path.append(start_node)
if len(tree[start_node]) == 0:
total_path.append(str(path))
path.pop()
else:
for child in tree[start_node]:
find_all_path(tree,child,path,total_path)
## removes items from paths list as recursion is finished, so start at child
path.pop()
return total_path
root_path = []
total_path = []
paths_from_root = find_all_path(orbit_tree,'COM', root_path,total_path)
lists_of_path = [eval(path) for path in paths_from_root]
def path_finder(paths, endpoint):
    for path in paths:  # use the argument rather than the global list
if path[-1] == endpoint:
return path
path_to_SAN = path_finder(lists_of_path, 'SAN')
path_to_YOU = path_finder(lists_of_path, 'YOU')
path_diffs = [[node for node in path_to_SAN if node not in path_to_YOU], [node for node in path_to_YOU if node not in path_to_SAN]]
nodes_list = [node for sub_path_diff in path_diffs for node in sub_path_diff]
orbital_transfers = len(nodes_list) - 2
print(orbital_transfers)
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Thu May 16 11:41:49 CEST 2013
"""Measures for calibration"""
import math
import numpy
def cllr(negatives, positives):
"""Cost of log likelihood ratio as defined by the Bosaris toolkit
Computes the 'cost of log likelihood ratio' (:math:`C_{llr}`) measure as
given in the Bosaris toolkit
Parameters:
negatives (array): 1D float array that contains the scores of the
"negative" (noise, non-class) samples of your classifier.
positives (array): 1D float array that contains the scores of the
"positive" (signal, class) samples of your classifier.
Returns:
float: The computed :math:`C_{llr}` value.
"""
sum_pos, sum_neg = 0.0, 0.0
for pos in positives:
sum_pos += math.log(1.0 + math.exp(-pos), 2.0)
for neg in negatives:
sum_neg += math.log(1.0 + math.exp(neg), 2.0)
return (sum_pos / len(positives) + sum_neg / len(negatives)) / 2.0
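# Sanity check: completely uninformative scores give C_llr = 1, since
# log2(1 + e^0) = 1 for every term; e.g. cllr([0.0], [0.0]) == 1.0.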
def min_cllr(negatives, positives):
"""Minimum cost of log likelihood ratio as defined by the Bosaris toolkit
Computes the 'minimum cost of log likelihood ratio' (:math:`C_{llr}^{min}`)
    measure as given in the Bosaris toolkit
Parameters:
negatives (array): 1D float array that contains the scores of the
"negative" (noise, non-class) samples of your classifier.
positives (array): 1D float array that contains the scores of the
"positive" (signal, class) samples of your classifier.
Returns:
float: The computed :math:`C_{llr}^{min}` value.
"""
# first, sort both scores
neg = sorted(negatives)
pos = sorted(positives)
N = len(neg)
P = len(pos)
II = N + P
# now, iterate through both score sets and add a 0 for negative and 1 for
# positive scores
n, p = 0, 0
ideal = numpy.zeros(II)
neg_indices = [0] * N
pos_indices = [0] * P
for i in range(II):
if p < P and (n == N or neg[n] > pos[p]):
pos_indices[p] = i
p += 1
ideal[i] = 1
else:
neg_indices[n] = i
n += 1
    # compute the pool adjacent violators method on the ideal LLR scores
    ghat = numpy.ndarray(ideal.shape, dtype=float)
raise NotImplementedError("No pavx implementation")
pavx(ideal, ghat) # noqa: F821
# disable runtime warnings for a short time since log(0) will raise a warning
old_warn_setup = numpy.seterr(divide="ignore")
# ... compute logs
posterior_log_odds = numpy.log(ghat) - numpy.log(1.0 - ghat)
log_prior_odds = math.log(float(P) / float(N))
# ... activate old warnings
numpy.seterr(**old_warn_setup)
llrs = posterior_log_odds - log_prior_odds
    # some weird addition (left disabled)
# for i in range(II):
# llrs[i] += float(i)*1e-6/float(II)
# unmix positive and negative scores
new_neg = numpy.zeros(N)
for n in range(N):
new_neg[n] = llrs[neg_indices[n]]
new_pos = numpy.zeros(P)
for p in range(P):
new_pos[p] = llrs[pos_indices[p]]
# compute cllr of these new 'optimal' LLR scores
return cllr(new_neg, new_pos)
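# A minimal pool-adjacent-violators sketch that could stand in for the missing
# pavx above (its interface is an assumption: fit the isotonic, i.e.
# non-decreasing, regression of y and write the result into the preallocated
# out array):
def pavx_sketch(y, out):
    values = []   # running block means
    weights = []  # running block sizes
    for v in y:
        values.append(float(v))
        weights.append(1)
        # merge backwards while adjacent blocks violate monotonicity
        while len(values) > 1 and values[-2] > values[-1]:
            w = weights[-2] + weights[-1]
            values[-2] = (values[-2] * weights[-2] + values[-1] * weights[-1]) / w
            weights[-2] = w
            del values[-1], weights[-1]
    i = 0
    for v, w in zip(values, weights):
        out[i:i + w] = v  # expand each block back to its original positions
        i += w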
|
import sqlite3
import csv
def accountExists(username, password):
DB_FILE= "accounts.db"
db = sqlite3.connect(DB_FILE)
c = db.cursor()
command = "SELECT userID, username FROM USERNAMES WHERE username = \"{}\" AND password = \"{}\";".format(username, password)
c.execute(command)
q = c.fetchall()
db.commit() #save changes
db.close() #close database
if len(q) == 0:
return -1 #return -1 if it doesn't exist
else:
return q[0][0] #return ID of user if exists
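# Note: string-formatted SQL like the query above is open to injection; the
# parameterized style used in canAdd below is the safer equivalent, e.g.:
#   c.execute("SELECT userID, username FROM USERNAMES WHERE username = ? AND password = ?",
#             (username, password))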
def userExists(username):
DB_FILE="accounts.db"
db = sqlite3.connect(DB_FILE)
c = db.cursor()
command = "SELECT userID, username FROM USERNAMES WHERE username = \"{}\";".format(username)
c.execute(command)
q = c.fetchall()
db.commit() #save changes
db.close() #close database
if len(q) == 0:
return -1 #return -1 if it doesn't exist
else:
return q[0][0] #return ID of user if exists
def addUser(username, password):
DB_FILE="accounts.db"
db = sqlite3.connect(DB_FILE)
c = db.cursor()
command = "SELECT userID, username FROM USERNAMES WHERE username = \"{}\";".format(username)
c.execute(command)
new = c.fetchall()
if len(new) == 0:
command = "SELECT userID FROM USERNAMES;"
c.execute(command)
q = c.fetchall()
command = "INSERT INTO USERNAMES VALUES({}, \"{}\", \"{}\");".format(q[len(q)-1][0]+1,username,password)
c.execute(command)
db.commit() #save changes
db.close() #close database
return True
else:
db.commit() #save changes
db.close() #close database
return False
def addStory(title, userID, text):
DB_FILE="accounts.db"
db = sqlite3.connect(DB_FILE)
c = db.cursor()
command = "SELECT storyID, title FROM STORIES WHERE title = \"{}\";".format(title)
c.execute(command)
new = c.fetchall()
if len(new) == 0:
command = "SELECT storyID FROM STORIES;"
c.execute(command)
q = c.fetchall()
command = "INSERT INTO STORIES VALUES({}, \"{}\");".format(q[len(q)-1][0]+1,title)
c.execute(command)
command = "INSERT INTO STORYEDITS VALUES({}, \"{}\",\"{}\");".format(q[len(q)-1][0]+1,userID,text)
c.execute(command)
db.commit() #save changes
db.close() #close database
return True
else:
db.commit() #save changes
db.close() #close database
return False
def canAdd(userID, storyID):
DB_FILE="accounts.db"
db = sqlite3.connect(DB_FILE)
c = db.cursor()
c.execute('SELECT * FROM STORYEDITS WHERE userID = ? AND storyID = ?', (userID, storyID))
boss = c.fetchone()
db.commit() #save changes
db.close() #close database
return boss == None
def addToStory(storyID, userID, text):
DB_FILE="accounts.db"
db = sqlite3.connect(DB_FILE)
c = db.cursor()
command = "SELECT storyID, storyID FROM STORIES WHERE storyID = \"{}\";".format(storyID)
c.execute(command)
new = c.fetchall()
    if canAdd(userID, storyID) and len(new) == 1:
        command = "SELECT storyID FROM STORYEDITS;"
        c.execute(command)
        q = c.fetchall()
        command = "INSERT INTO STORYEDITS VALUES(\"{}\",\"{}\",\"{}\");".format(storyID,userID,text)
        c.execute(command)
        db.commit() #save changes
        db.close() #close database
        return True
    else:
        db.close() #close database - nothing to save
        return False
def getStory(title):
DB_FILE="accounts.db"
db = sqlite3.connect(DB_FILE)
c = db.cursor()
command = ""
if len(title) > 0:
command = "SELECT * FROM STORIES WHERE title = \"{}\";".format(title)
else:
command = "SELECT * FROM STORIES;"
c.execute(command)
new = c.fetchall()
db.commit() #save changes
db.close() #close database
return new
def getUserID(username):
DB_FILE="accounts.db"
db = sqlite3.connect(DB_FILE)
c = db.cursor()
command = "SELECT * FROM USERNAMES;"
c.execute(command)
new = c.fetchall()
for row in new:
if row[1] == username:
db.commit() #save changes
db.close() #close database
return row[0]
db.commit() #save changes
db.close() #close database
def almagate(): #the list it returns should be in order
DB_FILE="accounts.db"
db = sqlite3.connect(DB_FILE)
c = db.cursor()
command = "SELECT * FROM STORYEDITS;"
c.execute(command)
new = c.fetchall()
command = "SELECT storyID FROM STORIES;"
c.execute(command)
storyIDs = c.fetchall()
l = []
oldtext = ""
    for storyID in storyIDs:
        for row in new:
            if storyID[0] == row[0]:
                oldtext += row[2] + " "
        l.append(oldtext)
        oldtext = ""
db.commit() #save changes
db.close() #close database
return l
def recent(): #the list it returns should be in order
DB_FILE="accounts.db"
db = sqlite3.connect(DB_FILE)
c = db.cursor()
command = "SELECT * FROM STORYEDITS;"
c.execute(command)
new = c.fetchall()
command = "SELECT storyID FROM STORIES;"
c.execute(command)
storyIDs = c.fetchall()
l = []
oldtext = ""
    for storyID in storyIDs:
        for row in new:
            if storyID[0] == row[0]:
                oldtext = row[2]
        l.append(oldtext)
        oldtext = ""
db.commit() #save changes
db.close() #close database
return l
def getStory1():
DB_FILE="accounts.db"
db = sqlite3.connect(DB_FILE)
c = db.cursor()
command = "SELECT * FROM STORIES;"
c.execute(command)
new = c.fetchall()
db.commit() #save changes
db.close() #close database
return new
def getUSERNAMES1():
DB_FILE="accounts.db"
db = sqlite3.connect(DB_FILE)
c = db.cursor()
command = "SELECT * FROM USERNAMES"
c.execute(command)
new = c.fetchall()
db.commit() #save changes
db.close() #close database
return new
def getSTORYEDITS1():
DB_FILE="accounts.db"
db = sqlite3.connect(DB_FILE)
c = db.cursor()
command = "SELECT * FROM STORYEDITS;"
c.execute(command)
new = c.fetchall()
db.commit() #save changes
db.close() #close database
return new
|
#PF-Assgn-36
def create_largest_number(number_list):
    # build every concatenation of the three numbers and keep the largest
    from itertools import permutations
    candidates = [''.join(str(n) for n in p) for p in permutations(number_list)]
    return max(candidates, key=int)
number_list=[23,45,67]
largest_number=create_largest_number(number_list)
print(largest_number) |
#!/usr/bin/env python3
import requests
from bs4 import BeautifulSoup
import re
import os
"""Gets text of speeches from presidential elections"""
app_url = "http://www.presidency.ucsb.edu/"
def get_available_elections():
"""Gets dict like {election year: election url} for all available elections"""
docs_url = app_url + "index_docs.php"
docs_page = requests.get(docs_url)
docs_content = BeautifulSoup(docs_page.text, 'html.parser')
elections_title = docs_content.find('span', class_='doctitle',
string="Documents Related to Presidential Elections")
elections_available_list = elections_title.parent.ul.select('li')
election_links = {election.text.replace(' Election',''):
app_url + election.a.get('href')
for election in elections_available_list}
return election_links
def get_candidate_speech_links(election_url):
"""
Gets dict like {candidate name: speech link} for all candidates in a
given election
"""
election_page = requests.get(election_url)
election_content = BeautifulSoup(election_page.text, 'html.parser')
# Get list of span tags with candidate names
# TODO: find a better selector for the td, to avoid .parent.parent later
candidate_names = election_content.select('td.doctext p span.roman')
# Helper function to find tags with text including "campaign speeches"
def find_speech_links(string):
return string and 'campaign speeches' in string.lower()
# Construct dict with keys of candidate names and vals of url
candidate_links = {candidate.text: app_url + candidate.parent.parent.find(
'a', string=find_speech_links).get('href')
for candidate in candidate_names}
return candidate_links
def save_candidate_speeches(candidate_name, candidate_url, election_year):
"""
Given a url to a page with a list of links to speech transcripts, saves
all such transcripts to a folder structure organized by election year and
candidate. Also saves a single file with all speech text for that candidate.
"""
candidate_page = requests.get(candidate_url)
candidate_content = BeautifulSoup(candidate_page.text, 'html.parser')
speech_links = [app_url + (link.get('href')[3:]) for link in
candidate_content.select('td.listdate a')]
# make sure we have a directory to which to save transcripts
candidate_path = os.path.join(os.getcwd(), 'speeches', election_year, candidate_name)
if not os.path.isdir(candidate_path):
os.makedirs(candidate_path)
# save transcripts and combined file
for link in speech_links:
        speech_id = re.search(r'\?pid=(?P<pid>[0-9]+)$', link).group('pid')
# Only re-download (and more importantly, append to all_speeches) if
# this has not already been done for this file. Assumes files don't
# change over time (better transcriptions, etc.).
if not os.path.isfile(os.path.join(candidate_path, speech_id + '.txt')):
speech_page = requests.get(link)
speech_content = BeautifulSoup(speech_page.text, 'html.parser')
speech_title = speech_content.select_one('span.paperstitle').text
speech_date = speech_content.select_one('span.docdate').text
speech = speech_content.select_one('span.displaytext').text
with open(os.path.join(candidate_path, speech_id + '.txt'), 'w') as f:
f.write(os.linesep.join([speech_title, speech_date, speech]))
# unclear given the size of these writes whether keeping all speech text
# in a giant string and writing to all_speeches.txt once, or
# continually opening and appending to a running file is better. The
# latter is slower, but safer if errors occur during this loop.
with open(os.path.join(candidate_path, 'all_speeches.txt'), 'a') as fall:
fall.write(speech + os.linesep)
def main(args):
elections = get_available_elections()
election_year = str(args.year)
if election_year not in elections:
year_options = "Your options are: {}".format(
", ".join(sorted(elections.keys())))
raise ValueError("No speeches for this year. "+year_options)
candidates = get_candidate_speech_links(elections[election_year])
candidate_name = None
candidate_url = None
    search_string = (args.candidate or '').strip()  # -c is optional, so handle None
    while not search_string:
        # prompt for candidate based on available ones
        prompt = "Pick a candidate from the {} election ({}): ".format(
            election_year, ", ".join(candidates.keys()))
search_string = input(prompt)
while not candidate_name:
# try to match search string
search_matches = [candidate for candidate in candidates
if search_string.lower() in candidate.lower()]
if not search_matches:
prompt = ("Sorry, no matches to your candidate search string. "
"It must be one of {}: ".format(", ".join(candidates.keys())))
search_string = input(prompt)
elif len(search_matches) > 1:
prompt = ("Multiple candidates match your search string: {}. "
"Please enter a more specific search string: ".format(
", ".join(search_matches)))
search_string = input(prompt)
else:
candidate_name = search_matches[0]
candidate_url = candidates[candidate_name]
save_candidate_speeches(candidate_name, candidate_url, election_year)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser(
description="Save speech transcripts given an election year and candidate")
parser.add_argument('-y', '--year', type=int, required=True,
help="Election year from which to scrape speech transcripts")
parser.add_argument('-c', '--candidate', type=str,
help="Candidate name search string. Remember to quote if it has spaces")
args = parser.parse_args()
main(args)
|
from json import loads, dumps
read = open('pelis.json', 'r')
# Approach 1
films = []
title = ''
actors = ''
rating = ''
boxOffice = ''
for i in range(1, 123):
line = read.readline().split(': ')
value = line[0][8:]
if value == '"Title"':
title = line[1][1:-3]
elif value == '"Actors"':
actors = line[1][1:-3]
elif value[8:] == '"Source"' and line[1][1:-3] == 'Rotten Tomatoes':
line = read.readline().split(': ')
rating = line[1][1:-2]
elif value == '"BoxOffice"':
boxOffice = line[1][1:-3]
elif title != '' and actors != '' and rating != '' and boxOffice != '':
films.append([title, actors, rating, boxOffice])
title = ''
actors = ''
rating = ''
boxOffice = ''
table = open('pelis.csv', 'w')
table.write('Nombre; Actor; Raiting; Recaudación\n')
for i in range(3):
table.writelines(films[i][0]+'; '+films[i][1] +
'; '+films[i][2]+'; '+films[i][3]+'\n')
"""
# Approach 2
jsonRead = read
data = loads(jsonRead.read())
films = []
for i in data:
films.append([i['Title'], i['Actors'], i['Ratings']
[1]['Value'], i['BoxOffice']])
table = open('pelis.csv', 'w')
table.write('Nombre; Actor; Raiting; Recaudación\n')
for i in range(3):
table.writelines(films[i][0]+'; '+films[i][1] +
'; '+films[i][2]+'; '+films[i][3]+'\n')
"""
|
import sqlite3
class SqliteRepository(object):
def __init__(self):
self.conn = sqlite3.connect(':memory:')
self._create()
def _create(self):
self.conn.execute('''CREATE TABLE IF NOT EXISTS Person (name text, surname text, phone_number text, email text) ''')
def put(self, name, surname, phone_number, email):
self.conn.execute('''INSERT INTO person VALUES (?, ?, ? , ?)''', (name, surname, phone_number, email))
def find_all(self):
cursor = self.conn.execute('''SELECT * FROM person''')
return cursor.fetchall()
def find_by_email(self, email):
        cursor = self.conn.execute('''SELECT * FROM Person WHERE email = ?''', (email,))  # parameterized to avoid SQL injection
name, surname, phone_number, email = cursor.fetchone()
return {'name': name,
'surname': surname,
'phone_number': phone_number,
'email': email}
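# A minimal usage sketch of the repository above (the database is in-memory,
# so the data only lives as long as the object; names here are hypothetical):
#   repo = SqliteRepository()
#   repo.put('Ada', 'Lovelace', '555-0100', 'ada@example.org')
#   print(repo.find_by_email('ada@example.org'))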
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-12 23:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('galerija', '0010_auto_20170113_0044'),
]
operations = [
migrations.AlterModelOptions(
name='picture',
options={'ordering': ['-pub_date']},
),
]
|
import os
import signal
import sys
import socket, time
import cv
from PIL import Image
from numpy import array
# This makes sure the path which python uses to find things when using import
# can find all our code.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# Import qt modules (platform independent)
import ardrone.util.qtcompat as qt
QtCore = qt.import_module('QtCore')
QtNetwork = qt.import_module('QtNetwork')
# Import other objects
from ardrone.core.controlloop import ControlLoop
from ardrone.platform import qt as platform
import ardrone.core.videopacket as videopacket
from ardrone.aruco import detect_markers
class imageProcessor(object):
def __init__(self):
pass
def detect_markers (self, frame):
#convert prep
cv.SaveImage("frame.png", frame)
frame = Image.open("frame.png").convert('RGB')
# Convert image into aruco-friendly format (array)
arr = array(frame)
# Detect and draw on markers
[m.draw(arr) for m in detect_markers(frame)]
# Convert back to OpenCV-friendly format (RGB888)
stringImage = Image.fromarray(arr).tostring()
cvImage = cv.CreateImageHeader((320,240), cv.IPL_DEPTH_8U, 3)
cv.SetData(cvImage, stringImage)
# Return processed image
return cvImage
class imageViewer(object):
win_title = "Drone Video Feed"
def __init__(self):
# Create a QtCoreApplication loop (NB remember to use QApplication instead if wanting GUI features)
self.app = QtCore.QCoreApplication(sys.argv)
# Wire up Ctrl-C to call QApplication.quit()
signal.signal(signal.SIGINT, lambda *args: self.app.quit())
# Initialise the drone control loop and attempt to open a connection.
connection = platform.Connection()
self._control = ControlLoop(connection, video_cb=None, navdata_cb=None)
# Create a window in which to place frames
cv.NamedWindow(self.win_title, cv.CV_WINDOW_AUTOSIZE) #probably no need to autosize
# Set up a UDP listening socket on port 5562 which calls readData upon socket activity
self.socket = QtNetwork.QUdpSocket()
if not self.socket.bind(QtNetwork.QHostAddress.Any, 5562):
raise RuntimeError('Error binding to port: %s' % (self.socket.errorString()))
self.socket.readyRead.connect(self.readData)
# Create decoder object
self._vid_decoder = videopacket.Decoder(self.showImage)
# Create imageProcessor object
self._img_processor = imageProcessor()
# Start video on drone
self._control.start_video()
def run(self):
self.app.exec_()
def readData(self):
"""Called when there is some interesting data to read on the video socket."""
while self.socket.hasPendingDatagrams():
sz = self.socket.pendingDatagramSize()
(data, host, port) = self.socket.readDatagram(sz)
# Some hack to account for PySide vs. PyQt differences
if qt.USES_PYSIDE:
data = data.data()
# Decode video data and pass result to showImage
self._vid_decoder.decode(data)
def showImage(self, data):
"""
Displays argument image in window using openCV.
data argument must be a string containing a 16 bit unsigned RGB image (RGB16 == RGB565).
"""
# Create OpenCV header and read in drone video data as RGB565
iplimage = cv.CreateImageHeader((320,240), cv.IPL_DEPTH_8U, 2)
cv.SetData(iplimage, data)
# Convert image to RGB888 which is more OpenCV friendly
RGBimage = cv.CreateImage((320,240), cv.IPL_DEPTH_8U, 3)
cv.CvtColor(iplimage, RGBimage, cv.CV_BGR5652BGR)
# Add labels for any markers present
RGBimage = self._img_processor.detect_markers(RGBimage)
# Show image
cv.ShowImage(self.win_title, RGBimage)
if (__name__ == '__main__'):
image_app = imageViewer()
image_app.run()
|
"""
Scintillator
Abstract class for scintillators
"""
from abc import ABCMeta, abstractmethod
from Centella.physical_constants import *
import sys
from Util import *
from Xenon import *
nm = nanometer
mol = mole
micron = micrometer
LXeRefractionIndex =[
[6.4, 1.58587, 0.0964027],
[6.6, 1.61513, 0.508607],
[6.8, 1.6505, 1.33957],
[7, 1.69447, 1.69005],
[7.2, 1.75124, 1.02138],
[7.4, 1.82865, 0.295683],
[7.6, 1.94333, 0.098714]
]
LysoScintSpectrum =[
[2.2543, 0.0643],
[2.4797, 0.1929],
[2.6436, 0.4],
[2.8700, 1.],
[3.0996, 0.4071]
]
############################################################
def sortRI(elem):
"""
    Helper key function used to sort refraction-index entries of the form
    [wavelength, n, f]: returns the wavelength (first element) as the sort key.
"""
return elem[0]
class Scintillator(object):
__metaclass__ = ABCMeta
@abstractmethod
def Name(self):
"""
Name of scintillator
"""
pass
@abstractmethod
def EffectiveAtomicNumber(self):
"""
EffectiveAtomicNumber
"""
pass
@abstractmethod
def RadiationLength(self):
"""
Radiation length/Density
"""
pass
@abstractmethod
def RefractionIndex(self):
"""
returns the refraction index to the scintillation light
"""
pass
@abstractmethod
def RefractionIndexL(self,lamda):
"""
returns the refraction index to the scintillation light
"""
pass
@abstractmethod
def RefractionIndexBlue(self):
"""
returns the refraction index to the blue scintillation light
"""
pass
@abstractmethod
def DecayConstant(self):
"""
returns the decay time of the scintillation light
"""
pass
@abstractmethod
def Density(self):
"""
Density
"""
pass
@abstractmethod
def ScintillationWavelength(self):
"""
average scintillation wavelength
"""
pass
@abstractmethod
def PhotoelectricFractionAt511KeV(self):
"""
Photoelectric Xs
"""
pass
@abstractmethod
def AttenuationAt511KeV(self,Z):
"""
        Attenuation of a beam of energy E = 511 keV in a thickness Z
"""
pass
@abstractmethod
def EfficiencyAt511KeV(self,Z):
"""
        Fraction of gammas of energy E = 511 keV interacting in a thickness Z
"""
pass
#@abstractmethod
def PhotoelectricEfficiencyAt511KeV(self,Z):
"""
        Fraction of gammas of energy E = 511 keV interacting photoelectrically in a thickness Z
"""
return self.EfficiencyAt511KeV(Z)*self.PhotoelectricFractionAt511KeV()
@abstractmethod
def ScintillationPhotonsAt511KeV(self):
"""
Number of scintillation photons produced by a photon of energy 511 keV
"""
pass
def DisplayProperties(self):
s= """
Name = %s Z = %d
Density = %7.4g g/cm3 X0= %7.4g cm
Refraction Index = %7.2f
Decay constant = %7.2f ns
Number of scintillation photons = %7.2f
Scintillation wavelength = %7.2f nm
Photoelectric fraction at 511 keV = %7.2f
Attenuation per cm = %7.2f
Efficiency per cm = %7.2f
"""%(self.Name(), self.EffectiveAtomicNumber(),
self.Density()/(g/cm3),self.RadiationLength()/cm,
self.RefractionIndex(),
self.DecayConstant()/ns,
self.ScintillationPhotonsAt511KeV(),
self.ScintillationWavelength()/nm,
self.PhotoelectricFractionAt511KeV(),
self.AttenuationAt511KeV(1*cm),
self.EfficiencyAt511KeV(1*cm))
return s
def __str__(self):
return self.DisplayProperties()
class LXe(Scintillator):
def __init__(self,wi=15.6*eV,ws=16.6*eV,lambdaScint=172*nm,rayleigh=36*mm,
tau1=2.2*ns,tau2=27*ns,tau3=45*ns,
rtau1=0.065,rtau2=0.935,rtau3=0.0,nUV=1.70,nBlue=1.4):
self.Z = 54
self.A = 131.29*g/mol
self.T = 160*kelvin
self.x0 = 8.48 * g/cm2
self.rho = 3*g/cm3
self.dedx=0.35*keV/micron
self.wi=wi
self.ws=ws
self.lambdaScint = lambdaScint
self.rayleigh = rayleigh
self.tau1=tau1
self.tau2=tau2
self.tau3=tau3
self.rtau1=rtau1
self.rtau2=rtau2
self.rtau3=rtau3
self.nUV = nUV
self.nBlue=nBlue
lxri =[] #transform to nm
for elem in LXeRefractionIndex:
ene = elem[0]
n = elem[1]
f = elem[2]
x =[(1240./ene)*nm,n,f]
#print x[0]/nm
lxri.append(x)
self.LXRI = sorted(lxri, key=sortRI)
#print self.LXRI
l,n = self.AverageLamdaAndRI()
self.AverageLamda = l
self.AverageRefractionIndexUV = n
def Name(self):
"""
Interface: Name
"""
return "LXE"
def EffectiveAtomicNumber(self):
"""
Interface: Xenon atomic number
"""
return self.Z
def RadiationLength(self):
"""
Interface: X0/rho
"""
return self.x0/self.rho
def RefractionIndex(self):
"""
Interface: Take the UV
"""
return self.AverageRefractionIndexUV
def DecayConstant(self):
"""
Interface: returns the decay time of the scintillation light
        In LXe one has several components; return the ratio-weighted average
        (the ratios rtau1 + rtau2 sum to 1, so no extra normalisation is needed)
        """
        return (self.Lifetime(1)*self.LifetimeRatio(1)+
                self.Lifetime(2)*self.LifetimeRatio(2))
def Density(self):
"""
Interface: Density of LXe
"""
return self.rho
def ScintillationWavelength(self):
"""
Interface: average scintillation wavelength
"""
return self.lambdaScint
def PhotoelectricFractionAt511KeV(self):
"""
Interface: PE fraction
"""
return self.PhotoelectricFraction(511*keV)
def AttenuationAt511KeV(self,Z):
"""
Interface: Attenuation of a beam of energy 511 keV in a thickness Z
"""
return self.Attenuation(511*keV,Z)
def EfficiencyAt511KeV(self,Z):
"""
        Interface. Fraction of gammas of energy 511 keV interacting in a thickness Z
"""
return self.Efficiency(511*keV,Z)
def ScintillationPhotonsAt511KeV(self):
"""
Interface: Number of scintillation photons produced by a photon of energy 511 keV
"""
return self.ScintillationPhotons(511*keV)
def AverageLamdaAndRI(self):
"""
Returns the average lamda and refraction index
"""
l=0.
n=0.
w=0.
for elem in self.LXRI:
l+=elem[0]*elem[2]
n+=elem[1]*elem[2]
w+=elem[2]
return (l/w,n/w)
def RmsRI(self):
"""
Returns the rms of the refraction index
"""
ns=0.
w=0.
lw, nw = self.AverageLamdaAndRI()
print "nw = %7.2f"%(nw)
for elem in self.LXRI:
ns+=elem[2]*(elem[1] - nw)**2
w+=elem[2]
print " ni = %7.4g, ni - nw = %7.4g, wi =%7.4g ns = %7.4g"%(
elem[1],elem[1] - nw,elem[2],ns)
N=len(self.LXRI)
a = N*ns
b = (N-1)*w
sw = sqrt(a/b)
print " N = %7.2f, ns = %7.2f, w = %7.2f, a = %7.2f b = %7.2f"%(
N,ns,w,a,b)
return sw
def RefractionIndexL(self,lamda):
"""
returns the refraction index
"""
if lamda < self.LXRI[0][0]:
return self.LXRI[0][1]
elif lamda > self.LXRI[6][0]:
return self.LXRI[6][1]
else:
for i in xrange(len(self.LXRI)-1):
elem = self.LXRI[i]
x0 = elem[0]
y0 = elem[1]
elem = self.LXRI[i+1]
x1 = elem[0]
y1 = elem[1]
if lamda >= x0 and lamda < x1:
break
return lin(lamda,x0,y0,x1,y1)
def AtomicNumber(self):
"""
Xenon atomic number
"""
return self.Z
def AtomicMass(self):
"""
Xenon atomic mass
"""
return self.A
def X0(self):
"""
X0 in gr/cm2
"""
return self.x0
def TemperatureAt1Bar(self):
"""
LXe Temperature
"""
return self.T
def RefractionIndexUV(self):
return self.AverageRefractionIndexUV
def RefractionIndexBlue(self):
return self.nBlue
def Lifetime(self,i):
"""
i ->(1,3) for the three lifetimes.
"""
if i == 1:
return self.tau1
elif i == 2:
return self.tau2
elif i == 3:
return self.tau3
else:
print "index must be 1,2 or 3"
sys.exit(0)
def LifetimeRatio(self,i):
"""
i ->(1,3) for the three lifetimes.
"""
if i == 1:
return self.rtau1
elif i == 2:
return self.rtau2
elif i == 3:
return self.rtau3
else:
print "index must be 1,2 or 3"
sys.exit(0)
def Wi(self):
"""
Energy needed to produce an ionization pair
"""
return self.wi
def Ws(self):
"""
Energy needed to produce scintillation photons
"""
return self.ws
def Rayleigh(self):
"""
Attenuation due to Rayleigh Scattering
"""
return self.rayleigh
def dEdX(self):
return self.dedx
def ComptonCrossSection(self,E):
"""
Compton = Incoherent Scattering
"""
return ScatterIncoherent(E)
def PhotoelectricCrossSection(self,E):
"""
Photoelectric Xs
"""
return Photoelectric(E)
def TotalCrossSection(self,E):
"""
Total Xs
"""
return TotalInteraction(E)
def PhotoelectricFraction(self,E):
"""
Interface: PE fraction
"""
return self.PhotoelectricCrossSection(E)/self.TotalCrossSection(E)
def Attenuation(self,E,Z):
"""
Attenuation of a beam of energy E in a thickness Z
"""
return TransmittedBeam(E,Z,self.Density())
def Efficiency(self,E,Z):
"""
        Fraction of gammas of energy E interacting in a thickness Z
"""
return InteractionFraction(E,Z,self.Density())
def GammaPathLength(self,E):
"""
gamma path length in xenon
"""
xz = self.TotalCrossSection(E)*self.Density()
return 1./xz
def ScintillationPhotons(self,E):
"""
Number of scintillation photons produced by a photon of energy E
"""
return E/self.Ws()
def SPhotonsAt511KeV(self,i):
if i == 1:
return self.ScintillationPhotons(511*keV)*self.LifetimeRatio(1)
elif i == 2:
return self.ScintillationPhotons(511*keV)*self.LifetimeRatio(2)
elif i == 3:
return self.ScintillationPhotons(511*keV)*self.LifetimeRatio(3)
else:
print "index must be 1,2 or 3"
sys.exit(0)
def IonizationElectrons(self,E):
"""
Number of ionization electrons produced by a photon of energy E
"""
return E/self.Wi()
def CostPerGram(self):
"""
Cost per gram
"""
return 1.0 #in euro
def __str__(self):
s= """
Name = LXe Z = %d A = %7.4g g/mole
Temperature at atm pressure (1 bar) = %7.2f kelvin
        Density = %7.4g g/cm3 X0= %7.4g g/cm2 X0/rho= %7.4g cm
de/dx = %7.4g keV/cm Ws = %7.4g eV Wi = %7.4g eV
Rayleigh Scattering = %7.2g cm
Scintillation wavelength = %7.2f nm
Refraction Index (UV) = %7.2f
Refraction Index (Blue) = %7.2f
Lifetimes:
tau1 = %7.2f ns, ratio tau 1 = %7.2f
tau2 = %7.2f ns, ratio tau 2 = %7.2f
tau3 = %7.2f ns, ratio tau 3 = %7.2f
"""%(self.AtomicNumber(),self.AtomicMass()/(g/mol),
self.TemperatureAt1Bar()/kelvin,
self.Density()/(g/cm3),
self.X0()/(g/cm2),(self.X0()/self.Density())/cm,
self.dEdX()/(keV/cm),self.Ws()/eV, self.Wi()/eV,
self.Rayleigh()/cm, self.ScintillationWavelength()/nm, self.RefractionIndex(),
self.nBlue,
self.tau1/ns, self.rtau1,self.tau2/ns,self.rtau2,
self.tau3/ns, self.rtau3)
return s
class LYSO(Scintillator):
def __init__(self,Z=54,rho=7.3*g/cm3, n=1.82, X0=1.16*cm, LambdaAtt = 0.87*(1./cm),
LambdaPeak=420*nm, tau = 50*ns, PhotoFraction = 0.3, Nphot = 15000):
"""
Represents lyso
"""
self.x0 = X0
self.Z = Z
self.rho = rho
self.tau=tau
self.n = n
self.mu=LambdaAtt
self.lambdaScint = LambdaPeak
self.photoF = PhotoFraction
self.Nphot = Nphot
lysct =[] #transform to nm
for elem in LysoScintSpectrum:
ene = elem[0]
w =elem[1]
x =[(1240./ene)*nm,w]
lysct.append(x)
self.LYSC = sorted(lysct, key=sortRI)
print "scintillation spectrum"
for elem in self.LYSC:
print " lambda = %7.2f nm w= %7.2g"%(elem[0]/nm,elem[1])
l = self.AverageLamda()
def Name(self):
"""
Interface: Name
"""
return "LYSO"
def EffectiveAtomicNumber(self):
"""
Interface: Lyso atomic number
"""
return self.Z
def RadiationLength(self):
"""
Interface: X0/rho
"""
return self.x0/cm
def RefractionIndex(self):
"""
Interface:
"""
return self.n
def RefractionIndexL(self,lamda):
"""
returns the refraction index
"""
return self.n
def RefractionIndexBlue(self):
"""
returns the refraction index
"""
return self.n
def DecayConstant(self):
"""
Interface
"""
return self.tau
def Density(self):
"""
Interface: Density of LXe
"""
return self.rho
def ScintillationWavelength(self):
"""
Interface: average scintillation wavelength
"""
return self.lambdaScint
def PhotoelectricFractionAt511KeV(self):
"""
Interface Photoelectric Xs
"""
return self.photoF
def AttenuationAt511KeV(self,Z):
"""
        Interface: Attenuation of a beam of energy E = 511 keV in a thickness Z
"""
return exp(-Z*self.mu)
def EfficiencyAt511KeV(self,Z):
"""
        Interface: Fraction of gammas of energy E = 511 keV interacting in a thickness Z
"""
return 1. - self.AttenuationAt511KeV(Z)
def ScintillationPhotonsAt511KeV(self):
"""
Number of scintillation photons produced by a photon of energy 511 keV
"""
return self.Nphot
def AverageLamda(self):
"""
Returns the average lamda
"""
l=0.
w=0.
for elem in self.LYSC:
l+=elem[0]*elem[1]
w+=elem[1]
return (l/w)
def X0(self):
"""
EffectiveAtomicNumber
"""
return self.x0
def Lifetime(self):
"""
returns the lifetime
"""
return self.tau
def __str__(self):
s= """
Name = LYSO Z = %d
Density = %7.4g g/cm3 X0= %7.4g g/cm2
Scintillation wavelength = %7.2f nm
Refraction Index (Blue) = %7.2f
Lifetime = %7.2f ns
ScintillationPhotons = %7.2f
Attenuation in 1 cm = %7.2f
PhotoelectricFraction = %7.2f
"""%(self.EffectiveAtomicNumber(),
self.Density()/(g/cm3),
self.X0()/cm,
self.ScintillationWavelength()/nm, self.RefractionIndex(),
self.Lifetime(), self.ScintillationPhotonsAt511KeV(),
self.AttenuationAt511KeV(1.*cm),self.PhotoelectricFractionAt511KeV())
return s
def testLxe():
lxe = LXe()
print lxe
print lxe.DisplayProperties()
for l in drange(150*nm,450*nm,10*nm):
print """
for lamda = %7.2f nm (%7.2f eV) n = %7.2f
"""%(l/nm, 1240./(l/nm), lxe.RefractionIndexL(l))
l,n = lxe.AverageLamdaAndRI()
print """
Average lamda = %7.2f nm ; average n = %7.2f
"""%(l/nm, n)
rmsri = lxe.RmsRI()
print """
Weighted rms of n = %7.2f
"""%(rmsri)
print """
Dn/n = %7.2f
"""%(rmsri/n)
print "Efficiency for 511 keV photons"
for z in drange(1., 11., 1.):
print """
z = %7.2g cm LXe eff = %7.2g
"""%(z,
lxe.Efficiency(511*keV,z*cm))
print """Photoelectric fraction
at 511 keV photons = %7.2g"""%(
lxe.PhotoelectricCrossSection(511*keV)/
lxe.TotalCrossSection(511*keV))
print """
    Gamma path length in LXe for 511 keV photons = %7.2g cm
"""%(
lxe.GammaPathLength(511*keV)/cm)
print """
Number of scintillation photons Ns (511 keV, LXe)= %7.2g
with tau1 = %7.2f ns lifetime: = %7.2f
with tau2 = %7.2f ns lifetime: = %7.2f
with tau3 = %7.2f ns lifetime: = %7.2f
"""%(
lxe.ScintillationPhotons(511*keV),lxe.Lifetime(1),
lxe.SPhotonsAt511KeV(1),
lxe.Lifetime(2),
lxe.SPhotonsAt511KeV(2),
lxe.Lifetime(3),
lxe.SPhotonsAt511KeV(3)
)
def testLyso():
lyso = LYSO()
print lyso
    print lyso.DisplayProperties()
print "Average Lamda = %7.2f"%(lyso.AverageLamda()/nm)
for z in drange(1., 11., 1.):
print """
z = %7.2g cm LYSO eff = %7.2g
"""%(z,
lyso.EfficiencyAt511KeV(z*cm))
def plotLXe():
Lambda=[]
I=[]
N=[]
for elem in LXeRefractionIndex:
ene = elem[0]
n = elem[1]
f = elem[2]
Lambda.append(1240./ene)
I.append(f)
N.append(n)
plt.plot(Lambda,I)
plt.show()
plt.plot(Lambda,N)
plt.show()
def plotLYSO():
Lambda=[]
I=[]
for elem in LysoScintSpectrum:
ene = elem[0]
f = elem[1]
Lambda.append(1240./ene)
I.append(f)
plt.plot(Lambda,I)
plt.show()
if __name__ == '__main__':
#testLxe()
#testLyso()
plotLXe()
plotLYSO()
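# Sanity check for the formulas above: ScintillationPhotons(E) = E/Ws, so for
# the default Ws = 16.6 eV one expects Ns(511 keV) = 511e3/16.6 ~= 3.1e4
# photons, split across the lifetime components by their ratios.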
|
from django import forms
from django.db.models import Count
from dcim.models import Site, Rack, Device, Interface
from extras.forms import CustomFieldForm, CustomFieldBulkEditForm, CustomFieldFilterForm
from tenancy.models import Tenant
from utilities.forms import (
APISelect, BootstrapMixin, BulkImportForm, CSVDataField, ExpandableIPAddressField, FilterChoiceField, Livesearch,
SlugField, add_blank_choice,
)
from .models import (
Aggregate, IPAddress, IPADDRESS_STATUS_CHOICES, Prefix, PREFIX_STATUS_CHOICES, RIR, Role, Service, VLAN,
VLANGroup, VLAN_STATUS_CHOICES, VRF,
)
IP_FAMILY_CHOICES = [
('', 'All'),
(4, 'IPv4'),
(6, 'IPv6'),
]
#
# VRFs
#
class VRFForm(BootstrapMixin, CustomFieldForm):
class Meta:
model = VRF
fields = ['name', 'rd', 'tenant', 'enforce_unique', 'description']
labels = {
'rd': "RD",
}
help_texts = {
'rd': "Route distinguisher in any format",
}
class VRFFromCSVForm(forms.ModelForm):
tenant = forms.ModelChoiceField(Tenant.objects.all(), to_field_name='name', required=False,
error_messages={'invalid_choice': 'Tenant not found.'})
class Meta:
model = VRF
fields = ['name', 'rd', 'tenant', 'enforce_unique', 'description']
class VRFImportForm(BootstrapMixin, BulkImportForm):
csv = CSVDataField(csv_form=VRFFromCSVForm)
class VRFBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):
pk = forms.ModelMultipleChoiceField(queryset=VRF.objects.all(), widget=forms.MultipleHiddenInput)
tenant = forms.ModelChoiceField(queryset=Tenant.objects.all(), required=False)
description = forms.CharField(max_length=100, required=False)
class Meta:
nullable_fields = ['tenant', 'description']
class VRFFilterForm(BootstrapMixin, CustomFieldFilterForm):
model = VRF
tenant = FilterChoiceField(queryset=Tenant.objects.annotate(filter_count=Count('vrfs')), to_field_name='slug',
null_option=(0, None))
#
# RIRs
#
class RIRForm(BootstrapMixin, forms.ModelForm):
slug = SlugField()
class Meta:
model = RIR
fields = ['name', 'slug', 'is_private']
class RIRFilterForm(BootstrapMixin, forms.Form):
is_private = forms.NullBooleanField(required=False, label='Private', widget=forms.Select(choices=[
('', '---------'),
('True', 'Yes'),
('False', 'No'),
]))
#
# Aggregates
#
class AggregateForm(BootstrapMixin, CustomFieldForm):
class Meta:
model = Aggregate
fields = ['prefix', 'rir', 'date_added', 'description']
help_texts = {
'prefix': "IPv4 or IPv6 network",
'rir': "Regional Internet Registry responsible for this prefix",
'date_added': "Format: YYYY-MM-DD",
}
class AggregateFromCSVForm(forms.ModelForm):
rir = forms.ModelChoiceField(queryset=RIR.objects.all(), to_field_name='name',
error_messages={'invalid_choice': 'RIR not found.'})
class Meta:
model = Aggregate
fields = ['prefix', 'rir', 'date_added', 'description']
class AggregateImportForm(BootstrapMixin, BulkImportForm):
csv = CSVDataField(csv_form=AggregateFromCSVForm)
class AggregateBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):
pk = forms.ModelMultipleChoiceField(queryset=Aggregate.objects.all(), widget=forms.MultipleHiddenInput)
rir = forms.ModelChoiceField(queryset=RIR.objects.all(), required=False, label='RIR')
date_added = forms.DateField(required=False)
description = forms.CharField(max_length=100, required=False)
class Meta:
nullable_fields = ['date_added', 'description']
class AggregateFilterForm(BootstrapMixin, CustomFieldFilterForm):
model = Aggregate
family = forms.ChoiceField(required=False, choices=IP_FAMILY_CHOICES, label='Address Family')
rir = FilterChoiceField(queryset=RIR.objects.annotate(filter_count=Count('aggregates')), to_field_name='slug',
label='RIR')
#
# Roles
#
class RoleForm(BootstrapMixin, forms.ModelForm):
slug = SlugField()
class Meta:
model = Role
fields = ['name', 'slug']
#
# Prefixes
#
class PrefixForm(BootstrapMixin, CustomFieldForm):
site = forms.ModelChoiceField(queryset=Site.objects.all(), required=False, label='Site',
widget=forms.Select(attrs={'filter-for': 'vlan'}))
vlan = forms.ModelChoiceField(queryset=VLAN.objects.all(), required=False, label='VLAN',
widget=APISelect(api_url='/api/ipam/vlans/?site_id={{site}}',
display_field='display_name'))
class Meta:
model = Prefix
fields = ['prefix', 'vrf', 'tenant', 'site', 'vlan', 'status', 'role', 'is_pool', 'description']
def __init__(self, *args, **kwargs):
super(PrefixForm, self).__init__(*args, **kwargs)
self.fields['vrf'].empty_label = 'Global'
# Initialize field without choices to avoid pulling all VLANs from the database
if self.is_bound and self.data.get('site'):
self.fields['vlan'].queryset = VLAN.objects.filter(site__pk=self.data['site'])
elif self.initial.get('site'):
self.fields['vlan'].queryset = VLAN.objects.filter(site=self.initial['site'])
else:
self.fields['vlan'].choices = []
class PrefixFromCSVForm(forms.ModelForm):
vrf = forms.ModelChoiceField(queryset=VRF.objects.all(), required=False, to_field_name='rd',
error_messages={'invalid_choice': 'VRF not found.'})
tenant = forms.ModelChoiceField(Tenant.objects.all(), to_field_name='name', required=False,
error_messages={'invalid_choice': 'Tenant not found.'})
site = forms.ModelChoiceField(queryset=Site.objects.all(), required=False, to_field_name='name',
error_messages={'invalid_choice': 'Site not found.'})
vlan_group_name = forms.CharField(required=False)
vlan_vid = forms.IntegerField(required=False)
status_name = forms.ChoiceField(choices=[(s[1], s[0]) for s in PREFIX_STATUS_CHOICES])
role = forms.ModelChoiceField(queryset=Role.objects.all(), required=False, to_field_name='name',
error_messages={'invalid_choice': 'Invalid role.'})
class Meta:
model = Prefix
fields = ['prefix', 'vrf', 'tenant', 'site', 'vlan_group_name', 'vlan_vid', 'status_name', 'role', 'is_pool',
'description']
def clean(self):
super(PrefixFromCSVForm, self).clean()
site = self.cleaned_data.get('site')
vlan_group_name = self.cleaned_data.get('vlan_group_name')
vlan_vid = self.cleaned_data.get('vlan_vid')
# Validate VLAN
vlan_group = None
if vlan_group_name:
try:
vlan_group = VLANGroup.objects.get(site=site, name=vlan_group_name)
except VLANGroup.DoesNotExist:
self.add_error('vlan_group_name', "Invalid VLAN group ({} - {}).".format(site, vlan_group_name))
if vlan_vid and vlan_group:
try:
self.instance.vlan = VLAN.objects.get(group=vlan_group, vid=vlan_vid)
except VLAN.DoesNotExist:
self.add_error('vlan_vid', "Invalid VLAN ID ({} - {}).".format(vlan_group, vlan_vid))
elif vlan_vid and site:
try:
self.instance.vlan = VLAN.objects.get(site=site, vid=vlan_vid)
except VLAN.MultipleObjectsReturned:
self.add_error('vlan_vid', "Multiple VLANs found ({} - VID {})".format(site, vlan_vid))
elif vlan_vid:
self.add_error('vlan_vid', "Must specify site and/or VLAN group when assigning a VLAN.")
def save(self, *args, **kwargs):
# Assign Prefix status by name
self.instance.status = dict(self.fields['status_name'].choices)[self.cleaned_data['status_name']]
return super(PrefixFromCSVForm, self).save(*args, **kwargs)
class PrefixImportForm(BootstrapMixin, BulkImportForm):
csv = CSVDataField(csv_form=PrefixFromCSVForm)
class PrefixBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):
pk = forms.ModelMultipleChoiceField(queryset=Prefix.objects.all(), widget=forms.MultipleHiddenInput)
site = forms.ModelChoiceField(queryset=Site.objects.all(), required=False)
vrf = forms.ModelChoiceField(queryset=VRF.objects.all(), required=False, label='VRF')
tenant = forms.ModelChoiceField(queryset=Tenant.objects.all(), required=False)
status = forms.ChoiceField(choices=add_blank_choice(PREFIX_STATUS_CHOICES), required=False)
role = forms.ModelChoiceField(queryset=Role.objects.all(), required=False)
description = forms.CharField(max_length=100, required=False)
class Meta:
nullable_fields = ['site', 'vrf', 'tenant', 'role', 'description']
def prefix_status_choices():
status_counts = {}
for status in Prefix.objects.values('status').annotate(count=Count('status')).order_by('status'):
status_counts[status['status']] = status['count']
return [(s[0], u'{} ({})'.format(s[1], status_counts.get(s[0], 0))) for s in PREFIX_STATUS_CHOICES]
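# Passing this function itself (not its result) as 'choices' below makes the
# status counts re-evaluate each time the form is instantiated instead of
# once at import time.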
class PrefixFilterForm(BootstrapMixin, CustomFieldFilterForm):
model = Prefix
parent = forms.CharField(required=False, label='Search Within', widget=forms.TextInput(attrs={
'placeholder': 'Network',
}))
family = forms.ChoiceField(required=False, choices=IP_FAMILY_CHOICES, label='Address Family')
vrf = FilterChoiceField(queryset=VRF.objects.annotate(filter_count=Count('prefixes')), to_field_name='rd',
label='VRF', null_option=(0, 'Global'))
tenant = FilterChoiceField(queryset=Tenant.objects.annotate(filter_count=Count('prefixes')), to_field_name='slug',
null_option=(0, 'None'))
status = forms.MultipleChoiceField(choices=prefix_status_choices, required=False)
site = FilterChoiceField(queryset=Site.objects.annotate(filter_count=Count('prefixes')), to_field_name='slug',
null_option=(0, 'None'))
role = FilterChoiceField(queryset=Role.objects.annotate(filter_count=Count('prefixes')), to_field_name='slug',
null_option=(0, 'None'))
expand = forms.BooleanField(required=False, label='Expand prefix hierarchy')
#
# IP addresses
#
class IPAddressForm(BootstrapMixin, CustomFieldForm):
nat_site = forms.ModelChoiceField(queryset=Site.objects.all(), required=False, label='Site',
widget=forms.Select(attrs={'filter-for': 'nat_device'}))
nat_device = forms.ModelChoiceField(queryset=Device.objects.all(), required=False, label='Device',
widget=APISelect(api_url='/api/dcim/devices/?site_id={{nat_site}}',
display_field='display_name',
attrs={'filter-for': 'nat_inside'}))
livesearch = forms.CharField(required=False, label='IP Address', widget=Livesearch(
query_key='q', query_url='ipam-api:ipaddress_list', field_to_update='nat_inside', obj_label='address')
)
class Meta:
model = IPAddress
fields = ['address', 'vrf', 'tenant', 'status', 'nat_inside', 'description']
widgets = {
'nat_inside': APISelect(api_url='/api/ipam/ip-addresses/?device_id={{nat_device}}', display_field='address')
}
def __init__(self, *args, **kwargs):
super(IPAddressForm, self).__init__(*args, **kwargs)
self.fields['vrf'].empty_label = 'Global'
if self.instance.nat_inside:
nat_inside = self.instance.nat_inside
# If the IP is assigned to an interface, populate site/device fields accordingly
if self.instance.nat_inside.interface:
self.initial['nat_site'] = self.instance.nat_inside.interface.device.rack.site.pk
self.initial['nat_device'] = self.instance.nat_inside.interface.device.pk
self.fields['nat_device'].queryset = Device.objects.filter(
rack__site=nat_inside.interface.device.rack.site)
self.fields['nat_inside'].queryset = IPAddress.objects.filter(
interface__device=nat_inside.interface.device)
else:
self.fields['nat_inside'].queryset = IPAddress.objects.filter(pk=nat_inside.pk)
else:
# Initialize nat_device choices if nat_site is set
if self.is_bound and self.data.get('nat_site'):
self.fields['nat_device'].queryset = Device.objects.filter(rack__site__pk=self.data['nat_site'])
elif self.initial.get('nat_site'):
self.fields['nat_device'].queryset = Device.objects.filter(rack__site=self.initial['nat_site'])
else:
self.fields['nat_device'].choices = []
# Initialize nat_inside choices if nat_device is set
if self.is_bound and self.data.get('nat_device'):
self.fields['nat_inside'].queryset = IPAddress.objects.filter(
interface__device__pk=self.data['nat_device'])
elif self.initial.get('nat_device'):
self.fields['nat_inside'].queryset = IPAddress.objects.filter(
interface__device__pk=self.initial['nat_device'])
else:
self.fields['nat_inside'].choices = []
class IPAddressBulkAddForm(BootstrapMixin, forms.Form):
address = ExpandableIPAddressField()
vrf = forms.ModelChoiceField(queryset=VRF.objects.all(), required=False, label='VRF')
tenant = forms.ModelChoiceField(queryset=Tenant.objects.all(), required=False)
status = forms.ChoiceField(choices=IPADDRESS_STATUS_CHOICES)
description = forms.CharField(max_length=100, required=False)
class IPAddressAssignForm(BootstrapMixin, forms.Form):
site = forms.ModelChoiceField(queryset=Site.objects.all(), label='Site', required=False,
widget=forms.Select(attrs={'filter-for': 'rack'}))
rack = forms.ModelChoiceField(queryset=Rack.objects.all(), label='Rack', required=False,
widget=APISelect(api_url='/api/dcim/racks/?site_id={{site}}', display_field='display_name', attrs={'filter-for': 'device'}))
device = forms.ModelChoiceField(queryset=Device.objects.all(), label='Device', required=False,
widget=APISelect(api_url='/api/dcim/devices/?rack_id={{rack}}', display_field='display_name', attrs={'filter-for': 'interface'}))
livesearch = forms.CharField(required=False, label='Device', widget=Livesearch(
query_key='q', query_url='dcim-api:device_list', field_to_update='device')
)
interface = forms.ModelChoiceField(queryset=Interface.objects.all(), label='Interface',
widget=APISelect(api_url='/api/dcim/devices/{{device}}/interfaces/'))
set_as_primary = forms.BooleanField(label='Set as primary IP for device', required=False)
def __init__(self, *args, **kwargs):
super(IPAddressAssignForm, self).__init__(*args, **kwargs)
self.fields['rack'].choices = []
self.fields['device'].choices = []
self.fields['interface'].choices = []
class IPAddressFromCSVForm(forms.ModelForm):
vrf = forms.ModelChoiceField(queryset=VRF.objects.all(), required=False, to_field_name='rd',
error_messages={'invalid_choice': 'VRF not found.'})
tenant = forms.ModelChoiceField(Tenant.objects.all(), to_field_name='name', required=False,
error_messages={'invalid_choice': 'Tenant not found.'})
status_name = forms.ChoiceField(choices=[(s[1], s[0]) for s in IPADDRESS_STATUS_CHOICES])
device = forms.ModelChoiceField(queryset=Device.objects.all(), required=False, to_field_name='name',
error_messages={'invalid_choice': 'Device not found.'})
interface_name = forms.CharField(required=False)
is_primary = forms.BooleanField(required=False)
class Meta:
model = IPAddress
fields = ['address', 'vrf', 'tenant', 'status_name', 'device', 'interface_name', 'is_primary', 'description']
def clean(self):
device = self.cleaned_data.get('device')
interface_name = self.cleaned_data.get('interface_name')
is_primary = self.cleaned_data.get('is_primary')
# Validate interface
if device and interface_name:
try:
Interface.objects.get(device=device, name=interface_name)
except Interface.DoesNotExist:
self.add_error('interface_name', "Invalid interface ({}) for {}".format(interface_name, device))
elif device and not interface_name:
self.add_error('interface_name', "Device set ({}) but interface missing".format(device))
elif interface_name and not device:
self.add_error('device', "Interface set ({}) but device missing or invalid".format(interface_name))
# Validate is_primary
if is_primary and not device:
self.add_error('is_primary', "No device specified; cannot set as primary IP")
def save(self, *args, **kwargs):
# Assign status by name
self.instance.status = dict(self.fields['status_name'].choices)[self.cleaned_data['status_name']]
# Set interface
if self.cleaned_data['device'] and self.cleaned_data['interface_name']:
self.instance.interface = Interface.objects.get(device=self.cleaned_data['device'],
name=self.cleaned_data['interface_name'])
# Set as primary for device
if self.cleaned_data['is_primary']:
if self.instance.address.version == 4:
self.instance.primary_ip4_for = self.cleaned_data['device']
elif self.instance.address.version == 6:
self.instance.primary_ip6_for = self.cleaned_data['device']
return super(IPAddressFromCSVForm, self).save(*args, **kwargs)
class IPAddressImportForm(BootstrapMixin, BulkImportForm):
csv = CSVDataField(csv_form=IPAddressFromCSVForm)
class IPAddressBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):
pk = forms.ModelMultipleChoiceField(queryset=IPAddress.objects.all(), widget=forms.MultipleHiddenInput)
vrf = forms.ModelChoiceField(queryset=VRF.objects.all(), required=False, label='VRF')
tenant = forms.ModelChoiceField(queryset=Tenant.objects.all(), required=False)
status = forms.ChoiceField(choices=add_blank_choice(IPADDRESS_STATUS_CHOICES), required=False)
description = forms.CharField(max_length=100, required=False)
class Meta:
nullable_fields = ['vrf', 'tenant', 'description']
def ipaddress_status_choices():
status_counts = {}
for status in IPAddress.objects.values('status').annotate(count=Count('status')).order_by('status'):
status_counts[status['status']] = status['count']
return [(s[0], u'{} ({})'.format(s[1], status_counts.get(s[0], 0))) for s in IPADDRESS_STATUS_CHOICES]
class IPAddressFilterForm(BootstrapMixin, CustomFieldFilterForm):
model = IPAddress
parent = forms.CharField(required=False, label='Search Within', widget=forms.TextInput(attrs={
'placeholder': 'Prefix',
}))
family = forms.ChoiceField(required=False, choices=IP_FAMILY_CHOICES, label='Address Family')
vrf = FilterChoiceField(queryset=VRF.objects.annotate(filter_count=Count('ip_addresses')), to_field_name='rd',
label='VRF', null_option=(0, 'Global'))
tenant = FilterChoiceField(queryset=Tenant.objects.annotate(filter_count=Count('ip_addresses')),
to_field_name='slug', null_option=(0, 'None'))
status = forms.MultipleChoiceField(choices=ipaddress_status_choices, required=False)
#
# VLAN groups
#
class VLANGroupForm(BootstrapMixin, forms.ModelForm):
slug = SlugField()
class Meta:
model = VLANGroup
fields = ['site', 'name', 'slug']
class VLANGroupFilterForm(BootstrapMixin, forms.Form):
site = FilterChoiceField(queryset=Site.objects.annotate(filter_count=Count('vlan_groups')), to_field_name='slug')
#
# VLANs
#
class VLANForm(BootstrapMixin, CustomFieldForm):
group = forms.ModelChoiceField(queryset=VLANGroup.objects.all(), required=False, label='Group', widget=APISelect(
api_url='/api/ipam/vlan-groups/?site_id={{site}}',
))
class Meta:
model = VLAN
fields = ['site', 'group', 'vid', 'name', 'tenant', 'status', 'role', 'description']
help_texts = {
'site': "The site at which this VLAN exists",
'group': "VLAN group (optional)",
'vid': "Configured VLAN ID",
'name': "Configured VLAN name",
'status': "Operational status of this VLAN",
'role': "The primary function of this VLAN",
}
widgets = {
'site': forms.Select(attrs={'filter-for': 'group'}),
}
def __init__(self, *args, **kwargs):
super(VLANForm, self).__init__(*args, **kwargs)
# Limit VLAN group choices
if self.is_bound and self.data.get('site'):
self.fields['group'].queryset = VLANGroup.objects.filter(site__pk=self.data['site'])
elif self.initial.get('site'):
self.fields['group'].queryset = VLANGroup.objects.filter(site=self.initial['site'])
else:
self.fields['group'].choices = []
class VLANFromCSVForm(forms.ModelForm):
site = forms.ModelChoiceField(queryset=Site.objects.all(), to_field_name='name',
error_messages={'invalid_choice': 'Site not found.'})
group = forms.ModelChoiceField(queryset=VLANGroup.objects.all(), required=False, to_field_name='name',
error_messages={'invalid_choice': 'VLAN group not found.'})
tenant = forms.ModelChoiceField(Tenant.objects.all(), to_field_name='name', required=False,
error_messages={'invalid_choice': 'Tenant not found.'})
status_name = forms.ChoiceField(choices=[(s[1], s[0]) for s in VLAN_STATUS_CHOICES])
role = forms.ModelChoiceField(queryset=Role.objects.all(), required=False, to_field_name='name',
error_messages={'invalid_choice': 'Invalid role.'})
class Meta:
model = VLAN
fields = ['site', 'group', 'vid', 'name', 'tenant', 'status_name', 'role', 'description']
def save(self, *args, **kwargs):
m = super(VLANFromCSVForm, self).save(commit=False)
# Assign VLAN status by name
m.status = dict(self.fields['status_name'].choices)[self.cleaned_data['status_name']]
if kwargs.get('commit'):
m.save()
return m
class VLANImportForm(BootstrapMixin, BulkImportForm):
csv = CSVDataField(csv_form=VLANFromCSVForm)
class VLANBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):
pk = forms.ModelMultipleChoiceField(queryset=VLAN.objects.all(), widget=forms.MultipleHiddenInput)
site = forms.ModelChoiceField(queryset=Site.objects.all(), required=False)
group = forms.ModelChoiceField(queryset=VLANGroup.objects.all(), required=False)
tenant = forms.ModelChoiceField(queryset=Tenant.objects.all(), required=False)
status = forms.ChoiceField(choices=add_blank_choice(VLAN_STATUS_CHOICES), required=False)
role = forms.ModelChoiceField(queryset=Role.objects.all(), required=False)
description = forms.CharField(max_length=100, required=False)
class Meta:
nullable_fields = ['group', 'tenant', 'role', 'description']
def vlan_status_choices():
status_counts = {}
for status in VLAN.objects.values('status').annotate(count=Count('status')).order_by('status'):
status_counts[status['status']] = status['count']
return [(s[0], u'{} ({})'.format(s[1], status_counts.get(s[0], 0))) for s in VLAN_STATUS_CHOICES]
class VLANFilterForm(BootstrapMixin, CustomFieldFilterForm):
model = VLAN
site = FilterChoiceField(queryset=Site.objects.annotate(filter_count=Count('vlans')), to_field_name='slug')
group_id = FilterChoiceField(queryset=VLANGroup.objects.annotate(filter_count=Count('vlans')), label='VLAN group',
null_option=(0, 'None'))
tenant = FilterChoiceField(queryset=Tenant.objects.annotate(filter_count=Count('vlans')), to_field_name='slug',
null_option=(0, 'None'))
status = forms.MultipleChoiceField(choices=vlan_status_choices, required=False)
role = FilterChoiceField(queryset=Role.objects.annotate(filter_count=Count('vlans')), to_field_name='slug',
null_option=(0, 'None'))
#
# Services
#
class ServiceForm(BootstrapMixin, forms.ModelForm):
class Meta:
model = Service
fields = ['name', 'protocol', 'port', 'ipaddresses', 'description']
help_texts = {
'ipaddresses': "IP address assignment is optional. If no IPs are selected, the service is assumed to be "
"reachable via all IPs assigned to the device.",
}
def __init__(self, *args, **kwargs):
super(ServiceForm, self).__init__(*args, **kwargs)
# Limit IP address choices to those assigned to interfaces of the parent device
self.fields['ipaddresses'].queryset = IPAddress.objects.filter(interface__device=self.instance.device)
|
# encoding: utf-8
"""
@project:data_structure_and_algorithm
@author: Jiang Hui
@language:Python 3.7.2 [GCC 7.3.0] :: Anaconda, Inc. on linux
@time: 2019/8/5 19:44
@desc:
"""
class Solution(object):
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        res = float('-inf')  # initialize res to negative infinity
        dp = [0] * len(nums)  # dp[i]: max subarray sum ending at index i
        for i in range(len(nums)):
            prev = dp[i - 1] if i > 0 else 0  # avoid wrapping to dp[-1] on the first pass
            if prev < 0:
                prev = 0  # a negative running sum can only hurt; restart here
            dp[i] = prev + nums[i]
            res = max(dp[i], res)
        return res
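# An equivalent O(1)-space variant (Kadane's algorithm); a sketch assuming a
# non-empty nums list. It keeps only the best sum ending at the current index
# instead of the full dp array:
def max_sub_array_kadane(nums):
    best = cur = nums[0]
    for x in nums[1:]:
        cur = max(x, cur + x)    # extend the current run or start a new one
        best = max(best, cur)    # track the global maximum
    return best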
|
#!/usr/bin/env python
'''
Oct 10, 2017: Pasi Korhonen, The University of Melbourne
Simplifies running orthoMCL with a wrapper and pre-checks the known
formatting issues with FASTA headers to avoid failure in later stages of the run.
'''
import os, sys, optparse, getpass
import multiprocessing
from multiprocessing import Process, Pipe
from Utils import Base, Fasta
#################################################
def options():
parser = optparse.OptionParser('example: %prog -i residues1.fa,residues2.fa,...,residuesN.fa -l LB1,LB2,...,LBN -p 1,1,...,1 -e 1e-5 -s 0.5')
parser.add_option('-a', '--ip', dest='ip', help='MySQL server IP address', metavar='IPADDRESS', default='127.0.0.1')
parser.add_option('-d', '--dir', dest='wd', help='The directory, in which the FASTA files for the analysis are copied to', metavar='DIR', default='TmpOrthoMcl')
parser.add_option('-i', '--filenames', dest='filenames', help='Proteome, CDS or transcript FASTA files of species (separated by commas)', metavar='FILES', default='')
parser.add_option('-l', '--labels', dest='labs', help="Respective labels for proteomes following the order of FASTA files (separated by commas)", metavar='LABELS', default='')
parser.add_option('-p', '--positions', dest='positions', help="Positions of a unique identifier in FASTA header separated by |. Default position is 1 (separated by commas).", metavar='POSITIONS', default='')
parser.add_option('-T', '--threads', dest='pCnt', help='Number of parallel threads (default is half of the capacity but >= 1)', metavar='THREADS', default='0')
parser.add_option('-e', '--evalue', dest='evalue', help="E-value for BLAST run. Default is 1e-5. Use always E-value <= 1e-5 and 1e-X format only!", metavar='EVALUE', default='1e-5')
    parser.add_option('-s', '--similarity', dest='sim', help="Required similarity (0 .. 1) in mcl algorithm. Default is 0.5", metavar='SIM', default='0.5')
    parser.add_option('-m', '--minlen', dest='minlen', help="Required minimum length of a sequence. Default is 20.", metavar='MINLEN', default='20')
parser.add_option('-b', '--noblast', dest='skipBlast', action='store_true', help="Skip BLAST (used to rerun mcl using different E-value and similarity settings)", default=False)
parser.add_option('-n', '--nucl', dest='nucl', action='store_true', help="The residues in sequences represent nucleotides instead of proteins", default=False)
options, args = parser.parse_args()
if options.filenames == '' or options.labs == '':
parser.print_help()
print '\nE.g.: orthoMcl -i proteome1.fa,proteome2.fa -l Tax,Tvi -p 4,4 -e 1e-5'
print "Results will be collected to 'Results' directory in groups.txt file."
print "Note! Labels must be exactly 3 characters long."
sys.exit(-1)
return options
#################################################
def checkMySqlVersion(wd, mySqlIpAddress):
'''
'''
base = Base()
with open("%s/version.sql" %wd, 'w') as handle:
handle.write('SHOW VARIABLES LIKE "%version%";\n')
mySqlVer = base.shell("mysql -h %s -P3306 --protocol tcp --user=root --password=password < %s/version.sql" %(mySqlIpAddress, wd), myStdout = True)
verStr = ""
found = False
for line in mySqlVer.stdout:
items = line.strip().split()
for i in xrange(len(items)):
if "inno" in items[i]:
try:
verStr = items[i+1].strip()
found = True
except IndexError:
break
if found == True: break
try:
verList = [int(item) for item in verStr.split('.')]
except ValueError:
print >> sys.stderr, "### Fatal error: Could not read mysql version (%s). Exiting..." %verStr
sys.exit(-1)
if len(verList) != 3:
print >> sys.stderr, "### Fatal error: Could not read mysql version (%s). Exiting..." %verStr
sys.exit(-1)
return verList
#################################################
def checkResidue(fastaFile):
'''
'''
retVal = "nucleotides"
try:
limit = 100
fasta = Fasta(fastaFile)
for i in xrange(fasta.cnt()):
if i > limit: break
seq = fasta.seq(i).upper()
for item in seq:
if item not in ['A', 'T', 'C', 'G', 'N']:
retVal = "amino acids"
break
except IOError:
print >> sys.stderr, "### Fatal error: file %s not found. Exiting..." %fastaFile
sys.exit(-1)
return retVal
#################################################
def checkUniqueIds(fastaFile):
'''
'''
fasta = Fasta(fastaFile)
if fasta.cnt() != len(set(fasta.headers)):
print >> sys.stderr, "### Fatal error: FASTA sequence identifiers are not unique in %s. Exiting..." %fastaFile
print >> sys.stderr, "### Probably position for this file is given wrong..."
sys.exit(-1)
#################################################
def checkIdLen(fastaFile):
'''
'''
fasta = Fasta(fastaFile)
for i in xrange(fasta.cnt()):
seqId = fasta.headers[i].split()[0]
if len(seqId) > 56:
print >> sys.stderr, "### Fatal error: FASTA sequence identifier %s is too long in %s. Exiting..." %(seqId, fastaFile)
sys.exit(-1)
#################################################
def createOrthoMclConfigFile(wd, userName, eValue, similarity, mySqlIpAddress):
'''
'''
eValue = eValue.split('e')[1]
similarity = int(float(similarity) * 100.0)
with open("%s/orthomcl.config" %wd, 'w') as handle:
handle.write("# this config assumes a mysql database named 'orthomcl'. adjust according\n")
handle.write("# to your situation.\n")
handle.write("dbVendor=mysql\n")
#handle.write("dbConnectString=dbi:mysql:database=ortho%s;host=%s;port=3306\n" %(userName, os.environ["MYSQLHOST"]))
handle.write("dbConnectString=dbi:mysql:database=ortho%s;host=%s;port=3306\n" %(userName, mySqlIpAddress))
handle.write("dbLogin=ortho%s\n" %userName)
handle.write("dbPassword=password\n")
handle.write("similarSequencesTable=SimilarSequences\n")
handle.write("orthologTable=Ortholog\n")
handle.write("inParalogTable=InParalog\n")
handle.write("coOrthologTable=CoOrtholog\n")
handle.write("interTaxonMatchView=InterTaxonMatch\n")
handle.write("percentMatchCutoff=%d\n" %similarity)
handle.write("evalueExponentCutoff=%s\n" %eValue)
handle.write("oracleIndexTblSpc=NONE\n")
#################################################
def createMySqlScripts(wd, userName, ver):
'''
'''
with open("%s/createDb.sql" %wd, 'w') as handle:
if ver[0] > 5 or (ver[0] == 5 and ver[1] > 7) or (ver[0] == 5 and ver[1] == 7 and ver[2] > 5):
handle.write("CREATE USER IF NOT EXISTS 'ortho%s'@'%%' IDENTIFIED BY 'password';\n" %userName)
handle.write("CREATE DATABASE ortho%s;\n" %userName)
#handle.write("GRANT SELECT,INSERT,UPDATE,DELETE,CREATE VIEW,CREATE,INDEX,DROP on ortho%s.* TO 'ortho%s'@'%%';\n" %(userName, userName))
handle.write("GRANT ALL PRIVILEGES ON ortho%s.* TO 'ortho%s'@'%%';\n" %(userName, userName))
    with open("%s/dropDb.sql" %wd, 'w') as handle:
        handle.write("drop database if exists ortho%s;\n" %userName)
#################################################
def callShell(base, cmdStr, dummy = None):
'''
'''
base.shell(cmdStr)
#################################################
def main():
'''
'''
    opts = options()  # parse and validate the command-line options
pCnt = int(opts.pCnt)
if pCnt == 0:
pCnt = int(float(multiprocessing.cpu_count()) / 2.0 + 0.5)
eValue = opts.evalue
similarity = opts.sim
minlen = opts.minlen
files = ["%s/%s" %(opts.wd.strip('"').strip("'").rstrip('/'), myFile) for myFile in opts.filenames.strip().split(',')]
labels = opts.labs.strip().split(',')
if len(labels) != len(set(labels)):
print >> sys.stderr, "### Fatal error: duplicate labels found. Exiting..."
sys.exit(-1)
if len(files) != len(set(files)):
print >> sys.stderr, "### Fatal error: duplicate fasta file names found. Exiting..."
sys.exit(-1)
positions = None
if opts.positions != "":
positions = opts.positions.strip().split(',')
if positions == None:
positions = []
for i in xrange(len(files)):
positions.append("1")
if len(files) != len(labels):
print >> sys.stderr, "### Fatal error: number of files does not match with the number of labels. Exiting..."
sys.exit(-1)
if len(positions) != len(labels):
print >> sys.stderr, "### Fatal error: number of labels does not match with the number of positions of the ids. Exiting..."
sys.exit(-1)
for lab in labels:
if len(lab) != 3:
print >> sys.stderr, "### Fatal error: labels have to be exactly three characters long. Exiting..."
sys.exit(-1)
base = Base()
wd = "Results"
wdFasta = "%s/Fasta" %wd
base.shell("rm -rf Results")
base.createDir(wd)
logHandle = open("%s/log.txt" %wd, 'w')
base.setLogHandle(logHandle)
base.createDir(wdFasta)
#userName = getpass.getuser()
#createOrthoMclConfigFile(wd, userName, eValue, similarity)
#createMySqlScripts(wd, userName)
verList = checkMySqlVersion(wd, opts.ip)
createOrthoMclConfigFile(wd, "root", eValue, similarity, opts.ip)
createMySqlScripts(wd, "root", verList)
requiredMolType = "amino acids"
if opts.nucl == True:
requiredMolType = "nucleotides"
for myFile in files:
molType = checkResidue(myFile)
if requiredMolType != molType:
print >> sys.stderr, "### Fatal error: files have to all be %s. Exiting..." %requiredMolType
print >> sys.stderr, "### File %s failed and was %s." %(myFile, molType)
sys.exit(-1)
base.shell("rm -f %s/*.fasta" %wd)
base.shell("rm -f %s/*.fasta" %wdFasta)
for i in xrange(len(files)):
myLab, myFile, myPos = labels[i], files[i], positions[i]
if myFile == "%s.fasta" %myLab:
print >> sys.stderr, "### Fatal error: orthoMCL produces same filenames that you already have. Please rename your fasta files e.g. to .fa instead of .fasta. Exiting..."
sys.exit(-1)
base.shell("orthomclAdjustFasta %s %s %s" %(myLab, myFile, myPos))
checkUniqueIds("%s.fasta" %myLab)
checkIdLen("%s.fasta" %myLab)
base.shell("mv -f %s.fasta %s" %(myLab, wdFasta))
if opts.skipBlast == False:
base.shell("orthomclFilterFasta %s %s 20" %(wdFasta, minlen))
base.shell("mv -f poorProteins.* %s" %wd)
# Blast all against all
if opts.skipBlast == False:
if opts.nucl == False:
base.shell("makeblastdb -in goodProteins.fasta -dbtype prot")
else:
base.shell("makeblastdb -in goodProteins.fasta -dbtype nucl")
base.shell("cp goodProteins.fasta %s/" %wd)
blastEvalue = eValue
if float(blastEvalue) < 1e-5: blastEvalue = "1e-5"
if opts.skipBlast == False:
if opts.nucl == False:
base.shell("blastp -db goodProteins.fasta -query goodProteins.fasta -outfmt 6 -evalue %s -num_threads %d > %s/goodProteins.blast" %(blastEvalue, pCnt, wd))
else:
base.shell("blastn -db goodProteins.fasta -query goodProteins.fasta -outfmt 6 -evalue %s -num_threads %d > %s/goodProteins.blast" %(blastEvalue, pCnt, wd))
base.shell("""awk '{if ($11<=%s) print $0}' %s/goodProteins.blast | grep -v "^#" > %s/filtered.blast""" %(eValue, wd, wd))
base.shell("mv -f goodProteins.* %s/" %wd)
base.shell("orthomclBlastParser %s/filtered.blast %s > %s/similarSequences.txt" %(wd, wdFasta, wd))
# Prepare database
base.shell("mysql -h %s -P 3306 --protocol tcp --user=root --password=password < %s/dropDb.sql" %(opts.ip, wd))
base.shell("mysql -h %s -P 3306 --protocol tcp --user=root --password=password < %s/createDb.sql" %(opts.ip, wd))
base.shell("orthomclInstallSchema %s/orthomcl.config" %wd)
base.shell("orthomclLoadBlast %s/orthomcl.config %s/similarSequences.txt" %(wd, wd))
# Identify potential orthologs
base.shell("orthomclPairs %s/orthomcl.config %s/orthomclPairs.log cleanup=no" %(wd, wd))
base.shell("rm -rf pairs")
base.shell("rm -rf %s/pairs" %wd)
base.shell("orthomclDumpPairsFiles %s/orthomcl.config" %wd)
base.shell("mv -f pairs %s" %wd)
# Group the orthologs
base.shell("mcl mclInput --abc -I 2.0 -o mclOutput")
base.shell("orthomclMclToGroups OWN_ 1 < mclOutput > %s/groups.txt" %wd)
base.shell("mv -f mclInput %s" %wd)
base.shell("mv -f mclOutput %s" %wd)
logHandle.close()
#################################################
if __name__ == "__main__":
main()
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
img = cv2.imread(r"D:\courses\Computer Vision with OpenCV and Deep Learning\Computer-Vision-with-Python\DATA\internal_external.png",0)
img.shape
plt.imshow(img,cmap="gray")
# RETR_CCOMP builds a two-level hierarchy: external boundaries and the holes inside them
image,contour,hierarchy = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
external_contours = np.zeros(image.shape)
for i in range(len(contour)):
    # hierarchy[0][i][3] is the parent index; -1 means no parent, i.e. an external contour
    if hierarchy[0][i][3] == -1:
        cv2.drawContours(external_contours, contour, i, 255, -1)
plt.imshow(external_contours,cmap="gray")
internal_contours = np.zeros(image.shape)
for i in range(len(contour)):
    # a parent exists, so this contour is a hole (internal)
    if hierarchy[0][i][3] != -1:
        cv2.drawContours(internal_contours, contour, i, 255, -1)
plt.imshow(internal_contours,cmap="gray")
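# Compatibility note (an assumption about the environment): cv2.findContours
# returns (image, contours, hierarchy) on OpenCV 3.x, as used above, but only
# (contours, hierarchy) on OpenCV 2.4 and 4.x. A small version-agnostic helper:
def find_contours_compat(binary_img, mode, method):
    ret = cv2.findContours(binary_img, mode, method)
    # the last two return values are always (contours, hierarchy)
    return ret[-2], ret[-1]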
|
import base
from base import Base
import logging
import sys
sys.path.append('../core/')
from voice import Voice
class Controller:
# arrows
MOVE_FORWARD = 259
MOVE_BACKWARD = 258
TURN_LEFT = 260
TURN_RIGHT = 261
# 'pgup' and 'pgdown'
HEAD_LEFT = 339
HEAD_RIGHT = 338
# q, a, w, s
LEFT_ARM_UP = 113
LEFT_ARM_DOWN = 97
RIGHT_ARM_UP = 119
RIGHT_ARM_DOWN = 115
# 1, 2 ...
SOUND_1 = 49
SOUND_2 = 50
def __init__(self):
self.walle = base.Base()
def process(self, code):
logging.debug('Processing code: %s' % code)
if code == Controller.MOVE_FORWARD:
self.walle.move(Base.FORWARD)
elif code == Controller.MOVE_BACKWARD:
self.walle.move(Base.BACKWARD)
elif code == Controller.TURN_LEFT:
self.walle.turn(Base.TURN_LEFT)
elif code == Controller.TURN_RIGHT:
self.walle.turn(Base.TURN_RIGHT)
elif code == Controller.HEAD_LEFT:
self.walle.move_head(Base.TURN_LEFT)
elif code == Controller.HEAD_RIGHT:
self.walle.move_head(Base.TURN_RIGHT)
elif code == Controller.LEFT_ARM_UP:
self.walle.move_arm(Base.LEFT_ARM, Base.UP)
elif code == Controller.LEFT_ARM_DOWN:
self.walle.move_arm(Base.LEFT_ARM, Base.DOWN)
elif code == Controller.RIGHT_ARM_UP:
self.walle.move_arm(Base.RIGHT_ARM, Base.UP)
elif code == Controller.RIGHT_ARM_DOWN:
self.walle.move_arm(Base.RIGHT_ARM, Base.DOWN)
elif code == Controller.SOUND_1:
Voice.getInstance().playFile('../resources/wall-e.ogg')
elif code == Controller.SOUND_2:
Voice.getInstance().playFile('../resources/eve.ogg')
def __del__(self):
self.walle.__del__()
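# A hedged refactor sketch: the if/elif chain in process() could be table
# driven, mapping key codes to zero-argument callables, e.g.
#   actions = {Controller.MOVE_FORWARD: lambda: self.walle.move(Base.FORWARD)}
#   actions.get(code, lambda: None)()
# which reduces adding a new key binding to a single dictionary entry.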
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 14 15:39:35 2018
@author: Administrator
"""
from urllib import request
import re
# 检验代理服务器,怎么知道当前和Internet连通的
def check_proxy(html):
pattern = re.compile("<title>百度一下,你就知道</title>")
title = re.findall(pattern, html)
if title is None:
return False
else:
return True
# 设置代理服务器
def use_http_proxy(proxy_addr):
# 这里需要使用代理服务器的Handler
proxyH = request.ProxyHandler({"http":proxy_addr})
# 由这个proxy handler创建一个HTTP的opener
opener = request.build_opener(proxyH)
# 把这个opener装载到urllib中,以备使用
request.install_opener(opener)
# 发起HTTP请求
try:
response = request.urlopen("http://www.baidu.com")
# 读取信息,判断此代理是否可用
html = response.read().decode("utf-8")
except:
return False
return check_proxy(html)
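# Note: request.urlopen also accepts a timeout in seconds, e.g.
# request.urlopen("http://www.baidu.com", timeout=10), which keeps the check
# from hanging on an unresponsive proxy.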
if __name__ == "__main__":
proxy_addr = "116.62.23.142:16816"
print(use_http_proxy(proxy_addr))
|
import numpy as np
A = np.arange(2, 14).reshape((3, 4))
print(A)
print(np.argmin(A))
print(np.argmax(A))
print(np.mean(A))
print(A.mean())
print(np.average(A))
print(np.median(A)) # median
print(np.cumsum(A)) # running cumulative sum
print(np.diff(A)) # differences between adjacent elements
print(np.nonzero(A)) # indices of the nonzero elements
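# Note: without an axis argument, argmin/argmax above return indices into the
# flattened array, and np.nonzero returns one index array per dimension.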
A = np.arange(14, 2, -1).reshape((3, 4))
print(A)
print(np.sort(A, axis=0))
print(np.sort(A, axis=1))
print(np.transpose(A))
print(A.T)
print((A.T).dot(A))
print(np.clip(A, 5, 9))
print(A)
print(np.mean(A, axis=0))
print(np.mean(A, axis=1))
|
from spacy.matcher import PhraseMatcher
from spacy.tokens import Doc
from spacy.tokens import Span
from spacy.util import filter_spans
from spacy.language import Language
from text_complexity_analyzer_cm.constants import ACCEPTED_LANGUAGES
emphatics_getter = lambda doc: [doc[span['start']:span['end']]
for span in doc._.emphatics_span_indices]
Doc.set_extension('emphatics_span_indices', force=False, default=[])
Doc.set_extension('emphatics', force=False, getter=emphatics_getter)
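# The indices extension stores plain (serialisable) span offsets while the
# 'emphatics' getter reconstructs Span objects lazily. A hedged usage sketch,
# assuming spaCy 3.x and an installed English pipeline such as en_core_web_sm:
#   nlp = spacy.load('en_core_web_sm')
#   nlp.add_pipe('emphatics tagger', config={'language': 'en'})
#   doc = nlp('They told us everybody would come.')
#   print(doc._.emphatics)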
@Language.factory('emphatics tagger')
class EmphaticsTagger:
def __init__(self, name, nlp, language) -> None:
if not language in ACCEPTED_LANGUAGES:
raise ValueError(f'Language {language} is not supported yet')
self._language = language
self._matcher = PhraseMatcher(nlp.vocab, attr='LOWER')
self._connectives = []
        if language == 'en': # emphatic words for English
self._connectives = ['him', 'there', 'their', 'it', 'he', 'she', 'we', 'who', 'them', 'they', 'you', 'himself', 'her', 'whom', 'itself', 'somebody', 'something', 'us', 'anybody', 'herself', 'anyone', 'everybody', 'nobody', 'everyone', 'themselves', 'yourself', 'someone', 'his', 'yours']
else: # Support for future languages
pass
for con in self._connectives:
            self._matcher.add(con, [nlp(con)])  # spaCy 3.x signature: add(key, [patterns])
def __call__(self, doc: Doc) -> Doc:
matches = self._matcher(doc)
emphatics_spans = [doc[start:end] for _, start, end in matches]
doc._.emphatics_span_indices = [{'start': span.start,
'end': span.end,
'label': span.label}
for span in filter_spans(emphatics_spans)] # Save the emphatics connectives found
        return doc
|
#try1.py
# define a function
def divide(a,b):
    return a/b
# error handling
try:
    # call the function
    #result = divide(5,"aa")
    #result = divide(5,0)
    result = divide(5,2)
except ZeroDivisionError:
    print("Division by zero is not allowed")
except TypeError:
    print("Both operands must be numbers")
else:
    print("Result: {0}".format(result))
finally:
    print("This always runs")
print("End of program")
|
from django.contrib.auth import get_user_model
from django.db import models
User = get_user_model()
class PostList(models.Model):
pass
class PostModify(models.Model):
pass
|
from django.db import models
from benchmark_django_rest_framework.benchmark_model import BenchmarkModel
# Create your models here.
class AppVersions(BenchmarkModel, models.Model):
version = models.IntegerField(primary_key=True)
app_id = models.IntegerField()
created_at = models.DateTimeField(blank=True, null=True)
updated_at = models.DateTimeField(blank=True, null=True)
status = models.IntegerField()
allow_users = models.CharField(max_length=512)
range_dates = models.CharField(max_length=256)
citys = models.CharField(max_length=256)
city_enable = models.IntegerField()
    # the SIT environment also has a 'percentage' field
    # percentage = models.IntegerField(blank=True, null=True)
class Meta:
managed = False
db_table = 'app_versions'
unique_together = (('version', 'app_id'),)
class Clientversion(BenchmarkModel, models.Model):
cvid = models.AutoField(primary_key=True)
app_id = models.CharField(max_length=20)
channel = models.CharField(max_length=20)
clienttype = models.SmallIntegerField()
version = models.ForeignKey(AppVersions, db_column='version')
version_name = models.CharField(max_length=20)
changelogimg = models.CharField(max_length=200, blank=True, null=True)
changelog = models.CharField(max_length=20000, blank=True, null=True)
size = models.IntegerField()
url = models.CharField(max_length=256)
iscompatible = models.SmallIntegerField()
createtime = models.DateTimeField()
minimumversion = models.IntegerField()
frequency = models.IntegerField(blank=True, null=True)
status = models.IntegerField()
class Meta:
managed = False
db_table = 'clientversion'
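# Note: on Django >= 2.0 the ForeignKey above must pass an explicit on_delete
# argument, e.g. models.ForeignKey(AppVersions, db_column='version',
# on_delete=models.DO_NOTHING); the bare form only works on Django 1.x.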
|
#####A program to calculate the one second moving averages and turbulence factors of the velocimetry graphs from testpiv2.py#####
#####If this program is to be used for other arrays, computers, or videos, things that need to change are marked by #*#. #####
import numpy as np
import glob
import pylab
#####make the dictionaries to store the abundance of velocity arrays#####
arrays = {}
#####put the velocity arrays in their appropriate dictionaries#####
for np_name in glob.glob('out/Vertical Velocity/arrays/*.npz'): #*#
with np.load(np_name) as data: #*#
arrays[np_name[29:34]] = data['VV'] #*#
#####finding the average#####
####Variables####
window = 11 #*# This is equal to the fps/2 - 1
frames = arrays.keys()
frames.sort()
nx = 89 #*# X dimension of the video
ny = 159 #*# Y dimension of the video
nt = len(frames) #*# The number of total frames
A = np.empty([nx,ny,nt])
MA = np.empty([nx,ny])
turb = np.empty([nx,ny])
count = 0
####Moving Average Function####
def mov_avg(x,i,w):
    """Average of x over the window [max(0, i-w), min(len(x), i+w))."""
    inp = list(x)
    start = max(0,i-w)
    end = min(len(inp), i+w)
    total = sum( inp[start:end] )
    count = float(end-start)  # the slice holds end-start elements, not end-start+1
    return total/count
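####A vectorized alternative (sketch, plain NumPy assumed) with the same window
####convention as mov_avg; it averages a whole 1-D time series at once:
def mov_avg_1d(x, w):
    x = np.asarray(x, dtype=float)
    c = np.cumsum(np.insert(x, 0, 0.0))         # c[k] = sum of x[:k]
    idx = np.arange(len(x))
    start = np.maximum(0, idx - w)
    end = np.minimum(len(x), idx + w)
    return (c[end] - c[start]) / (end - start)  # window sums / window lengths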
####Puts the velocity arrays into a large numpy array in frame order####
for key in frames:
A[:,:,count] = arrays[key]
count += 1
####Calculates the moving average and the turbulence and then saves the arrays####
for frame in range(nt):
for i in range(nx):
for j in range(ny):
MA[i,j] = mov_avg(A[i,j,:], frame, window)
turb[i,j] = A[i,j,frame] - MA[i,j]
np.savez('out/Vertical Velocity/mvavgturb/VV/%05d.npz' %frame, MA = MA, turb = turb) #*#
|
# Generated by Django 3.0.3 on 2020-05-06 00:40
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Accidente',
fields=[
('k_numaccidente', models.IntegerField(primary_key=True, serialize=False)),
('w_ntsbdoc', models.CharField(blank=True, max_length=100, null=True)),
('d_conclusion', models.CharField(max_length=500)),
('n_heridos', models.IntegerField()),
('n_muertos', models.IntegerField()),
],
options={
'db_table': 'accidente',
'managed': False,
},
),
migrations.CreateModel(
name='Avion',
fields=[
('k_nommodelo', models.CharField(max_length=50, primary_key=True, serialize=False)),
('d_general', models.CharField(blank=True, max_length=200, null=True)),
],
options={
'db_table': 'avion',
'managed': False,
},
),
migrations.CreateModel(
name='Entidad',
fields=[
('k_nomentidad', models.CharField(max_length=50, primary_key=True, serialize=False)),
('f_fundacion', models.DateField(blank=True, null=True)),
('s_tipo', models.CharField(max_length=50)),
],
options={
'db_table': 'entidad',
'managed': False,
},
),
migrations.CreateModel(
name='Fabricante',
fields=[
('k_nomfabricante', models.CharField(max_length=50, primary_key=True, serialize=False)),
('f_fundacion', models.DateField(blank=True, null=True)),
('u_headquarter', models.CharField(blank=True, max_length=50, null=True)),
],
options={
'db_table': 'fabricante',
'managed': False,
},
),
migrations.CreateModel(
name='Situacion',
fields=[
('k_num', models.IntegerField(primary_key=True, serialize=False)),
('un_lat', models.DecimalField(decimal_places=6, max_digits=8)),
('un_lon', models.DecimalField(decimal_places=6, max_digits=9)),
('u_nomlugar', models.CharField(blank=True, max_length=30, null=True)),
('f_hora', models.DateTimeField()),
('d_situacion', models.CharField(max_length=200)),
('un_altitud', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'situacion',
'managed': False,
},
),
migrations.CreateModel(
name='Vuelo',
fields=[
('k_nomvuelo', models.CharField(max_length=10, primary_key=True, serialize=False)),
('f_salida', models.DateTimeField()),
('u_ciudadorigen', models.CharField(max_length=50)),
('u_ciudaddestino', models.CharField(blank=True, max_length=50, null=True)),
('i_registroavion', models.CharField(max_length=6)),
],
options={
'db_table': 'vuelo',
'managed': False,
},
),
]
|
from django.db import models
class User(models.Model):
name = models.CharField(max_length=50,
blank=False,
null=False)
role = models.ForeignKey('Role', on_delete=models.CASCADE)
password = models.CharField(max_length=55,
blank=True,
null=True)
companies = models.ForeignKey('Company', on_delete=models.CASCADE, null=True)
def __str__(self):
return f'{self.name}'
class Company(models.Model):
name = models.CharField(max_length=255, )
address = models.CharField(max_length=255, )
class Meta:
verbose_name_plural = 'Companies'
def __str__(self):
return f'{self.name}'
class Level(models.Model):
label = models.CharField(max_length=55,
default='No level')
def __str__(self):
return f'{self.label}'
class Status(models.Model):
label = models.CharField(max_length=55,
default='No Status')
def __str__(self):
return f'{self.label}'
class Meta:
verbose_name_plural = 'Status'
class Project(models.Model):
name = models.CharField(max_length=55, )
    created_at = models.DateTimeField(auto_now_add=True)  # set automatically on creation (assumed intent)
company = models.ManyToManyField(Company, through='CompanyProject', related_name='projects')
def __str__(self):
return f'{self.name}'
class Role(models.Model):
name = models.CharField(max_length=55,
default='No role')
class Meta:
verbose_name_plural = 'Roles'
def __str__(self):
return f'{self.name}'
class Ticket(models.Model):
label = models.TextField(max_length=255,
default='No description')
user = models.ManyToManyField('User')
    created_at = models.DateTimeField(auto_now_add=True)  # set automatically on creation (assumed intent)
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on every save (assumed intent)
status = models.ForeignKey(Status,
on_delete=models.CASCADE)
level = models.ForeignKey(Level,
on_delete=models.CASCADE)
project = models.ForeignKey(Project,
on_delete=models.CASCADE)
def __str__(self):
return f'{self.label}'
class CompanyProject(models.Model):
company = models.ForeignKey(Company, on_delete=models.CASCADE)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
is_client = models.BooleanField(blank=False)
def __str__(self):
        if self.is_client:
            return f'{self.company} submitted a request'
        return f'{self.company} is responding to a request'
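# A minimal usage sketch (hypothetical names) of the Company<->Project link
# through CompanyProject, which carries the extra is_client flag:
#
#   acme = Company.objects.create(name='Acme', address='1 Main St')
#   proj = Project.objects.create(name='Website redesign')
#   CompanyProject.objects.create(company=acme, project=proj, is_client=True)
#   proj.company.all()   # -> <QuerySet [<Company: Acme>]>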
|
import matplotlib.pyplot as plt
from skimage import measure
import utils.Image_loader as il
import measure.find_countours as fc
img = il.get_sample()
contour = fc.get_contour(img)
polygon = measure.approximate_polygon(contour, 0.8)
print(polygon)
from skimage.measure import approximate_polygon, subdivide_polygon
hand = contour
# subdivide polygon using 2nd degree B-Splines
new_hand = hand.copy()
for _ in range(5):
new_hand = subdivide_polygon(new_hand, degree=2, preserve_ends=True)
# approximate subdivided polygon with Douglas-Peucker algorithm
appr_hand = approximate_polygon(new_hand, tolerance=0.02)
print("Number of coordinates:", len(hand), len(new_hand), len(appr_hand))
fig, ax1 = plt.subplots()
ax1.axis('off')
ax1.imshow(img, cmap=plt.cm.gray)
ax1.set_title('Input image')
# ax1.plot(hand[:, 1], hand[:, 0])
ax1.plot(new_hand[:, 1], new_hand[:, 0])
# ax1.plot(appr_hand[:, 1], appr_hand[:, 0])
plt.show() |
from textblob import TextBlob
from textblob import Word
import sys
from similar import get_cosine, text_to_vector
def parse(string):
global verbose
verbose = False
ques = []
    line = []
try:
txt = TextBlob(string)
for sentence in txt.sentences:
q, l = genQuestion(sentence)
ques.append(q)
line.append(str(l))
# sim.append(cosine)
# print(cosine)
        # Collect source lines similar to a generated question; appending to
        # `line` while iterating over it would never terminate, so matches are
        # gathered separately and merged afterwards.
        matched = []
        for q in ques:
            for l in line:
                vector1 = text_to_vector(str(l))
                vector2 = text_to_vector(q)
                sim = get_cosine(vector1, vector2)
                if sim > 0.5:
                    matched.append(l)
        line.extend(matched)
except Exception as e:
raise e
print(ques)
return ques, line
def newline(string, n):
global verbose
verbose = False
ques = []
sim = []
line = []
res = ""
print("hi")
try:
txt = TextBlob(string)
for sentence in txt.sentences:
q, l = genQuestion(sentence)
ques.append(q)
line.append(str(l))
# sim.append(cosine)
# print(cosine)
for l in line:
print(l)
vector1 = text_to_vector(str(l))
vector2 = text_to_vector(str(n))
sim = get_cosine(vector1, vector2)
print(sim)
if (sim > 0.3):
res = l
print(res)
except Exception as e:
raise e
# print(ques)
return res
def genQuestion(line):
if type(line) is str:
line = TextBlob(line)
bucket = {}
for i, j in enumerate(line.tags):
if j[1] not in bucket:
bucket[j[1]] = i
if verbose:
print('\n', '-'*20)
print(line, '\n')
print("TAGS:", line.tags, '\n')
print(bucket)
question = ''
# NNS Noun, plural
# JJ Adjective
# NNP Proper noun, singular
# VBG Verb, gerund or present participle
# VBN Verb, past participle
# VBZ Verb, 3rd person singular present
# VBD Verb, past tense
# IN Preposition or subordinating conjunction
# PRP Personal pronoun
# NN Noun, singular or mass
l1 = ['NNP', 'VBG', 'VBZ', 'IN']
l2 = ['NNP', 'VBG', 'VBZ']
l3 = ['PRP', 'VBG', 'VBZ', 'IN']
l4 = ['PRP', 'VBG', 'VBZ']
l5 = ['PRP', 'VBG', 'VBD']
l6 = ['NNP', 'VBG', 'VBD']
l7 = ['NN', 'VBG', 'VBZ']
l8 = ['NNP', 'VBZ', 'JJ']
l9 = ['NNP', 'VBZ', 'NN']
l10 = ['NNP', 'VBZ']
l11 = ['PRP', 'VBZ']
l12 = ['NNP', 'NN', 'IN']
l13 = ['NN', 'VBZ']
l14 = ['NNP'] # when is
l15 = ['DT', 'JJ', 'NNP'] # when is
l16 = ['PRP$', 'NN'] # when is
l17 = ['DT', 'NN', 'VBG'] # when is
l18 = ['DT', 'JJS', 'NN'] # where is
l19 = ['NNP', 'POS', 'NN']
l20 = ['NNP', 'POS', 'JJS', 'NN']
l21 = ['NNS', 'VB']
l22 = ['DT', 'NN', 'VB']
l23 = ['NN', 'IN', 'NNP']
l24 = ['JJ', 'VBZ', 'NNP']
l25 = ['DT', 'JJS', 'NNS']
l26 = ['JJR']
l27 = ['DT', 'JJR', 'NN']
l28 = ['NN', 'VBZ', 'JJR']
l29 = ['DT', 'JJR', 'CD']
l30 = ['NNP', 'POS', 'NNP']
l31 = ['DT', 'JJ', 'NN']
l32 = ['PRP$', 'NN']
l33 = ['DT', 'NN', 'JJ']
l34 = ['NNP', 'VBN']
l35 = ['NN', 'VBN']
l36 = ['NN', 'JJ']
if all(key in bucket for key in l1):
question = 'What' + ' ' + line.words[bucket['VBZ']] + ' ' + \
line.words[bucket['NNP']] + ' ' + line.words[bucket['VBG']] + '?'
elif all(key in bucket for key in l2): # 'NNP', 'VBG', 'VBZ' in sentence.
question = 'What' + ' ' + line.words[bucket['VBZ']] + ' ' + \
line.words[bucket['NNP']] + ' ' + line.words[bucket['VBG']] + '?'
elif all(key in bucket for key in l3):
question = 'What' + ' ' + line.words[bucket['VBZ']] + ' ' + \
line.words[bucket['PRP']] + ' ' + line.words[bucket['VBG']] + '?'
elif all(key in bucket for key in l4): # 'PRP', 'VBG', 'VBZ' in sentence.
question = 'What ' + line.words[bucket['PRP']] + ' ' + ' does ' + \
line.words[bucket['VBG']] + ' ' + line.words[bucket['VBG']] + '?'
elif all(key in bucket for key in l7): # 'NN', 'VBG', 'VBZ' in sentence.
question = 'What' + ' ' + line.words[bucket['VBZ']] + ' ' + \
line.words[bucket['NN']] + ' ' + line.words[bucket['VBG']] + '?'
elif all(key in bucket for key in l8): # 'NNP', 'VBZ', 'JJ' in sentence.
question = 'What' + ' ' + \
line.words[bucket['VBZ']] + ' ' + line.words[bucket['NNP']] + '?'
elif all(key in bucket for key in l9): # 'NNP', 'VBZ', 'NN' in sentence
question = 'What' + ' ' + \
line.words[bucket['VBZ']] + ' ' + line.words[bucket['NNP']] + '?'
elif all(key in bucket for key in l11): # 'PRP', 'VBZ' in sentence.
if line.words[bucket['PRP']] in ['she', 'he']:
question = 'What' + ' does ' + \
line.words[bucket['PRP']].lower() + ' ' + \
line.words[bucket['VBZ']].singularize() + '?'
elif all(key in bucket for key in l10): # 'NNP', 'VBZ' in sentence.
question = 'What' + ' does ' + \
line.words[bucket['NNP']] + ' ' + \
line.words[bucket['VBZ']].singularize() + '?'
elif all(key in bucket for key in l13): # 'NN', 'VBZ' in sentence.
question = 'What' + ' ' + \
line.words[bucket['VBZ']] + ' ' + line.words[bucket['NN']] + '?'
    elif all(key in bucket for key in l14):  # lone proper noun -> "When is X?"
question = 'When' + ' is ' + line.words[bucket['NNP']] + '?'
elif all(key in bucket for key in l15):
question = 'When' + ' is ' + line.words[bucket['DT']] + ' ' + \
line.words[bucket['JJ']] + ' ' + line.words[bucket['NNP']] + '?'
elif all(key in bucket for key in l16):
question = 'When' + ' is ' + \
line.words[bucket['PRP$']] + ' ' + line.words[bucket['NN']] + '?'
elif all(key in bucket for key in l17):
question = 'When' + ' is ' + line.words[bucket['DT']] + ' ' + \
line.words[bucket['NN']] + ' ' + line.words[bucket['VBG']] + '?'
elif all(key in bucket for key in l18):
question = 'Where' + ' is ' + line.words[bucket['DT']] + ' ' + \
line.words[bucket['JJS']] + ' ' + line.words[bucket['NN']] + '?'
elif all(key in bucket for key in l19):
question = 'Where' + ' is ' + line.words[bucket['NNP']] + ' ' + \
line.words[bucket['POS']] + ' ' + line.words[bucket['NN']] + '?'
elif all(key in bucket for key in l20):
question = 'Where' + ' is ' + line.words[bucket['NNP']] + ' ' + line.words[bucket['POS']
] + ' ' + line.words[bucket['JJS']] + ' ' + line.words[bucket['NN']] + '?'
elif all(key in bucket for key in l21):
question = 'How' + ' do ' + \
line.words[bucket['NNS']] + ' ' + line.words[bucket['VB']] + '?'
elif all(key in bucket for key in l22):
question = 'How' + ' do ' + line.words[bucket['DT']] + ' ' + \
line.words[bucket['NN']]+' ' + line.words[bucket['VB']] + '?'
elif all(key in bucket for key in l23):
question = 'How' + ' is ' + line.words[bucket['NN']] + ' ' + \
line.words[bucket['IN']]+' ' + line.words[bucket['NNP']] + '?'
elif all(key in bucket for key in l24):
question = 'How' + line.words[bucket['JJ']] + ' ' + \
line.words[bucket['VBZ']]+' ' + line.words[bucket['NNP']] + '?'
elif all(key in bucket for key in l25):
question = 'Which' + ' is ' + \
line.words[bucket['DT']] + ' ' + line.words[bucket['JJS']
]+' ' + line.words[bucket['NNS']] + '?'
elif all(key in bucket for key in l26):
question = 'Which' + ' is ' + line.words[bucket['JJR']] + '?'
elif all(key in bucket for key in l27):
question = 'Which' + ' is ' + \
line.words[bucket['DT']] + ' ' + line.words[bucket['JJR']
]+' ' + line.words[bucket['NN']] + '?'
elif all(key in bucket for key in l28):
question = 'Which' + ' ' + line.words[bucket['NN']] + ' ' + \
line.words[bucket['VBZ']]+' ' + line.words[bucket['JJR']] + '?'
elif all(key in bucket for key in l29):
question = 'Which' + ' is ' + \
line.words[bucket['DT']] + ' ' + line.words[bucket['JJR']
]+' ' + line.words[bucket['CD']] + '?'
elif all(key in bucket for key in l30):
question = 'Who' + ' is ' + line.words[bucket['NNP']] + ' ' + \
line.words[bucket['POS']]+' ' + line.words[bucket['NNP']] + '?'
elif all(key in bucket for key in l31):
question = 'Who' + ' is ' + line.words[bucket['DT']] + ' ' + \
line.words[bucket['JJ']]+' ' + line.words[bucket['NN']] + '?'
elif all(key in bucket for key in l32):
question = 'Who' + ' is ' + \
line.words[bucket['PRP$']] + ' ' + line.words[bucket['NN']] + '?'
elif all(key in bucket for key in l33):
question = 'Why' + ' is ' + line.words[bucket['DT']] + ' ' + \
line.words[bucket['NN']]+' ' + line.words[bucket['JJ']] + '?'
elif all(key in bucket for key in l34):
question = 'Why' + ' is ' + \
line.words[bucket['NNP']] + ' ' + line.words[bucket['VBN']] + '?'
elif all(key in bucket for key in l35):
question = 'Why' + ' is ' + \
line.words[bucket['NN']] + ' ' + line.words[bucket['VBN']] + '?'
elif all(key in bucket for key in l36):
question = 'Why' + ' are ' + \
line.words[bucket['NN']] + ' ' + line.words[bucket['JJ']] + '?'
if 'VBZ' in bucket and line.words[bucket['VBZ']] == "’":
question = question.replace(" ’ ", "'s ")
return question, line
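# A minimal usage sketch (assuming similar.py provides the get_cosine and
# text_to_vector helpers imported above):
#
#   questions, lines = parse("Einstein developed the theory of relativity.")
#   print(questions)   # one candidate question per sentence; the phrasing
#                      # depends on the POS tags TextBlob assigns
#
# genQuestion() picks a template from the POS-tag patterns l1..l36; a sentence
# matching none of the patterns yields an empty question string.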
|
import re
def name(utterance, slot_value, delex=False):
if delex:
if "__NAME__" in utterance:
return "__NAME__"
else:
return "N/A"
pattern = slot_value.replace("The ", "").lower()
if pattern in utterance:
return slot_value
else:
return "N/A"
def near(utterance, slot_value, delex=False):
if delex:
if "__NEAR__" in utterance:
return "__NEAR__"
else:
return "N/A"
pattern = slot_value.replace("The ", "").lower()
if pattern in utterance:
return slot_value
else:
return "N/A"
def area(utterance, slot_value, delex=False):
RIVER_TERMS = ["river", "riverside", "water", "waterfront"]
CITY_TERMS = ["city", "centre"]
if slot_value == "riverside":
if any([t in utterance for t in RIVER_TERMS]):
return "riverside"
elif slot_value == "city centre":
if any([t in utterance for t in CITY_TERMS]):
return "city centre"
return "N/A"
def eat_type(utterance, slot_value, delex=False):
RESTAURANT_TERMS = ["restaurant"]
COFFEE_TERMS = ["coffee", "coffee shop"]
PUB_TERMS = ["pub"]
if slot_value == "restaurant":
if any([t in utterance for t in RESTAURANT_TERMS]):
return "restaurant"
elif slot_value == "coffee shop":
if any([t in utterance for t in COFFEE_TERMS]):
return "coffee shop"
elif slot_value == "pub":
if any([t in utterance for t in PUB_TERMS]):
return "pub"
return "N/A"
def price_range(utterance, slot_value, delex=False):
CHEAP_TERMS = ["cheap", "inexpensive", "not expensive",
"not very expensive", "low price", "budget"]
if slot_value == "cheap":
if any([t in utterance for t in CHEAP_TERMS]):
return "cheap"
if "low" in utterance and "cost" in utterance:
return "cheap"
if "low" in utterance and "price" in utterance:
return "cheap"
if "value" in utterance and "price" in utterance:
return "cheap"
if slot_value == "high":
if "high price" in utterance:
return "high"
if " expensive" in utterance:
return "high"
if "highly - priced" in utterance:
return "high"
if "high - priced" in utterance:
return "high"
if "high cost" in utterance:
return "high"
if "higher - priced" in utterance:
return "high"
if "price range is high" in utterance:
return "high"
if "price range is slightly higher" in utterance:
return "high"
if "costly" in utterance:
return "high"
if "cost" in utterance:
return "high"
if "higher price" in utterance:
return "high"
if "upper price" in utterance:
return "high"
if "highly priced" in utterance:
return "high"
if "prices are high" in utterance:
return "high"
if "high" in utterance:
return "high"
if "high range" in utterance:
return "high"
if "pricey" in utterance:
return "high"
if "expensive" in utterance:
return "high"
if "not cheap" in utterance:
return "high"
if "above average" in utterance:
return "high"
if slot_value == "moderate":
if "moderate price" in utterance:
return "moderate"
if "average price" in utterance:
return "moderate"
if "average pricing" in utterance:
return "moderate"
if "moderate - priced" in utterance:
return "moderate"
if "moderately priced" in utterance:
return "moderate"
if "moderate" in utterance:
return "moderate"
if "affordable" in utterance:
return "moderate"
if "medium price" in utterance:
return "moderate"
if "mid - range price" in utterance:
return "moderate"
if "mid - price" in utterance:
return "moderate"
if "mid price" in utterance:
return "moderate"
if "reasona" in utterance and "price" in utterance:
return "moderate"
if "fair" in utterance and "price" in utterance:
return "moderate"
if "average - priced" in utterance:
return "moderate"
if "mid range for price" in utterance:
return "moderate"
if "medium - priced" in utterance:
return "moderate"
if "average" in utterance and "price" in utterance:
return "moderate"
if "decent" in utterance and "price" in utterance:
return "moderate"
if "mid" in utterance and "price" in utterance:
return "moderate"
if "not pricy" in utterance:
return "moderate"
if "competitive" in utterance:
return "moderate"
if slot_value == "more than £30":
if "more than £ 30" in utterance:
return "more than £30"
if "prices are over £ 30":
return "more than £30"
if "over £ 30":
return "more than £30"
if slot_value == "less than £20":
if "less than £ 20" in utterance:
return "less than £20"
if "less than 20 pounds" in utterance:
return "less than £20"
if "less than twenty pounds" in utterance:
return "less than £20"
if "price range under 20" in utterance:
return "less than £20"
if "less than" in utterance and "20" in utterance:
return "less than £20"
if "under" in utterance and "20" in utterance:
return "less than £20"
if "less than" in utterance and "twenty" in utterance:
return "less than £20"
if "£ 20 or less" in utterance:
return "less than £20"
if "in the £ 20 price range" in utterance:
return "less than £20"
if "20" in utterance and "25" not in utterance:
return "less than £20"
if "twenty pounds" in utterance:
return "less than £20"
if slot_value == "£20-25":
if "£ 20 - £ 25" in utterance:
return "£20-25"
if "£ 20 - 25" in utterance:
return "£20-25"
if "20 - 25" in utterance:
return "£20-25"
if "20" in utterance and "25" in utterance:
return "£20-25"
if "20" in utterance and "30" in utterance:
return "£20-25"
if "twenty" in utterance and "twenty five" in utterance:
return "£20-25"
if "twenty" in utterance and "twenty - five" in utterance:
return "£20-25"
if "under £ 25" in utterance:
return "£20-25"
return "N/A"
def family_friendly(utterance, slot_value, delex=False):
if slot_value == "no":
if "not kid" in utterance and "friendly" in utterance:
return "no"
if "not family" in utterance and "friendly" in utterance:
return "no"
if "not child" in utterance and "friendly" in utterance:
return "no"
if "no kid" in utterance and "friendly" in utterance:
return "no"
if "adult" in utterance:
return "no"
if "non kid" in utterance:
return "no"
if "non family" in utterance:
return "no"
if "non child" in utterance:
return "no"
if "non - kid" in utterance:
return "no"
if "non - family" in utterance:
return "no"
if "non - child" in utterance:
return "no"
if "not - kid" in utterance and "friendly" in utterance:
return "no"
if "not - family" in utterance and "friendly" in utterance:
return "no"
if "not - child" in utterance and "friendly" in utterance:
return "no"
if "not a kid" in utterance:
return "no"
if "not a family" in utterance:
return "no"
if "not a child" in utterance:
return "no"
if "no kids allowed" in utterance:
return "no"
if "no children allowed" in utterance:
return "no"
if "do not allow" in utterance:
return "no"
if "n't" in utterance and "family - friendly" in utterance:
return "no"
if "not" in utterance and "family" in utterance:
return "no"
if "not" in utterance and "children" in utterance:
return "no"
if "not" in utterance and "kid" in utterance:
return "no"
if "no for children" in utterance:
return "no"
if "we welcome" in utterance and "children" in utterance:
return "no"
if "family" in utterance and any([x in utterance for x in [" no ", " not ", " n't ", " lacking "]]):
return "no"
if "kid" in utterance and any([x in utterance for x in [" no ", " not ", " n't ", " lacking " ]]):
return "no"
if "child" in utterance and any([x in utterance for x in [" no ", " not ", " n't ", " lacking "]]):
return "no"
if "families" in utterance and any([x in utterance for x in [" no ", " not ", " n't ", " lacking " ]]):
return "no"
if "bad family - friendly" in utterance:
return "no"
if "unfriendly" in utterance:
return "no"
if "non-family-friendly" in utterance:
return "no"
if slot_value == "yes":
if "we welcome" in utterance and "children" in utterance:
return "yes"
if "families" in utterance and all([x not in utterance for x in [" no ", " not ", " n't "]]):
return "yes"
if "family" in utterance and all([x not in utterance for x in [" no ", " not ", " n't "]]):
return "yes"
if "kid" in utterance and all([x not in utterance for x in [" no ", " not ", " n't "]]):
return "yes"
if "child" in utterance and all([x not in utterance for x in [" no ", " not ", " n't "]]):
return "yes"
if "a family" in utterance:
return "yes"
if "for the whole family" in utterance:
return "yes"
if "welcomes" in utterance and "children" in utterance:
return "yes"
if "child" in utterance and "friendly" in utterance and "not" not in utterance:
return "yes"
if "kid" in utterance and "friendly" in utterance and "not" not in utterance:
return "yes"
if "family" in utterance and "friendly" in utterance and "not" not in utterance:
return "yes"
if "family" in utterance and "friendly" in utterance and "not" not in utterance:
return "yes"
return "N/A"
def food(utterance, slot_value, delex=False):
if slot_value.lower() in utterance:
return slot_value
if "sushi" in utterance:
return "Japanese"
if "pasta" in utterance:
return "Italian"
if "british" in utterance:
return "English"
if "wine" in utterance:
return "French"
if "fries" in utterance:
return "Fast food"
if "spaghetti" in utterance:
return "Italian"
if "fast - food" in utterance:
return "Fast food"
return "N/A"
def customer_rating(utterance, slot_value, delex=False):
if slot_value == "3 out of 5":
if " 3 " in utterance and any([x in utterance for x in [" 5 ", " five ", "star", "out of"]]):
return "3 out of 5"
if " three " in utterance and any([x in utterance for x in [" 5 ", " five ", "star", "out of"]]):
return "3 out of 5"
if slot_value == "1 out of 5":
if " 1 " in utterance and any([x in utterance for x in [" 5 ", " five ", "star", "out of"]]):
return "1 out of 5"
if " one " in utterance and any([x in utterance for x in [" 5 ", " five ", "star", "out of"]]):
return "1 out of 5"
if slot_value == "5 out of 5":
if " 5 " in utterance and any([x in utterance for x in [" 5 ", " five ", "star", "out of"]]):
return "5 out of 5"
if " five " in utterance and any([x in utterance for x in [" 5 ", "star", "out of"]]):
return "5 out of 5"
if slot_value == "low":
if "low customer satisfaction" in utterance:
return "low"
if "customer rating is quite low" in utterance:
return "low"
if re.search("not have high customer ratings", utterance):
return "low"
if re.search("not highly rated", utterance):
return "low"
if "below than average customer rating" in utterance:
return "low"
if "below average customer rating" in utterance:
return "low"
if "below than average rating" in utterance:
return "low"
if "below average rating" in utterance:
return "low"
if 'low customer rating' in utterance:
return "low"
if "low" in utterance and "rating" in utterance:
return "low"
if "low" in utterance and "rate" in utterance:
return "low"
if "poor" in utterance and "rating" in utterance:
return "low"
if "poor" in utterance and "rate" in utterance:
return "low"
if "not" in utterance and " rat" in utterance:
return "low"
if slot_value == "high":
if "is a highly rated" in utterance:
return "high"
if "a high quality " in utterance:
return "high"
if "has received great reviews" in utterance:
return "high"
if "and have great reviews" in utterance:
return "high"
if "respected" in utterance:
return "high"
if "recommended" in utterance:
return "high"
if "not highly - reviewed" in utterance:
return "high"
if "highly - reviewed" in utterance:
return "high"
if re.search("has a high customer rating", utterance):
return "high"
if re.search("with a high customer rating", utterance):
return "high"
if re.search("average \w+ food", utterance):
return "average"
if re.search("highly rated", utterance):
return "high"
if re.search("high customer ratings", utterance):
return "high"
if "high" in utterance and "rating" in utterance:
return "high"
if "high" in utterance and "rate" in utterance:
return "high"
if "high customer satisfaction" in utterance:
return "high"
if re.search("has received high reviews", utterance):
return "high"
if re.search("and has high customer rating", utterance):
return "high"
if "and great customer rating" in utterance:
return "high"
if "an average but" in utterance:
return "average"
if "is a good fast food" in utterance:
return "average"
if "is a good indian" in utterance:
return "average"
if "is a good french" in utterance:
return "average"
if "is a good chinese" in utterance:
return "average"
if "is a good italian" in utterance:
return "average"
if "is an good" in utterance:
return "average"
if "with moderate price range and customer rating" in utterance:
return "average"
if re.search(r"an average (restaurant|bar|pub|coffee)", utterance):
return "average"
if "with average customer review" in utterance:
return "average"
if "with average customers reviews" in utterance:
return "average"
if "moderate review" in utterance:
return "average"
if "moderate rat" in utterance:
return "average"
if "customer rating is average" in utterance:
return "average"
if "is a decent" in utterance:
return "average"
if "average rated" in utterance:
return "average"
if "rated average" in utterance:
return "average"
if re.search("rating for this space is average", utterance):
return "average"
if re.search("average food", utterance):
return "average"
if "average customer rating" in utterance:
return "average"
if "average rat" in utterance:
return "average"
if "average" in utterance and "rating" in utterance:
return "average"
if "average" in utterance and "rate" in utterance:
return "average"
return "N/A"
|
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from tkinter import *
import numpy as np
import csv
skipfirst = True
root = Tk()
root.title("Welcome to Brage test")
verdi = Entry(root, width=50)  # 'verdi' (Norwegian for "value"): the CSV base name; the time/step modes expect "name,start,stop"
verdi.grid(row=0, column=2)
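# Expected CSV layout (inferred from the parsing below, not a documented
# format): col 0 = sample number, 1 = roll, 2 = pitch, 3 = heading,
# 4 = pressure, 5 = temperature, 6 = wind speed, 8 = time "hh:mm:ss",
# 9 = latitude ending in "N", 10 = longitude ending in "E". The first row is
# assumed to be a header and is skipped via the skipfirst flag.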
def on_open():
temp = []
Wind = []
Praser = []
roll = []
hading = []
pitch = []
Latitude = []
Longitude = []
sampol = []
global skipfirst
print("1")
with open(verdi.get()+".csv") as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
if not skipfirst:
sampol.append(int(row[0]))
temp.append(float(row[5]))
if "0-" in row[6]:
row[6] = row[6].replace("0-", "")
if not "" == row[9]:
row[9] = row[9].replace("N", "")
Latitude.append(float(row[9]))
if not "" == row[10]:
row[10] = row[10].replace("E", "")
Longitude.append(float(row[10]))
Wind.append(float(row[6]))
Praser.append(float(row[4]))
roll.append(float(row[1]))
hading.append(float(row[3]))
pitch.append(float(row[2]))
skipfirst = False
print("1")
fig1 = Figure()
fig2 = Figure()
fig3 = Figure()
fig4 = Figure()
fig5 = Figure()
fig6 = Figure()
fig7 = Figure()
fig8 = Figure()
tg = fig1.add_subplot(111)
vg = fig2.add_subplot(111)
pg = fig3.add_subplot(111)
rg = fig4.add_subplot(111)
hg = fig5.add_subplot(111)
pig = fig6.add_subplot(111)
Lati = fig7.add_subplot(111)
Long = fig8.add_subplot(111)
print(temp)
tg.set_title('Temp')
tg.set_xlabel('Sample')
tg.set_ylabel('C')
tg.set_xlim(0,sampol[-1]-1)
tg.set_ylim(-15,40)
lines = tg.plot(np.arange(0,sampol[-1]+1),temp)[0]
vg.set_title('Wind')
vg.set_xlabel('Sample')
vg.set_ylabel('m/s')
vg.set_xlim(0,sampol[-1]-1)
vg.set_ylim(-2,30)
lines2 = vg.plot(np.arange(0,sampol[-1]+1),Wind)[0]
pg.set_title('Pressure')
pg.set_xlabel('Sample')
pg.set_ylabel('Pa')
pg.set_xlim(0,sampol[-1]-1)
pg.set_ylim(500,1200)
lines3 = pg.plot(np.arange(0,sampol[-1]+1),Praser)[0]
rg.set_title('Roll')
rg.set_xlabel('Sample')
rg.set_ylabel('Deg')
rg.set_xlim(0,sampol[-1]-1)
rg.set_ylim(-180,180)
lines4 = rg.plot(np.arange(0,sampol[-1]+1),roll)[0]
hg.set_title('Heading')
hg.set_xlabel('Sample')
hg.set_ylabel('Deg')
hg.set_xlim(0,sampol[-1]-1)
hg.set_ylim(0,360)
lines5 = hg.plot(np.arange(0,sampol[-1]+1),hading)[0]
pig.set_title('Pitch')
pig.set_xlabel('Sample')
pig.set_ylabel('Deg')
pig.set_xlim(0,sampol[-1]-1)
pig.set_ylim(-180,180)
lines6 = pig.plot(np.arange(0,sampol[-1]+1),pitch)[0]
Lati.set_title('Latitude')
Lati.set_xlabel('Sample')
Lati.set_ylabel('N')
Lati.set_xlim(0,len(Latitude)-1)
Lati.set_ylim(6200,6300)
lines7 = Lati.plot(np.arange(0,len(Latitude)),Latitude)[0]
print(Latitude)
Long.set_title('Longitude')
Long.set_xlabel('Sample')
Long.set_ylabel('E')
Long.set_xlim(0,len(Longitude)-1)
Long.set_ylim(600,700)
lines8 = Long.plot(np.arange(0,len(Longitude)),Longitude)[0]
canvas = FigureCanvasTkAgg(fig1, master=root)
canvas.get_tk_widget().grid(row=1, column=1)
canvas2 = FigureCanvasTkAgg(fig2, master=root)
canvas2.get_tk_widget().grid(row=2, column=1)
canvas3 = FigureCanvasTkAgg(fig3, master=root)
canvas3.get_tk_widget().grid(row=1, column=2)
canvas4 = FigureCanvasTkAgg(fig4, master=root)
canvas4.get_tk_widget().grid(row=2, column=2)
canvas5 = FigureCanvasTkAgg(fig5, master=root)
canvas5.get_tk_widget().grid(row=1, column=3)
canvas6 = FigureCanvasTkAgg(fig6, master=root)
canvas6.get_tk_widget().grid(row=2, column=3)
canvas7 = FigureCanvasTkAgg(fig7, master=root)
canvas7.get_tk_widget().grid(row=1, column=4)
canvas8 = FigureCanvasTkAgg(fig8, master=root)
canvas8.get_tk_widget().grid(row=2, column=4)
canvas.draw()
skipfirst = True
def on_open_time():
temp = []
Wind = []
Praser = []
roll = []
hading = []
pitch = []
Latitude = []
Longitude = []
timestamp = verdi.get().split(",")
sampol = []
teg = False
eee = []
global skipfirst
print("1")
with open(timestamp[0]+".csv") as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
if not skipfirst:
eee = row[8].split(":")
if not eee[0] == "":
if timestamp[1] == (eee[0] + ":" + eee[1]):
teg = True
if timestamp[2] == (eee[0] + ":" + eee[1]):
teg = False
if teg:
sampol.append(int(row[0]))
temp.append(float(row[5]))
if "0-" in row[6]:
row[6] = row[6].replace("0-", "")
if not "" == row[9]:
row[9] = row[9].replace("N", "")
Latitude.append(float(row[9]))
if not "" == row[10]:
row[10] = row[10].replace("E", "")
Longitude.append(float(row[10]))
Wind.append(float(row[6]))
Praser.append(float(row[4]))
roll.append(float(row[1]))
hading.append(float(row[3]))
pitch.append(float(row[2]))
skipfirst = False
print("1")
fig1 = Figure()
fig2 = Figure()
fig3 = Figure()
fig4 = Figure()
fig5 = Figure()
fig6 = Figure()
fig7 = Figure()
fig8 = Figure()
tg = fig1.add_subplot(111)
vg = fig2.add_subplot(111)
pg = fig3.add_subplot(111)
rg = fig4.add_subplot(111)
hg = fig5.add_subplot(111)
pig = fig6.add_subplot(111)
Lati = fig7.add_subplot(111)
Long = fig8.add_subplot(111)
tg.set_title('Temp')
tg.set_xlabel('Sample')
tg.set_ylabel('C')
tg.set_xlim(sampol[0] ,sampol[-1])
tg.set_ylim(-15,40)
lines = tg.plot(np.arange(sampol[0],sampol[-1]+1),temp)[0]
vg.set_title('Wind')
vg.set_xlabel('Sample')
vg.set_ylabel('m/s')
vg.set_xlim(sampol[0],sampol[-1])
vg.set_ylim(-2,30)
lines2 = vg.plot(np.arange(sampol[0],sampol[-1]+1),Wind)[0]
pg.set_title('Pressure')
pg.set_xlabel('Sample')
pg.set_ylabel('Pa')
pg.set_xlim(sampol[0],sampol[-1])
pg.set_ylim(500,1200)
lines3 = pg.plot(np.arange(sampol[0],sampol[-1]+1),Praser)[0]
rg.set_title('Roll')
rg.set_xlabel('Sample')
rg.set_ylabel('Deg')
rg.set_xlim(sampol[0],sampol[-1])
rg.set_ylim(-180,180)
lines4 = rg.plot(np.arange(sampol[0],sampol[-1]+1),roll)[0]
hg.set_title('Heading')
hg.set_xlabel('Sample')
hg.set_ylabel('Deg')
hg.set_xlim(sampol[0],sampol[-1])
hg.set_ylim(0,360)
lines5 = hg.plot(np.arange(sampol[0],sampol[-1]+1),hading)[0]
pig.set_title('Pitch')
pig.set_xlabel('Sample')
pig.set_ylabel('Deg')
pig.set_xlim(sampol[0],sampol[-1])
pig.set_ylim(-180,180)
lines6 = pig.plot(np.arange(sampol[0],sampol[-1]+1),pitch)[0]
Lati.set_title('Latitude')
Lati.set_xlabel('Sample')
Lati.set_ylabel('N')
Lati.set_xlim(0,len(Latitude)-1)
Lati.set_ylim(6200,6300)
lines7 = Lati.plot(np.arange(0,len(Latitude)),Latitude)[0]
print(Latitude)
Long.set_title('Longitude')
Long.set_xlabel('Sample')
Long.set_ylabel('E')
Long.set_xlim(0,len(Longitude)-1)
Long.set_ylim(600,700)
lines8 = Long.plot(np.arange(0,len(Longitude)),Longitude)[0]
canvas = FigureCanvasTkAgg(fig1, master=root)
canvas.get_tk_widget().grid(row=1, column=1)
canvas2 = FigureCanvasTkAgg(fig2, master=root)
canvas2.get_tk_widget().grid(row=2, column=1)
canvas3 = FigureCanvasTkAgg(fig3, master=root)
canvas3.get_tk_widget().grid(row=1, column=2)
canvas4 = FigureCanvasTkAgg(fig4, master=root)
canvas4.get_tk_widget().grid(row=2, column=2)
canvas5 = FigureCanvasTkAgg(fig5, master=root)
canvas5.get_tk_widget().grid(row=1, column=3)
canvas6 = FigureCanvasTkAgg(fig6, master=root)
canvas6.get_tk_widget().grid(row=2, column=3)
canvas7 = FigureCanvasTkAgg(fig7, master=root)
canvas7.get_tk_widget().grid(row=1, column=4)
canvas8 = FigureCanvasTkAgg(fig8, master=root)
canvas8.get_tk_widget().grid(row=2, column=4)
canvas.draw()
skipfirst = True
def on_open_step():
temp = []
Wind = []
Praser = []
roll = []
hading = []
pitch = []
Latitude = []
Longitude = []
timestamp = verdi.get().split(",")
sampol = []
teg = False
with open(timestamp[0]+".csv") as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
if timestamp[1] == row[0]:
teg = True
if timestamp[2] == row[0]:
teg = False
if teg:
sampol.append(int(row[0]))
temp.append(float(row[5]))
if "0-" in row[6]:
row[6] = row[6].replace("0-", "")
if not "" == row[9]:
row[9] = row[9].replace("N", "")
Latitude.append(float(row[9]))
if not "" == row[10]:
row[10] = row[10].replace("E", "")
Longitude.append(float(row[10]))
Wind.append(float(row[6]))
Praser.append(float(row[4]))
roll.append(float(row[1]))
hading.append(float(row[3]))
pitch.append(float(row[2]))
print("1")
fig1 = Figure()
fig2 = Figure()
fig3 = Figure()
fig4 = Figure()
fig5 = Figure()
fig6 = Figure()
fig7 = Figure()
fig8 = Figure()
tg = fig1.add_subplot(111)
vg = fig2.add_subplot(111)
pg = fig3.add_subplot(111)
rg = fig4.add_subplot(111)
hg = fig5.add_subplot(111)
pig = fig6.add_subplot(111)
Lati = fig7.add_subplot(111)
Long = fig8.add_subplot(111)
tg.set_title('Temp')
tg.set_xlabel('Sample')
tg.set_ylabel('C')
tg.set_xlim(sampol[0] ,sampol[-1])
tg.set_ylim(-15,40)
lines = tg.plot(np.arange(sampol[0],sampol[-1]+1),temp)[0]
vg.set_title('Wind')
vg.set_xlabel('Sample')
vg.set_ylabel('m/s')
vg.set_xlim(sampol[0],sampol[-1])
vg.set_ylim(-2,30)
lines2 = vg.plot(np.arange(sampol[0],sampol[-1]+1),Wind)[0]
pg.set_title('Pressure')
pg.set_xlabel('Sample')
pg.set_ylabel('Pa')
pg.set_xlim(sampol[0],sampol[-1])
pg.set_ylim(500,1200)
lines3 = pg.plot(np.arange(sampol[0],sampol[-1]+1),Praser)[0]
rg.set_title('Roll')
rg.set_xlabel('Sample')
rg.set_ylabel('Deg')
rg.set_xlim(sampol[0],sampol[-1])
rg.set_ylim(-180,180)
lines4 = rg.plot(np.arange(sampol[0],sampol[-1]+1),roll)[0]
hg.set_title('Heading')
hg.set_xlabel('Sample')
hg.set_ylabel('Deg')
hg.set_xlim(sampol[0],sampol[-1])
hg.set_ylim(0,360)
lines5 = hg.plot(np.arange(sampol[0],sampol[-1]+1),hading)[0]
pig.set_title('Pitch')
pig.set_xlabel('Sample')
pig.set_ylabel('Deg')
pig.set_xlim(sampol[0],sampol[-1])
pig.set_ylim(-180,180)
lines6 = pig.plot(np.arange(sampol[0],sampol[-1]+1),pitch)[0]
Lati.set_title('Latitude')
Lati.set_xlabel('Sample')
Lati.set_ylabel('N')
Lati.set_xlim(0,len(Latitude)-1)
Lati.set_ylim(6200,6300)
lines7 = Lati.plot(np.arange(0,len(Latitude)),Latitude)[0]
print(Latitude)
Long.set_title('Longitude')
Long.set_xlabel('Sample')
Long.set_ylabel('E')
Long.set_xlim(0,len(Longitude)-1)
Long.set_ylim(600,700)
lines8 = Long.plot(np.arange(0,len(Longitude)),Longitude)[0]
canvas = FigureCanvasTkAgg(fig1, master=root)
canvas.get_tk_widget().grid(row=1, column=1)
canvas2 = FigureCanvasTkAgg(fig2, master=root)
canvas2.get_tk_widget().grid(row=2, column=1)
canvas3 = FigureCanvasTkAgg(fig3, master=root)
canvas3.get_tk_widget().grid(row=1, column=2)
canvas4 = FigureCanvasTkAgg(fig4, master=root)
canvas4.get_tk_widget().grid(row=2, column=2)
canvas5 = FigureCanvasTkAgg(fig5, master=root)
canvas5.get_tk_widget().grid(row=1, column=3)
canvas6 = FigureCanvasTkAgg(fig6, master=root)
canvas6.get_tk_widget().grid(row=2, column=3)
canvas7 = FigureCanvasTkAgg(fig7, master=root)
canvas7.get_tk_widget().grid(row=1, column=4)
canvas8 = FigureCanvasTkAgg(fig8, master=root)
canvas8.get_tk_widget().grid(row=2, column=4)
canvas.draw()
OpenFil = Button(root, text="Open", command=on_open)
OpenFil.grid(row=0, column=0)
OpenFilt = Button(root, text="Time start", command=on_open_time)
OpenFilt.grid(row=0, column=1)
OpenFilS = Button(root, text="Step start", command=on_open_step)
OpenFilS.grid(row=0, column=3)
root.mainloop() |
import os
data_prefix_path = '../data/'
model_prefix_path = '../Model/model/'
eval_prefix_path = '../evaltool/'
save_prefix_path = '../save/'
TRAIN_FILE = data_prefix_path + 'nlpcc-iccpol-2016.dbqa.training-data'
TEST_FILE = data_prefix_path + 'nlpcc-iccpol-2016.dbqa.testing-data'
WIKI_EMBEDDING_MATRIX = data_prefix_path + 'news_12g_baidubaike_20g_novel_90g_embedding_64.bin'
# parent of the current working directory, with a trailing separator
prefix_path = os.path.join(os.path.dirname(os.getcwd()), '')
cache_prefix_path = prefix_path + 'Cache/'
token_train_pkl = "token_train.pkl"
token_test_pkl = "token_test.pkl"
IMAGE_PATH = prefix_path + "Image/" |
"""
function:以一定传染概率进行多次传播实验
@author: Ethan
"""
import xlrd
import numpy as np
import random
# 0. set the node count, infection probability, and number of spreading steps
nodeNum = 379
a = 0.09
step = 10
# 1. read the edge list
edges = xlrd.open_workbook(r"F:\lzw\EC\data\data2_netscience_379_914.xlsx")
table = edges.sheets()[0]
nrows = table.nrows
print(nrows)
# 2. build the adjacency matrix (nodes are 1-indexed in the sheet)
p = np.zeros((nodeNum,nodeNum),dtype=int)
for i in range(nrows):
[sou,des] = [table.cell(i,0).value,table.cell(i,1).value]
p[int(sou-1)][int(des-1)] = 1
p[int(des-1)][int(sou-1)] = 1
# 3. initialize node states; columns are [susceptible, infected, recovered]
state = np.zeros((nodeNum,3),dtype=int)
stateList = np.zeros((step,nodeNum,3),dtype=int)
for i in range(nodeNum):
state[i,0] = 1
# 4. make each node the seed in turn and measure its spreading power
seedAF = {}
for z in range(nodeNum):
state[z,0] = 0
state[z,1] = 1
oldSeedList = []
newSeedList = []
oldSeedList.append(z)
num = 0
res = 0
resList = []
seedF = []
for time in range(100):
        # 5. one SIR spreading run over the configured number of steps
for i in range(step):
oldLen = len(oldSeedList)
for j in range(0,oldLen):
oldSeed = oldSeedList[j]
for m in range(nodeNum):
if p[oldSeed,m] == 1:
if state[m,0] == 1:
if random.random() < a:
#print(str(oldSeed)+":"+str(m))
newSeedList.append(m)
            # state update: current spreaders recover (I -> R), newly hit nodes become infected (S -> I)
for k in range(0,oldLen):
oldSeed = oldSeedList[k]
state[oldSeed][1] = 0
state[oldSeed][2] = 1
newLen = len(newSeedList)
for l in range(0,newLen):
newSeed = newSeedList[l]
state[newSeed][0] = 0
state[newSeed][1] = 1
oldSeedList = newSeedList
newSeedList = []
stateList[i] = state
for n in range(step):
res = nodeNum - sum(stateList[n][:,0])
resList.append(res)
num = num + 1
print(str(num)+"--------"+str(resList[step-1]))
seedF.append(resList[step-1])
        # reset to the initial state before the next repetition
oldSeedList = []
oldSeedList.append(z)
        state = np.zeros((nodeNum,3),dtype=int)
for i in range(nodeNum):
state[i,0] = 1
state[z,0] = 0
state[z,1] = 1
        stateList = np.zeros((step,nodeNum,3),dtype=int)
res = 0
resList = []
seedAF[z] = sum(seedF) / 100
# sort nodes by their average final infected count, most influential first
rankedSeeds = sorted(seedAF.items(), key=lambda item: item[1], reverse=True)
print(rankedSeeds[:10])  # the ten strongest spreaders
|
from typing import Any
from unittest import TestCase
from unittest.mock import patch, MagicMock
import yaml
from github import GithubException
from reconcile.utils.openshift_resource import ResourceInventory
from reconcile.utils.saasherder import SaasHerder
from reconcile.utils.saasherder import TARGET_CONFIG_HASH
from .fixtures import Fixtures
class TestCheckSaasFileEnvComboUnique(TestCase):
def test_check_saas_file_env_combo_unique(self):
saas_files = [
{
'path': 'path1',
'name': 'a1',
'managedResourceTypes': [],
'resourceTemplates':
[
{
'name': 'rt',
'url': 'url',
'targets':
[
{
'namespace': {
'name': 'ns',
'environment': {
'name': 'env1'
},
'cluster': {
'name': 'cluster'
}
},
'parameters': {}
},
{
'namespace': {
'name': 'ns',
'environment': {
'name': 'env2'
},
'cluster': {
'name': 'cluster'
}
},
'parameters': {}
}
]
}
],
'roles': [
{'users': [{'org_username': 'myname'}]}
]
}
]
saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration='',
integration_version='',
settings={},
validate=True
)
self.assertTrue(saasherder.valid)
def test_check_saas_file_env_combo_not_unique(self):
saas_files = [
{
'path': 'path1',
'name':
'long-name-which-is-too-long-to-produce-unique-combo',
'managedResourceTypes': [],
'resourceTemplates':
[
{
'name': 'rt',
'url': 'url',
'targets':
[
{
'namespace': {
'name': 'ns',
'environment': {
'name': 'env1'
},
'cluster': {
'name': 'cluster'
}
},
'parameters': {}
},
{
'namespace': {
'name': 'ns',
'environment': {
'name': 'env2'
},
'cluster': {
'name': 'cluster'
}
},
'parameters': {}
}
]
}
],
'roles': [
{'users': [{'org_username': 'myname'}]}
]
}
]
saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration='',
integration_version='',
settings={},
validate=True
)
self.assertFalse(saasherder.valid)
class TestGetMovingCommitsDiffSaasFile(TestCase):
def setUp(self):
self.saas_files = [
{
'path': 'path1',
'name': 'a1',
'managedResourceTypes': [],
'resourceTemplates':
[
{
'name': 'rt',
'url': 'http://github.com/user/repo',
'targets':
[
{
'namespace': {
'name': 'ns',
'environment': {
'name': 'env1'
},
'cluster': {
'name': 'cluster1'
}
},
'parameters': {},
'ref': 'main',
},
{
'namespace': {
'name': 'ns',
'environment': {
'name': 'env2'
},
'cluster': {
'name': 'cluster2'
}
},
'parameters': {},
'ref': 'secondary'
}
]
}
],
'roles': [
{'users': [{'org_username': 'myname'}]}
]
}
]
self.initiate_gh_patcher = patch.object(
SaasHerder, '_initiate_github', autospec=True
)
self.get_pipelines_provider_patcher = patch.object(
SaasHerder, '_get_pipelines_provider'
)
self.get_commit_sha_patcher = patch.object(
SaasHerder, '_get_commit_sha', autospec=True
)
self.initiate_gh = self.initiate_gh_patcher.start()
self.get_pipelines_provider = \
self.get_pipelines_provider_patcher.start()
self.get_commit_sha = self.get_commit_sha_patcher.start()
self.maxDiff = None
def tearDown(self):
for p in (
self.initiate_gh_patcher,
self.get_pipelines_provider_patcher,
self.get_commit_sha_patcher
):
p.stop()
def test_get_moving_commits_diff_saas_file_all_fine(self):
saasherder = SaasHerder(
self.saas_files,
thread_pool_size=1,
gitlab=None,
integration='',
integration_version='',
settings={},
validate=False
)
saasherder.state = MagicMock()
saasherder.state.get.return_value = 'asha'
self.get_commit_sha.side_effect = ('abcd4242', '4242efg')
self.get_pipelines_provider.return_value = 'apipelineprovider'
expected = [
{
'saas_file_name': self.saas_files[0]['name'],
'env_name': 'env1',
'timeout': None,
'ref': 'main',
'commit_sha': 'abcd4242',
'cluster_name': 'cluster1',
'pipelines_provider': 'apipelineprovider',
'namespace_name': 'ns',
'rt_name': 'rt',
},
{
'saas_file_name': self.saas_files[0]['name'],
'env_name': 'env2',
'timeout': None,
'ref': 'secondary',
'commit_sha': '4242efg',
'cluster_name': 'cluster2',
'pipelines_provider': 'apipelineprovider',
'namespace_name': 'ns',
'rt_name': 'rt',
}
]
self.assertEqual(
saasherder.get_moving_commits_diff_saas_file(
self.saas_files[0], True
),
expected
)
def test_get_moving_commits_diff_saas_file_bad_sha1(self):
saasherder = SaasHerder(
self.saas_files,
thread_pool_size=1,
gitlab=None,
integration='',
integration_version='',
settings={},
validate=False
)
saasherder.state = MagicMock()
saasherder.state.get.return_value = 'asha'
self.get_pipelines_provider.return_value = 'apipelineprovider'
self.get_commit_sha.side_effect = GithubException(
401, 'somedata', {'aheader': 'avalue'}
)
# At least we don't crash!
self.assertEqual(
saasherder.get_moving_commits_diff_saas_file(
self.saas_files[0], True
),
[]
)
class TestPopulateDesiredState(TestCase):
def setUp(self):
saas_files = []
self.fxts = Fixtures('saasherder_populate_desired')
for file in [self.fxts.get("saas_remote_openshift_template.yaml")]:
saas_files.append(yaml.safe_load(file))
self.assertEqual(1, len(saas_files))
self.saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration='',
integration_version='',
settings={'hashLength': 7}
)
# Mock GitHub interactions.
self.initiate_gh_patcher = patch.object(
SaasHerder, '_initiate_github', autospec=True, return_value=None,
)
self.get_file_contents_patcher = patch.object(
SaasHerder,
'_get_file_contents',
wraps=self.fake_get_file_contents,
)
self.initiate_gh_patcher.start()
self.get_file_contents_patcher.start()
# Mock image checking.
self.get_check_images_patcher = patch.object(
SaasHerder,
'_check_images',
autospec=True,
return_value=None,
)
self.get_check_images_patcher.start()
def fake_get_file_contents(self, options):
self.assertEqual(
'https://github.com/rhobs/configuration', options['url'])
content = self.fxts.get(
options['ref'] + (options['path'].replace('/', '_')))
return yaml.safe_load(content), "yolo", options['ref']
def tearDown(self):
for p in (
self.initiate_gh_patcher,
self.get_file_contents_patcher,
self.get_check_images_patcher,
):
p.stop()
def test_populate_desired_state_saas_file_delete(self):
spec = {'delete': True}
desired_state \
= self.saasherder.populate_desired_state_saas_file(spec, None)
self.assertIsNone(desired_state)
def test_populate_desired_state_cases(self):
ri = ResourceInventory()
for resource_type in (
"Deployment",
"Service",
"ConfigMap",
):
ri.initialize_resource_type("stage-1", "yolo-stage", resource_type)
ri.initialize_resource_type("prod-1", "yolo", resource_type)
self.saasherder.populate_desired_state(ri)
cnt = 0
for (cluster, namespace, resource_type, data) in ri:
for _, d_item in data['desired'].items():
expected = yaml.safe_load(self.fxts.get(
f"expected_{cluster}_{namespace}_{resource_type}.json",
))
self.assertEqual(expected, d_item.body)
cnt += 1
        self.assertEqual(5, cnt, "expected exactly 5 desired resources")
class TestCollectRepoUrls(TestCase):
def test_collect_repo_urls(self):
repo_url = 'git-repo'
saas_files = [
{
'path': 'path1',
'name': 'name1',
'managedResourceTypes': [],
'resourceTemplates': [
{
'name': 'name',
'url': repo_url,
'targets': []
}
]
}
]
saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration='',
integration_version='',
settings={}
)
self.assertEqual({repo_url}, saasherder.repo_urls)
class TestGetSaasFileAttribute(TestCase):
def test_attribute_none(self):
saas_files = [
{
'path': 'path1',
'name': 'name1',
'managedResourceTypes': [],
'resourceTemplates': []
}
]
saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration='',
integration_version='',
settings={}
)
att = saasherder._get_saas_file_feature_enabled('no_such_attribute')
self.assertEqual(att, None)
def test_attribute_not_none(self):
saas_files = [
{
'path': 'path1',
'name': 'name1',
'managedResourceTypes': [],
'resourceTemplates': [],
'attrib': True
}
]
saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration='',
integration_version='',
settings={}
)
att = saasherder._get_saas_file_feature_enabled('attrib')
self.assertEqual(att, True)
def test_attribute_none_with_default(self):
saas_files = [
{
'path': 'path1',
'name': 'name1',
'managedResourceTypes': [],
'resourceTemplates': []
}
]
saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration='',
integration_version='',
settings={}
)
att = saasherder._get_saas_file_feature_enabled(
'no_such_att', default=True)
self.assertEqual(att, True)
def test_attribute_not_none_with_default(self):
saas_files = [
{
'path': 'path1',
'name': 'name1',
'managedResourceTypes': [],
'resourceTemplates': [],
'attrib': True
}
]
saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration='',
integration_version='',
settings={}
)
att = saasherder._get_saas_file_feature_enabled(
'attrib', default=False)
self.assertEqual(att, True)
def test_attribute_multiple_saas_files_return_false(self):
saas_files = [
{
'path': 'path1',
'name': 'name1',
'managedResourceTypes': [],
'resourceTemplates': [],
'attrib': True
},
{
'path': 'path2',
'name': 'name2',
'managedResourceTypes': [],
'resourceTemplates': []
}
]
saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration='',
integration_version='',
settings={}
)
self.assertFalse(saasherder._get_saas_file_feature_enabled('attrib'))
def test_attribute_multiple_saas_files_with_default_return_false(self):
saas_files = [
{
'path': 'path1',
'name': 'name1',
'managedResourceTypes': [],
'resourceTemplates': [],
'attrib': True
},
{
'path': 'path2',
'name': 'name2',
'managedResourceTypes': [],
'resourceTemplates': [],
'attrib': True
}
]
saasherder = SaasHerder(
saas_files,
thread_pool_size=1,
gitlab=None,
integration='',
integration_version='',
settings={}
)
att = saasherder._get_saas_file_feature_enabled(
'attrib', default=True)
self.assertFalse(att)
class TestConfigHashPromotionsValidation(TestCase):
""" TestCase to test SaasHerder promotions validation. SaasHerder is
initialized with ResourceInventory population. Like is done in
openshift-saas-deploy"""
cluster: str
namespace: str
fxt: Any
template: Any
@classmethod
def setUpClass(cls):
cls.fxt = Fixtures('saasherder')
cls.cluster = "test-cluster"
cls.template = cls.fxt.get_anymarkup('template_1.yml')
def setUp(self) -> None:
self.all_saas_files = \
[self.fxt.get_anymarkup('saas.gql.yml')]
self.state_patcher = \
patch("reconcile.utils.saasherder.State", autospec=True)
self.state_mock = self.state_patcher.start().return_value
self.ig_patcher = \
patch.object(SaasHerder, "_initiate_github", autospec=True)
self.ig_patcher.start()
self.image_auth_patcher = \
patch.object(SaasHerder, "_initiate_image_auth")
self.image_auth_patcher.start()
self.gfc_patcher = \
patch.object(SaasHerder, "_get_file_contents", autospec=True)
gfc_mock = self.gfc_patcher.start()
self.saas_file = \
self.fxt.get_anymarkup('saas.gql.yml')
# ApiVersion is set in the saas gql query method in queries module
self.saas_file["apiVersion"] = "v2"
gfc_mock.return_value = (self.template, "url", "ahash")
self.deploy_current_state_fxt = \
self.fxt.get_anymarkup('saas_deploy.state.json')
self.post_deploy_current_state_fxt = \
self.fxt.get_anymarkup('saas_post_deploy.state.json')
self.saasherder = SaasHerder(
[self.saas_file],
thread_pool_size=1,
gitlab=None,
integration='',
integration_version='',
accounts={"name": "test-account"}, # Initiates State in SaasHerder
settings={
"hashLength": 24
}
)
        # IMPORTANT: populating desired state modifies self.saas_files within
        # the saasherder object.
self.ri = ResourceInventory()
for ns in ["test-ns-publisher", "test-ns-subscriber"]:
for kind in ["Service", "Deployment"]:
self.ri.initialize_resource_type(
self.cluster, ns, kind)
self.saasherder.populate_desired_state(self.ri)
if self.ri.has_error_registered():
raise Exception("Errors registered in Resourceinventory")
def tearDown(self):
self.state_patcher.stop()
self.ig_patcher.stop()
self.gfc_patcher.stop()
def test_config_hash_is_filled(self):
""" Ensures the get_config_diff_saas_file fills the promotion data
on the publisher target. This data is used in publish_promotions
method to add the hash to subscribed targets.
IMPORTANT: This is not the promotion_data within promotion. This
fields are set by _process_template method in saasherder
"""
job_spec = \
self.saasherder.get_configs_diff_saas_file(self.saas_file)[0]
promotion = job_spec["target_config"]["promotion"]
self.assertIsNotNone(promotion[TARGET_CONFIG_HASH])
def test_promotion_state_config_hash_match_validates(self):
""" A promotion is valid if the pusblisher state got from the state
is equal to the one set in the subscriber target promotion data.
This is the happy path, publisher job state target config hash is
the same set in the subscriber job
"""
configs = \
self.saasherder.get_saas_targets_config(self.saas_file)
tcs = list(configs.values())
publisher_config_hash = tcs[0]['promotion'][TARGET_CONFIG_HASH]
publisher_state = {
"success": True,
"saas_file": self.saas_file["name"],
TARGET_CONFIG_HASH: publisher_config_hash
}
self.state_mock.get.return_value = publisher_state
result = self.saasherder.validate_promotions(self.all_saas_files)
self.assertTrue(result)
def test_promotion_state_config_hash_not_match_no_validates(self):
""" Promotion is not valid if the parent target config hash set in
promotion data is not the same set in the publisher job state. This
could happen if a new publisher job has before the subscriber job
"""
publisher_state = {
"success": True,
"saas_file": self.saas_file["name"],
TARGET_CONFIG_HASH: "will_not_match"
}
self.state_mock.get.return_value = publisher_state
result = self.saasherder.validate_promotions(self.all_saas_files)
self.assertFalse(result)
def test_promotion_without_state_config_hash_validates(self):
""" Existent states won't have promotion data. If there is an ongoing
promotion, this ensures it will happen.
"""
promotion_result = {
"success": True,
}
self.state_mock.get.return_value = promotion_result
result = self.saasherder.validate_promotions(self.all_saas_files)
self.assertTrue(result)
class TestConfigHashTrigger(TestCase):
""" TestCase to test Openshift SAAS deploy configs trigger. SaasHerder is
initialized WITHOUT ResourceInventory population. Like is done in the
config changes trigger"""
cluster: str
namespace: str
fxt: Any
template: Any
@classmethod
def setUpClass(cls):
cls.fxt = Fixtures('saasherder')
cls.cluster = "test-cluster"
def setUp(self) -> None:
self.all_saas_files = \
[self.fxt.get_anymarkup('saas.gql.yml')]
self.state_patcher = \
patch("reconcile.utils.saasherder.State", autospec=True)
self.state_mock = self.state_patcher.start().return_value
self.saas_file = \
self.fxt.get_anymarkup('saas.gql.yml')
# ApiVersion is set in the saas gql query method in queries module
self.saas_file["apiVersion"] = "v2"
self.deploy_current_state_fxt = \
self.fxt.get_anymarkup('saas_deploy.state.json')
self.post_deploy_current_state_fxt = \
self.fxt.get_anymarkup('saas_post_deploy.state.json')
self.state_mock.get.side_effect = [
self.deploy_current_state_fxt,
self.post_deploy_current_state_fxt
]
self.saasherder = SaasHerder(
[self.saas_file],
thread_pool_size=1,
gitlab=None,
integration='',
integration_version='',
accounts={"name": "test-account"}, # Initiates State in SaasHerder
settings={
"hashLength": 24
}
)
def tearDown(self):
self.state_patcher.stop()
def test_same_configs_do_not_trigger(self):
""" Ensures that if the same config is found, no job is triggered
current Config is fetched from the state
"""
job_specs = \
self.saasherder.get_configs_diff_saas_file(self.saas_file)
self.assertListEqual(job_specs, [])
def test_config_hash_change_do_trigger(self):
""" Ensures a new job is triggered if the parent config hash changes
"""
configs = \
self.saasherder.get_saas_targets_config(self.saas_file)
desired_tc = list(configs.values())[1]
desired_promo_data = desired_tc["promotion"]["promotion_data"]
desired_promo_data[0]["data"][TARGET_CONFIG_HASH] = "Changed"
job_specs = \
self.saasherder.get_configs_diff_saas_file(self.saas_file)
self.assertEqual(len(job_specs), 1)
def test_non_existent_config_triggers(self):
self.state_mock.get.side_effect = [
self.deploy_current_state_fxt,
None
]
job_specs = \
self.saasherder.get_configs_diff_saas_file(self.saas_file)
self.assertEqual(len(job_specs), 1)
class TestRemoveNoneAttributes(TestCase):
def testSimpleDict(self):
        data = {
"a": 1,
"b": {},
"d": None,
"e": {
"aa": "aa",
"bb": None
}
}
expected = {
"a": 1,
"b": {},
"e": {
"aa": "aa"
}
}
        res = SaasHerder.remove_none_values(data)
self.assertEqual(res, expected)
def testNoneValue(self):
        data = None
expected = {}
        res = SaasHerder.remove_none_values(data)
self.assertEqual(res, expected)
|
import numpy as np
cpu_speed = 1.0
def leaky_relu(x):
return x if x >= 0 else 0.3*x
class App(object):
def __init__(self, name, candidate_models, alpha=0.05, beta=0.001, acc_min=0, lag_max=0, freeze_model=False):
self.name = name
self.can_models = candidate_models
self.acc_min = np.random.normal(acc_min, acc_min*0.001)
self.freeze_model = freeze_model
#self.latency_max = np.random.normal(candidate_models[0].infer_time, 10)
self.latency_max = np.random.normal(lag_max,10)
self.nb_switches = 0
self.alpha = alpha
self.beta = beta
self.model = None
self.load_model_time = 0
self.infer_remain_time = 0
self.ellapse = 0
self.nb_infers = 0
self.infer_accs = []
self.last_time = 0
self.ellapse_times = []
self.sum_load_model_time = 0
self.sim_cpu = 0
self.cpu = 0
def load_model(self, model):
if self.load_model_time > 0:
return
if self.model is not None and self.model.name == model.name:
pass
else:
if self.model is None:
self.load_model_time = model.load_time
else:
if self.freeze_model:
# go bigger:
if model.size > self.model.size:
self.load_model_time = np.abs(self.model.load_time - model.load_time)
else:
self.load_model_time = 0
else:
self.load_model_time = model.load_time
self.model = model
self.nb_switches += 1
self.infer_remain_time = self.model.infer_time
def compute_cost(self, sim_model, sim_cpu, print_cost=False):
#acc_cost = max( self.acc_min - sim_model.acc, 0)
acc_cost = self.acc_min - sim_model.acc
#latency_cost = max( sim_model.infer_time /sim_cpu - self.latency_max , 0)
latency_cost = sim_model.infer_time / sim_cpu - self.latency_max
#latency_cost = sim_model.infer_time * sim_cpu
#compute load cost
#if already load, then no cost.
if self.model is not None and self.model.name == sim_model.name:
load_cost = 0
else:
if self.model is None:
load_cost = sim_model.load_time
else:
##load model cost
if self.freeze_model:
if sim_model.size > self.model.size:
load_cost = np.abs(sim_model.load_time - self.model.load_time)
else:
load_cost = 0
else:
load_cost = sim_model.load_time
if print_cost:
            print('acc:{:.3f}, lag:{:.3f}, load:{:.3f}, total:{:.3f}'.format(
                acc_cost, self.alpha * latency_cost, self.beta * load_cost,
                acc_cost + self.alpha * latency_cost + self.beta * load_cost))
return acc_cost + self.alpha * latency_cost + self.beta * load_cost
def run_model(self):
#load model
if self.load_model_time > 0:
#just loaded
if self.load_model_time == self.model.load_time:
self.last_time = self.ellapse
self.load_model_time -= 1
self.sum_load_model_time += 1
        # model already loaded; advance the inference
else:
new_remain_time = self.infer_remain_time - self.cpu
# inference finished
if new_remain_time <= 0:
self.ellapse_times.append(self.ellapse - self.last_time)
self.last_time = self.ellapse
                # fire an inference
self.nb_infers = self.nb_infers + 1
self.infer_accs.append(self.model.acc)
self.infer_remain_time = self.model.infer_time + new_remain_time
else:
self.infer_remain_time = new_remain_time
self.ellapse = self.ellapse + 1
def print_sum(self):
print(self.name + "\t{}, Run for {} times, switch {} times, mean acc {:.2f}/{:.2f}, average lag:{:.2f}/{:.2f}".format(
self.model.name,
self.nb_infers, self.nb_switches, np.mean(self.infer_accs),self.acc_min, np.mean(self.ellapse_times), self.latency_max / cpu_speed))
def print_sim_2(self):
fps_list = 1000./np.array(self.ellapse_times)
fps = np.mean(fps_list)
print("Run {} times, switch {} times, sum_load_model_time{}, fps{}".
format(self.nb_infers, self.nb_switches, self.sum_load_model_time,fps) )
def get_mem_cost(self):
return 0 if self.model is None else self.model.size
# def get_Gflops(self):
# return 0 if self.model is None else self.model.Gflops
class Model(object):
def __init__(self, arch, name, acc, Gflops, load_time, infer_time, size):
self.name = name
self.arch = arch
self.acc = acc
#self.Gflops = Gflops
self.load_time = load_time
self.infer_time = infer_time
self.size = size
@classmethod
def init_from_list(cls, arch, config):
        return cls(arch, config[0], config[3], config[1], config[2], config[4], config[5])
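# Hedged usage demo, not part of the original module: the config numbers
# below are made up, and the loop mirrors run_model()'s one-tick-per-call
# bookkeeping. Config layout assumed from init_from_list:
# [name, Gflops, load_time, acc, infer_time, size].
if __name__ == '__main__':
    small = Model.init_from_list('resnet', ['small', 1.0, 50, 0.70, 20, 100])
    large = Model.init_from_list('resnet', ['large', 4.0, 200, 0.76, 60, 400])
    app = App('demo', [small, large], acc_min=0.7, lag_max=100)
    app.cpu = 1.0          # run_model() consumes self.cpu worth of work per tick
    app.load_model(small)
    for _ in range(1000):  # simulate 1000 ticks
        app.run_model()
    app.print_sum()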
|
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for the Hyper-V driver and related APIs.
"""
import os
import shutil
import time
import uuid
import mock
import mox
from oslo.config import cfg
from oslo.utils import units
from nova.api.metadata import base as instance_metadata
from nova.compute import power_state
from nova import context
from nova import db
from nova import exception
from nova.i18n import _
from nova.image import glance
from nova.openstack.common import fileutils
from nova import test
from nova.tests.unit import fake_network
from nova.tests.unit.image import fake as fake_image
from nova.tests.unit.virt.hyperv import db_fakes
from nova.tests.unit.virt.hyperv import fake
from nova import utils
from nova.virt import configdrive
from nova.virt import driver
from nova.virt.hyperv import basevolumeutils
from nova.virt.hyperv import constants
from nova.virt.hyperv import driver as driver_hyperv
from nova.virt.hyperv import hostutils
from nova.virt.hyperv import ioutils
from nova.virt.hyperv import networkutils
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import rdpconsoleutils
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeops
from nova.virt.hyperv import volumeutils
from nova.virt.hyperv import volumeutilsv2
from nova.virt import images
CONF = cfg.CONF
CONF.import_opt('vswitch_name', 'nova.virt.hyperv.vif', 'hyperv')
class HyperVAPIBaseTestCase(test.NoDBTestCase):
"""Base unit tests class for Hyper-V driver calls."""
def __init__(self, test_case_name):
self._mox = mox.Mox()
super(HyperVAPIBaseTestCase, self).__init__(test_case_name)
def setUp(self):
super(HyperVAPIBaseTestCase, self).setUp()
self._user_id = 'fake'
self._project_id = 'fake'
self._instance_data = None
self._image_metadata = None
self._fetched_image = None
self._update_image_raise_exception = False
self._volume_target_portal = 'testtargetportal:3260'
self._volume_id = '0ef5d708-45ab-4129-8c59-d774d2837eb7'
self._context = context.RequestContext(self._user_id, self._project_id)
self._instance_disks = []
self._instance_dvds = []
self._instance_volume_disks = []
self._test_vm_name = None
self._test_instance_dir = 'C:\\FakeInstancesPath\\instance-0000001'
self._check_min_windows_version_satisfied = True
self._setup_stubs()
self.flags(instances_path=r'C:\Hyper-V\test\instances',
network_api_class='nova.network.neutronv2.api.API')
self.flags(force_volumeutils_v1=True, group='hyperv')
self.flags(force_hyperv_utils_v1=True, group='hyperv')
self._conn = driver_hyperv.HyperVDriver(None)
def _setup_stubs(self):
db_fakes.stub_out_db_instance_api(self.stubs)
fake_image.stub_out_image_service(self.stubs)
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
def fake_fetch(context, image_id, target, user, project):
self._fetched_image = target
self.stubs.Set(images, 'fetch', fake_fetch)
def fake_get_remote_image_service(context, name):
class FakeGlanceImageService(object):
def update(self_fake, context, image_id, image_metadata, f):
if self._update_image_raise_exception:
raise vmutils.HyperVException(
"Simulated update failure")
self._image_metadata = image_metadata
return (FakeGlanceImageService(), 1)
self.stubs.Set(glance, 'get_remote_image_service',
fake_get_remote_image_service)
def fake_check_min_windows_version(fake_self, major, minor):
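            # Lexicographic list comparison: report Windows versions 6.3 and
            # newer as not satisfied; older checks fall through to the
            # configurable default.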
if [major, minor] >= [6, 3]:
return False
return self._check_min_windows_version_satisfied
self.stubs.Set(hostutils.HostUtils, 'check_min_windows_version',
fake_check_min_windows_version)
def fake_sleep(ms):
pass
self.stubs.Set(time, 'sleep', fake_sleep)
class FakeIOThread(object):
def __init__(self, src, dest, max_bytes):
pass
def start(self):
pass
self.stubs.Set(pathutils, 'PathUtils', fake.PathUtils)
self.stubs.Set(ioutils, 'IOThread', FakeIOThread)
self._mox.StubOutWithMock(fake.PathUtils, 'open')
self._mox.StubOutWithMock(fake.PathUtils, 'copyfile')
self._mox.StubOutWithMock(fake.PathUtils, 'rmtree')
self._mox.StubOutWithMock(fake.PathUtils, 'copy')
self._mox.StubOutWithMock(fake.PathUtils, 'remove')
self._mox.StubOutWithMock(fake.PathUtils, 'rename')
self._mox.StubOutWithMock(fake.PathUtils, 'makedirs')
self._mox.StubOutWithMock(fake.PathUtils,
'get_instance_migr_revert_dir')
self._mox.StubOutWithMock(fake.PathUtils, 'get_instance_dir')
self._mox.StubOutWithMock(fake.PathUtils, 'get_vm_console_log_paths')
self._mox.StubOutWithMock(vmutils.VMUtils, 'vm_exists')
self._mox.StubOutWithMock(vmutils.VMUtils, 'create_vm')
self._mox.StubOutWithMock(vmutils.VMUtils, 'destroy_vm')
self._mox.StubOutWithMock(vmutils.VMUtils, 'attach_ide_drive')
self._mox.StubOutWithMock(vmutils.VMUtils, 'create_scsi_controller')
self._mox.StubOutWithMock(vmutils.VMUtils, 'create_nic')
self._mox.StubOutWithMock(vmutils.VMUtils, 'set_vm_state')
self._mox.StubOutWithMock(vmutils.VMUtils, 'list_instances')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_summary_info')
self._mox.StubOutWithMock(vmutils.VMUtils, 'set_nic_connection')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_scsi_controller')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_ide_controller')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_attached_disks')
self._mox.StubOutWithMock(vmutils.VMUtils,
'attach_volume_to_controller')
self._mox.StubOutWithMock(vmutils.VMUtils,
'get_mounted_disk_by_drive_number')
self._mox.StubOutWithMock(vmutils.VMUtils, 'detach_vm_disk')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_storage_paths')
self._mox.StubOutWithMock(vmutils.VMUtils,
'get_controller_volume_paths')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_free_controller_slot')
self._mox.StubOutWithMock(vmutils.VMUtils,
'enable_vm_metrics_collection')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_id')
self._mox.StubOutWithMock(vmutils.VMUtils,
'get_vm_serial_port_connection')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_differencing_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'reconnect_parent_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'merge_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_parent_path')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_info')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'resize_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils,
'get_internal_vhd_size_by_file_size')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'validate_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_format')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_dynamic_vhd')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_local_ips')
self._mox.StubOutWithMock(networkutils.NetworkUtils,
'get_external_vswitch')
self._mox.StubOutWithMock(networkutils.NetworkUtils,
'create_vswitch_port')
self._mox.StubOutWithMock(networkutils.NetworkUtils,
'vswitch_port_needed')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'volume_in_mapping')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'get_session_id_from_mounted_disk')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'get_device_number_for_target')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'get_target_from_disk_path')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'get_target_lun_count')
self._mox.StubOutWithMock(volumeutils.VolumeUtils,
'login_storage_target')
self._mox.StubOutWithMock(volumeutils.VolumeUtils,
'logout_storage_target')
self._mox.StubOutWithMock(volumeutils.VolumeUtils,
'execute_log_out')
self._mox.StubOutWithMock(volumeutils.VolumeUtils,
'get_iscsi_initiator')
self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
'login_storage_target')
self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
'logout_storage_target')
self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
'execute_log_out')
self._mox.StubOutWithMock(rdpconsoleutils.RDPConsoleUtils,
'get_rdp_console_port')
self._mox.StubOutClassWithMocks(instance_metadata, 'InstanceMetadata')
self._mox.StubOutWithMock(instance_metadata.InstanceMetadata,
'metadata_for_config_drive')
# Can't use StubOutClassWithMocks due to __exit__ and __enter__
self._mox.StubOutWithMock(configdrive, 'ConfigDriveBuilder')
self._mox.StubOutWithMock(configdrive.ConfigDriveBuilder, 'make_drive')
self._mox.StubOutWithMock(fileutils, 'delete_if_exists')
self._mox.StubOutWithMock(utils, 'execute')
def tearDown(self):
self._mox.UnsetStubs()
super(HyperVAPIBaseTestCase, self).tearDown()
class HyperVAPITestCase(HyperVAPIBaseTestCase):
"""Unit tests for Hyper-V driver calls."""
def test_public_api_signatures(self):
self.assertPublicAPISignatures(driver.ComputeDriver(None), self._conn)
def test_list_instances(self):
fake_instances = ['fake1', 'fake2']
vmutils.VMUtils.list_instances().AndReturn(fake_instances)
self._mox.ReplayAll()
instances = self._conn.list_instances()
self._mox.VerifyAll()
self.assertEqual(instances, fake_instances)
def test_get_info(self):
self._instance_data = self._get_instance_data()
summary_info = {'NumberOfProcessors': 2,
'EnabledState': constants.HYPERV_VM_STATE_ENABLED,
'MemoryUsage': 1000,
'UpTime': 1}
m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
m.AndReturn(True)
func = mox.Func(self._check_instance_name)
m = vmutils.VMUtils.get_vm_summary_info(func)
m.AndReturn(summary_info)
self._mox.ReplayAll()
info = self._conn.get_info(self._instance_data)
self._mox.VerifyAll()
self.assertEqual(info.state, power_state.RUNNING)
def test_get_info_instance_not_found(self):
# Tests that InstanceNotFound is raised if the instance isn't found
# from the vmutils.vm_exists method.
self._instance_data = self._get_instance_data()
m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
m.AndReturn(False)
self._mox.ReplayAll()
self.assertRaises(exception.InstanceNotFound, self._conn.get_info,
self._instance_data)
self._mox.VerifyAll()
def _setup_spawn_config_drive_mocks(self, use_cdrom):
instance_metadata.InstanceMetadata(mox.IgnoreArg(),
content=mox.IsA(list),
extra_md=mox.IsA(dict))
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
m.AndReturn(self._test_instance_dir)
cdb = self._mox.CreateMockAnything()
m = configdrive.ConfigDriveBuilder(instance_md=mox.IgnoreArg())
m.AndReturn(cdb)
# __enter__ and __exit__ are required by "with"
cdb.__enter__().AndReturn(cdb)
cdb.make_drive(mox.IsA(str))
cdb.__exit__(None, None, None).AndReturn(None)
if not use_cdrom:
utils.execute(CONF.hyperv.qemu_img_cmd,
'convert',
'-f',
'raw',
'-O',
'vpc',
mox.IsA(str),
mox.IsA(str),
attempts=1)
fake.PathUtils.remove(mox.IsA(str))
m = vmutils.VMUtils.attach_ide_drive(mox.IsA(str),
mox.IsA(str),
mox.IsA(int),
mox.IsA(int),
mox.IsA(str))
m.WithSideEffects(self._add_disk)
def _check_instance_name(self, vm_name):
return vm_name == self._instance_data['name']
def _test_vm_state_change(self, action, from_state, to_state):
self._instance_data = self._get_instance_data()
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
to_state)
if to_state in (constants.HYPERV_VM_STATE_DISABLED,
constants.HYPERV_VM_STATE_REBOOT):
self._setup_delete_vm_log_mocks()
if to_state in (constants.HYPERV_VM_STATE_ENABLED,
constants.HYPERV_VM_STATE_REBOOT):
self._setup_log_vm_output_mocks()
self._mox.ReplayAll()
action(self._instance_data)
self._mox.VerifyAll()
def test_pause(self):
self._test_vm_state_change(self._conn.pause, None,
constants.HYPERV_VM_STATE_PAUSED)
def test_pause_already_paused(self):
self._test_vm_state_change(self._conn.pause,
constants.HYPERV_VM_STATE_PAUSED,
constants.HYPERV_VM_STATE_PAUSED)
def test_unpause(self):
self._test_vm_state_change(self._conn.unpause,
constants.HYPERV_VM_STATE_PAUSED,
constants.HYPERV_VM_STATE_ENABLED)
def test_unpause_already_running(self):
self._test_vm_state_change(self._conn.unpause, None,
constants.HYPERV_VM_STATE_ENABLED)
def test_suspend(self):
self._test_vm_state_change(self._conn.suspend, None,
constants.HYPERV_VM_STATE_SUSPENDED)
def test_suspend_already_suspended(self):
self._test_vm_state_change(self._conn.suspend,
constants.HYPERV_VM_STATE_SUSPENDED,
constants.HYPERV_VM_STATE_SUSPENDED)
def test_resume(self):
self._test_vm_state_change(lambda i: self._conn.resume(self._context,
i, None),
constants.HYPERV_VM_STATE_SUSPENDED,
constants.HYPERV_VM_STATE_ENABLED)
def test_resume_already_running(self):
self._test_vm_state_change(lambda i: self._conn.resume(self._context,
i, None), None,
constants.HYPERV_VM_STATE_ENABLED)
def test_power_off(self):
self._test_vm_state_change(self._conn.power_off, None,
constants.HYPERV_VM_STATE_DISABLED)
def test_power_off_already_powered_off(self):
self._test_vm_state_change(self._conn.power_off,
constants.HYPERV_VM_STATE_DISABLED,
constants.HYPERV_VM_STATE_DISABLED)
def _test_power_on(self, block_device_info):
self._instance_data = self._get_instance_data()
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
if block_device_info:
self._mox.StubOutWithMock(volumeops.VolumeOps,
'fix_instance_volume_disk_paths')
volumeops.VolumeOps.fix_instance_volume_disk_paths(
mox.Func(self._check_instance_name), block_device_info)
self._setup_log_vm_output_mocks()
self._mox.ReplayAll()
self._conn.power_on(self._context, self._instance_data, network_info,
block_device_info=block_device_info)
self._mox.VerifyAll()
def test_power_on_having_block_devices(self):
block_device_info = db_fakes.get_fake_block_device_info(
self._volume_target_portal, self._volume_id)
self._test_power_on(block_device_info=block_device_info)
def test_power_on_without_block_devices(self):
self._test_power_on(block_device_info=None)
def test_power_on_already_running(self):
self._instance_data = self._get_instance_data()
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
self._setup_log_vm_output_mocks()
self._mox.ReplayAll()
self._conn.power_on(self._context, self._instance_data, network_info)
self._mox.VerifyAll()
def test_reboot(self):
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
self._instance_data = self._get_instance_data()
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_REBOOT)
self._setup_delete_vm_log_mocks()
self._setup_log_vm_output_mocks()
self._mox.ReplayAll()
self._conn.reboot(self._context, self._instance_data, network_info,
None)
self._mox.VerifyAll()
def _setup_destroy_mocks(self, destroy_disks=True):
m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
m.AndReturn(True)
func = mox.Func(self._check_instance_name)
vmutils.VMUtils.set_vm_state(func, constants.HYPERV_VM_STATE_DISABLED)
self._setup_delete_vm_log_mocks()
vmutils.VMUtils.destroy_vm(func)
if destroy_disks:
m = fake.PathUtils.get_instance_dir(mox.IsA(str),
create_dir=False,
remove_dir=True)
m.AndReturn(self._test_instance_dir)
def test_destroy(self):
self._instance_data = self._get_instance_data()
self._setup_destroy_mocks()
self._mox.ReplayAll()
self._conn.destroy(self._context, self._instance_data, None)
self._mox.VerifyAll()
def test_get_instance_disk_info_is_implemented(self):
# Ensure that the method has been implemented in the driver
try:
disk_info = self._conn.get_instance_disk_info('fake_instance_name')
self.assertIsNone(disk_info)
except NotImplementedError:
self.fail("test_get_instance_disk_info() should not raise "
"NotImplementedError")
def _get_instance_data(self):
instance_name = 'openstack_unit_test_vm_' + str(uuid.uuid4())
return db_fakes.get_fake_instance_data(instance_name,
self._project_id,
self._user_id)
def _spawn_instance(self, cow, block_device_info=None,
ephemeral_storage=False):
self.flags(use_cow_images=cow)
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
instance['system_metadata'] = {}
if ephemeral_storage:
instance['ephemeral_gb'] = 1
image = db_fakes.get_fake_image_data(self._project_id, self._user_id)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
self._conn.spawn(self._context, instance, image,
injected_files=[], admin_password=None,
network_info=network_info,
block_device_info=block_device_info)
def _add_disk(self, vm_name, path, ctrller_addr,
drive_addr, drive_type):
if drive_type == constants.DISK:
self._instance_disks.append(path)
elif drive_type == constants.DVD:
self._instance_dvds.append(path)
def _add_volume_disk(self, vm_name, controller_path, address,
mounted_disk_path):
self._instance_volume_disks.append(mounted_disk_path)
def _check_img_path(self, image_path):
return image_path == self._fetched_image
def _setup_create_instance_mocks(self, setup_vif_mocks_func=None,
boot_from_volume=False,
block_device_info=None,
admin_permissions=True,
ephemeral_storage=False):
vmutils.VMUtils.create_vm(mox.Func(self._check_vm_name), mox.IsA(int),
mox.IsA(int), mox.IsA(bool),
CONF.hyperv.dynamic_memory_ratio,
mox.IsA(list))
if not boot_from_volume:
m = vmutils.VMUtils.attach_ide_drive(mox.Func(self._check_vm_name),
mox.IsA(str),
mox.IsA(int),
mox.IsA(int),
mox.IsA(str))
m.WithSideEffects(self._add_disk).InAnyOrder()
if ephemeral_storage:
m = vmutils.VMUtils.attach_ide_drive(mox.Func(self._check_vm_name),
mox.IsA(str),
mox.IsA(int),
mox.IsA(int),
mox.IsA(str))
m.WithSideEffects(self._add_disk).InAnyOrder()
func = mox.Func(self._check_vm_name)
m = vmutils.VMUtils.create_scsi_controller(func)
m.InAnyOrder()
if boot_from_volume:
mapping = driver.block_device_info_get_mapping(block_device_info)
data = mapping[0]['connection_info']['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
self._mock_attach_volume(mox.Func(self._check_vm_name), target_iqn,
target_lun, target_portal, True)
vmutils.VMUtils.create_nic(mox.Func(self._check_vm_name),
mox.IsA(str), mox.IsA(unicode)).InAnyOrder()
if setup_vif_mocks_func:
setup_vif_mocks_func()
if CONF.hyperv.enable_instance_metrics_collection:
vmutils.VMUtils.enable_vm_metrics_collection(
mox.Func(self._check_vm_name))
vmutils.VMUtils.get_vm_serial_port_connection(
mox.IsA(str), update_connection=mox.IsA(str))
def _set_vm_name(self, vm_name):
self._test_vm_name = vm_name
def _check_vm_name(self, vm_name):
return vm_name == self._test_vm_name
def _setup_check_admin_permissions_mocks(self, admin_permissions=True):
self._mox.StubOutWithMock(vmutils.VMUtils,
'check_admin_permissions')
m = vmutils.VMUtils.check_admin_permissions()
if admin_permissions:
m.AndReturn(None)
else:
m.AndRaise(vmutils.HyperVAuthorizationException(_(
'Simulated failure')))
def _setup_log_vm_output_mocks(self):
m = fake.PathUtils.get_vm_console_log_paths(mox.IsA(str))
m.AndReturn(('fake_vm_log_path', 'fake_vm_log_path.1'))
ioutils.IOThread('fake_pipe', 'fake_vm_log_path',
units.Mi).start()
def _setup_delete_vm_log_mocks(self):
m = fake.PathUtils.get_vm_console_log_paths(mox.IsA(str))
m.AndReturn(('fake_vm_log_path', 'fake_vm_log_path.1'))
fileutils.delete_if_exists(mox.IsA(str))
fileutils.delete_if_exists(mox.IsA(str))
def _setup_get_cached_image_mocks(self, cow=True,
vhd_format=constants.DISK_FORMAT_VHD):
m = vhdutils.VHDUtils.get_vhd_format(
mox.Func(self._check_img_path))
m.AndReturn(vhd_format)
def check_img_path_with_ext(image_path):
return image_path == self._fetched_image + '.' + vhd_format.lower()
fake.PathUtils.rename(mox.Func(self._check_img_path),
mox.Func(check_img_path_with_ext))
if cow and vhd_format == constants.DISK_FORMAT_VHD:
m = vhdutils.VHDUtils.get_vhd_info(
mox.Func(check_img_path_with_ext))
m.AndReturn({'MaxInternalSize': 1024})
fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
mox.IsA(str), mox.IsA(object))
m.AndReturn(1025)
vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object),
is_file_max_size=False)
def _setup_spawn_instance_mocks(self, cow, setup_vif_mocks_func=None,
with_exception=False,
block_device_info=None,
boot_from_volume=False,
config_drive=False,
use_cdrom=False,
admin_permissions=True,
vhd_format=constants.DISK_FORMAT_VHD,
ephemeral_storage=False):
m = vmutils.VMUtils.vm_exists(mox.IsA(str))
m.WithSideEffects(self._set_vm_name).AndReturn(False)
m = fake.PathUtils.get_instance_dir(mox.IsA(str),
create_dir=False,
remove_dir=True)
m.AndReturn(self._test_instance_dir)
if block_device_info:
m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(
'fake_root_device_name', block_device_info)
m.AndReturn(boot_from_volume)
if not boot_from_volume:
m = fake.PathUtils.get_instance_dir(mox.Func(self._check_vm_name))
m.AndReturn(self._test_instance_dir)
self._setup_get_cached_image_mocks(cow, vhd_format)
m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
m.AndReturn({'MaxInternalSize': 1024, 'FileSize': 1024,
'Type': 2})
if cow:
vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
mox.IsA(str))
m = vhdutils.VHDUtils.get_vhd_format(mox.IsA(str))
m.AndReturn(vhd_format)
else:
fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
if not (cow and vhd_format == constants.DISK_FORMAT_VHD):
m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
mox.IsA(str), mox.IsA(object))
m.AndReturn(1025)
vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object),
is_file_max_size=False)
self._setup_check_admin_permissions_mocks(
admin_permissions=admin_permissions)
if ephemeral_storage:
m = fake.PathUtils.get_instance_dir(mox.Func(self._check_vm_name))
m.AndReturn(self._test_instance_dir)
vhdutils.VHDUtils.create_dynamic_vhd(mox.IsA(str), mox.IsA(int),
mox.IsA(str))
self._setup_create_instance_mocks(setup_vif_mocks_func,
boot_from_volume,
block_device_info,
ephemeral_storage=ephemeral_storage)
if config_drive and not with_exception:
self._setup_spawn_config_drive_mocks(use_cdrom)
# TODO(alexpilotti) Based on where the exception is thrown
# some of the above mock calls need to be skipped
if with_exception:
self._setup_destroy_mocks()
else:
vmutils.VMUtils.set_vm_state(mox.Func(self._check_vm_name),
constants.HYPERV_VM_STATE_ENABLED)
self._setup_log_vm_output_mocks()
def _test_spawn_instance(self, cow=True,
expected_disks=1,
expected_dvds=0,
setup_vif_mocks_func=None,
with_exception=False,
config_drive=False,
use_cdrom=False,
admin_permissions=True,
vhd_format=constants.DISK_FORMAT_VHD,
ephemeral_storage=False):
self._setup_spawn_instance_mocks(cow,
setup_vif_mocks_func,
with_exception,
config_drive=config_drive,
use_cdrom=use_cdrom,
admin_permissions=admin_permissions,
vhd_format=vhd_format,
ephemeral_storage=ephemeral_storage)
self._mox.ReplayAll()
self._spawn_instance(cow, ephemeral_storage=ephemeral_storage)
self._mox.VerifyAll()
self.assertEqual(len(self._instance_disks), expected_disks)
self.assertEqual(len(self._instance_dvds), expected_dvds)
vhd_path = os.path.join(self._test_instance_dir, 'root.' +
vhd_format.lower())
self.assertEqual(vhd_path, self._instance_disks[0])
def _mock_get_mounted_disk_from_lun(self, target_iqn, target_lun,
fake_mounted_disk,
fake_device_number):
m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
target_lun)
m.AndReturn(fake_device_number)
m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
fake_device_number)
m.AndReturn(fake_mounted_disk)
def _mock_login_storage_target(self, target_iqn, target_lun, target_portal,
fake_mounted_disk, fake_device_number):
m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
target_lun)
m.AndReturn(fake_device_number)
volumeutils.VolumeUtils.login_storage_target(target_lun,
target_iqn,
target_portal,
'fake_username',
'fake_password')
self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
fake_mounted_disk,
fake_device_number)
def _mock_attach_volume(self, instance_name, target_iqn, target_lun,
target_portal=None, boot_from_volume=False):
fake_mounted_disk = "fake_mounted_disk"
fake_device_number = 0
fake_controller_path = 'fake_scsi_controller_path'
self._mock_login_storage_target(target_iqn, target_lun,
target_portal,
fake_mounted_disk,
fake_device_number)
self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
fake_mounted_disk,
fake_device_number)
if boot_from_volume:
m = vmutils.VMUtils.get_vm_ide_controller(instance_name, 0)
m.AndReturn(fake_controller_path)
fake_free_slot = 0
else:
m = vmutils.VMUtils.get_vm_scsi_controller(instance_name)
m.AndReturn(fake_controller_path)
fake_free_slot = 1
m = vmutils.VMUtils.get_free_controller_slot(
fake_controller_path)
m.AndReturn(fake_free_slot)
m = vmutils.VMUtils.attach_volume_to_controller(instance_name,
fake_controller_path,
fake_free_slot,
fake_mounted_disk)
m.WithSideEffects(self._add_volume_disk)
def test_attach_volume(self):
instance_data = self._get_instance_data()
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
mount_point = '/dev/sdc'
self._mock_attach_volume(instance_data['name'], target_iqn, target_lun,
target_portal)
self._mox.ReplayAll()
self._conn.attach_volume(None, connection_info, instance_data,
mount_point)
self._mox.VerifyAll()
self.assertEqual(len(self._instance_volume_disks), 1)
def _mock_get_mounted_disk_from_lun_error(self, target_iqn, target_lun,
fake_mounted_disk,
fake_device_number):
m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
target_lun)
m.AndRaise(vmutils.HyperVException('Simulated failure'))
def _mock_attach_volume_target_logout(self, instance_name, target_iqn,
target_lun, target_portal=None,
boot_from_volume=False):
fake_mounted_disk = "fake_mounted disk"
fake_device_number = 0
self._mock_login_storage_target(target_iqn, target_lun,
target_portal,
fake_mounted_disk,
fake_device_number)
self._mock_get_mounted_disk_from_lun_error(target_iqn, target_lun,
fake_mounted_disk,
fake_device_number)
self._mock_logout_storage_target(target_iqn)
def test_attach_volume_logout(self):
instance_data = self._get_instance_data()
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
mount_point = '/dev/sdc'
self._mock_attach_volume_target_logout(instance_data['name'],
target_iqn, target_lun,
target_portal)
self._mox.ReplayAll()
self.assertRaises(vmutils.HyperVException, self._conn.attach_volume,
None, connection_info, instance_data, mount_point)
self._mox.VerifyAll()
def test_attach_volume_connection_error(self):
instance_data = self._get_instance_data()
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
mount_point = '/dev/sdc'
def fake_login_storage_target(self, connection_info):
raise vmutils.HyperVException('Fake connection exception')
self.stubs.Set(volumeops.ISCSIVolumeDriver, 'login_storage_target',
fake_login_storage_target)
self.assertRaises(vmutils.HyperVException, self._conn.attach_volume,
None, connection_info, instance_data, mount_point)
def _mock_detach_volume(self, target_iqn, target_lun,
other_luns_available=False):
fake_mounted_disk = "fake_mounted_disk"
fake_device_number = 0
m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
target_lun)
m.AndReturn(fake_device_number)
m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
fake_device_number)
m.AndReturn(fake_mounted_disk)
vmutils.VMUtils.detach_vm_disk(mox.IsA(str), fake_mounted_disk)
self._mock_logout_storage_target(target_iqn, other_luns_available)
def _mock_logout_storage_target(self, target_iqn,
other_luns_available=False):
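        # One LUN belongs to this volume; report an extra one when other
        # LUNs share the target, in which case the session must stay open.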
m = volumeutils.VolumeUtils.get_target_lun_count(target_iqn)
m.AndReturn(1 + int(other_luns_available))
if not other_luns_available:
volumeutils.VolumeUtils.logout_storage_target(target_iqn)
def _test_detach_volume(self, other_luns_available=False):
instance_data = self._get_instance_data()
self.assertIn('name', instance_data)
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
self.assertIn('target_portal', data)
mount_point = '/dev/sdc'
self._mock_detach_volume(target_iqn, target_lun, other_luns_available)
self._mox.ReplayAll()
self._conn.detach_volume(connection_info, instance_data, mount_point)
self._mox.VerifyAll()
def test_detach_volume(self):
self._test_detach_volume()
def test_detach_volume_multiple_luns_per_target(self):
# The iSCSI target should not be disconnected in this case.
self._test_detach_volume(other_luns_available=True)
def test_boot_from_volume(self):
block_device_info = db_fakes.get_fake_block_device_info(
self._volume_target_portal, self._volume_id)
self._setup_spawn_instance_mocks(cow=False,
block_device_info=block_device_info,
boot_from_volume=True)
self._mox.ReplayAll()
self._spawn_instance(False, block_device_info)
self._mox.VerifyAll()
self.assertEqual(len(self._instance_volume_disks), 1)
def test_get_volume_connector(self):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
fake_my_ip = "fake_ip"
fake_host = "fake_host"
fake_initiator = "fake_initiator"
self.flags(my_ip=fake_my_ip)
self.flags(host=fake_host)
m = volumeutils.VolumeUtils.get_iscsi_initiator()
m.AndReturn(fake_initiator)
self._mox.ReplayAll()
data = self._conn.get_volume_connector(instance)
self._mox.VerifyAll()
self.assertEqual(fake_my_ip, data.get('ip'))
self.assertEqual(fake_host, data.get('host'))
self.assertEqual(fake_initiator, data.get('initiator'))
def test_get_volume_connector_storage_ip(self):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
fake_my_ip = "fake_ip"
fake_my_block_ip = "fake_block_ip"
fake_host = "fake_host"
fake_initiator = "fake_initiator"
self.flags(my_ip=fake_my_ip)
self.flags(my_block_storage_ip=fake_my_block_ip)
self.flags(host=fake_host)
with mock.patch.object(volumeutils.VolumeUtils,
"get_iscsi_initiator") as mock_initiator:
mock_initiator.return_value = fake_initiator
data = self._conn.get_volume_connector(instance)
self.assertEqual(fake_my_block_ip, data.get('ip'))
self.assertEqual(fake_host, data.get('host'))
self.assertEqual(fake_initiator, data.get('initiator'))
def _setup_test_migrate_disk_and_power_off_mocks(self, same_host=False,
copy_exception=False,
size_exception=False):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
instance['root_gb'] = 10
fake_local_ip = '10.0.0.1'
if same_host:
fake_dest_ip = fake_local_ip
else:
fake_dest_ip = '10.0.0.2'
if size_exception:
flavor = 'm1.tiny'
else:
flavor = 'm1.small'
flavor = db.flavor_get_by_name(self._context, flavor)
if not size_exception:
fake_root_vhd_path = 'C:\\FakePath\\root.vhd'
fake_revert_path = os.path.join(self._test_instance_dir, '_revert')
func = mox.Func(self._check_instance_name)
vmutils.VMUtils.set_vm_state(func,
constants.HYPERV_VM_STATE_DISABLED)
self._setup_delete_vm_log_mocks()
m = vmutils.VMUtils.get_vm_storage_paths(func)
m.AndReturn(([fake_root_vhd_path], []))
m = hostutils.HostUtils.get_local_ips()
m.AndReturn([fake_local_ip])
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
m.AndReturn(self._test_instance_dir)
m = pathutils.PathUtils.get_instance_migr_revert_dir(
instance['name'], remove_dir=True)
m.AndReturn(fake_revert_path)
if same_host:
fake.PathUtils.makedirs(mox.IsA(str))
m = fake.PathUtils.copy(fake_root_vhd_path, mox.IsA(str))
if copy_exception:
m.AndRaise(shutil.Error('Simulated copy error'))
m = fake.PathUtils.get_instance_dir(mox.IsA(str),
mox.IsA(str),
remove_dir=True)
m.AndReturn(self._test_instance_dir)
else:
fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
destroy_disks = True
if same_host:
fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
destroy_disks = False
self._setup_destroy_mocks(False)
if destroy_disks:
m = fake.PathUtils.get_instance_dir(mox.IsA(str),
mox.IsA(str),
remove_dir=True)
m.AndReturn(self._test_instance_dir)
return (instance, fake_dest_ip, network_info, flavor)
def test_migrate_disk_and_power_off(self):
(instance,
fake_dest_ip,
network_info,
flavor) = self._setup_test_migrate_disk_and_power_off_mocks()
self._mox.ReplayAll()
self._conn.migrate_disk_and_power_off(self._context, instance,
fake_dest_ip, flavor,
network_info)
self._mox.VerifyAll()
def test_migrate_disk_and_power_off_same_host(self):
args = self._setup_test_migrate_disk_and_power_off_mocks(
same_host=True)
(instance, fake_dest_ip, network_info, flavor) = args
self._mox.ReplayAll()
self._conn.migrate_disk_and_power_off(self._context, instance,
fake_dest_ip, flavor,
network_info)
self._mox.VerifyAll()
def test_migrate_disk_and_power_off_copy_exception(self):
args = self._setup_test_migrate_disk_and_power_off_mocks(
copy_exception=True)
(instance, fake_dest_ip, network_info, flavor) = args
self._mox.ReplayAll()
self.assertRaises(shutil.Error, self._conn.migrate_disk_and_power_off,
self._context, instance, fake_dest_ip,
flavor, network_info)
self._mox.VerifyAll()
def test_migrate_disk_and_power_off_smaller_root_vhd_size_exception(self):
args = self._setup_test_migrate_disk_and_power_off_mocks(
size_exception=True)
(instance, fake_dest_ip, network_info, flavor) = args
self._mox.ReplayAll()
self.assertRaises(exception.InstanceFaultRollback,
self._conn.migrate_disk_and_power_off,
self._context, instance, fake_dest_ip,
flavor, network_info)
self._mox.VerifyAll()
def _mock_attach_config_drive(self, instance, config_drive_format):
instance['config_drive'] = True
self._mox.StubOutWithMock(fake.PathUtils, 'lookup_configdrive_path')
m = fake.PathUtils.lookup_configdrive_path(
mox.Func(self._check_instance_name))
if config_drive_format in constants.DISK_FORMAT_MAP:
m.AndReturn(self._test_instance_dir + '/configdrive.' +
config_drive_format)
else:
m.AndReturn(None)
m = vmutils.VMUtils.attach_ide_drive(
mox.Func(self._check_instance_name),
mox.IsA(str),
mox.IsA(int),
mox.IsA(int),
mox.IsA(str))
m.WithSideEffects(self._add_disk).InAnyOrder()
def _verify_attach_config_drive(self, config_drive_format):
if config_drive_format == constants.DISK_FORMAT.lower():
self.assertEqual(self._instance_disks[1],
self._test_instance_dir + '/configdrive.' +
config_drive_format)
elif config_drive_format == constants.DVD_FORMAT.lower():
self.assertEqual(self._instance_dvds[0],
self._test_instance_dir + '/configdrive.' +
config_drive_format)
def _test_finish_migration(self, power_on, ephemeral_storage=False,
config_drive=False,
config_drive_format='iso'):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
instance['system_metadata'] = {}
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
m.AndReturn(self._test_instance_dir)
self._mox.StubOutWithMock(fake.PathUtils, 'exists')
m = fake.PathUtils.exists(mox.IsA(str))
m.AndReturn(True)
fake_parent_vhd_path = (os.path.join('FakeParentPath', '%s.vhd' %
instance["image_ref"]))
m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
m.AndReturn({'ParentPath': fake_parent_vhd_path,
'MaxInternalSize': 1})
m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
mox.IsA(str), mox.IsA(object))
m.AndReturn(1025)
vhdutils.VHDUtils.reconnect_parent_vhd(mox.IsA(str), mox.IsA(str))
m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
m.AndReturn({'MaxInternalSize': 1024})
m = fake.PathUtils.exists(mox.IsA(str))
m.AndReturn(True)
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
if ephemeral_storage:
            m.AndReturn(self._test_instance_dir)
else:
m.AndReturn(None)
self._set_vm_name(instance['name'])
self._setup_create_instance_mocks(None, False,
ephemeral_storage=ephemeral_storage)
if power_on:
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
self._setup_log_vm_output_mocks()
if config_drive:
self._mock_attach_config_drive(instance, config_drive_format)
self._mox.ReplayAll()
self._conn.finish_migration(self._context, None, instance, "",
network_info, None, False, None, power_on)
self._mox.VerifyAll()
if config_drive:
self._verify_attach_config_drive(config_drive_format)
def test_finish_migration_power_on(self):
self._test_finish_migration(True)
def test_finish_migration_power_off(self):
self._test_finish_migration(False)
def test_finish_migration_with_ephemeral_storage(self):
self._test_finish_migration(False, ephemeral_storage=True)
def test_finish_migration_attach_config_drive_iso(self):
self._test_finish_migration(False, config_drive=True,
config_drive_format=constants.DVD_FORMAT.lower())
def test_finish_migration_attach_config_drive_vhd(self):
self._test_finish_migration(False, config_drive=True,
config_drive_format=constants.DISK_FORMAT.lower())
def test_confirm_migration(self):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'],
remove_dir=True)
self._mox.ReplayAll()
self._conn.confirm_migration(None, instance, network_info)
self._mox.VerifyAll()
def _test_finish_revert_migration(self, power_on, ephemeral_storage=False,
config_drive=False,
config_drive_format='iso'):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
fake_revert_path = ('C:\\FakeInstancesPath\\%s\\_revert' %
instance['name'])
m = fake.PathUtils.get_instance_dir(mox.IsA(str),
create_dir=False,
remove_dir=True)
m.AndReturn(self._test_instance_dir)
m = pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'])
m.AndReturn(fake_revert_path)
fake.PathUtils.rename(fake_revert_path, mox.IsA(str))
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
m.AndReturn(self._test_instance_dir)
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
if ephemeral_storage:
m.AndReturn(self._test_instance_dir)
else:
m.AndReturn(None)
self._set_vm_name(instance['name'])
self._setup_create_instance_mocks(None, False,
ephemeral_storage=ephemeral_storage)
if power_on:
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
self._setup_log_vm_output_mocks()
if config_drive:
self._mock_attach_config_drive(instance, config_drive_format)
self._mox.ReplayAll()
self._conn.finish_revert_migration(self._context, instance,
network_info, None,
power_on)
self._mox.VerifyAll()
if config_drive:
self._verify_attach_config_drive(config_drive_format)
def test_finish_revert_migration_power_on(self):
self._test_finish_revert_migration(True)
def test_finish_revert_migration_power_off(self):
self._test_finish_revert_migration(False)
def test_finish_revert_migration_with_ephemeral_storage(self):
self._test_finish_revert_migration(False, ephemeral_storage=True)
def test_finish_revert_migration_attach_config_drive_iso(self):
self._test_finish_revert_migration(False, config_drive=True,
config_drive_format=constants.DVD_FORMAT.lower())
def test_finish_revert_migration_attach_config_drive_vhd(self):
self._test_finish_revert_migration(False, config_drive=True,
config_drive_format=constants.DISK_FORMAT.lower())
def test_plug_vifs(self):
# Check to make sure the method raises NotImplementedError.
self.assertRaises(NotImplementedError,
self._conn.plug_vifs,
instance=self._test_spawn_instance,
network_info=None)
def test_unplug_vifs(self):
# Check to make sure the method raises NotImplementedError.
self.assertRaises(NotImplementedError,
self._conn.unplug_vifs,
instance=self._test_spawn_instance,
network_info=None)
def test_refresh_instance_security_rules(self):
self.assertRaises(NotImplementedError,
self._conn.refresh_instance_security_rules,
instance=None)
def test_get_rdp_console(self):
self.flags(my_ip="192.168.1.1")
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
fake_port = 9999
fake_vm_id = "fake_vm_id"
m = rdpconsoleutils.RDPConsoleUtils.get_rdp_console_port()
m.AndReturn(fake_port)
m = vmutils.VMUtils.get_vm_id(mox.IsA(str))
m.AndReturn(fake_vm_id)
self._mox.ReplayAll()
connect_info = self._conn.get_rdp_console(self._context, instance)
self._mox.VerifyAll()
self.assertEqual(CONF.my_ip, connect_info.host)
self.assertEqual(fake_port, connect_info.port)
self.assertEqual(fake_vm_id, connect_info.internal_access_path)
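# Every test above follows mox's record/replay/verify cycle. A hedged,
# self-contained sketch of that pattern (the Greeter class below is
# hypothetical and not part of Nova):
def _mox_record_replay_verify_sketch():
    class Greeter(object):
        def greet(self, name):
            return 'hello ' + name
    mx = mox.Mox()
    mx.StubOutWithMock(Greeter, 'greet')
    Greeter.greet('world').AndReturn('hi')   # record the expectation
    mx.ReplayAll()                           # switch to replay mode
    assert Greeter().greet('world') == 'hi'  # exercised call must match
    mx.VerifyAll()                           # fail if expectations were unmet
    mx.UnsetStubs()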
|
#from django.test import TestCase
# coding=utf-8
# Create your tests here.
import datetime
def myDate(date):
    dt = datetime.datetime.today()
    if date.year == dt.year and date.month == dt.month and date.day == dt.day:
        res = "今天"  # "today"
    else:
        # "<month>月<day>日" renders as "month X, day Y"
        res = str(date.month) + "月" + str(date.day) + "日"
    res = res + str(date.hour) + ":" + str(date.minute)
    return res
dd = datetime.datetime(1974, 9, 4, 18, 17, 30)
dt = datetime.datetime.now()
d = myDate(dt)
print unicode(d, "cp936")
d = myDate(dd)
print unicode(d, "cp936")
#print dt.date.year
#print dt.time.hour
#print dt
#print dt.year
#print dt.month
|
import psutil
from config import logs, database
from domain import repository
log = logs.config_loggin()
def print_cpu_info():
log.info('===================================================================')
log.info('[ CPU Information summary ]')
log.info('===================================================================')
# gives a single float value
vcc = psutil.cpu_count()
log.info(f'Total number of CPUs: {vcc}')
vcpu = psutil.cpu_percent()
log.info(f'Total CPUs utilized percentage: {vcpu}%')
db = database.connect()
log.debug(f"DB -> {db}")
key = repository.create_metric(db, {'vcc': vcc, 'vcpu': vcpu}, 'cpu')
result = repository.read_metric(db, key, 'cpu')
log.info(f"Metric persisted -> {result}")
def print_ram_info():
log.info('===================================================================')
log.info('[ RAM Information summary ]')
log.info('===================================================================')
ram = dict(psutil.virtual_memory()._asdict())
    for name, value in ram.items():
        log.info(f"{name}: {value / 1024 / 1024 / 1024}")  # values reported in GiB
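# Hedged entry point, added for illustration: assumes this module is meant
# to be run directly and that config.logs / domain.repository resolve.
if __name__ == '__main__':
    print_cpu_info()
    print_ram_info()
|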
import pandas as pd
data = pd.read_csv(
r'c:\Users\Professional\Documents\GitHub\openedu-answers\6\10input.csv', sep=';', encoding="windows-1251")
prices = []
for i in range(len(data)):
for j in range(1, data.shape[1]):
prices.append(data.iloc[i, j])
print(min(prices))
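# A hedged equivalent of the nested loops above, assuming every column
# after the first is numeric:
# print(data.iloc[:, 1:].to_numpy().min())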
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('adm', '0008_auto_20150827_0858'),
]
operations = [
migrations.AlterField(
model_name='ofertatec',
name='IEM',
field=models.CharField(max_length=150, null=True, blank=True),
),
]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'BackendArgs',
'BackendPoolArgs',
'BackendPoolsSettingsArgs',
'CacheConfigurationArgs',
'ForwardingConfigurationArgs',
'FrontendEndpointArgs',
'FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLinkArgs',
'HeaderActionArgs',
'HealthProbeSettingsModelArgs',
'LoadBalancingSettingsModelArgs',
'RedirectConfigurationArgs',
'RoutingRuleArgs',
'RulesEngineActionArgs',
'RulesEngineMatchConditionArgs',
'RulesEngineRuleArgs',
'SubResourceArgs',
]
@pulumi.input_type
class BackendArgs:
def __init__(__self__, *,
address: Optional[pulumi.Input[str]] = None,
backend_host_header: Optional[pulumi.Input[str]] = None,
enabled_state: Optional[pulumi.Input[str]] = None,
http_port: Optional[pulumi.Input[int]] = None,
https_port: Optional[pulumi.Input[int]] = None,
priority: Optional[pulumi.Input[int]] = None,
private_link_alias: Optional[pulumi.Input[str]] = None,
private_link_approval_message: Optional[pulumi.Input[str]] = None,
weight: Optional[pulumi.Input[int]] = None):
"""
Backend address of a frontDoor load balancer.
:param pulumi.Input[str] address: Location of the backend (IP address or FQDN)
:param pulumi.Input[str] backend_host_header: The value to use as the host header sent to the backend. If blank or unspecified, this defaults to the incoming host.
:param pulumi.Input[str] enabled_state: Whether to enable use of this backend. Permitted values are 'Enabled' or 'Disabled'
:param pulumi.Input[int] http_port: The HTTP TCP port number. Must be between 1 and 65535.
:param pulumi.Input[int] https_port: The HTTPS TCP port number. Must be between 1 and 65535.
:param pulumi.Input[int] priority: Priority to use for load balancing. Higher priorities will not be used for load balancing if any lower priority backend is healthy.
:param pulumi.Input[str] private_link_alias: The Alias of the Private Link resource. Populating this optional field indicates that this backend is 'Private'
:param pulumi.Input[str] private_link_approval_message: A custom message to be included in the approval request to connect to the Private Link
:param pulumi.Input[int] weight: Weight of this endpoint for load balancing purposes.
"""
if address is not None:
pulumi.set(__self__, "address", address)
if backend_host_header is not None:
pulumi.set(__self__, "backend_host_header", backend_host_header)
if enabled_state is not None:
pulumi.set(__self__, "enabled_state", enabled_state)
if http_port is not None:
pulumi.set(__self__, "http_port", http_port)
if https_port is not None:
pulumi.set(__self__, "https_port", https_port)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if private_link_alias is not None:
pulumi.set(__self__, "private_link_alias", private_link_alias)
if private_link_approval_message is not None:
pulumi.set(__self__, "private_link_approval_message", private_link_approval_message)
if weight is not None:
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter
def address(self) -> Optional[pulumi.Input[str]]:
"""
Location of the backend (IP address or FQDN)
"""
return pulumi.get(self, "address")
@address.setter
def address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "address", value)
@property
@pulumi.getter(name="backendHostHeader")
def backend_host_header(self) -> Optional[pulumi.Input[str]]:
"""
The value to use as the host header sent to the backend. If blank or unspecified, this defaults to the incoming host.
"""
return pulumi.get(self, "backend_host_header")
@backend_host_header.setter
def backend_host_header(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backend_host_header", value)
@property
@pulumi.getter(name="enabledState")
def enabled_state(self) -> Optional[pulumi.Input[str]]:
"""
Whether to enable use of this backend. Permitted values are 'Enabled' or 'Disabled'
"""
return pulumi.get(self, "enabled_state")
@enabled_state.setter
def enabled_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "enabled_state", value)
@property
@pulumi.getter(name="httpPort")
def http_port(self) -> Optional[pulumi.Input[int]]:
"""
The HTTP TCP port number. Must be between 1 and 65535.
"""
return pulumi.get(self, "http_port")
@http_port.setter
def http_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "http_port", value)
@property
@pulumi.getter(name="httpsPort")
def https_port(self) -> Optional[pulumi.Input[int]]:
"""
The HTTPS TCP port number. Must be between 1 and 65535.
"""
return pulumi.get(self, "https_port")
@https_port.setter
def https_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "https_port", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[int]]:
"""
Priority to use for load balancing. Higher priorities will not be used for load balancing if any lower priority backend is healthy.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter(name="privateLinkAlias")
def private_link_alias(self) -> Optional[pulumi.Input[str]]:
"""
The Alias of the Private Link resource. Populating this optional field indicates that this backend is 'Private'
"""
return pulumi.get(self, "private_link_alias")
@private_link_alias.setter
def private_link_alias(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_link_alias", value)
@property
@pulumi.getter(name="privateLinkApprovalMessage")
def private_link_approval_message(self) -> Optional[pulumi.Input[str]]:
"""
A custom message to be included in the approval request to connect to the Private Link
"""
return pulumi.get(self, "private_link_approval_message")
@private_link_approval_message.setter
def private_link_approval_message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_link_approval_message", value)
@property
@pulumi.getter
def weight(self) -> Optional[pulumi.Input[int]]:
"""
Weight of this endpoint for load balancing purposes.
"""
return pulumi.get(self, "weight")
@weight.setter
def weight(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "weight", value)
@pulumi.input_type
class BackendPoolArgs:
def __init__(__self__, *,
backends: Optional[pulumi.Input[Sequence[pulumi.Input['BackendArgs']]]] = None,
health_probe_settings: Optional[pulumi.Input['SubResourceArgs']] = None,
id: Optional[pulumi.Input[str]] = None,
load_balancing_settings: Optional[pulumi.Input['SubResourceArgs']] = None,
name: Optional[pulumi.Input[str]] = None):
"""
A backend pool is a collection of backends that can be routed to.
:param pulumi.Input[Sequence[pulumi.Input['BackendArgs']]] backends: The set of backends for this pool
:param pulumi.Input['SubResourceArgs'] health_probe_settings: L7 health probe settings for a backend pool
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input['SubResourceArgs'] load_balancing_settings: Load balancing settings for a backend pool
:param pulumi.Input[str] name: Resource name.
"""
if backends is not None:
pulumi.set(__self__, "backends", backends)
if health_probe_settings is not None:
pulumi.set(__self__, "health_probe_settings", health_probe_settings)
if id is not None:
pulumi.set(__self__, "id", id)
if load_balancing_settings is not None:
pulumi.set(__self__, "load_balancing_settings", load_balancing_settings)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def backends(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackendArgs']]]]:
"""
The set of backends for this pool
"""
return pulumi.get(self, "backends")
@backends.setter
def backends(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackendArgs']]]]):
pulumi.set(self, "backends", value)
@property
@pulumi.getter(name="healthProbeSettings")
def health_probe_settings(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
L7 health probe settings for a backend pool
"""
return pulumi.get(self, "health_probe_settings")
@health_probe_settings.setter
def health_probe_settings(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "health_probe_settings", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="loadBalancingSettings")
def load_balancing_settings(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
Load balancing settings for a backend pool
"""
return pulumi.get(self, "load_balancing_settings")
@load_balancing_settings.setter
def load_balancing_settings(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "load_balancing_settings", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class BackendPoolsSettingsArgs:
def __init__(__self__, *,
enforce_certificate_name_check: Optional[pulumi.Input[str]] = None,
send_recv_timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
Settings that apply to all backend pools.
:param pulumi.Input[str] enforce_certificate_name_check: Whether to enforce certificate name check on HTTPS requests to all backend pools. No effect on non-HTTPS requests.
:param pulumi.Input[int] send_recv_timeout_seconds: Send and receive timeout on forwarding request to the backend. When timeout is reached, the request fails and returns.
"""
if enforce_certificate_name_check is not None:
pulumi.set(__self__, "enforce_certificate_name_check", enforce_certificate_name_check)
if send_recv_timeout_seconds is not None:
pulumi.set(__self__, "send_recv_timeout_seconds", send_recv_timeout_seconds)
@property
@pulumi.getter(name="enforceCertificateNameCheck")
def enforce_certificate_name_check(self) -> Optional[pulumi.Input[str]]:
"""
Whether to enforce certificate name check on HTTPS requests to all backend pools. No effect on non-HTTPS requests.
"""
return pulumi.get(self, "enforce_certificate_name_check")
@enforce_certificate_name_check.setter
def enforce_certificate_name_check(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "enforce_certificate_name_check", value)
@property
@pulumi.getter(name="sendRecvTimeoutSeconds")
def send_recv_timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Send and receive timeout on forwarding request to the backend. When timeout is reached, the request fails and returns.
"""
return pulumi.get(self, "send_recv_timeout_seconds")
@send_recv_timeout_seconds.setter
def send_recv_timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "send_recv_timeout_seconds", value)
@pulumi.input_type
class CacheConfigurationArgs:
def __init__(__self__, *,
cache_duration: Optional[pulumi.Input[str]] = None,
dynamic_compression: Optional[pulumi.Input[str]] = None,
query_parameter_strip_directive: Optional[pulumi.Input[str]] = None,
query_parameters: Optional[pulumi.Input[str]] = None):
"""
Caching settings for a caching-type route. To disable caching, do not provide a cacheConfiguration object.
:param pulumi.Input[str] cache_duration: The duration for which the content needs to be cached. Allowed format is in ISO 8601 format (http://en.wikipedia.org/wiki/ISO_8601#Durations). HTTP requires the value to be no more than a year
:param pulumi.Input[str] dynamic_compression: Whether to use dynamic compression for cached content
:param pulumi.Input[str] query_parameter_strip_directive: Treatment of URL query terms when forming the cache key.
:param pulumi.Input[str] query_parameters: query parameters to include or exclude (comma separated).
"""
if cache_duration is not None:
pulumi.set(__self__, "cache_duration", cache_duration)
if dynamic_compression is not None:
pulumi.set(__self__, "dynamic_compression", dynamic_compression)
if query_parameter_strip_directive is not None:
pulumi.set(__self__, "query_parameter_strip_directive", query_parameter_strip_directive)
if query_parameters is not None:
pulumi.set(__self__, "query_parameters", query_parameters)
@property
@pulumi.getter(name="cacheDuration")
def cache_duration(self) -> Optional[pulumi.Input[str]]:
"""
        The duration for which the content needs to be cached. Allowed format is ISO 8601 duration (http://en.wikipedia.org/wiki/ISO_8601#Durations). HTTP requires the value to be no more than a year
"""
return pulumi.get(self, "cache_duration")
@cache_duration.setter
def cache_duration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cache_duration", value)
@property
@pulumi.getter(name="dynamicCompression")
def dynamic_compression(self) -> Optional[pulumi.Input[str]]:
"""
Whether to use dynamic compression for cached content
"""
return pulumi.get(self, "dynamic_compression")
@dynamic_compression.setter
def dynamic_compression(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dynamic_compression", value)
@property
@pulumi.getter(name="queryParameterStripDirective")
def query_parameter_strip_directive(self) -> Optional[pulumi.Input[str]]:
"""
Treatment of URL query terms when forming the cache key.
"""
return pulumi.get(self, "query_parameter_strip_directive")
@query_parameter_strip_directive.setter
def query_parameter_strip_directive(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "query_parameter_strip_directive", value)
@property
@pulumi.getter(name="queryParameters")
def query_parameters(self) -> Optional[pulumi.Input[str]]:
"""
query parameters to include or exclude (comma separated).
"""
return pulumi.get(self, "query_parameters")
@query_parameters.setter
def query_parameters(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "query_parameters", value)
@pulumi.input_type
class ForwardingConfigurationArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
backend_pool: Optional[pulumi.Input['SubResourceArgs']] = None,
cache_configuration: Optional[pulumi.Input['CacheConfigurationArgs']] = None,
custom_forwarding_path: Optional[pulumi.Input[str]] = None,
forwarding_protocol: Optional[pulumi.Input[str]] = None):
"""
Describes Forwarding Route.
:param pulumi.Input['SubResourceArgs'] backend_pool: A reference to the BackendPool which this rule routes to.
:param pulumi.Input['CacheConfigurationArgs'] cache_configuration: The caching configuration associated with this rule.
:param pulumi.Input[str] custom_forwarding_path: A custom path used to rewrite resource paths matched by this rule. Leave empty to use incoming path.
:param pulumi.Input[str] forwarding_protocol: Protocol this rule will use when forwarding traffic to backends.
"""
pulumi.set(__self__, "odata_type", '#Microsoft.Azure.FrontDoor.Models.FrontdoorForwardingConfiguration')
if backend_pool is not None:
pulumi.set(__self__, "backend_pool", backend_pool)
if cache_configuration is not None:
pulumi.set(__self__, "cache_configuration", cache_configuration)
if custom_forwarding_path is not None:
pulumi.set(__self__, "custom_forwarding_path", custom_forwarding_path)
if forwarding_protocol is not None:
pulumi.set(__self__, "forwarding_protocol", forwarding_protocol)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter(name="backendPool")
def backend_pool(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
A reference to the BackendPool which this rule routes to.
"""
return pulumi.get(self, "backend_pool")
@backend_pool.setter
def backend_pool(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "backend_pool", value)
@property
@pulumi.getter(name="cacheConfiguration")
def cache_configuration(self) -> Optional[pulumi.Input['CacheConfigurationArgs']]:
"""
The caching configuration associated with this rule.
"""
return pulumi.get(self, "cache_configuration")
@cache_configuration.setter
def cache_configuration(self, value: Optional[pulumi.Input['CacheConfigurationArgs']]):
pulumi.set(self, "cache_configuration", value)
@property
@pulumi.getter(name="customForwardingPath")
def custom_forwarding_path(self) -> Optional[pulumi.Input[str]]:
"""
A custom path used to rewrite resource paths matched by this rule. Leave empty to use incoming path.
"""
return pulumi.get(self, "custom_forwarding_path")
@custom_forwarding_path.setter
def custom_forwarding_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_forwarding_path", value)
@property
@pulumi.getter(name="forwardingProtocol")
def forwarding_protocol(self) -> Optional[pulumi.Input[str]]:
"""
Protocol this rule will use when forwarding traffic to backends.
"""
return pulumi.get(self, "forwarding_protocol")
@forwarding_protocol.setter
def forwarding_protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "forwarding_protocol", value)
@pulumi.input_type
class FrontendEndpointArgs:
def __init__(__self__, *,
host_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
session_affinity_enabled_state: Optional[pulumi.Input[str]] = None,
session_affinity_ttl_seconds: Optional[pulumi.Input[int]] = None,
web_application_firewall_policy_link: Optional[pulumi.Input['FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLinkArgs']] = None):
"""
A frontend endpoint used for routing.
:param pulumi.Input[str] host_name: The host name of the frontendEndpoint. Must be a domain name.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: Resource name.
:param pulumi.Input[str] session_affinity_enabled_state: Whether to allow session affinity on this host. Valid options are 'Enabled' or 'Disabled'
:param pulumi.Input[int] session_affinity_ttl_seconds: UNUSED. This field will be ignored. The TTL to use in seconds for session affinity, if applicable.
:param pulumi.Input['FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLinkArgs'] web_application_firewall_policy_link: Defines the Web Application Firewall policy for each host (if applicable)
"""
if host_name is not None:
pulumi.set(__self__, "host_name", host_name)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if session_affinity_enabled_state is not None:
pulumi.set(__self__, "session_affinity_enabled_state", session_affinity_enabled_state)
if session_affinity_ttl_seconds is not None:
pulumi.set(__self__, "session_affinity_ttl_seconds", session_affinity_ttl_seconds)
if web_application_firewall_policy_link is not None:
pulumi.set(__self__, "web_application_firewall_policy_link", web_application_firewall_policy_link)
@property
@pulumi.getter(name="hostName")
def host_name(self) -> Optional[pulumi.Input[str]]:
"""
The host name of the frontendEndpoint. Must be a domain name.
"""
return pulumi.get(self, "host_name")
@host_name.setter
def host_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_name", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="sessionAffinityEnabledState")
def session_affinity_enabled_state(self) -> Optional[pulumi.Input[str]]:
"""
Whether to allow session affinity on this host. Valid options are 'Enabled' or 'Disabled'
"""
return pulumi.get(self, "session_affinity_enabled_state")
@session_affinity_enabled_state.setter
def session_affinity_enabled_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "session_affinity_enabled_state", value)
@property
@pulumi.getter(name="sessionAffinityTtlSeconds")
def session_affinity_ttl_seconds(self) -> Optional[pulumi.Input[int]]:
"""
UNUSED. This field will be ignored. The TTL to use in seconds for session affinity, if applicable.
"""
return pulumi.get(self, "session_affinity_ttl_seconds")
@session_affinity_ttl_seconds.setter
def session_affinity_ttl_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "session_affinity_ttl_seconds", value)
@property
@pulumi.getter(name="webApplicationFirewallPolicyLink")
def web_application_firewall_policy_link(self) -> Optional[pulumi.Input['FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLinkArgs']]:
"""
Defines the Web Application Firewall policy for each host (if applicable)
"""
return pulumi.get(self, "web_application_firewall_policy_link")
@web_application_firewall_policy_link.setter
def web_application_firewall_policy_link(self, value: Optional[pulumi.Input['FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLinkArgs']]):
pulumi.set(self, "web_application_firewall_policy_link", value)
@pulumi.input_type
class FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLinkArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None):
"""
Defines the Web Application Firewall policy for each host (if applicable)
:param pulumi.Input[str] id: Resource ID.
"""
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@pulumi.input_type
class HeaderActionArgs:
def __init__(__self__, *,
header_action_type: pulumi.Input[str],
header_name: pulumi.Input[str],
value: Optional[pulumi.Input[str]] = None):
"""
An action that can manipulate an http header.
:param pulumi.Input[str] header_action_type: Which type of manipulation to apply to the header.
:param pulumi.Input[str] header_name: The name of the header this action will apply to.
:param pulumi.Input[str] value: The value to update the given header name with. This value is not used if the actionType is Delete.
"""
pulumi.set(__self__, "header_action_type", header_action_type)
pulumi.set(__self__, "header_name", header_name)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="headerActionType")
def header_action_type(self) -> pulumi.Input[str]:
"""
Which type of manipulation to apply to the header.
"""
return pulumi.get(self, "header_action_type")
@header_action_type.setter
def header_action_type(self, value: pulumi.Input[str]):
pulumi.set(self, "header_action_type", value)
@property
@pulumi.getter(name="headerName")
def header_name(self) -> pulumi.Input[str]:
"""
The name of the header this action will apply to.
"""
return pulumi.get(self, "header_name")
@header_name.setter
def header_name(self, value: pulumi.Input[str]):
pulumi.set(self, "header_name", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
The value to update the given header name with. This value is not used if the actionType is Delete.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class HealthProbeSettingsModelArgs:
def __init__(__self__, *,
enabled_state: Optional[pulumi.Input[str]] = None,
health_probe_method: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
interval_in_seconds: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
path: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None):
"""
        Health probe settings for a backend pool
:param pulumi.Input[str] enabled_state: Whether to enable health probes to be made against backends defined under backendPools. Health probes can only be disabled if there is a single enabled backend in single enabled backend pool.
:param pulumi.Input[str] health_probe_method: Configures which HTTP method to use to probe the backends defined under backendPools.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[int] interval_in_seconds: The number of seconds between health probes.
:param pulumi.Input[str] name: Resource name.
:param pulumi.Input[str] path: The path to use for the health probe. Default is /
:param pulumi.Input[str] protocol: Protocol scheme to use for this probe
"""
if enabled_state is not None:
pulumi.set(__self__, "enabled_state", enabled_state)
if health_probe_method is not None:
pulumi.set(__self__, "health_probe_method", health_probe_method)
if id is not None:
pulumi.set(__self__, "id", id)
if interval_in_seconds is not None:
pulumi.set(__self__, "interval_in_seconds", interval_in_seconds)
if name is not None:
pulumi.set(__self__, "name", name)
if path is not None:
pulumi.set(__self__, "path", path)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
@property
@pulumi.getter(name="enabledState")
def enabled_state(self) -> Optional[pulumi.Input[str]]:
"""
Whether to enable health probes to be made against backends defined under backendPools. Health probes can only be disabled if there is a single enabled backend in single enabled backend pool.
"""
return pulumi.get(self, "enabled_state")
@enabled_state.setter
def enabled_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "enabled_state", value)
@property
@pulumi.getter(name="healthProbeMethod")
def health_probe_method(self) -> Optional[pulumi.Input[str]]:
"""
Configures which HTTP method to use to probe the backends defined under backendPools.
"""
return pulumi.get(self, "health_probe_method")
@health_probe_method.setter
def health_probe_method(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "health_probe_method", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="intervalInSeconds")
def interval_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
The number of seconds between health probes.
"""
return pulumi.get(self, "interval_in_seconds")
@interval_in_seconds.setter
def interval_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "interval_in_seconds", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
The path to use for the health probe. Default is /
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[str]]:
"""
Protocol scheme to use for this probe
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol", value)
@pulumi.input_type
class LoadBalancingSettingsModelArgs:
def __init__(__self__, *,
additional_latency_milliseconds: Optional[pulumi.Input[int]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
sample_size: Optional[pulumi.Input[int]] = None,
successful_samples_required: Optional[pulumi.Input[int]] = None):
"""
Load balancing settings for a backend pool
:param pulumi.Input[int] additional_latency_milliseconds: The additional latency in milliseconds for probes to fall into the lowest latency bucket
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: Resource name.
:param pulumi.Input[int] sample_size: The number of samples to consider for load balancing decisions
:param pulumi.Input[int] successful_samples_required: The number of samples within the sample period that must succeed
"""
if additional_latency_milliseconds is not None:
pulumi.set(__self__, "additional_latency_milliseconds", additional_latency_milliseconds)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if sample_size is not None:
pulumi.set(__self__, "sample_size", sample_size)
if successful_samples_required is not None:
pulumi.set(__self__, "successful_samples_required", successful_samples_required)
@property
@pulumi.getter(name="additionalLatencyMilliseconds")
def additional_latency_milliseconds(self) -> Optional[pulumi.Input[int]]:
"""
The additional latency in milliseconds for probes to fall into the lowest latency bucket
"""
return pulumi.get(self, "additional_latency_milliseconds")
@additional_latency_milliseconds.setter
def additional_latency_milliseconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "additional_latency_milliseconds", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="sampleSize")
def sample_size(self) -> Optional[pulumi.Input[int]]:
"""
The number of samples to consider for load balancing decisions
"""
return pulumi.get(self, "sample_size")
@sample_size.setter
def sample_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "sample_size", value)
@property
@pulumi.getter(name="successfulSamplesRequired")
def successful_samples_required(self) -> Optional[pulumi.Input[int]]:
"""
The number of samples within the sample period that must succeed
"""
return pulumi.get(self, "successful_samples_required")
@successful_samples_required.setter
def successful_samples_required(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "successful_samples_required", value)
@pulumi.input_type
class RedirectConfigurationArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
custom_fragment: Optional[pulumi.Input[str]] = None,
custom_host: Optional[pulumi.Input[str]] = None,
custom_path: Optional[pulumi.Input[str]] = None,
custom_query_string: Optional[pulumi.Input[str]] = None,
redirect_protocol: Optional[pulumi.Input[str]] = None,
redirect_type: Optional[pulumi.Input[str]] = None):
"""
Describes Redirect Route.
:param pulumi.Input[str] custom_fragment: Fragment to add to the redirect URL. Fragment is the part of the URL that comes after #. Do not include the #.
:param pulumi.Input[str] custom_host: Host to redirect. Leave empty to use the incoming host as the destination host.
:param pulumi.Input[str] custom_path: The full path to redirect. Path cannot be empty and must start with /. Leave empty to use the incoming path as destination path.
:param pulumi.Input[str] custom_query_string: The set of query strings to be placed in the redirect URL. Setting this value would replace any existing query string; leave empty to preserve the incoming query string. Query string must be in <key>=<value> format. The first ? and & will be added automatically so do not include them in the front, but do separate multiple query strings with &.
:param pulumi.Input[str] redirect_protocol: The protocol of the destination to where the traffic is redirected
:param pulumi.Input[str] redirect_type: The redirect type the rule will use when redirecting traffic.
"""
pulumi.set(__self__, "odata_type", '#Microsoft.Azure.FrontDoor.Models.FrontdoorRedirectConfiguration')
if custom_fragment is not None:
pulumi.set(__self__, "custom_fragment", custom_fragment)
if custom_host is not None:
pulumi.set(__self__, "custom_host", custom_host)
if custom_path is not None:
pulumi.set(__self__, "custom_path", custom_path)
if custom_query_string is not None:
pulumi.set(__self__, "custom_query_string", custom_query_string)
if redirect_protocol is not None:
pulumi.set(__self__, "redirect_protocol", redirect_protocol)
if redirect_type is not None:
pulumi.set(__self__, "redirect_type", redirect_type)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter(name="customFragment")
def custom_fragment(self) -> Optional[pulumi.Input[str]]:
"""
Fragment to add to the redirect URL. Fragment is the part of the URL that comes after #. Do not include the #.
"""
return pulumi.get(self, "custom_fragment")
@custom_fragment.setter
def custom_fragment(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_fragment", value)
@property
@pulumi.getter(name="customHost")
def custom_host(self) -> Optional[pulumi.Input[str]]:
"""
Host to redirect. Leave empty to use the incoming host as the destination host.
"""
return pulumi.get(self, "custom_host")
@custom_host.setter
def custom_host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_host", value)
@property
@pulumi.getter(name="customPath")
def custom_path(self) -> Optional[pulumi.Input[str]]:
"""
The full path to redirect. Path cannot be empty and must start with /. Leave empty to use the incoming path as destination path.
"""
return pulumi.get(self, "custom_path")
@custom_path.setter
def custom_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_path", value)
@property
@pulumi.getter(name="customQueryString")
def custom_query_string(self) -> Optional[pulumi.Input[str]]:
"""
The set of query strings to be placed in the redirect URL. Setting this value would replace any existing query string; leave empty to preserve the incoming query string. Query string must be in <key>=<value> format. The first ? and & will be added automatically so do not include them in the front, but do separate multiple query strings with &.
"""
return pulumi.get(self, "custom_query_string")
@custom_query_string.setter
def custom_query_string(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_query_string", value)
@property
@pulumi.getter(name="redirectProtocol")
def redirect_protocol(self) -> Optional[pulumi.Input[str]]:
"""
The protocol of the destination to where the traffic is redirected
"""
return pulumi.get(self, "redirect_protocol")
@redirect_protocol.setter
def redirect_protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "redirect_protocol", value)
@property
@pulumi.getter(name="redirectType")
def redirect_type(self) -> Optional[pulumi.Input[str]]:
"""
The redirect type the rule will use when redirecting traffic.
"""
return pulumi.get(self, "redirect_type")
@redirect_type.setter
def redirect_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "redirect_type", value)
@pulumi.input_type
class RoutingRuleArgs:
def __init__(__self__, *,
accepted_protocols: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
enabled_state: Optional[pulumi.Input[str]] = None,
frontend_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
patterns_to_match: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
route_configuration: Optional[pulumi.Input[Union['ForwardingConfigurationArgs', 'RedirectConfigurationArgs']]] = None,
rules_engine: Optional[pulumi.Input['SubResourceArgs']] = None):
"""
A routing rule represents a specification for traffic to treat and where to send it, along with health probe information.
:param pulumi.Input[Sequence[pulumi.Input[str]]] accepted_protocols: Protocol schemes to match for this rule
:param pulumi.Input[str] enabled_state: Whether to enable use of this rule. Permitted values are 'Enabled' or 'Disabled'
:param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] frontend_endpoints: Frontend endpoints associated with this rule
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: Resource name.
:param pulumi.Input[Sequence[pulumi.Input[str]]] patterns_to_match: The route patterns of the rule.
:param pulumi.Input[Union['ForwardingConfigurationArgs', 'RedirectConfigurationArgs']] route_configuration: A reference to the routing configuration.
:param pulumi.Input['SubResourceArgs'] rules_engine: A reference to a specific Rules Engine Configuration to apply to this route.
"""
if accepted_protocols is not None:
pulumi.set(__self__, "accepted_protocols", accepted_protocols)
if enabled_state is not None:
pulumi.set(__self__, "enabled_state", enabled_state)
if frontend_endpoints is not None:
pulumi.set(__self__, "frontend_endpoints", frontend_endpoints)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if patterns_to_match is not None:
pulumi.set(__self__, "patterns_to_match", patterns_to_match)
if route_configuration is not None:
pulumi.set(__self__, "route_configuration", route_configuration)
if rules_engine is not None:
pulumi.set(__self__, "rules_engine", rules_engine)
@property
@pulumi.getter(name="acceptedProtocols")
def accepted_protocols(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Protocol schemes to match for this rule
"""
return pulumi.get(self, "accepted_protocols")
@accepted_protocols.setter
def accepted_protocols(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "accepted_protocols", value)
@property
@pulumi.getter(name="enabledState")
def enabled_state(self) -> Optional[pulumi.Input[str]]:
"""
Whether to enable use of this rule. Permitted values are 'Enabled' or 'Disabled'
"""
return pulumi.get(self, "enabled_state")
@enabled_state.setter
def enabled_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "enabled_state", value)
@property
@pulumi.getter(name="frontendEndpoints")
def frontend_endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
"""
Frontend endpoints associated with this rule
"""
return pulumi.get(self, "frontend_endpoints")
@frontend_endpoints.setter
def frontend_endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
pulumi.set(self, "frontend_endpoints", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="patternsToMatch")
def patterns_to_match(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The route patterns of the rule.
"""
return pulumi.get(self, "patterns_to_match")
@patterns_to_match.setter
def patterns_to_match(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "patterns_to_match", value)
@property
@pulumi.getter(name="routeConfiguration")
def route_configuration(self) -> Optional[pulumi.Input[Union['ForwardingConfigurationArgs', 'RedirectConfigurationArgs']]]:
"""
A reference to the routing configuration.
"""
return pulumi.get(self, "route_configuration")
@route_configuration.setter
def route_configuration(self, value: Optional[pulumi.Input[Union['ForwardingConfigurationArgs', 'RedirectConfigurationArgs']]]):
pulumi.set(self, "route_configuration", value)
@property
@pulumi.getter(name="rulesEngine")
def rules_engine(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
A reference to a specific Rules Engine Configuration to apply to this route.
"""
return pulumi.get(self, "rules_engine")
@rules_engine.setter
def rules_engine(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "rules_engine", value)
@pulumi.input_type
class RulesEngineActionArgs:
def __init__(__self__, *,
request_header_actions: Optional[pulumi.Input[Sequence[pulumi.Input['HeaderActionArgs']]]] = None,
response_header_actions: Optional[pulumi.Input[Sequence[pulumi.Input['HeaderActionArgs']]]] = None,
route_configuration_override: Optional[pulumi.Input[Union['ForwardingConfigurationArgs', 'RedirectConfigurationArgs']]] = None):
"""
One or more actions that will execute, modifying the request and/or response.
:param pulumi.Input[Sequence[pulumi.Input['HeaderActionArgs']]] request_header_actions: A list of header actions to apply from the request from AFD to the origin.
:param pulumi.Input[Sequence[pulumi.Input['HeaderActionArgs']]] response_header_actions: A list of header actions to apply from the response from AFD to the client.
:param pulumi.Input[Union['ForwardingConfigurationArgs', 'RedirectConfigurationArgs']] route_configuration_override: Override the route configuration.
"""
if request_header_actions is not None:
pulumi.set(__self__, "request_header_actions", request_header_actions)
if response_header_actions is not None:
pulumi.set(__self__, "response_header_actions", response_header_actions)
if route_configuration_override is not None:
pulumi.set(__self__, "route_configuration_override", route_configuration_override)
@property
@pulumi.getter(name="requestHeaderActions")
def request_header_actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HeaderActionArgs']]]]:
"""
A list of header actions to apply from the request from AFD to the origin.
"""
return pulumi.get(self, "request_header_actions")
@request_header_actions.setter
def request_header_actions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HeaderActionArgs']]]]):
pulumi.set(self, "request_header_actions", value)
@property
@pulumi.getter(name="responseHeaderActions")
def response_header_actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HeaderActionArgs']]]]:
"""
A list of header actions to apply from the response from AFD to the client.
"""
return pulumi.get(self, "response_header_actions")
@response_header_actions.setter
def response_header_actions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HeaderActionArgs']]]]):
pulumi.set(self, "response_header_actions", value)
@property
@pulumi.getter(name="routeConfigurationOverride")
def route_configuration_override(self) -> Optional[pulumi.Input[Union['ForwardingConfigurationArgs', 'RedirectConfigurationArgs']]]:
"""
Override the route configuration.
"""
return pulumi.get(self, "route_configuration_override")
@route_configuration_override.setter
def route_configuration_override(self, value: Optional[pulumi.Input[Union['ForwardingConfigurationArgs', 'RedirectConfigurationArgs']]]):
pulumi.set(self, "route_configuration_override", value)
@pulumi.input_type
class RulesEngineMatchConditionArgs:
def __init__(__self__, *,
rules_engine_match_value: pulumi.Input[Sequence[pulumi.Input[str]]],
rules_engine_match_variable: pulumi.Input[str],
rules_engine_operator: pulumi.Input[str],
negate_condition: Optional[pulumi.Input[bool]] = None,
selector: Optional[pulumi.Input[str]] = None,
transforms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Define a match condition
:param pulumi.Input[Sequence[pulumi.Input[str]]] rules_engine_match_value: Match values to match against. The operator will apply to each value in here with OR semantics. If any of them match the variable with the given operator this match condition is considered a match.
:param pulumi.Input[str] rules_engine_match_variable: Match Variable
:param pulumi.Input[str] rules_engine_operator: Describes operator to apply to the match condition.
:param pulumi.Input[bool] negate_condition: Describes if this is negate condition or not
:param pulumi.Input[str] selector: Name of selector in RequestHeader or RequestBody to be matched
:param pulumi.Input[Sequence[pulumi.Input[str]]] transforms: List of transforms
"""
pulumi.set(__self__, "rules_engine_match_value", rules_engine_match_value)
pulumi.set(__self__, "rules_engine_match_variable", rules_engine_match_variable)
pulumi.set(__self__, "rules_engine_operator", rules_engine_operator)
if negate_condition is not None:
pulumi.set(__self__, "negate_condition", negate_condition)
if selector is not None:
pulumi.set(__self__, "selector", selector)
if transforms is not None:
pulumi.set(__self__, "transforms", transforms)
@property
@pulumi.getter(name="rulesEngineMatchValue")
def rules_engine_match_value(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
Match values to match against. The operator will apply to each value in here with OR semantics. If any of them match the variable with the given operator this match condition is considered a match.
"""
return pulumi.get(self, "rules_engine_match_value")
@rules_engine_match_value.setter
def rules_engine_match_value(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "rules_engine_match_value", value)
@property
@pulumi.getter(name="rulesEngineMatchVariable")
def rules_engine_match_variable(self) -> pulumi.Input[str]:
"""
Match Variable
"""
return pulumi.get(self, "rules_engine_match_variable")
@rules_engine_match_variable.setter
def rules_engine_match_variable(self, value: pulumi.Input[str]):
pulumi.set(self, "rules_engine_match_variable", value)
@property
@pulumi.getter(name="rulesEngineOperator")
def rules_engine_operator(self) -> pulumi.Input[str]:
"""
Describes operator to apply to the match condition.
"""
return pulumi.get(self, "rules_engine_operator")
@rules_engine_operator.setter
def rules_engine_operator(self, value: pulumi.Input[str]):
pulumi.set(self, "rules_engine_operator", value)
@property
@pulumi.getter(name="negateCondition")
def negate_condition(self) -> Optional[pulumi.Input[bool]]:
"""
Describes if this is negate condition or not
"""
return pulumi.get(self, "negate_condition")
@negate_condition.setter
def negate_condition(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "negate_condition", value)
@property
@pulumi.getter
def selector(self) -> Optional[pulumi.Input[str]]:
"""
Name of selector in RequestHeader or RequestBody to be matched
"""
return pulumi.get(self, "selector")
@selector.setter
def selector(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "selector", value)
@property
@pulumi.getter
def transforms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of transforms
"""
return pulumi.get(self, "transforms")
@transforms.setter
def transforms(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "transforms", value)
@pulumi.input_type
class RulesEngineRuleArgs:
def __init__(__self__, *,
action: pulumi.Input['RulesEngineActionArgs'],
name: pulumi.Input[str],
priority: pulumi.Input[int],
match_conditions: Optional[pulumi.Input[Sequence[pulumi.Input['RulesEngineMatchConditionArgs']]]] = None,
match_processing_behavior: Optional[pulumi.Input[str]] = None):
"""
Contains a list of match conditions, and an action on how to modify the request/response. If multiple rules match, the actions from one rule that conflict with a previous rule overwrite for a singular action, or append in the case of headers manipulation.
:param pulumi.Input['RulesEngineActionArgs'] action: Actions to perform on the request and response if all of the match conditions are met.
:param pulumi.Input[str] name: A name to refer to this specific rule.
:param pulumi.Input[int] priority: A priority assigned to this rule.
        :param pulumi.Input[Sequence[pulumi.Input['RulesEngineMatchConditionArgs']]] match_conditions: A list of match conditions that must be met in order for the actions of this rule to run. Having no match conditions means the actions will always run.
:param pulumi.Input[str] match_processing_behavior: If this rule is a match should the rules engine continue running the remaining rules or stop. If not present, defaults to Continue.
"""
pulumi.set(__self__, "action", action)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "priority", priority)
if match_conditions is not None:
pulumi.set(__self__, "match_conditions", match_conditions)
if match_processing_behavior is not None:
pulumi.set(__self__, "match_processing_behavior", match_processing_behavior)
@property
@pulumi.getter
def action(self) -> pulumi.Input['RulesEngineActionArgs']:
"""
Actions to perform on the request and response if all of the match conditions are met.
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: pulumi.Input['RulesEngineActionArgs']):
pulumi.set(self, "action", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
A name to refer to this specific rule.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def priority(self) -> pulumi.Input[int]:
"""
A priority assigned to this rule.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: pulumi.Input[int]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter(name="matchConditions")
def match_conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RulesEngineMatchConditionArgs']]]]:
"""
        A list of match conditions that must be met in order for the actions of this rule to run. Having no match conditions means the actions will always run.
"""
return pulumi.get(self, "match_conditions")
@match_conditions.setter
def match_conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RulesEngineMatchConditionArgs']]]]):
pulumi.set(self, "match_conditions", value)
@property
@pulumi.getter(name="matchProcessingBehavior")
def match_processing_behavior(self) -> Optional[pulumi.Input[str]]:
"""
If this rule is a match should the rules engine continue running the remaining rules or stop. If not present, defaults to Continue.
"""
return pulumi.get(self, "match_processing_behavior")
@match_processing_behavior.setter
def match_processing_behavior(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "match_processing_behavior", value)
@pulumi.input_type
class SubResourceArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None):
"""
Reference to another subresource.
:param pulumi.Input[str] id: Resource ID.
"""
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
|
from django.conf.urls import url
from . import views
app_name = 'order_app'
urlpatterns = [
url(r'^$', views.OrderListView.as_view(), name='list'),
url(r'^(?P<pk>\d+)/$', views.OrderDetailView.as_view(), name='detail'),
url(r'^create/$', views.OrderCreateView.as_view(), name='create'),
    url(r'^update/(?P<pk>\d+)/$', views.OrderUpdateView.as_view(), name='update'),
    url(r'^delete/(?P<pk>\d+)/$', views.OrderDeleteView.as_view(), name='delete'),
]
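# Illustrative reversal of these routes (a minimal sketch; the resulting path
# is relative to wherever this URLconf is included):
#
#   from django.urls import reverse
#   reverse('order_app:detail', kwargs={'pk': 1})  # e.g. '/orders/1/'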
|
#!/usr/bin/env python
# enable debugging
import cgitb
cgitb.enable()
import cgi
from Db.Order import Order
from Db.OrderSearch import OrderSearch
#print "Content-Type: text/plain\r\n\r\n"
# Gather url paramters
form = cgi.FieldStorage()
sAction = form.getvalue('action')
if sAction == 'create' or sAction == 'edit' or sAction == 'remove':
    # extract the row id from a DataTables-style field name such as 'data[3][name]'
    sId = form.keys()[1][5:form.keys()[1].find('][')]
iId = int(sId)
sName = str(form.getvalue('data['+sId+'][name]'))
iCustTypeId = form.getvalue('data['+sId+'][custtypeid]')
sPriority = str(form.getvalue('data[' + sId + '][priority]'))
if sAction == 'get':
    iId = int(form.getvalue("id"))
if sAction is None:
print "Content-Type: text/plain\r\n\r\n"
print "Missing action parameter. Valid values are get, create, edit, and remove"
elif sAction == "get":
order = Order(iId, '', '', '', '', '', '')
order.load()
print "Content-Type: application/json\r\n\r\n"
print order.toJson('data', False, True)
elif sAction == "create" or sAction == "edit":
order = Order(iId, sName, iCustTypeId, sPriority)
order.save()
print "Content-Type: application/json\r\n\r\n"
print order.toJson('data', True, True)
elif sAction == "remove":
order = Order(iId, '', '', '', '', '', '')
order.delete()
print "Content-Type: application/json\r\n\r\n"
print '{ "data" : [ ]}'
elif sAction == "search":
sStatus = form.getvalue('status')
orderSearch = OrderSearch(sStatus)
orderSearch.search()
custJson = orderSearch.toJson()
    # pass the search results through unchanged
    returnJson = custJson
print "Content-Type: application/json\r\n\r\n"
print returnJson
else:
print "Content-Type: text/plain\r\n\r\n"
print "Invalid action: " + sAction
|
from PyQt5.QtWidgets import QFileDialog
from PyQt5 import QtCore
import os
from pathlib import Path
def LoadParamTemplate(self):
'''
ask user to select a template file to fill everything with presaved parameters
'''
directory=os.path.join(str(Path(os.path.abspath(__file__)).parent.parent),'ExamplesTemplateFiles')
    fname = QFileDialog.getOpenFileName(self, 'Choose a template file to load', directory, "Text files (*.txt)")
    if not fname[0]:  # user cancelled the dialog
        return
    with open(fname[0], 'r') as file:
for line in file:
if 'UserName' in line:
self.ui.lineEdit_UserName.setText(line.split('\t')[1][:-1])
elif 'SampleName' in line:
self.ui.lineEdit_SampleName.setText(line.split('\t')[1][:-1])
elif 'Comment' in line:
self.ui.lineEdit_Comment.setText(line.split('\t')[1][:-1])
elif 'Meas. type' in line:
index = self.ui.comboBox_MeasType.findText(line.split('\t')[1][:-1], QtCore.Qt.MatchFixedString)
if index >= 0:
self.ui.comboBox_MeasType.setCurrentIndex(index)
            elif '#rep' in line:
                self.ui.spinBox_RepNumb.setValue(int(float(line.split('\t')[1])))
            elif 'DelayRep' in line:
                self.ui.spinBox_RepDelay.setValue(int(float(line.split('\t')[1])))
elif 'Diode1sun' in line:
self.ui.doubleSpinBox_DiodeNominalCurrent.setValue(float(line.split('\t')[1]))
elif 'temperature' in line:
self.ui.doubleSpinBox_Temperature.setValue(float(line.split('\t')[1]))
elif 'assume1sun' in line:
self.ui.radioButton_Assume1sun.setChecked(eval(line.split('\t')[1]))
elif 'minvoltage' in line:
self.ui.doubleSpinBox_JVminvoltage.setValue(float(line.split('\t')[1]))
elif 'maxvoltage' in line:
self.ui.doubleSpinBox_JVmaxvoltage.setValue(float(line.split('\t')[1]))
elif 'JVstepsize' in line:
self.ui.doubleSpinBox_JVstepsize.setValue(float(line.split('\t')[1]))
elif 'currentlimit' in line:
self.ui.doubleSpinBox_JVcurrentlimit.setValue(float(line.split('\t')[1]))
elif 'integtime' in line:
self.ui.doubleSpinBox_JVintegrationtime.setValue(float(line.split('\t')[1]))
            elif 'JVdelaypoints' in line:
                self.ui.doubleSpinBox_JVdelaypoints.setValue(float(line.split('\t')[1]))
elif 'delayshutter' in line:
self.ui.doubleSpinBox_JVdelayshutter.setValue(float(line.split('\t')[1]))
elif 'scandirection' in line:
index = self.ui.comboBox_JVscandirection.findText(line.split('\t')[1][:-1], QtCore.Qt.MatchFixedString)
if index >= 0:
self.ui.comboBox_JVscandirection.setCurrentIndex(index)
elif 'polarity' in line:
if line.split('\t')[1]=='nip':
self.ui.radioButton_nip.setChecked(True)
self.ui.radioButton_pin.setChecked(False)
else:
self.ui.radioButton_nip.setChecked(False)
self.ui.radioButton_pin.setChecked(True)
elif 'startvoltage' in line:
self.ui.doubleSpinBox_MPPTstartvoltage.setValue(float(line.split('\t')[1]))
            elif 'MPPTstepsize' in line:
                self.ui.spinBox_MPPTstepsize.setValue(int(float(line.split('\t')[1])))
elif 'voltagelimit' in line:
self.ui.doubleSpinBox_MPPTvoltagelimit.setValue(float(line.split('\t')[1]))
elif 'delaypointsmpp' in line:
self.ui.doubleSpinBox_MPPTdelaypoints.setValue(float(line.split('\t')[1]))
elif 'lighton' in line:
self.ui.checkBox_MPPTlighton.setChecked(eval(line.split('\t')[1]))
elif 'keeplightafter' in line:
self.ui.checkBox_MPPTlightonafter.setChecked(eval(line.split('\t')[1]))
elif 'PixelA' in line:
self.ui.checkBox_pixA.setChecked(eval(line.split('\t')[1]))
elif 'PixelB' in line:
self.ui.checkBox_pixB.setChecked(eval(line.split('\t')[1]))
elif 'PixelC' in line:
self.ui.checkBox_pixC.setChecked(eval(line.split('\t')[1]))
elif 'PixelD' in line:
self.ui.checkBox_pixD.setChecked(eval(line.split('\t')[1]))
elif 'PixelE' in line:
self.ui.checkBox_pixE.setChecked(eval(line.split('\t')[1]))
elif 'PixelF' in line:
self.ui.checkBox_pixF.setChecked(eval(line.split('\t')[1]))
elif 'A-area' in line:
self.ui.doubleSpinBox_pixAarea.setValue(float(line.split('\t')[1]))
elif 'B-area' in line:
self.ui.doubleSpinBox_pixBarea.setValue(float(line.split('\t')[1]))
elif 'C-area' in line:
self.ui.doubleSpinBox_pixCarea.setValue(float(line.split('\t')[1]))
elif 'D-area' in line:
self.ui.doubleSpinBox_pixDarea.setValue(float(line.split('\t')[1]))
elif 'E-area' in line:
self.ui.doubleSpinBox_pixEarea.setValue(float(line.split('\t')[1]))
elif 'F-area' in line:
self.ui.doubleSpinBox_pixFarea.setValue(float(line.split('\t')[1]))
elif 'Allpix' in line:
self.ui.radioButton_pixAll.setChecked(eval(line.split('\t')[1]))
def SaveParamTemplate(self):
'''
ask user to select a template file to save all parameters
'''
    # serialise widget states as 'True'/'False' strings for the template file
    checkBox_MPPTlighton = str(self.ui.checkBox_MPPTlighton.isChecked())
    checkBox_MPPTlightonafter = str(self.ui.checkBox_MPPTlightonafter.isChecked())
    checkBox_pixA = str(self.ui.checkBox_pixA.isChecked())
    checkBox_pixB = str(self.ui.checkBox_pixB.isChecked())
    checkBox_pixC = str(self.ui.checkBox_pixC.isChecked())
    checkBox_pixD = str(self.ui.checkBox_pixD.isChecked())
    checkBox_pixE = str(self.ui.checkBox_pixE.isChecked())
    checkBox_pixF = str(self.ui.checkBox_pixF.isChecked())
    radioButton_pixAll = str(self.ui.radioButton_pixAll.isChecked())
    radioButton_Assume1sun = str(self.ui.radioButton_Assume1sun.isChecked())
    polarity = 'nip' if self.ui.radioButton_nip.isChecked() else 'pin'
    directory = os.path.join(str(Path(os.path.abspath(__file__)).parent.parent), 'ExamplesTemplateFiles')
    fname = QFileDialog.getSaveFileName(self, 'Save file', directory, "Text files (*.txt)")
    if not fname[0]:  # user cancelled the dialog
        return
    with open(fname[0], 'w') as file:
text='UserName\t'+ str(self.ui.lineEdit_UserName.text())+'\n'+\
'SampleName\t'+ str(self.ui.lineEdit_SampleName.text())+'\n'+\
'Comment\t'+str(self.ui.lineEdit_Comment.text())+'\n'+\
'Meas. type\t'+ str(self.ui.comboBox_MeasType.currentText())+'\n'+\
'#rep\t'+ str(self.ui.spinBox_RepNumb.value())+'\n'+\
'DelayRep\t'+ str(self.ui.spinBox_RepDelay.value())+'\n'+\
'\n'+\
'Diode1sun\t'+ str(self.ui.doubleSpinBox_DiodeNominalCurrent.value())+'\n'+\
'temperature\t'+ str(self.ui.doubleSpinBox_Temperature.value())+'\n'+\
'assume1sun\t'+ radioButton_Assume1sun +'\n'+\
'\n'+\
'minvoltage\t'+ str(self.ui.doubleSpinBox_JVminvoltage.value())+'\n'+\
'maxvoltage\t'+ str(self.ui.doubleSpinBox_JVmaxvoltage.value())+'\n'+\
'JVstepsize\t'+ str(self.ui.doubleSpinBox_JVstepsize.value())+'\n'+\
'currentlimit\t'+ str(self.ui.doubleSpinBox_JVcurrentlimit.value())+'\n'+\
'integtime\t'+ str(self.ui.doubleSpinBox_JVintegrationtime.value())+'\n'+\
'JVdelaypoints\t'+ str(self.ui.doubleSpinBox_JVdelaypoints.value())+'\n'+\
'delayshutter\t'+ str(self.ui.doubleSpinBox_JVdelayshutter.value())+'\n'+\
'scandirection\t'+ str(self.ui.comboBox_JVscandirection.currentText())+'\n'+\
'polarity\t'+ polarity+'\n'+\
'\n'+\
'startvoltage\t'+ str(self.ui.doubleSpinBox_MPPTstartvoltage.value())+'\n'+\
'MPPTstepsize\t'+ str(self.ui.spinBox_MPPTstepsize.value())+'\n'+\
'voltagelimit\t'+ str(self.ui.doubleSpinBox_MPPTvoltagelimit.value())+'\n'+\
'delaypointsmpp\t'+ str(self.ui.doubleSpinBox_MPPTdelaypoints.value())+'\n'+\
'lighton\t'+checkBox_MPPTlighton +'\n'+\
'keeplightafter\t'+checkBox_MPPTlightonafter +'\n'+\
'\n'+\
'PixelA\t'+checkBox_pixA +'\n'+\
'PixelB\t'+checkBox_pixB +'\n'+\
'PixelC\t'+checkBox_pixC +'\n'+\
'PixelD\t'+checkBox_pixD +'\n'+\
'PixelE\t'+checkBox_pixE +'\n'+\
'PixelF\t'+checkBox_pixF +'\n'+\
'A-area\t'+ str(self.ui.doubleSpinBox_pixAarea.value())+'\n'+\
'B-area\t'+ str(self.ui.doubleSpinBox_pixBarea.value())+'\n'+\
'C-area\t'+ str(self.ui.doubleSpinBox_pixCarea.value())+'\n'+\
'D-area\t'+ str(self.ui.doubleSpinBox_pixDarea.value())+'\n'+\
'E-area\t'+ str(self.ui.doubleSpinBox_pixEarea.value())+'\n'+\
'F-area\t'+ str(self.ui.doubleSpinBox_pixFarea.value())+'\n'+\
'Allpix\t'+ radioButton_pixAll
file.write(text)
|
import pyttsx3
# initialize the engine
engine = pyttsx3.init()
def speak(words):
# set the voice and the rate to your wish
    voices = engine.getProperty('voices')
    # fall back to the default voice if a second (often female) voice is absent
    female_voice_id = voices[1].id if len(voices) > 1 else voices[0].id
voice_rate = 145
# set the properties that you like
engine.setProperty('voice', female_voice_id)
engine.setProperty('rate', voice_rate)
engine.say(words)
engine.runAndWait()
return 0
def main():
# Default Text.
speak("Hello,Nice to meet you.")
if __name__ == '__main__':
main() |
'''OpenGL extension OES.primitive_bounding_box
This module customises the behaviour of the
OpenGL.raw.GLES2.OES.primitive_bounding_box to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/primitive_bounding_box.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.primitive_bounding_box import *
from OpenGL.raw.GLES2.OES.primitive_bounding_box import _EXTENSION_NAME
def glInitPrimitiveBoundingBoxOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION |
# coding: utf-8
with open('mutants.in') as f:
data = f.read().split('\n')
N = int(data[0])
if N == 0:
mutants = []
else:
mutants = [int(x) for x in data[1].split(' ')]
t = int(data[2])
colors = [int(x) for x in data[3].split(' ')]
def binsearch_first(array, element):
answer = None
L = -1
R = len(array) - 1
while L+1 < R:
        mid = (L + R) // 2
if array[mid] < element:
L = mid
else:
R = mid
if (array != []) and (element == array[R]):
answer = R
return answer
def binsearch_last(array, element):
answer = None
L = 0
R = len(array)
while L+1 < R:
        mid = (L + R) // 2
if array[mid] > element:
R = mid
else:
L = mid
if (array != []) and (element == array[L]):
answer = L
return answer
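# Worked example: for array = [1, 2, 2, 2, 5] and element = 2,
# binsearch_first returns 1 and binsearch_last returns 3, so the
# occurrence count computed below is (3 + 1) - 1 = 3.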
with open('mutants.out', 'w') as F:
for color in colors:
last = binsearch_last(mutants, color)
first = binsearch_first(mutants, color)
        if first is None:  # element absent: both searches returned None
F.write('0\n')
else:
F.write(str((last+1) - first) + '\n')
|
from django import forms
class login_client_form(forms.Form):
username = forms.CharField(max_length=100)
password = forms.CharField(widget=forms.PasswordInput, max_length=100)
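# Illustrative view-side usage (a minimal sketch; 'request' would come from a
# Django view, not from this module):
#
#   form = login_client_form(request.POST)
#   if form.is_valid():
#       username = form.cleaned_data['username']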
|
import re
from collections import defaultdict
from typing import Dict, List, Tuple
from app.application.models import User
EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")
def validate_email_format(email: str) -> Tuple[bool, str]:
if not EMAIL_REGEX.match(email):
return False, 'Invalid Email format.'
return True, ''
def validate_email_already_exists(email: str) -> Tuple[bool, str]:
if User.query.filter_by(email=email).first():
return False, 'Email already exists.'
return True, ''
def validate_field_required(val) -> Tuple[bool, str]:
if val in [None, '']:
return False, 'This field is required'
return True, ''
def run_validators(validators: List[Dict]) -> Dict[str, List[str]]:
    """
    Takes a list of dicts, each with the keys:
      field_name: the name of the field being validated
      field_val: the actual value of the field
      func: the validator function called with field_val
    :param validators: list of validator descriptions as above
    :return: errors dictionary in the format {field1: [err1, err2]}
    """
    errors = defaultdict(list)
for validator_obj in validators:
is_valid, error_message = validator_obj['func'](validator_obj['field_val'])
if not is_valid:
errors[validator_obj['field_name']].append(error_message)
return errors
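# Illustrative usage (a minimal sketch with made-up field values):
#
#   errors = run_validators([
#       {'field_name': 'email', 'field_val': 'not-an-email',
#        'func': validate_email_format},
#       {'field_name': 'name', 'field_val': '',
#        'func': validate_field_required},
#   ])
#   # dict(errors) == {'email': ['Invalid Email format.'],
#   #                  'name': ['This field is required']}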
|
name = str(input("Привет, как тебя зовут? "))
print ("Приятно познакомиться ", name)
question_1 = int(input(name + " сколько тебе лет?"))
|
from flask_testing import TestCase
from flask import url_for
from app import app
class TestBase(TestCase):
def create_app(self):
return app
class TestResponse(TestBase):
def test_service3(self):
response = self.client.get(url_for("pick"))
self.assertIn(response.json, range(1,224)) |
import os, datetime
from werkzeug.utils import secure_filename
from flask import Flask, request, jsonify, render_template, send_from_directory
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from flask_cors import CORS
from flask_bcrypt import Bcrypt
from flask_jwt_extended import JWTManager, create_access_token, get_jwt_identity, jwt_required
from models import db, User, Certificatw
from libs.utils import allowed_file
UPLOAD_FOLDER = "static"
ALLOWED_EXTENSIONS_IMGS = {'png', 'jpg', 'jpeg', 'gif'}
ALLOWED_EXTENSIONS_FILES = {'pdf', 'png', 'jpg', 'jpeg'}
app = Flask(__name__)
app.url_map.strict_slashes = False
app.config['DEBUG'] = True
app.config['ENV'] = 'development'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'
app.config['JWT_SECRET_KEY'] = 'secret-key'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
db.init_app(app)
Migrate(app, db)
CORS(app)
jwt = JWTManager(app)
bcrypt = Bcrypt(app)
manager = Manager(app)
manager.add_command("db", MigrateCommand) # init migrate and upgrade
@app.route("/")
def main():
return render_template('index.html')
@app.route("/register", methods=['POST'])
def register():
email = request.json.get("email", None)
password = request.json.get("password", None)
if not email:
return jsonify({"msg": "Email is required"}), 400
if not password:
return jsonify({"msg": "Password is required"}), 400
user = User.query.filter_by(email=email).first()
if user:
return jsonify({"msg": "Email already exists"}), 400
user = User()
user.name = request.json.get("name", "")
user.email = email
user.password = bcrypt.generate_password_hash(password)
user.active = request.json.get("active", False)
user.save()
return jsonify({"success": "Register successfully!, please Log In"}), 200
@app.route("/login", methods=['POST'])
def login():
email = request.json.get("email", None)
password = request.json.get("password", None)
if not email:
return jsonify({"msg": "Email is required"}), 400
if not password:
return jsonify({"msg": "Password is required"}), 400
user = User.query.filter_by(email=email, active=True).first()
if not user:
return jsonify({"msg": "email/password are incorrects"}), 400
if not bcrypt.check_password_hash(user.password, password):
return jsonify({"msg": "email/password are incorrects"}), 400
expires = datetime.timedelta(days=3)
data = {
"access_token": create_access_token(identity=user.email, expires_delta=expires),
"user": user.serialize()
}
return jsonify({"success": "Log In succesfully!", "data": data}), 200
@app.route("/update-profile", methods=['POST'])
@jwt_required
def update_profile():
if 'avatar' not in request.files:
return jsonify({"msg": "Avatar is required"}), 400
file = request.files['avatar']
    # if the user does not select a file, the browser may
    # submit an empty part without a filename
if file.filename == '':
return jsonify({"msg": "Not Selected File"}), 400
if file and allowed_file(file.filename, ALLOWED_EXTENSIONS_IMGS):
email = get_jwt_identity()
user = User.query.filter_by(email=email).first()
filename = secure_filename(file.filename)
filename = "user_" + str(user.id) + "_" + filename
file.save(os.path.join(app.config['UPLOAD_FOLDER']+"/images", filename))
user.avatar = filename
user.update()
return jsonify({"success": "Profile updated successfully!"}), 200
#return redirect(url_for('uploaded_file', filename=filename))
return jsonify({"msg": "Image not allowed!"}), 400
@app.route('/images-profile/<filename>')
def image_profile(filename):
return send_from_directory(app.config['UPLOAD_FOLDER']+"/images",
filename)
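# Illustrative client calls (a minimal sketch; host, port and credentials are
# placeholders):
#
#   import requests
#   requests.post("http://localhost:5000/register",
#                 json={"email": "a@b.com", "password": "secret", "active": True})
#   r = requests.post("http://localhost:5000/login",
#                     json={"email": "a@b.com", "password": "secret"})
#   token = r.json()["data"]["access_token"]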
if __name__ == "__main__":
manager.run() |
### DO NOT REMOVE THIS
from typing import List
### DO NOT REMOVE THIS
class Solution:
    def optimalDivision(self, nums: List[int]) -> str:
        # All values are positive, so the quotient is maximized by dividing
        # nums[0] by the smallest possible divisor, nums[1]/nums[2]/.../nums[-1]:
        # wrap everything after the first operand in a single pair of parentheses.
        if len(nums) == 1:
            return "{}".format(nums[0])
        elif len(nums) == 2:
            return "{0}/{1}".format(nums[0], nums[1])
res=""
res+=str(nums[0])
res+="/"
res+="("
res+=str(nums[1])
for i in range(2,len(nums)):
res+="/"
res+=str(nums[i])
res+=")"
return res
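# Example (LeetCode 553): Solution().optimalDivision([1000, 100, 10, 2])
# returns "1000/(100/10/2)", which evaluates to 1000 * 10 * 2 / 100 = 200.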
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from unittest.mock import MagicMock, patch
import pytest
from databricks.sql.types import Row
from openlineage.client.facet import SchemaDatasetFacet, SchemaField, SqlJobFacet
from openlineage.client.run import Dataset
from airflow.models.connection import Connection
from airflow.providers.common.sql.hooks.sql import fetch_all_handler
from airflow.providers.snowflake.hooks.snowflake import SnowflakeHook
from airflow.providers.snowflake.operators.snowflake import SnowflakeOperator
DATE = "2017-04-20"
TASK_ID = "databricks-sql-operator"
DEFAULT_CONN_ID = "snowflake_default"
@pytest.mark.parametrize(
"sql, return_last, split_statement, hook_results, hook_descriptions, expected_results",
[
pytest.param(
"select * from dummy",
True,
True,
[Row(id=1, value="value1"), Row(id=2, value="value2")],
[[("id",), ("value",)]],
([{"id": 1, "value": "value1"}, {"id": 2, "value": "value2"}]),
id="Scalar: Single SQL statement, return_last, split statement",
),
pytest.param(
"select * from dummy;select * from dummy2",
True,
True,
[Row(id=1, value="value1"), Row(id=2, value="value2")],
[[("id",), ("value",)]],
([{"id": 1, "value": "value1"}, {"id": 2, "value": "value2"}]),
id="Scalar: Multiple SQL statements, return_last, split statement",
),
pytest.param(
"select * from dummy",
False,
False,
[Row(id=1, value="value1"), Row(id=2, value="value2")],
[[("id",), ("value",)]],
([{"id": 1, "value": "value1"}, {"id": 2, "value": "value2"}]),
id="Scalar: Single SQL statements, no return_last (doesn't matter), no split statement",
),
pytest.param(
"select * from dummy",
True,
False,
[Row(id=1, value="value1"), Row(id=2, value="value2")],
[[("id",), ("value",)]],
([{"id": 1, "value": "value1"}, {"id": 2, "value": "value2"}]),
id="Scalar: Single SQL statements, return_last (doesn't matter), no split statement",
),
pytest.param(
["select * from dummy"],
False,
False,
[[Row(id=1, value="value1"), Row(id=2, value="value2")]],
[[("id",), ("value",)]],
[([{"id": 1, "value": "value1"}, {"id": 2, "value": "value2"}])],
id="Non-Scalar: Single SQL statements in list, no return_last, no split statement",
),
pytest.param(
["select * from dummy", "select * from dummy2"],
False,
False,
[
[Row(id=1, value="value1"), Row(id=2, value="value2")],
[Row(id2=1, value2="value1"), Row(id2=2, value2="value2")],
],
[[("id",), ("value",)], [("id2",), ("value2",)]],
[
([{"id": 1, "value": "value1"}, {"id": 2, "value": "value2"}]),
([{"id2": 1, "value2": "value1"}, {"id2": 2, "value2": "value2"}]),
],
id="Non-Scalar: Multiple SQL statements in list, no return_last (no matter), no split statement",
),
pytest.param(
["select * from dummy", "select * from dummy2"],
True,
False,
[
[Row(id=1, value="value1"), Row(id=2, value="value2")],
[Row(id2=1, value2="value1"), Row(id2=2, value2="value2")],
],
[[("id",), ("value",)], [("id2",), ("value2",)]],
[
([{"id": 1, "value": "value1"}, {"id": 2, "value": "value2"}]),
([{"id2": 1, "value2": "value1"}, {"id2": 2, "value2": "value2"}]),
],
id="Non-Scalar: Multiple SQL statements in list, return_last (no matter), no split statement",
),
],
)
def test_exec_success(sql, return_last, split_statement, hook_results, hook_descriptions, expected_results):
"""
    Test the execute function in the case where the SQL query is successful.
"""
with patch("airflow.providers.common.sql.operators.sql.BaseSQLOperator.get_db_hook") as get_db_hook_mock:
op = SnowflakeOperator(
task_id=TASK_ID,
sql=sql,
do_xcom_push=True,
return_last=return_last,
split_statements=split_statement,
)
dbapi_hook = MagicMock()
get_db_hook_mock.return_value = dbapi_hook
dbapi_hook.run.return_value = hook_results
dbapi_hook.descriptions = hook_descriptions
execute_results = op.execute(None)
assert execute_results == expected_results
dbapi_hook.run.assert_called_once_with(
sql=sql,
parameters=None,
handler=fetch_all_handler,
autocommit=False,
return_last=return_last,
split_statements=split_statement,
)
def test_execute_openlineage_events():
DB_NAME = "DATABASE"
DB_SCHEMA_NAME = "PUBLIC"
class SnowflakeHookForTests(SnowflakeHook):
get_conn = MagicMock(name="conn")
get_connection = MagicMock()
def get_first(self, *_):
return [f"{DB_NAME}.{DB_SCHEMA_NAME}"]
dbapi_hook = SnowflakeHookForTests()
class SnowflakeOperatorForTest(SnowflakeOperator):
def get_db_hook(self):
return dbapi_hook
sql = """CREATE TABLE IF NOT EXISTS popular_orders_day_of_week (
order_day_of_week VARCHAR(64) NOT NULL,
order_placed_on TIMESTAMP NOT NULL,
orders_placed INTEGER NOT NULL
);
FORGOT TO COMMENT"""
op = SnowflakeOperatorForTest(task_id="snowflake-operator", sql=sql)
rows = [
(DB_SCHEMA_NAME, "POPULAR_ORDERS_DAY_OF_WEEK", "ORDER_DAY_OF_WEEK", 1, "TEXT"),
(DB_SCHEMA_NAME, "POPULAR_ORDERS_DAY_OF_WEEK", "ORDER_PLACED_ON", 2, "TIMESTAMP_NTZ"),
(DB_SCHEMA_NAME, "POPULAR_ORDERS_DAY_OF_WEEK", "ORDERS_PLACED", 3, "NUMBER"),
]
dbapi_hook.get_connection.return_value = Connection(
conn_id="snowflake_default",
conn_type="snowflake",
extra={
"account": "test_account",
"region": "us-east",
"warehouse": "snow-warehouse",
"database": DB_NAME,
},
)
dbapi_hook.get_conn.return_value.cursor.return_value.fetchall.side_effect = [rows, []]
lineage = op.get_openlineage_facets_on_start()
assert len(lineage.inputs) == 0
assert lineage.outputs == [
Dataset(
namespace="snowflake://test_account.us-east.aws",
name=f"{DB_NAME}.{DB_SCHEMA_NAME}.POPULAR_ORDERS_DAY_OF_WEEK",
facets={
"schema": SchemaDatasetFacet(
fields=[
SchemaField(name="ORDER_DAY_OF_WEEK", type="TEXT"),
SchemaField(name="ORDER_PLACED_ON", type="TIMESTAMP_NTZ"),
SchemaField(name="ORDERS_PLACED", type="NUMBER"),
]
)
},
)
]
assert lineage.job_facets == {"sql": SqlJobFacet(query=sql)}
assert lineage.run_facets["extractionError"].failedTasks == 1
|
import tensorflow as tf
def tf_expdec(t, t0, t1, v0, v1):
"""
    Return `v0` until `t` reaches `t0`, then exponentially decay
    to `v1` as `t` approaches `t1` and return `v1` thereafter.
Copyright (C) 2018 Lucas Beyer - http://lucasb.eyer.be =)
"""
return tf.train.piecewise_constant(
t, boundaries=[t0, t1],
values=[v0, tf.train.exponential_decay(v0, t-t0, t1-t0, v1/v0), v1])
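# A minimal usage sketch (an assumption: TF1-style graph mode, where the
# tf.train schedule ops used above are available). E.g. to decay a learning
# rate from 1e-3 to 1e-5 between steps 10000 and 50000:
#
#   step = tf.train.get_or_create_global_step()
#   lr = tf_expdec(step, 10000, 50000, 1e-3, 1e-5)
#   train_op = tf.train.AdamOptimizer(lr).minimize(loss)  # `loss` defined elsewhere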
|
from collections import defaultdict
from itertools import product
from typing import List
class Solution:
def pyramidTransition(self, bottom: str, allowed: List[str]) -> bool:
blocks = defaultdict(list)
for x in allowed:
blocks[x[:2]].append(x[-1])
def explore(s):
if len(s) == 2: return s in blocks
for it in product(*[blocks[s[i:i+2]] for i in range(0, len(s)-1)]):
if explore("".join(it)): return True
return False
return explore(bottom)
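# Quick sanity check with made-up rules: "AAA" lets any all-"A" row reduce
# upward, while "AB" has no applicable rule at all.
print(Solution().pyramidTransition("AAA", ["AAA"]))  # True
print(Solution().pyramidTransition("AB", ["AAA"]))   # False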
|
print('ABC'.encode('ascii'))      # b'ABC'
print('中文'.encode('utf-8'))      # b'\xe4\xb8\xad\xe6\x96\x87'
print(b'ABC'.decode('ascii'))     # ABC
print(b'\xe4\xb8\xad\xe6\x96\x87'.decode('utf-8', errors='ignore'))  # 中文
# len() on a str counts characters, not bytes
print(len('jhfdsjkfkskk'))        # 12
print(len('ghjhgjkhgjkbhjkl'))    # 16
|
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
import time
class BrowserInteraction():
def test(self):
        binary = FirefoxBinary(r"C:\Program Files (x86)\Mozilla Firefox\Firefox.exe")
        driver = webdriver.Firefox(firefox_binary=binary)
baseURL = "https://letskodeit.teachable.com/p/practice"
driver.get(baseURL)
#Maximize Window
driver.maximize_window()
print("Window is maximized")
#Open the URL
driver.get(baseURL)
#Current URL
currentURL = driver.current_url
print ("The Current URL is " + str(currentURL))
#Browser Refresh
driver.refresh()
print("Page is Refreshed")
        #Navigate to the current URL again
driver.get(currentURL)
#Open another URL
driver.get("https://www.google.com")
print ("Google Page")
#Browser Back
driver.back()
print ("Browser is Gone back by one step")
#Browser Forward
driver.forward()
print ("Browser is Forwarded")
#Page Source
pagesource = driver.page_source
print (pagesource)
#Browser Quit/Close
driver.close()
driver.quit()
ff = BrowserInteraction()
ff.test() |
class Solution(object):
    def dailyTemperatures(self, temperatures):
        """
        :type temperatures: List[int]
        :rtype: List[int]
        """
        # Temperatures lie in [30, 100], so keep one bucket per temperature
        # holding the nearest index (to the right) where it occurs.
        arr = [-1 for _ in range(71)]
        res = [0] * len(temperatures)
        for i in range(len(temperatures) - 1, -1, -1):
            t = temperatures[i]
            # Nearest indices of strictly warmer temperatures.
            j = [x for x in arr[t - 30 + 1:] if x > -1]
            res[i] = 0 if len(j) == 0 else min(j) - i
            arr[t - 30] = i
        return res
print(Solution().dailyTemperatures([73, 74, 75, 71, 69, 72, 76, 73]))
|
from typing import Iterable, List
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
class WaveNetLayer(nn.Module):
def __init__(
self,
num_channels: int,
kernel_size: int,
dilation: int,
drop: float = 0.25,
leaky: bool = False,
):
super().__init__()
self.num_channels = num_channels
self.kernel_size = kernel_size
self.dilation = dilation
self.leaky = leaky
self.dilated_conv = nn.Conv1d(
in_channels=self.num_channels,
out_channels=self.num_channels,
kernel_size=self.kernel_size,
dilation=self.dilation,
padding=self.dilation,
)
self.conv_1x1 = nn.Conv1d(
in_channels=self.num_channels, out_channels=self.num_channels, kernel_size=1
)
self.drop = nn.Dropout(drop)
if self.leaky:
self.non_lin_func = F.leaky_relu
else:
self.non_lin_func = F.relu
def apply_non_lin(self, y: Tensor) -> Tensor:
return self.non_lin_func(y)
def forward(self, x: Tensor) -> Tensor:
"""
:param x: [B x num_channels x T]
:return: [B x num_channels x T]
"""
y = self.dilated_conv.forward(x)
y = self.apply_non_lin(y) # non-linearity
y = self.conv_1x1.forward(y)
y = self.drop.forward(y) # dropout
y += x # residual connection
return y
class NoFt(nn.Module):
    def __init__(self, in_channels: int, out_dims: int, kernel_size: int = 1):
        super().__init__()
        self.in_channels = in_channels
        self.out_dims = out_dims
        self.kernel_size = kernel_size
        self.last_conv = nn.Conv1d(
            in_channels=self.in_channels,
out_channels=self.out_dims,
kernel_size=self.kernel_size,
)
def forward(self, x: Tensor) -> Tensor:
"""
:param x: [B x in_channels x T]
:return: [B x out_dims x T]
"""
return self.last_conv.forward(x)
class WaveNetBlock(nn.Module):
def __init__(
self,
in_channels: int,
stages: List[int] = (1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024),
out_dims: int = 64,
kernel_size: int = 3,
pooling=True,
pooling_layers: Iterable[int] = (1, 2, 4, 8),
pooling_type: str = "max", # could be "max" and "sum"
dropout_rate=0.25,
leaky=False,
):
super().__init__()
self.in_channels = in_channels
self.num_stages = len(stages)
self.stages = stages
self.out_dims = out_dims
self.kernel_size = kernel_size
self.layers = []
self.pooling = pooling
self.pooling_type = pooling_type
self.pooling_layers = pooling_layers
self.dropout_rate = dropout_rate
self.leaky = leaky
if self.leaky:
self.non_lin_fun = F.leaky_relu
else:
self.non_lin_fun = F.relu
self.first_conv = nn.Conv1d(
in_channels=self.in_channels, out_channels=self.out_dims, kernel_size=1
)
self.last_conv = nn.Conv1d(
in_channels=self.out_dims, out_channels=self.out_dims, kernel_size=1
)
for i in range(self.num_stages):
stage = self.stages[i]
layer = WaveNetLayer(
self.out_dims,
kernel_size=self.kernel_size,
dilation=stage,
drop=self.dropout_rate,
leaky=self.leaky,
)
self.layers.append(layer)
self.add_module("l_{}".format(i), layer)
def forward(self, x: Tensor) -> Tensor:
"""
:param x: [B x in_channels x T]
:return: [B x out_dims x T]
"""
x = self.non_lin_fun(self.first_conv.forward(x))
for i, l in enumerate(self.layers):
x = l.forward(x)
if i in self.pooling_layers and self.pooling:
if self.pooling_type == "max":
x = F.max_pool1d(x, kernel_size=2)
else:
x = F.avg_pool1d(x, kernel_size=2)
x = x * 2
x = self.non_lin_fun(x)
x = self.last_conv.forward(x)
return x
class MSTCNPPFirstStage(nn.Module):
def __init__(
self, num_layers, num_f_maps, input_dim, output_dim, pooling_layers=(1, 2, 4, 8)
):
super().__init__()
self.num_layers = num_layers
self.conv_1x1_in = nn.Conv1d(input_dim, num_f_maps, 1)
self.conv_dilated_1 = nn.ModuleList(
(
nn.Conv1d(
num_f_maps,
num_f_maps,
3,
padding=2 ** (num_layers - 1 - i),
dilation=2 ** (num_layers - 1 - i),
)
for i in range(num_layers)
)
)
self.conv_dilated_2 = nn.ModuleList(
(
nn.Conv1d(num_f_maps, num_f_maps, 3, padding=2 ** i, dilation=2 ** i)
for i in range(num_layers)
)
)
self.conv_fusion = nn.ModuleList(
(nn.Conv1d(2 * num_f_maps, num_f_maps, 1) for i in range(num_layers))
)
self.dropout = nn.Dropout()
self.conv_out = nn.Conv1d(num_f_maps, output_dim, 1)
self.pooling_layers = pooling_layers
def forward(self, x):
"""
:param x: [B x in_channels x T]
:return: [B x out_dims x T]
"""
f = self.conv_1x1_in(x)
for i in range(self.num_layers):
f_in = f
f = self.conv_fusion[i](
torch.cat([self.conv_dilated_1[i](f), self.conv_dilated_2[i](f)], 1)
)
f = F.relu(f)
f = self.dropout(f)
f = f + f_in
if i in self.pooling_layers:
f = F.max_pool1d(f, kernel_size=2)
out = self.conv_out(f)
return out
if __name__ == "__main__":
inp = torch.rand((1, 64, 1024))
n1 = WaveNetBlock(
in_channels=64,
stages=[1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024],
out_dims=64,
pooling_layers=[1, 2],
)
n2 = MSTCNPPFirstStage(
num_layers=11,
num_f_maps=64,
input_dim=64,
output_dim=64,
pooling_layers=[1, 2],
)
n3 = NoFt(
        in_channels=64,
out_dims=64,
kernel_size=1
)
print(inp.shape)
o1 = n1.forward(inp)
o2 = n2.forward(inp)
o3 = n3.forward(inp)
print(o1.shape, o2.shape, o3.shape)
|
def dfs(val, elems, visited):
    # Length of the consecutive run starting at `val`; results are memoized
    # in `visited` so every element is expanded at most once (O(n) overall).
    if val not in elems:
        return 0
    if val in visited:
        return visited[val]
    visited[val] = 1 + dfs(val + 1, elems, visited)
    return visited[val]
def find(x, parents):
    # Helper for an alternative union-find formulation: follow parent
    # pointers down to the smallest element of a consecutive chain.
    while parents[x] != x:
        x = parents[x]
    return x
def main(nums):
    distinctelems = set(nums)
    visited = {}
    max_cons = 0
    for elem in distinctelems:
        if elem not in visited:
            max_cons = max(max_cons, dfs(elem, distinctelems, visited))
    return max_cons
    # Alternative using find(): chain each element to its predecessor
    # (parents[el] = el - 1 if el - 1 in distinctelems else el) and take
    # max(el - find(el, parents) + 1) over all elements.
nums = [3, 4, 2]
print(main(nums))  # longest consecutive run is 2, 3, 4 -> 3
|
from django.db import models
# Create your models here.
class Post(models.Model):
sno = models.AutoField(primary_key=True)
title = models.CharField(max_length=255)
content = models.TextField()
author = models.CharField(max_length=255)
# views = models.IntegerField(default=0)
slug = models.CharField(max_length=255)
    timeStamp = models.DateTimeField(blank=True)
    def __str__(self):
        return self.title + ' by ' + self.author
|
"""change update to use timestamp
Revision ID: d0fd292e452
Revises: 1617b96530fc
Create Date: 2016-01-24 17:31:53.319131
"""
# revision identifiers, used by Alembic.
revision = 'd0fd292e452'
down_revision = '1617b96530fc'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.alter_column('update_log', 'datetime', type_=sa.types.DateTime(timezone=True))
def downgrade():
op.alter_column('update_log', 'datetime', type_=sa.types.DateTime(timezone=False))
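# To apply this migration: `alembic upgrade head`;
# to roll back to the previous revision: `alembic downgrade 1617b96530fc`.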
|
# Copyright 2020-2023 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Union
import torch as th
TensorDict = Dict[Union[str, int], th.Tensor]
class DictRolloutBufferSamples():
def __init__(self, observations, actions, old_values, old_log_prob, advantages, returns, aux_angle, aux_angle_gt):
self.observations = observations
self.actions = actions
self.old_values = old_values
self.old_log_prob = old_log_prob
self.advantages = advantages
self.returns = returns
self.aux_angle = aux_angle
self.aux_angle_gt = aux_angle_gt
|
from django.db import models
from django.db.models.signals import pre_save
from django.urls import reverse
# Create your models here.
from ecommerce.utils import unique_slug_generator
class ProductManager(models.Manager):
    def get_by_id(self, id):
        qs = self.get_queryset().filter(id=id)
        if qs.count() == 1:
            return qs.first()
        return None
class Product(models.Model):
title=models.CharField(max_length=50)
slug =models.SlugField(blank=True,unique=True)
description=models.TextField(max_length=1000)
price=models.DecimalField(max_digits=10, decimal_places=2,default=0.0)
image=models.ImageField(upload_to="products/",null=True,blank=True)
featured=models.BooleanField(default=True)
active=models.BooleanField(default=True)
timestamp=models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
objects=ProductManager()
def get_absolute_url(self):
#return "/products/{slug}/".format(slug=self.slug)
return reverse("productsdetailfeatured",kwargs={"slug":self.slug} )
def product_presave(sender,instance,**args):
if not instance.slug:
instance.slug=unique_slug_generator(instance)
pre_save.connect(product_presave,sender=Product)
|
import sys
def convert_color(l):
    term_reset = '\x1b[0m'
    html_end_tag = "</span>"
    for col, code in color_dict.items():
        l = l.replace(f'\x1b[1;{code}m', f'<span style="color:{col};font-weight:bold;">')
    l = l.replace(term_reset, html_end_tag)
    return l
if __name__ == '__main__':
color_dict = {'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'purple': 35,
'teal': 36,
'white': 37}
aln_file = sys.argv[1]
html_file = aln_file[:-len(".aln")] + '.html'
html_header = """<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>stdin</title>
</head>
<body>
<pre>
"""
with open(html_file, 'w') as html_wr:
html_wr.write(html_header)
for l in open(aln_file):
html_wr.write(convert_color(l))
        html_wr.write('</pre></body></html>')
|
from entities.account import Account
from daos.account_dao import AccountDAO
from unittest import TestCase
from daos.account_dao_local import AccountDaoLocal
from exceptions.resource_not_found import ResourceNotFound
from daos.account_dao_postgres import AccountDaoPostgres
from entities.customer import Customer
from daos.customer_dao import CustomerDAO
from daos.customer_dao_postgres import CustomerDaoPostgres
from daos.customer_dao_local import CustomerDaoLocal
account_dao: AccountDAO = AccountDaoPostgres()
#account_dao: AccountDAO = AccountDaoLocal()
customer_dao: CustomerDAO = CustomerDaoPostgres()
#customer_dao: CustomerDAO = CustomerDaoLocal()
# An entity that has not been saved yet should carry a placeholder id of 0;
# this is a common convention across tech stacks.
# (Relatedly, many applications store date information as a unix epoch:
# seconds since midnight, Jan 1st 1970.)
test_customer = Customer(0, "Mohammad", "Asif", "2222222", "hotmail.com", " 21 Wes Lane")
customer_dao.create_customer(test_customer)
test_account = Account(0, 100.0, "Saving", 0)
test_account1 = Account(-1, 100.0, "Saving", 3)
# Pytest runs tests in order from top to bottom
def test_create_account():
test_account.customer_id = test_customer.customer_id
account_dao.create_account(test_account)
assert test_account.account_number != 0
def test_get_account_by_id():
    account = account_dao.create_account(test_account)
    result = account_dao.get_account_by_aid(account.account_number)
    TestCase().assertDictEqual(result.as_jason_dic(), account.as_jason_dic())
def test_get_account_by_id_fail():
try:
account_dao.get_account_by_aid(-1)
assert False
except ResourceNotFound:
assert True
def test_delete_account_by_id():
account = account_dao.create_account(test_account)
assert account_dao.delete_account(account.account_number)
try:
        account_dao.get_account_by_aid(account.account_number)
assert False
except ResourceNotFound:
assert True
def test_update_account():
    account = account_dao.create_account(test_account)
    account.account_type = "CheckingSave"
    updated_account = account_dao.update_account(account)
    assert updated_account.account_type == "CheckingSave"
def test_update_account_fail():
try:
account_dao.update_account(test_account1)
assert False
except ResourceNotFound:
assert True
|
import numpy as np
class UnionFind:
"""
Union-Find data structure.
"""
_size = 0
    # Parent pointer of each element in the forest.
    # If an element is its own parent, it is the root of its tree.
    _parent = []
    # Depth bound (rank) of each node in its tree,
    # used to keep the trees balanced.
    _rank = []
def __init__(self, size):
self._size = size
self._parent = [-1] * size
self._rank = [-1] * size
def makeset(self, x):
"""
Создать новый класс с представителем x.
:param x: представитель нового класса
:return:
"""
self._parent[x] = x
self._rank[x] = 0
def union(self, x, y):
"""
Объединение двух классов с представителями x и y,
x назначается представителем нового класса.
:param x: представитель класса, в который вливается другой класс
:param y: представитель вливаемого класса
:return:
"""
root_x = self.find(x)
root_y = self.find(y)
        # If both elements already belong to the same class, nothing to do
if root_x != root_y:
            # Attach the tree of smaller depth under the deeper tree
if self._rank[root_x] < self._rank[root_y]:
self._parent[root_x] = root_y
else:
self._parent[root_y] = root_x
                # If both ranks were equal, the merged tree got one level deeper
if self._rank[root_x] == self._rank[root_y]:
self._rank[root_x] += 1
def find(self, x):
"""
Определить класс, к которому принадлежит элемент x.
Рекурсивно поднимаемся по дереву до корня.
:param x:
:return:
"""
return x if x == self._parent[x] else self.find(self._parent[x])
def __repr__(self):
return str(self._parent)
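# A small usage sketch of the class above:
if __name__ == "__main__":
    uf = UnionFind(5)
    for i in range(5):
        uf.makeset(i)
    uf.union(0, 1)
    uf.union(3, 4)
    print(uf.find(1) == uf.find(0))  # True: 0 and 1 were merged
    print(uf.find(2) == uf.find(3))  # False: 2 is still a singleton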
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/9/21 10:32 AM
# @Author : jlinka
# @File : complex_search.py
import pandas as pd
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from collections import defaultdict
import time
DIMENSION = 128
def kmeans_search_condition(cursor):  # k-means clustering over the documents matched by a conditional query
start = time.time()
c = []
classfi = cursor.count() // 5
if classfi == 0:
        classfi = 1
cop = cursor.clone()
for i in range(cursor.count()):
# print(i.get('content'))
# c.append(jieba.lcut(i.get('content'), cut_all=False, HMM=True))
c.append(cursor[i].get('content'))
vectorizer = CountVectorizer()
transformer = TfidfTransformer()
tfidf = transformer.fit_transform(vectorizer.fit_transform(c))
word = vectorizer.get_feature_names()
weight = tfidf.toarray()
print(weight)
estimator = PCA(n_components=DIMENSION)
pca_x_train = estimator.fit_transform(weight)
print(pca_x_train)
kmeans = KMeans(n_clusters=classfi, random_state=0).fit(pca_x_train)
center = kmeans.cluster_centers_
df_center = pd.DataFrame(center)
labels = kmeans.labels_
print(list(labels))
print(c)
i = 0
kdata = []
s = list(labels)
d = defaultdict(list)
for k, va in [(v, i) for i, v in enumerate(s)]:
d[k].append([{"title": cop[va].get("title"), "createTime": cop[va].get("createTime"),
"source": cop[va].get("source"), "auther": cop[va].get("auther"),
"content": cop[va].get("content"), "_id": cop[va].get("_id"), "url": cop[va].get("url")}])
end = time.time()
print(end - start)
print(d)
return classfi, d
# ndict = {}
# for i in range(classfi):
# ndict[i] = []
# for j in d[i]:
# ndict[i].append([{"title": cop[j].get("title")}, {"createTime": cop[j].get("createTime")},
# {"source": cop[j].get("source")}, {"auther": cop[j].get("auther")},
# {"content": cop[j].get("content")}])
# kdata.append(ndict)
# while i < classfi:
# j = 0
# ndict = {}
# ndict[i] = []
# while j < cop.count():
# if labels[j] == i:
# ndict[i].append([{"title": cop[j].get("title")}, {"createTime": cop[j].get("createTime")},
# {"source": cop[j].get("source")}, {"auther": cop[j].get("auther")},
# {"content": cop[j].get("content")}])
# j += 1
# kdata.append(ndict)
# i += 1
print("test")
|
# -*- coding: utf-8 -*-
# Sample script which demonstrates how to work with custom DEM in maperipy.
# The script downloads a DEM tile of a small part of Alps from http://www.viewfinderpanoramas.org/.
# It then generates a hillshading using this DEM tile.
# Author: Igor Brejc
# License: public domain
from maperipy import *
from maperipy.relief import HillShadingProcessor
from maperipy.relief import IgorHillshader
from maperipy.relief import ReliefUtils
from maperipy.relief import CustomSrtm1Source
import os
import urllib
import zipfile
if Map.coord_srid != Srid.Wgs84LonLat:
raise AssertionError("Your map needs to use WGS84 SRS in order to be able to run this script.")
# The subdirectory where we will store the DEM data.
dem_dir = 'viewfinder'
if not os.path.exists(dem_dir):
os.makedirs(dem_dir)
# Download the tile if it's not already on the disk.
if not os.path.exists('viewfinder/N46E010.hgt'):
zip_name = 'viewfinder/N46E010.zip'
# Download a sample DEM tile from Viewfinder
print 'Downloading the DEM zip file...'
urllib.urlretrieve("http://www.viewfinderpanoramas.org/dem1/N46E010.zip", zip_name)
print 'Extracting the DEM tile from the zip file...'
myzip = zipfile.ZipFile(zip_name, 'r')
myzip.extractall(dem_dir)
myzip.close()
# Create a custom DEM source which uses the specific disk directory.
dem_source = CustomSrtm1Source(dem_dir)
# We define the area of interest.
relief_bbox = BoundingBox(Srid.Wgs84LonLat, 10.1, 46.2, 10.8, 46.8)
# Here we create a custom map layer for contours and hillshading.
custom_layer = Map.add_custom_layer()
# Fetch DEM for that area...
dem = dem_source.fetch_dem(relief_bbox)
# Make sure the DEM raster is not empty
if dem.is_empty:
raise AssertionError("The DEM is empty.")
# Now we create a hillshading bitmap for the same area.
# We use intensity of 2 and the blue color.
bitmap = HillShadingProcessor.shade(dem, relief_bbox, Map.proj_srid, 1, IgorHillshader(), Color("blue"), 2)
# Here we create a RasterSymbol for our bitmap...
bitmap_symbol = RasterSymbol(bitmap.srid)
# ... and add the bitmap to the symbol.
bitmap_symbol.add(bitmap)
# Finally we add our raster symbol to the custom layer.
custom_layer.add_symbol(bitmap_symbol)
# Zoom to the hillshading.
Map.zoom_area(custom_layer.bounding_box)
|