hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
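The sample records below follow this schema, one field value per line, in the column order listed above. As a minimal sketch of working with such records, the snippet loads one shard into pandas and filters on two of the quality-signal columns; the file name "sample.parquet" is a placeholder, not a file that ships with this listing.

```python
# Minimal sketch: load records with the schema above and inspect them.
# "sample.parquet" is a hypothetical shard path; substitute the real one.
import pandas as pd

df = pd.read_parquet("sample.parquet")

# The declared dtypes (string, int64, float64, list, bool) should appear here.
print(df.dtypes)

# Example filter: Python files with little duplicated 5-gram content.
mask = (df["lang"] == "Python") & \
       (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)
print(df.loc[mask, ["max_stars_repo_name", "size", "hits"]])
```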
3ca8a031e834da99072825ced0345d9b09b41442
62,535
py
Python
ActionFunctions.py
medmatix/DataSciCalc
0a887ab53824994c7bf7c76a20bd20c7507545f9
[ "MIT" ]
null
null
null
ActionFunctions.py
medmatix/DataSciCalc
0a887ab53824994c7bf7c76a20bd20c7507545f9
[ "MIT" ]
3
2018-09-17T16:41:49.000Z
2018-11-12T20:52:56.000Z
ActionFunctions.py
medmatix/DataSciCalc
0a887ab53824994c7bf7c76a20bd20c7507545f9
[ "MIT" ]
null
null
null
''' Module: Function Key action methods for DataSciCalc Calculator Created on Sep 8, 2018 updated Sep 21, 2018 13:46PM @version: DSC0.019 @license: MIT @author: David York @copyright: 2018 David A York ''' import tkinter as tk from tkinter import ttk from tkinter import filedialog, simpledialog from tkinter import messagebox as mBox import os import csv import time from datetime import datetime import math as mt import statistics as st import numpy as np from scipy import stats import pandas as pd import matplotlib.mlab as mlab import matplotlib.pyplot as plt from builtins import list from _hashlib import new from mbox import MessageBox from pandastable import Table, TableModel from tkinter import * from munging import Munging as mg #===================================================== # Class definitions #===================================================== class ActionFunctions(): ''' GUI element activation Function calls, actions called in response to button presses General Mathematical and Statistical helper Functions for callbacks etc. ''' ''' Constructor for ActionFunction selt tests ''' def __init__(self): print("initialized ActionFunctions") # module variables and constants # Register and variable cleanup functions ############################ ''' Key pad Implementation Functions and Methods ''' def do_clrx(self): # clear the entry in the current input register self.inxRegStr = '' self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, self.inxRegStr) self.history.insert(tk.END, 'CLEAR x Reg \n') self.history.see(tk.END) print('cleared x register') def do_clrL(self): # clear the entry in the current input register self.inLRegStr = '' self.inLStr.delete(1.0,tk.END) self.inLStr.insert(tk.INSERT, self.inLRegStr) self.history.insert(tk.END, 'CLEAR L Reg \n') self.history.see(tk.END) print('cleared List register') def do_clrAllRegr(self): # clear all the registers and variables for a new calculation stream self.inxRegStr = '' self.inLRegStr = '' self.inxStr.delete(0, tk.END) self.inLStr.delete(1.0, tk.END) self.x = 0.0 self.y = 0.0 self.z = 0.0 self.L = [0] self.resVar = 0.0 self.Lflag = False self.xFlag = False # log action to history self.history.insert(tk.END, 'CLEAR ALL \n') self.history.see(tk.END) self.inxStr.insert(tk.INSERT, self.inxRegStr) self.inLStr.insert(tk.INSERT, self.inLRegStr) print('cleared all registers and variables') def do_enterx(self): self.y = self.x self.yValStr['text'] = str(self.y) try: self.inxRegStr = self.inxStr.get() self.x = float(self.inxStr.get()) except: self.castError() print("the x input can't be blank, a '0' is at least needed") ActionFunctions.do_clrx(self) # log action to history self.history.insert(tk.END, 'x ENTERED ' + str(self.x) + '\n') self.history.see(tk.END) self.xFlag = True self.inxStr.focus() print('current Register: ' + self.inxRegStr) print('current Variable: ' + str(self.x)) print("Entered x register into x variable and clear x register") print('y Variable: ' + str(self.y)) def do_enterL(self): tmpL = [] try: self.inLRegStr = self.inLStr.get(1.0, tk.END).split(',') print(self.inLRegStr) for i in self.inLRegStr: tmpL.append(float(i)) self.L = tmpL tmpL = [] except: self.listError() print("There is an error in the list entry") # log action to history self.history.insert(tk.END, 'LIST ENTERED ' + str(self.L) + '\n') self.history.see(tk.END) self.Lflag = True self.xyFunctKeys.grid_forget() self.listFunctKeys.grid() self.inLStr.focus() print('list L is: ' + str(self.L)) def do_appendx(self): tmpL = self.L self.y = self.x 
self.yValStr['text'] = str(self.y) try: self.inxRegStr = self.inxStr.get() tmpxs = self.inxRegStr print(tmpxs) tmpxf = float(tmpxs) print("x to l = {}".format(tmpxf)) tmpL.append(tmpxf) print(tmpL) print(self.L) except: self.castError() print("the x input can't be blank, a '0' is at least needed") return self.inLStr.delete(1.0,tk.END) self.inLStr.insert(tk.INSERT, self.L.__str__()) ActionFunctions.do_clrx(self) # log action to history self.history.insert(tk.END, 'x ENTERED ' + str(self.x) + '\n') self.history.see(tk.END) self.xFlag = True self.inxStr.focus() print('current Register: ' + self.inxRegStr) print('current Variable: ' + str(self.x)) print("Entered x register into x variable and clear x register") print('y Variable: ' + str(self.y)) # appending digits to input def append_digit0(self): self.inxRegStr = self.inxRegStr + '0' self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, self.inxRegStr) self.xFlag = False def append_digit1(self): self.inxRegStr = self.inxRegStr + '1' self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, self.inxRegStr) self.xFlag = False def append_digit2(self): self.inxRegStr = self.inxRegStr + '2' self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, self.inxRegStr) self.xFlag = False def append_digit3(self): self.inxRegStr = self.inxRegStr + '3' self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, self.inxRegStr) self.xFlag = False def append_digit4(self): self.inxRegStr = self.inxRegStr + '4' self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, self.inxRegStr) self.xFlag = False def append_digit5(self): self.inxRegStr = self.inxRegStr + '5' self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, self.inxRegStr) self.xFlag = False def append_digit6(self): self.inxRegStr = self.inxRegStr + '6' self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, self.inxRegStr) self.xFlag = False def append_digit7(self): self.inxRegStr = self.inxRegStr + '7' self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, self.inxRegStr) self.xFlag = False def append_digit8(self): self.inxRegStr = self.inxRegStr + '8' self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, self.inxRegStr) self.xFlag = False def append_digit9(self): self.inxRegStr = self.inxRegStr + '9' self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, self.inxRegStr) self.xFlag = False def append_minSgn(self): self.inxRegStr = self.inxRegStr + '-' self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, self.inxRegStr) self.xFlag = False def append_dec(self): self.inxRegStr = self.inxRegStr + '.' 
self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, self.inxRegStr) self.xFlag = False def append_comma(self): # append a comma to register self.inxRegStr = self.inxRegStr + ',' self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, self.inxRegStr) self.xFlag = False # doing discrete XY operations and functions ####################### ''' Discrete Variable Functions ''' def do_add(self): # check for entered button if not self.xFlag: self.arithmeticError() return # add variables entered together self.resVar = self.y + self.x # log action to history self.history.see(tk.END) # clear register before transfering result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'SUM ' + str(self.resVar) + '\n') # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() print("adding") print("sum is {}".format(self.resVar)) def do_subt(self): # check for entered button if not self.xFlag: self.arithmeticError() return # subtract variables entered self.resVar = self.y - self.x # log action to history self.history.see(tk.END) # clear register before transfering result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'DIFF ' + str(self.resVar) + '\n') # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() print("subtracting") print("difference is {}".format(self.resVar)) def do_mult(self): # check for entered button if not self.xFlag: self.arithmeticError() return # multiply variables entered self.resVar = self.y * self.x # log action to history self.history.see(tk.END) # clear register before transfering result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'PROD ' + str(self.resVar) + '\n') # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() print("multiplying") print("product is {}".format(self.resVar)) def do_div(self): # check for entered button if not self.xFlag: self.arithmeticError() return # divide variables entered, second from first try: self.resVar = self.y / self.x except: self.improperInputError() return # log action to history self.history.see(tk.END) # clear register before transferring result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'DIVD ' + str(self.resVar) + '\n') # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() print("dividing") print("dividend is {}".format(self.resVar)) def do_switchxy(self): temp = self.y self.y = self.x self.x = temp self.yValStr['text'] = str(self.y) self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.x)) self.history.insert(tk.END, 'EXCHG X & Y \n' + 'x = ' + str(self.x)+', y = ' + str(self.y) + '\n') print ('switch x and y') print ('x = {}, y = {}'.format(self.x, self.y)) def do_xpowy(self): # check for entered button if not self.xFlag: self.arithmeticError() return self.resVar = (self.y)**(self.x) self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'x^y ' + str(self.resVar) + '\n') self.history.see(tk.END) # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() # do something else to (x) print('x^y') print("y power of x is {}".format(self.resVar)) def do_sqrt(self): # check for entered button if not self.xFlag: self.arithmeticError() return # calculate sqrt(x) 
self.resVar = mt.sqrt(self.x) # log action to history self.history.see(tk.END) # clear register before transferring result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'SQRT ' + str(self.resVar) + '\n') self.history.see(tk.END) # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() print("square of x is {}".format(self.resVar)) print('sqrt') def do_invert(self): # check for entered button if not self.xFlag: self.arithmeticError() return # calculate square of (x) self.resVar = 1/self.x # log action to history self.history.see(tk.END) # clear register before transferring result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'INVERSE ' + str(self.resVar) + '\n') self.history.see(tk.END) # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() print("inverse of x is {}".format(self.resVar)) print('inverse') print('inverse of x') # calculate inverse (x) print('inverted x') def do_power2(self): # check for entered button if not self.xFlag: self.arithmeticError() return # calculate square of (x) self.resVar = self.x**2 # log action to history self.history.see(tk.END) # clear register before transferring result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'POWER2 ' + str(self.resVar) + '\n') self.history.see(tk.END) # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() print("square of x is {}".format(self.resVar)) print('sqrt') print('squared x') def do_sgn(self): # check for entered button if not self.xFlag: ActionFunctions.do_enterx(self) # do change of sign too (x) self.x = self.x * -1 # log action to history self.history.see(tk.END) # clear register before transferring result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.x)) self.history.insert(tk.END, ' +/- ' + str(self.x) + '\n') self.history.see(tk.END) print("sign changed, x is now {}".format(self.x)) print('change of sign') def do_cos(self): # check for entered button if not self.xFlag: self.arithmeticError() return # calculate cos(x) (x in radians!!! 
self.resVar = mt.cos(self.x) # log action to history self.history.see(tk.END) # clear register before transferring result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'COS ' + str(self.resVar) + '\n') self.history.see(tk.END) # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() print("cosine of x is {}".format(self.resVar)) print('cosine') def do_sin(self): # check for entered button if not self.xFlag: self.arithmeticError() return # calculate sqrt(x) self.resVar = mt.sin(self.x) # log action to history self.history.see(tk.END) # clear register before transferring result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'SIN ' + str(self.resVar) + '\n') self.history.see(tk.END) # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() print(" sine of x is {}".format(self.resVar)) print('sine') def do_tan(self): # check for entered button if not self.xFlag: self.arithmeticError() return # calculate tangent(x) self.resVar = mt.tan(self.x) # log action to history self.history.see(tk.END) # clear register before transferring result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'TAN ' + str(self.resVar) + '\n') self.history.see(tk.END) # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() print("tangent of x is {}".format(self.resVar)) print('tangent') def do_acos(self): # check for entered button if not self.xFlag: self.arithmeticError() return # calculate cos(x) (x in radians!!! self.resVar = mt.acos(self.x) # log action to history self.history.see(tk.END) # clear register before transferring result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'COS ' + str(self.resVar) + '\n') self.history.see(tk.END) # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() print("cosine of x is {}".format(self.resVar)) print('cosine') def do_asin(self): # check for entered button if not self.xFlag: self.arithmeticError() return # calculate sqrt(x) self.resVar = mt.asin(self.x) # log action to history self.history.see(tk.END) # clear register before transferring result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'SIN ' + str(self.resVar) + '\n') self.history.see(tk.END) # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() print(" sine of x is {}".format(self.resVar)) print('sine') def do_atan(self): # check for entered button if not self.xFlag: self.arithmeticError() return # calculate tangent(x) self.resVar = mt.atan(self.x) # log action to history self.history.see(tk.END) # clear register before transferring result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'TAN ' + str(self.resVar) + '\n') self.history.see(tk.END) # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() print("tangent of x is {}".format(self.resVar)) print('tangent') def do_log10(self): # check for entered button if not self.xFlag: self.arithmeticError() return # calculate base 10 log(x) try: self.resVar = mt.log10(self.x) except: self.improperInputError() return # log action to history self.history.see(tk.END) # clear register before transferring result there 
self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'LOG10 ' + str(self.resVar) + '\n') self.history.see(tk.END) # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() print("log10 of x is {}".format(self.resVar)) print('LOG') def do_ln(self): # check for entered button if not self.xFlag: self.arithmeticError() return # calculate natural log(x) try: self.resVar = mt.log(self.x) except: self.improperInputError() return # log action to history self.history.see(tk.END) # clear register before transferring result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'LN ' + str(self.resVar) + '\n') self.history.see(tk.END) # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() print("ln of x is {}".format(self.resVar)) print('ln') def get_pi(self): # get constant pi self.x = mt.pi # log action to history self.history.see(tk.END) # clear register before transferring result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.x)) self.history.insert(tk.END, ' PI ' + str(self.x) + '\n') self.history.see(tk.END) self.xFlag = True self.inxStr.focus() print("PI is {}".format(self.x)) print('pi ') def do_exp(self): # check for entered button if not self.xFlag: self.arithmeticError() return # calculate exp(x) self.resVar = mt.exp(self.x) # log action to history self.history.see(tk.END) # clear register before transferring result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'EXP ' + str(self.resVar) + '\n') self.history.see(tk.END) # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() print("exp of x is {}".format(self.resVar)) print('exp()') def do_factorial(self): print("factorial pending") def get_e(self): # get constant e self.x = mt.e # log action to history self.history.see(tk.END) # clear register before transferring result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.x)) self.history.insert(tk.END, ' e is ' + str(self.x) + '\n') self.history.see(tk.END) self.xFlag = True self.inxStr.focus() print("e is {}".format(self.x)) print(' e ') def get_phi(self): # calculate PHI - golden ratio self.x = (1 + mt.sqrt(5))/2 # log action to history self.history.see(tk.END) # clear register before transferring result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.x)) self.history.insert(tk.END, ' PHI is ' + str(self.x) + '\n') self.history.see(tk.END) self.xFlag = True self.inxStr.focus() print("golden ratio (PHI) is {}".format(self.x)) print(" phi ") def do_deg2rad(self): # check for entered button if not self.xFlag: self.arithmeticError() return # convert degrees in x to radians (x) self.resVar = mt.radians(self.x) # log action to history self.history.see(tk.END) # clear register before transferring result there self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.resVar)) self.history.insert(tk.END, 'DEG2RAD ' + str(self.resVar) + '\n') self.history.see(tk.END) # set up for chain operation self.x = self.resVar self.xFlag = True self.inxStr.focus() print("Deg to Radians of x is {}".format(self.resVar)) print('DEG2RAD') # doing L x operations and functions ------------------------------------------ def do_addL(self): # check for entered button if not self.xFlag: self.arithmeticError() return if not self.Lflag: self.arithmeticError() return # add 
variables entered together newL = [i + self.x for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transfering result there if (len(self.inLStr.get(1.0,tk.END))) > 0: self.inLStr.delete(1.0,tk.END) print("cleared") self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'x ADDto L ' + str(self.L) + '\n') # set up for chain operation self.x = 0 self.xFlag = True self.inxStr.focus() print("adding x to L") print("L + x is {}".format(self.L)) def do_subtL(self): # check for entered button if not self.xFlag: self.arithmeticError() return if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [i - self.x for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inLStr.get(1.0,tk.END))) > 0: self.inLStr.delete(1.0,tk.END) print("cleared") self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'x SUBTfrom L ' + str(self.L) + '\n') # set up for chain operation self.x = 0 self.xFlag = True self.inxStr.focus() print("subtracting x from all L") print("differences are {}".format(self.L)) def do_multL(self): # check for entered button if not self.xFlag: self.arithmeticError() return if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [i * self.x for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inLStr.get(1.0,tk.END))) > 0: self.inLStr.delete(1.0,tk.END) print("cleared") self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'L MULTby x ' + str(self.L) + '\n') # set up for chain operation self.x = 0 self.xFlag = True self.inxStr.focus() print("multiplying L by") print("product is {}".format(self.L)) def do_divL(self): # check for entered button if not self.xFlag: self.arithmeticError() return if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [i / self.x for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inLStr.get(1.0,tk.END))) > 0: self.inLStr.delete(1.0,tk.END) print("cleared") self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'L DIVby x ' + str(self.L) + '\n') # set up for chain operation self.x = 0 self.xFlag = True self.inxStr.focus() print("dividing L by x") print("dividend is {}".format(self.L)) # List Functions ####################################### ''' List Mathematics FUnctions ''' def do_sumL(self): # check for entered button if not self.Lflag: self.arithmeticError() return # add variables entered together self.x = sum(self.L) # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inLStr.get(1.0,tk.END))) > 0: self.inLStr.delete(1.0,tk.END) if (len(self.inxStr.get())) > 0: self.inxStr.delete(0,tk.END) print("cleared") self.inxStr.insert(tk.INSERT, str(self.x)) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'SUM of L ' + str(self.x) + '\n') # set up for chain operation self.xFlag = True self.inxStr.focus() print("sum L") print("sum is {}".format(self.x)) def do_prodL(self): # check for entered button if not self.Lflag: self.arithmeticError() return # add variables entered together self.x = np.prod(self.L) # log action to history self.history.see(tk.END) # clear register before transferring result there if 
(len(self.inxStr.get())) > 0: self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.x)) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'PROD of L ' + str(self.x) + '\n') # set up for chain operation self.xFlag = True self.inxStr.focus() print("PROD L") print("product is {}".format(self.x)) def do_sqrtL(self): # check for entered button if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [mt.sqrt(i) for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inLStr.get(1.0,tk.END))) > 0: self.inLStr.delete(1.0,tk.END) print("cleared") self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'L squared' + str(self.L) + '\n') # set up for chain operation self.x = 0 self.xFlag = True self.inxStr.focus() print("L squared") print("L squared is {}".format(self.L)) def do_invertL(self): # check for entered button if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [1/i for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there self.inLStr.delete(1.0,tk.END) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'INVERSE of L' + str(self.L) + '\n') self.history.see(tk.END) # set up for chain operation self.xFlag = True self.inxStr.focus() print("inverse of x is {}".format(self.resVar)) print('inverse') print('inverse of x') # calculate inverse (x) print('inverted x') # do something else to (x) print('x^y') print("y power of x is {}".format(self.resVar)) def do_Lpowx(self): # check for entered button if not self.xFlag: self.arithmeticError() return if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [i**self.x for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inLStr.get(1.0,tk.END))) > 0: self.inLStr.delete(1.0,tk.END) print("cleared") self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'L toPOWER x ' + str(self.L) + '\n') # set up for chain operation self.x = 0 self.xFlag = True self.inxStr.focus() print("L toPOWER x") print("result list is {}".format(self.L)) def do_Lpower2(self): # check for entered button if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [i**2 for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inLStr.get(1.0,tk.END))) > 0: self.inLStr.delete(1.0,tk.END) print("cleared") self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'L squared' + str(self.L) + '\n') # set up for chain operation self.x = 0 self.xFlag = True self.inxStr.focus() print("L squared") print("L squared is {}".format(self.L)) def do_sgnL(self): # check for entered button if not self.Lflag: self.arithmeticError() return # do change of sign too (x) self.L = [i*(-1) for i in self.L] # log action to history self.history.see(tk.END) # clear register before transferring result there self.inLStr.delete(1.0,tk.END) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, ' +/- ' + str(self.L) + '\n') self.history.see(tk.END) print("sign changed, L is now {}".format(self.L)) print('change of sign all L') def do_cosL(self): # check for entered button if not self.Lflag: self.arithmeticError() return # add variables entered 
together newL = [mt.cos(i) for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there self.inLStr.delete(1.0,tk.END) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'cosine of L' + str(self.L) + '\n') self.history.see(tk.END) # set up for chain operation self.xFlag = True self.inxStr.focus() print("cosine of L is {}".format(self.L)) print('cosine') def do_sinL(self): # check for entered button if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [mt.sin(i) for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there self.inLStr.delete(1.0,tk.END) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'sine of L' + str(self.L) + '\n') self.history.see(tk.END) # set up for chain operation self.xFlag = True self.inxStr.focus() print("sine of L is {}".format(self.L)) print('sine') def do_tanL(self): # check for entered button if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [mt.tan(i) for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there self.inLStr.delete(1.0,tk.END) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'tangent of L' + str(self.L) + '\n') self.history.see(tk.END) # set up for chain operation self.xFlag = True self.inxStr.focus() print("tangent of L is {}".format(self.L)) print('tangent') def do_acosL(self): # check for entered button if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [mt.acos(i) for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there self.inLStr.delete(1.0,tk.END) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'arcCosine of L' + str(self.L) + '\n') self.history.see(tk.END) # set up for chain operation self.xFlag = True self.inxStr.focus() print("arcCosine of L is {}".format(self.L)) print('arcCosine') def do_asinL(self): # check for entered button if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [mt.asin(i) for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there self.inLStr.delete(1.0,tk.END) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'arcsine of L' + str(self.L) + '\n') self.history.see(tk.END) # set up for chain operation self.xFlag = True self.inxStr.focus() print("arcsine of L is {}".format(self.L)) print('arcsine') def do_atanL(self): # check for entered button if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [mt.atan(i) for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there self.inLStr.delete(1.0,tk.END) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'arctan of L' + str(self.L) + '\n') self.history.see(tk.END) # set up for chain operation self.xFlag = True self.inxStr.focus() print("arctan of L is {}".format(self.L)) print('arctan') def do_log10L(self): # check for entered button if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [mt.log10(i) for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result 
there self.inLStr.delete(1.0,tk.END) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'log10 of L' + str(self.L) + '\n') self.history.see(tk.END) # set up for chain operation self.xFlag = True self.inxStr.focus() print("log10 of L is {}".format(self.L)) print('Log10') def do_10powL(self): # check for entered button if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [10**i for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inLStr.get(1.0,tk.END))) > 0: self.inLStr.delete(1.0,tk.END) print("cleared") self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, '10 toPOWER of all L ' + str(self.L) + '\n') # set up for chain operation self.x = 0 self.xFlag = True self.inxStr.focus() print("10 toPOWER L") print("10 to power of L is {}".format(self.L)) def do_lnL(self): # check for entered button if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [mt.log(i) for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inLStr.get(1.0,tk.END))) > 0: self.inLStr.delete(1.0,tk.END) print("cleared") self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'ln of all L ' + str(self.L) + '\n') # set up for chain operation self.x = 0 self.xFlag = True self.inxStr.focus() print("ln L") print("ln L is {}".format(self.L)) def do_expL(self): # check for entered button if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [mt.exp(i) for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inLStr.get(1.0,tk.END))) > 0: self.inLStr.delete(1.0,tk.END) print("cleared") self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'EXP L ' + str(self.L) + '\n') # set up for chain operation self.x = 0 self.xFlag = True self.inxStr.focus() print("EXP L") print("exp of L is {}".format(self.L)) def do_xrootL(self): # check for entered button if not self.xFlag: self.arithmeticError() return # check for entered button if not self.xFlag: self.arithmeticError() return if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [(mt.exp(i)/self.x) for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inLStr.get(1.0,tk.END))) > 0: self.inLStr.delete(1.0,tk.END) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'xROOTL ' + str(self.L) + '\n') # set up for chain operation self.x = 0 self.xFlag = True self.inxStr.focus() print("x root L") print("x root of L is {}".format(self.L)) def do_Ldeg2Lrad(self): # check for entered button if not self.Lflag: self.arithmeticError() return # add variables entered together newL = [mt.radians(i) for i in self.L] self.L = newL # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inLStr.get(1.0,tk.END))) > 0: self.inLStr.delete(1.0,tk.END) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'DEG2RAD L ' + str(self.L) + '\n') # set up for chain operation self.x = 0 self.xFlag = True self.inxStr.focus() print('DEG2RAD') print("Deg to Radians of L is {}".format(self.L)) ''' List Statistics Functions ''' def do_LStats(self): self.listFunctKeys.grid_forget() 
self.xyFunctKeys.grid_forget() self.listStatsKeys.grid() def do_meanL(self): if not self.Lflag: self.arithmeticError() return # add variables entered together self.x = st.mean(self.L) # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inLStr.get(1.0,tk.END))) > 0: self.inLStr.delete(1.0,tk.END) if (len(self.inxStr.get())) > 0: self.inxStr.delete(0,tk.END) print("cleared") self.inxStr.insert(tk.INSERT, str(self.x)) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'MEAN of L ' + str(self.x) + '\n') # set up for chain operation self.xFlag = True self.inxStr.focus() print("mean L") print("mean of L is {}".format(self.x)) def do_medianL(self): if not self.Lflag: self.arithmeticError() return # add variables entered together self.x = st.median(self.L) # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inxStr.get())) > 0: self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.x)) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'MEDIAN of L ' + str(self.x) + '\n') # set up for chain operation self.xFlag = True self.inxStr.focus() print("median L") print("median of L is {}".format(self.x)) def do_minL(self): if not self.Lflag: self.arithmeticError() return # add variables entered together self.x = min(self.L) # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inxStr.get())) > 0: self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.x)) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'MIN of L ' + str(self.x) + '\n') # set up for chain operation self.xFlag = True self.inxStr.focus() print("minimum L") print("minimum value of L is {}".format(self.x)) def do_maxL(self): if not self.Lflag: self.arithmeticError() return # add variables entered together self.x = max(self.L) # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inxStr.get())) > 0: self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.x)) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'MAX of L ' + str(self.x) + '\n') # set up for chain operation self.xFlag = True self.inxStr.focus() print("maximum L") print("maximum value of L is {}".format(self.x)) def do_pstdevL(self): if not self.Lflag: self.arithmeticError() return # add variables entered together self.x = st.pstdev(self.L) # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inxStr.get())) > 0: self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.x)) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'PSD of L ' + str(self.x) + '\n') # set up for chain operation self.xFlag = True self.inxStr.focus() print("pop StDev L") print("Pop StDev of L is {}".format(self.x)) def do_countL(self): if not self.Lflag: self.arithmeticError() return # add variables entered together self.x = len(self.L) # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inxStr.get())) > 0: self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.x)) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, 'n of L ' + str(self.x) + '\n') # set up for chain operation self.xFlag = True self.inxStr.focus() print("n L") print("n of L is {}".format(self.x)) def 
do_quartile1L(self): if not self.Lflag: self.arithmeticError() return # add variables entered together self.x = np.percentile(self.L, 25) # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inxStr.get())) > 0: self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.x)) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, '1st QUART of L ' + str(self.x) + '\n') # set up for chain operation self.xFlag = True self.inxStr.focus() print("1st QUART of L") print("1st QUART of L is {}".format(self.x)) def do_quartile3L(self): if not self.Lflag: self.arithmeticError() return # add variables entered together self.x = np.percentile(self.L, 75) # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inxStr.get())) > 0: self.inxStr.delete(0,tk.END) self.inxStr.insert(tk.INSERT, str(self.x)) self.inLStr.insert(tk.INSERT, str(self.L)) self.history.insert(tk.END, '3rd QUART of L ' + str(self.x) + '\n') # set up for chain operation self.xFlag = True self.inxStr.focus() print("3rd QUART of L") print("3rd QUART of L is {}".format(self.x)) def do_svTtestL(self): if not self.Lflag: self.arithmeticError() return # add variables entered together popmean = 0 TP = stats.ttest_1samp(self.L, popmean) print(TP) # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inxStr.get())) > 0: self.inxStr.delete(0,tk.END) print("cleared") self.inxStr.insert(tk.INSERT, str(TP)) self.history.insert(tk.END, 'SVTtest of L ' + str(TP) + '\n') # set up for chain operation self.xFlag = True self.inxStr.focus() print("SVTtestan L") print("SVTtest of L is {}".format(TP)) def do_svZtestL(self): if not self.Lflag: self.arithmeticError() return # add variables entered together popmean = 0 TP = stats.ttest_1samp(self.L, popmean) # log action to history self.history.see(tk.END) # clear register before transferring result there if (len(self.inxStr.get())) > 0: self.inxStr.delete(0,tk.END) print("cleared") self.inxStr.insert(tk.INSERT, str(TP)) self.history.insert(tk.END, 'SVTtest of L ' + str(TP) + '\n') # set up for chain operation self.xFlag = True self.inxStr.focus() print("SVTtestan L") print("SVTtest of L is {}".format(TP)) def do_CI95L(self): CI95 = stats.t.interval(0.95, len(self.L)-1, loc=np.mean(self.L), scale=stats.sem(self.L)) self.history.see(tk.END) # clear register before transferring result there if (len(self.inxStr.get())) > 0: self.inxStr.delete(0,tk.END) print("cleared") self.inxStr.insert(tk.INSERT, str(CI95)) self.history.insert(tk.END, 'SVTtest of L ' + str(CI95) + '\n') # set up for chain operation self.xFlag = True self.inxStr.focus() print("SVTtestan L") print("SVTtest of L is {}".format(CI95)) def do_histL(self): if not self.Lflag: self.arithmeticError() return # add variables entered together mu = st.mean(self.L) sigma = st.pstdev(self.L) # the histogram of the data n, bins, patches = plt.hist(self.L, 10, normed=1, facecolor='green', alpha=0.75) #best fit line y = mlab.normpdf( bins, mu, sigma) l = plt.plot(bins, y, 'r--', linewidth=1) plt.xlabel('Values') plt.ylabel('Probability') plt.title(r'Histogram of L:') plt.axis([min(self.L), max(self.L), 0, 0.5]) plt.grid(True) plt.show() def do_blank(self): # check for entered button if not self.xFlag: self.arithmeticError() return self.history.insert(tk.END, 'NOP \n') self.history.see(tk.END) self.underConstruction() # do something else to (x) print('unused key') # Note 
pad Functions ################################################### ''' Note Pad and History Implementation Functions ''' def do_note(self): # check for entered button # do something else to (x) self.underConstruction() print('note function') def do_clr_notes(self, scr_notes): # clear the calculation history scr_notes.delete(1.0,tk.END) #self.history.insert(tk.END, 'CLEAR HISTORY\n') scr_notes.see(tk.END) print('cleared the notes pad') def do_prt_notes(self, scr_notes): print("\n Notes:\n") print(scr_notes.get(1.0, tk.END) + '\n') # to Console self.history.insert(tk.END, 'PRINT NOTES \n') self.history.see(tk.END) self.notesToDialog() # and show in a dialog def do_log_notes(self): self.history.insert(tk.END, self.scr_notes.get(1.0, tk.END) + '\n') self.history.see(tk.END) def do_save_note(self): notesFile = 'CalcNotes' + '.note' notesFolder = './notes/' if not os.path.exists(notesFolder): os.makedirs(notesFolder, exist_ok = True) openedFile = open(notesFolder + notesFile,"w") openedFile.write(self.scr_notes.get(1.0, tk.END) + '\n') openedFile.close() self.history.insert(tk.END, 'SAVED NOTES \n') self.history.see(tk.END) print("notes save finished") def do_load_note(self): print('Unable to Load notes, not implemented yet') self.underConstruction() # history functions ################## def do_clr_history(self, history): # clear the calculation history history.delete(1.0,tk.END) #self.history.insert(tk.END, 'CLEAR HISTORY\n') history.see(tk.END) print('cleared the history pad') def do_prt_history(self, history): print("\n History:\n") print(self.history.get(1.0, tk.END) + '\n') # to Console self.history.insert(tk.END, 'PRINT NOTES \n') self.history.see(tk.END) self.historyToDialog() # and show in a dialog def do_log_history(self): self.history.insert(tk.END, self.scr_notes.get(1.0, tk.END) + '\n') self.history.see(tk.END) def do_save_history(self): historyFile = 'CalcHistory' + '.hist' historyFolder = './history/' if not os.path.exists(historyFolder): os.makedirs(historyFolder, exist_ok = True) openedFile = open(historyFolder + historyFile,"w") openedFile.write(self.history.get(1.0, tk.END) + '\n') openedFile.close() self.history.insert(tk.END, 'SAVED HISTORY \n') self.history.see(tk.END) print("history save finished") # Menubar functions ''' Menubar Function Implementations ''' def do_toggleList(self): # toggle function flag if not self.Lflag: self.Lflag = True else: self.Lflag = False #now show appropriate flag if self.Lflag: self.xyFunctKeys.grid_forget() self.listFunctKeys.grid() else: self.listFunctKeys.grid_forget() self.listStatsKeys.grid_forget() self.xyFunctKeys.grid() print('switch function keys') def do_setActiveDataset(self): root = self.win def mbox(msg, b1, b2, parent, cbo=False, cboList=[]): msgbox = MessageBox(msg, b1, b2, parent, cbo, cboList) msgbox.root.mainloop() msgbox.root.destroy() return msgbox.returning prompt = {} allowedItems = ['list','dataframe','series','array'] prompt['answer'] = mbox('Select dataset to use', ('OK', 'ok'), ('Cancel', 'cancel'), root, cbo=True, cboList=allowedItems) ans = prompt['answer'] print(ans) if (ans == 'array'): self.active_Dataset = "array" elif (ans == 'dataframe'): self.active_Dataset = "table dfT" elif (ans == 'series'): self.active_Dataset = "series S" elif (ans == 'list'): self.active_Dataset = "List L" else: # list # do stuff print("There is no such dataset in memory, have you loaded it yet?") def getNewList(self): # Ask the user to select a single file name. 
root=self.win # Build a list of tuples for each file type the file dialog should display my_filetypes = [('all files', '.*'), ('text files', '.txt'), ('comma separated', ".csv"), ('MS Excel ', ".xlt")] answer = filedialog.askopenfilename(parent=root, initialdir=os.getcwd(), title="Please select a file:", filetypes=my_filetypes) # reset list L to empty fh = open(answer, 'r') fline = fh.readline() fh.close() numVar = len(fline.split(',')) if (numVar != 1): mBox.showinfo('Variable Count in csv file', 'There are too many dimensions for a single list or series\nLoad as a table instead\nThe number of variables is: {}'.format(numVar)) print("too many dimensions for a single list or series") else: self.L = list() with open(answer, 'r') as csvfile: listreader = csv.reader(csvfile, delimiter=',') for row in listreader: print(row[0]) if row[0].isnumeric(): self.L.append(float(row[0])) self.Lflag = True def convertData(self, conversionData): ''' Method for interconversion of data, List, Series 1, Series 2, dataframe Allows moving data during manipulation and munging. Also, constructs a true bivariate data set. Finally allows new data entry in from keyboard to all variable typea ''' if (conversionData == "LtoS1"): # list into series data pass elif (conversionData == "S1toS2"): # series 1 into series 2 pass elif (conversionData == "S2toL"): # series 1 data into list data pass elif (conversionData == "S1S2todf"): # series data to bivariate data pass # or gradual dataframe enlargement else: #to Array or matrix pass def refresh_DSet(self): self.dataSetStr.grid_forget() currentDSet = self.active_Dataset self.dataSetStr = ttk.Label(self.statsdata, text=currentDSet) self.dataSetStr.grid(column=1,row=0) def loadData(self, active_Dataset): #Build a list of tuples for each file type the file dialog should display dataset = active_Dataset my_filetypes = [('all files', '.*'), ('text files', '.txt'), ('comma separated', ".csv"), ('MS Excel ', ".xlt")] answer = filedialog.askopenfilename(parent=self.win, initialdir=os.getcwd(), title="Please select a file:", filetypes=my_filetypes) with open(answer, 'r') as fh: fline = fh.readline() with open(answer, 'r') as csvfile: sniffer = csv.Sniffer() has_header = sniffer.has_header(csvfile.read(2048)) #check for dimension numVar = len(fline.split(',')) #check for string index #check for header if(has_header): print("header present") else: print("no header present") df = pd.read_csv(answer) if dataset == "table dfT": self.dfT = df elif dataset == "series S1": #check dimension #check for header #check for string index if (numVar == 1): self.S1 = df[df.columns[0]] else: mBox.showinfo('Variable Count in csv file', 'There are too many dimensions for a single list or series\nLoad as a table instead\nThe number of variables is: {}'.format(numVar)) elif dataset == "series S2": #check dimension #check for header #check for string index if (numVar == 1): self.S2 = df[df.columns[0]] else: mBox.showinfo('Variable Count in csv file', 'There are too many dimensions for a single list or series\nLoad as a table instead\nThe number of variables is: {}'.format(numVar)) elif dataset == "List L": #check dimension #check for header #check for string index if(numVar == 1): S = df[df.columns[0]] L = S.tolist() self.L = L else: mBox.showinfo('Variable Count in csv file', 'There are too many dimensions for a single list or series\nLoad as a table instead\nThe number of variables is: {}'.format(numVar)) elif dataset == "array": self.arry = df.values if(True): pass else: print("Can't put mixed datatype 
in an array") else: # if unsure always put it in a pandas dataframe self.dfT = pd.read_csv(answer)
34.246988
192
0.549916
7,907
62,535
4.33034
0.071709
0.039282
0.017523
0.042056
0.808207
0.781513
0.765975
0.749095
0.738551
0.723014
0
0.008707
0.329639
62,535
1,825
193
34.265753
0.808068
0.165667
0
0.639717
0
0.00314
0.083691
0
0.00157
0
0
0
0
1
0.076923
false
0.00471
0.016484
0
0.145212
0.139717
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
3caf4212a67abef07e822000856cb8387ace32d9
22
py
Python
scarlet_extensions/initialization/__init__.py
gcmshadow/scarlet_extensions
49b37166fd648c628fec7aa3adcbb77bc0a45ad4
[ "MIT" ]
5
2018-07-10T12:30:12.000Z
2022-03-30T18:04:17.000Z
scarlet_extensions/initialization/__init__.py
gcmshadow/scarlet_extensions
49b37166fd648c628fec7aa3adcbb77bc0a45ad4
[ "MIT" ]
3
2018-07-03T23:34:42.000Z
2018-07-04T00:52:20.000Z
knoxdata/__init__.py
knoxdata/knoxville-opendata-notebooks
5035b6dcbf02186ddac7bd53c89f592f877384d7
[ "MIT" ]
2
2018-06-29T19:46:30.000Z
2018-07-26T14:04:47.000Z
from .source import *
11
21
0.727273
3
22
5.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.181818
22
1
22
22
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
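The per-file statistics in each record are derived from its content field. The dataset's exact formulas are not documented in this listing, so the following is only an illustrative approximation of how two such signals could be computed; where it can be checked against the one-line record above (content "from .source import *"), alphanum_fraction matches (16/22 = 0.727273), while the record's avg_line_length of 11 (22 bytes over 2 newline-delimited slots) shows the dataset's line accounting differs from the plain splitlines() version sketched here.

```python
# Illustrative approximations of two per-file signals; the dataset's
# canonical definitions evidently differ in details such as newline handling.
def avg_line_length(content: str) -> float:
    lines = content.splitlines()
    return sum(len(line) for line in lines) / len(lines) if lines else 0.0

def alphanum_fraction(content: str) -> float:
    # Alphanumeric characters over all characters, trailing newline included.
    return sum(ch.isalnum() for ch in content) / len(content) if content else 0.0

print(alphanum_fraction("from .source import *\n"))  # 0.7272..., as in the record above
```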
a72b83610582a6be29d959fffae10843d6a7f86c
191
py
Python
rule_surrogate/server/__init__.py
myaooo/rule-surrogate
3f909062eef86419d86a9d8056521e9be519d537
[ "MIT" ]
null
null
null
rule_surrogate/server/__init__.py
myaooo/rule-surrogate
3f909062eef86419d86a9d8056521e9be519d537
[ "MIT" ]
null
null
null
rule_surrogate/server/__init__.py
myaooo/rule-surrogate
3f909062eef86419d86a9d8056521e9be519d537
[ "MIT" ]
null
null
null
from rule_surrogate.server.model_cache import get_model, available_models, get_model_data
from rule_surrogate.server.app import app, HashableList
from rule_surrogate.server.routes import *
31.833333
89
0.858639
28
191
5.571429
0.5
0.153846
0.326923
0.442308
0
0
0
0
0
0
0
0
0.089005
191
5
90
38.2
0.896552
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
5976ee8eded046b809420908fee9db01f74e6cba
195
py
Python
api/v1/session/message.py
anthill-gaming/exec
d3360d71f51ae7d8d5795926df6c904da3f31bc6
[ "MIT" ]
null
null
null
api/v1/session/message.py
anthill-gaming/exec
d3360d71f51ae7d8d5795926df6c904da3f31bc6
[ "MIT" ]
null
null
null
api/v1/session/message.py
anthill-gaming/exec
d3360d71f51ae7d8d5795926df6c904da3f31bc6
[ "MIT" ]
null
null
null
from anthill.platform.api.internal import connector
from functools import partial

from .base import session_api


def message_request():
    return partial(connector.internal_request, 'message')
24.375
57
0.815385
25
195
6.24
0.6
0
0
0
0
0
0
0
0
0
0
0
0.117949
195
7
58
27.857143
0.906977
0
0
0
0
0
0.035897
0
0
0
0
0
0
1
0.2
true
0
0.6
0.2
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
1
1
0
0
6
5994a6fb55a685997355dfe56d9e71977650a679
137
py
Python
pobarajpomosh/auth/__init__.py
MatejMecka/SpeakOut-Backend
5b2cbd35b60ab3aaa15921077173aa8de7aa60b8
[ "Apache-2.0" ]
2
2019-06-12T03:16:11.000Z
2020-05-11T22:45:22.000Z
pobarajpomosh/auth/__init__.py
MatejMecka/SpeakOut-Backend
5b2cbd35b60ab3aaa15921077173aa8de7aa60b8
[ "Apache-2.0" ]
null
null
null
pobarajpomosh/auth/__init__.py
MatejMecka/SpeakOut-Backend
5b2cbd35b60ab3aaa15921077173aa8de7aa60b8
[ "Apache-2.0" ]
null
null
null
from flask import Blueprint

auth_bp = Blueprint('auth', __name__)

from pobarajpomosh.auth import views
import pobarajpomosh.auth.models
22.833333
37
0.824818
18
137
6
0.555556
0.240741
0
0
0
0
0
0
0
0
0
0
0.109489
137
5
38
27.4
0.885246
0
0
0
0
0
0.029197
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
6
59e6b5ce93faf7af1380b63367a7fc7af018c066
24,048
py
Python
models.py
xiaosha007/Saliency-retargeting
3b81d745e71caac470cb00e0b0dc0b45c605bffa
[ "MIT" ]
null
null
null
models.py
xiaosha007/Saliency-retargeting
3b81d745e71caac470cb00e0b0dc0b45c605bffa
[ "MIT" ]
null
null
null
models.py
xiaosha007/Saliency-retargeting
3b81d745e71caac470cb00e0b0dc0b45c605bffa
[ "MIT" ]
null
null
null
import tensorflow as tf import tensorflow_addons as tfa from custom_layers import ReflectionPadding2D, ResizeLayer, SqueezeExciteBlock class DSR_Base(tf.keras.Model): def __init__(self, **kwargs): super(DSR_Base, self).__init__(**kwargs) # Encoder) self.conv1_1 = tf.keras.layers.Conv2D(64, (3,3), padding='same', activation='relu', kernel_initializer='he_normal',input_shape=[192,256,4]) self.conv1_bn1 = tf.keras.layers.BatchNormalization() self.conv1_2 = tf.keras.layers.Conv2D(64, (3,3), padding='same', activation='relu', kernel_initializer='he_normal') self.conv1_bn2 = tf.keras.layers.BatchNormalization() self.maxpool1 = tf.keras.layers.MaxPooling2D(pool_size=(2,2)) self.conv2_1 = tf.keras.layers.Conv2D(128, (3,3), padding='same', activation='relu', kernel_initializer='he_normal') self.conv2_bn1 = tf.keras.layers.BatchNormalization() self.conv2_2 = tf.keras.layers.Conv2D(128, (3,3), padding='same', activation='relu', kernel_initializer='he_normal') self.conv2_bn2 = tf.keras.layers.BatchNormalization() self.maxpool2 = tf.keras.layers.MaxPooling2D(pool_size=(2,2)) self.conv3_1 = tf.keras.layers.Conv2D(256, (3,3), padding='same', activation='relu', kernel_initializer='he_normal') self.conv3_bn1 = tf.keras.layers.BatchNormalization() self.conv3_2 = tf.keras.layers.Conv2D(256, (3,3), padding='same', activation='relu', kernel_initializer='he_normal') self.conv3_bn2 = tf.keras.layers.BatchNormalization() self.maxpool3 = tf.keras.layers.MaxPooling2D(pool_size=(2,2)) self.conv4_1 = tf.keras.layers.Conv2D(512, (3,3), padding='same', activation='relu', kernel_initializer='he_normal') self.conv4_bn1 = tf.keras.layers.BatchNormalization() self.conv4_2 = tf.keras.layers.Conv2D(512, (3,3), padding='same', activation='relu', kernel_initializer='he_normal') self.conv4_bn2 = tf.keras.layers.BatchNormalization() self.maxpool4 = tf.keras.layers.MaxPooling2D(pool_size=(2,2)) # Bottleneck self.diconv1 = tf.keras.layers.Conv2D(256, (3,3), dilation_rate=1, padding='same', activation='relu', kernel_initializer='he_normal') self.diconv1_bn1 = tf.keras.layers.BatchNormalization() self.diconv2 = tf.keras.layers.Conv2D(256, (3,3), dilation_rate=2, padding='same', activation='relu', kernel_initializer='he_normal') self.diconv2_bn1 = tf.keras.layers.BatchNormalization() self.diconv3 = tf.keras.layers.Conv2D(256, (3,3), dilation_rate=4, padding='same', activation='relu', kernel_initializer='he_normal') self.diconv3_bn1 = tf.keras.layers.BatchNormalization() self.diconv4 = tf.keras.layers.Conv2D(256, (3,3), dilation_rate=8, padding='same', activation='relu', kernel_initializer='he_normal') self.diconv4_bn1 = tf.keras.layers.BatchNormalization() self.diconcat = tf.keras.layers.Concatenate(axis=-1) # Decoder self.ups1 = tf.keras.layers.UpSampling2D((2,2)) self.deconv1_1 = tf.keras.layers.Conv2D(512, (2,2), padding='same', activation='relu', kernel_initializer='he_normal') self.deconv1_bn1 = tf.keras.layers.BatchNormalization() self.deconv1_se1 = SqueezeExciteBlock() self.concat1 = tf.keras.layers.Concatenate(axis=-1) self.deconv1_2 = tf.keras.layers.Conv2D(512, (3,3), padding='same', activation='relu', kernel_initializer='he_normal') self.deconv1_bn2 = tf.keras.layers.BatchNormalization() self.deconv1_3 = tf.keras.layers.Conv2D(512, (3,3), padding='same', activation='relu', kernel_initializer='he_normal') self.deconv1_bn3 = tf.keras.layers.BatchNormalization() self.ups2 = tf.keras.layers.UpSampling2D((2,2)) self.deconv2_1 = tf.keras.layers.Conv2D(256, (2,2), padding='same', activation='relu', kernel_initializer='he_normal') 
class DSR_Base(tf.keras.Model):
    def __init__(self, **kwargs):
        super(DSR_Base, self).__init__(**kwargs)
        # Encoder
        self.conv1_1 = tf.keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='he_normal', input_shape=[192, 256, 4])
        self.conv1_bn1 = tf.keras.layers.BatchNormalization()
        self.conv1_2 = tf.keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='he_normal')
        self.conv1_bn2 = tf.keras.layers.BatchNormalization()
        self.maxpool1 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self.conv2_1 = tf.keras.layers.Conv2D(128, (3, 3), padding='same', activation='relu', kernel_initializer='he_normal')
        self.conv2_bn1 = tf.keras.layers.BatchNormalization()
        self.conv2_2 = tf.keras.layers.Conv2D(128, (3, 3), padding='same', activation='relu', kernel_initializer='he_normal')
        self.conv2_bn2 = tf.keras.layers.BatchNormalization()
        self.maxpool2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self.conv3_1 = tf.keras.layers.Conv2D(256, (3, 3), padding='same', activation='relu', kernel_initializer='he_normal')
        self.conv3_bn1 = tf.keras.layers.BatchNormalization()
        self.conv3_2 = tf.keras.layers.Conv2D(256, (3, 3), padding='same', activation='relu', kernel_initializer='he_normal')
        self.conv3_bn2 = tf.keras.layers.BatchNormalization()
        self.maxpool3 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self.conv4_1 = tf.keras.layers.Conv2D(512, (3, 3), padding='same', activation='relu', kernel_initializer='he_normal')
        self.conv4_bn1 = tf.keras.layers.BatchNormalization()
        self.conv4_2 = tf.keras.layers.Conv2D(512, (3, 3), padding='same', activation='relu', kernel_initializer='he_normal')
        self.conv4_bn2 = tf.keras.layers.BatchNormalization()
        self.maxpool4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        # Bottleneck: parallel dilated convolutions with growing receptive fields
        self.diconv1 = tf.keras.layers.Conv2D(256, (3, 3), dilation_rate=1, padding='same', activation='relu', kernel_initializer='he_normal')
        self.diconv1_bn1 = tf.keras.layers.BatchNormalization()
        self.diconv2 = tf.keras.layers.Conv2D(256, (3, 3), dilation_rate=2, padding='same', activation='relu', kernel_initializer='he_normal')
        self.diconv2_bn1 = tf.keras.layers.BatchNormalization()
        self.diconv3 = tf.keras.layers.Conv2D(256, (3, 3), dilation_rate=4, padding='same', activation='relu', kernel_initializer='he_normal')
        self.diconv3_bn1 = tf.keras.layers.BatchNormalization()
        self.diconv4 = tf.keras.layers.Conv2D(256, (3, 3), dilation_rate=8, padding='same', activation='relu', kernel_initializer='he_normal')
        self.diconv4_bn1 = tf.keras.layers.BatchNormalization()
        self.diconcat = tf.keras.layers.Concatenate(axis=-1)
        # Decoder
        self.ups1 = tf.keras.layers.UpSampling2D((2, 2))
        self.deconv1_1 = tf.keras.layers.Conv2D(512, (2, 2), padding='same', activation='relu', kernel_initializer='he_normal')
        self.deconv1_bn1 = tf.keras.layers.BatchNormalization()
        self.deconv1_se1 = SqueezeExciteBlock()
        self.concat1 = tf.keras.layers.Concatenate(axis=-1)
        self.deconv1_2 = tf.keras.layers.Conv2D(512, (3, 3), padding='same', activation='relu', kernel_initializer='he_normal')
        self.deconv1_bn2 = tf.keras.layers.BatchNormalization()
        self.deconv1_3 = tf.keras.layers.Conv2D(512, (3, 3), padding='same', activation='relu', kernel_initializer='he_normal')
        self.deconv1_bn3 = tf.keras.layers.BatchNormalization()
        self.ups2 = tf.keras.layers.UpSampling2D((2, 2))
        self.deconv2_1 = tf.keras.layers.Conv2D(256, (2, 2), padding='same', activation='relu', kernel_initializer='he_normal')
        self.deconv2_bn1 = tf.keras.layers.BatchNormalization()
        self.deconv2_se1 = SqueezeExciteBlock()
        self.concat2 = tf.keras.layers.Concatenate(axis=-1)
        self.deconv2_2 = tf.keras.layers.Conv2D(256, (3, 3), padding='same', activation='relu', kernel_initializer='he_normal')
        self.deconv2_bn2 = tf.keras.layers.BatchNormalization()
        self.deconv2_3 = tf.keras.layers.Conv2D(256, (3, 3), padding='same', activation='relu', kernel_initializer='he_normal')
        self.deconv2_bn3 = tf.keras.layers.BatchNormalization()
        self.ups3 = tf.keras.layers.UpSampling2D((2, 2))
        self.deconv3_1 = tf.keras.layers.Conv2D(128, (2, 2), padding='same', activation='relu', kernel_initializer='he_normal')
        self.deconv3_bn1 = tf.keras.layers.BatchNormalization()
        self.deconv3_se1 = SqueezeExciteBlock()
        self.concat3 = tf.keras.layers.Concatenate(axis=-1)
        self.deconv3_2 = tf.keras.layers.Conv2D(128, (3, 3), padding='same', activation='relu', kernel_initializer='he_normal')
        self.deconv3_bn2 = tf.keras.layers.BatchNormalization()
        self.deconv3_3 = tf.keras.layers.Conv2D(128, (3, 3), padding='same', activation='relu', kernel_initializer='he_normal')
        self.deconv3_bn3 = tf.keras.layers.BatchNormalization()
        self.ups4 = tf.keras.layers.UpSampling2D((2, 2))
        self.deconv4_1 = tf.keras.layers.Conv2D(64, (2, 2), padding='same', activation='relu', kernel_initializer='he_normal')
        self.deconv4_bn1 = tf.keras.layers.BatchNormalization()
        self.deconv4_se1 = SqueezeExciteBlock()
        self.concat4 = tf.keras.layers.Concatenate(axis=-1)
        self.deconv4_2 = tf.keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='he_normal')
        self.deconv4_bn2 = tf.keras.layers.BatchNormalization()
        self.deconv4_3 = tf.keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='he_normal')
        self.deconv4_bn3 = tf.keras.layers.BatchNormalization()
        self.outputs = tf.keras.layers.Conv2D(3, (1, 1), activation='sigmoid')

    def call(self, inputs, training=False, **kwargs):
        # Encoder
        c1_1 = self.conv1_1(inputs)
        c1_bn1 = self.conv1_bn1(c1_1, training=training)
        c1_2 = self.conv1_2(c1_bn1)
        c1_bn2 = self.conv1_bn2(c1_2, training=training)
        mp1 = self.maxpool1(c1_bn2)
        c2_1 = self.conv2_1(mp1)
        c2_bn1 = self.conv2_bn1(c2_1, training=training)
        c2_2 = self.conv2_2(c2_bn1)
        c2_bn2 = self.conv2_bn2(c2_2, training=training)
        mp2 = self.maxpool2(c2_bn2)
        c3_1 = self.conv3_1(mp2)
        c3_bn1 = self.conv3_bn1(c3_1, training=training)
        c3_2 = self.conv3_2(c3_bn1)
        c3_bn2 = self.conv3_bn2(c3_2, training=training)
        mp3 = self.maxpool3(c3_bn2)
        c4_1 = self.conv4_1(mp3)
        c4_bn1 = self.conv4_bn1(c4_1, training=training)
        c4_2 = self.conv4_2(c4_bn1)
        c4_bn2 = self.conv4_bn2(c4_2, training=training)
        mp4 = self.maxpool4(c4_bn2)
        # Bottleneck
        bt1 = self.diconv1(mp4)
        bt1 = self.diconv1_bn1(bt1, training=training)
        bt2 = self.diconv2(mp4)
        bt2 = self.diconv2_bn1(bt2, training=training)
        bt3 = self.diconv3(mp4)
        bt3 = self.diconv3_bn1(bt3, training=training)
        bt4 = self.diconv4(mp4)
        bt4 = self.diconv4_bn1(bt4, training=training)
        btc = self.diconcat([bt1, bt2, bt3, bt4])
        # Decoder with skip connections to the pre-BN encoder activations
        x = self.ups1(btc)
        x = self.deconv1_1(x)
        x = self.deconv1_bn1(x, training=training)
        x = self.deconv1_se1(x)
        x = self.concat1([x, c4_1])
        x = self.deconv1_2(x)
        x = self.deconv1_bn2(x, training=training)
        x = self.deconv1_3(x)
        x = self.deconv1_bn3(x, training=training)
        x = self.ups2(x)
        x = self.deconv2_1(x)
        x = self.deconv2_bn1(x, training=training)
        x = self.deconv2_se1(x)
        x = self.concat2([x, c3_1])
        x = self.deconv2_2(x)
        x = self.deconv2_bn2(x, training=training)
        x = self.deconv2_3(x)
        x = self.deconv2_bn3(x, training=training)
        x = self.ups3(x)
        x = self.deconv3_1(x)
        x = self.deconv3_bn1(x, training=training)
        x = self.deconv3_se1(x)
        x = self.concat3([x, c2_1])
        x = self.deconv3_2(x)
        x = self.deconv3_bn2(x, training=training)
        x = self.deconv3_3(x)
        x = self.deconv3_bn3(x, training=training)
        x = self.ups4(x)
        x = self.deconv4_1(x)
        x = self.deconv4_bn1(x, training=training)
        x = self.deconv4_se1(x)
        x = self.concat4([x, c1_1])
        x = self.deconv4_2(x)
        x = self.deconv4_bn2(x, training=training)
        x = self.deconv4_3(x)
        x = self.deconv4_bn3(x, training=training)
        outputs = self.outputs(x)
        return outputs
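# Why each 'valid' convolution in DSR_Reflect below is followed by a reflection
# pad: a 3x3 conv with dilation rate d shrinks H and W by 2*d, and a d-pixel
# reflection pad restores them; hence ReflectionPadding2D((2, 2)), ((4, 4)) and
# ((8, 8)) for dilation rates 2, 4 and 8 in the bottleneck. A hypothetical
# shape check (callable by hand, not run at import time):
def _reflect_pad_shape_check():
    x = tf.random.uniform([1, 32, 32, 8])
    y = tf.keras.layers.Conv2D(8, (3, 3), padding='valid', dilation_rate=2)(x)
    assert y.shape == (1, 28, 28, 8)  # shrunk by 2*d = 4
    y = tf.pad(y, [[0, 0], [2, 2], [2, 2], [0, 0]], mode='REFLECT')
    assert y.shape == x.shape         # restored to the input size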
class DSR_Reflect(tf.keras.Model):
    def __init__(self, **kwargs):
        super(DSR_Reflect, self).__init__(**kwargs)
        # Encoder: 'valid' convolutions, each followed by a reflection pad
        self.conv1_1 = tf.keras.layers.Conv2D(64, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.conv1_pd1 = ReflectionPadding2D()
        self.conv1_bn1 = tf.keras.layers.BatchNormalization()
        self.conv1_2 = tf.keras.layers.Conv2D(64, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.conv1_pd2 = ReflectionPadding2D()
        self.conv1_bn2 = tf.keras.layers.BatchNormalization()
        self.maxpool1 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self.conv2_1 = tf.keras.layers.Conv2D(128, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.conv2_pd1 = ReflectionPadding2D()
        self.conv2_bn1 = tf.keras.layers.BatchNormalization()
        self.conv2_2 = tf.keras.layers.Conv2D(128, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.conv2_pd2 = ReflectionPadding2D()
        self.conv2_bn2 = tf.keras.layers.BatchNormalization()
        self.maxpool2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self.conv3_1 = tf.keras.layers.Conv2D(256, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.conv3_pd1 = ReflectionPadding2D()
        self.conv3_bn1 = tf.keras.layers.BatchNormalization()
        self.conv3_2 = tf.keras.layers.Conv2D(256, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.conv3_pd2 = ReflectionPadding2D()
        self.conv3_bn2 = tf.keras.layers.BatchNormalization()
        self.maxpool3 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self.conv4_1 = tf.keras.layers.Conv2D(512, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.conv4_pd1 = ReflectionPadding2D()
        self.conv4_bn1 = tf.keras.layers.BatchNormalization()
        self.conv4_2 = tf.keras.layers.Conv2D(512, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.conv4_pd2 = ReflectionPadding2D()
        self.conv4_bn2 = tf.keras.layers.BatchNormalization()
        self.maxpool4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        # Bottleneck: pad size grows with the dilation rate
        self.diconv1 = tf.keras.layers.Conv2D(256, (3, 3), dilation_rate=1, padding='valid', activation='relu', kernel_initializer='he_normal')
        self.diconv1_pd1 = ReflectionPadding2D()
        self.diconv1_bn1 = tf.keras.layers.BatchNormalization()
        self.diconv2 = tf.keras.layers.Conv2D(256, (3, 3), dilation_rate=2, padding='valid', activation='relu', kernel_initializer='he_normal')
        self.diconv2_pd1 = ReflectionPadding2D((2, 2))
        self.diconv2_bn1 = tf.keras.layers.BatchNormalization()
        self.diconv3 = tf.keras.layers.Conv2D(256, (3, 3), dilation_rate=4, padding='valid', activation='relu', kernel_initializer='he_normal')
        self.diconv3_pd1 = ReflectionPadding2D((4, 4))
        self.diconv3_bn1 = tf.keras.layers.BatchNormalization()
        self.diconv4 = tf.keras.layers.Conv2D(256, (3, 3), dilation_rate=8, padding='valid', activation='relu', kernel_initializer='he_normal')
        self.diconv4_pd1 = ReflectionPadding2D((8, 8))
        self.diconv4_bn1 = tf.keras.layers.BatchNormalization()
        self.diconcat = tf.keras.layers.Concatenate(axis=-1)
        # Decoder: shape-matched resizing instead of fixed 2x upsampling
        self.ups1 = ResizeLayer()
        self.deconv1_1 = tf.keras.layers.Conv2D(512, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.deconv1_pd1 = ReflectionPadding2D()
        self.deconv1_bn1 = tf.keras.layers.BatchNormalization()
        self.deconv1_se1 = SqueezeExciteBlock()
        self.concat1 = tf.keras.layers.Concatenate(axis=-1)
        self.deconv1_2 = tf.keras.layers.Conv2D(512, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.deconv1_pd2 = ReflectionPadding2D()
        self.deconv1_bn2 = tf.keras.layers.BatchNormalization()
        self.deconv1_3 = tf.keras.layers.Conv2D(512, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.deconv1_pd3 = ReflectionPadding2D()
        self.deconv1_bn3 = tf.keras.layers.BatchNormalization()
        self.ups2 = ResizeLayer()
        self.deconv2_1 = tf.keras.layers.Conv2D(256, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.deconv2_pd1 = ReflectionPadding2D()
        self.deconv2_bn1 = tf.keras.layers.BatchNormalization()
        self.deconv2_se1 = SqueezeExciteBlock()
        self.concat2 = tf.keras.layers.Concatenate(axis=-1)
        self.deconv2_2 = tf.keras.layers.Conv2D(256, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.deconv2_pd2 = ReflectionPadding2D()
        self.deconv2_bn2 = tf.keras.layers.BatchNormalization()
        self.deconv2_3 = tf.keras.layers.Conv2D(256, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.deconv2_pd3 = ReflectionPadding2D()
        self.deconv2_bn3 = tf.keras.layers.BatchNormalization()
        self.ups3 = ResizeLayer()
        self.deconv3_1 = tf.keras.layers.Conv2D(128, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.deconv3_pd1 = ReflectionPadding2D()
        self.deconv3_bn1 = tf.keras.layers.BatchNormalization()
        self.deconv3_se1 = SqueezeExciteBlock()
        self.concat3 = tf.keras.layers.Concatenate(axis=-1)
        self.deconv3_2 = tf.keras.layers.Conv2D(128, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.deconv3_pd2 = ReflectionPadding2D()
        self.deconv3_bn2 = tf.keras.layers.BatchNormalization()
        self.deconv3_3 = tf.keras.layers.Conv2D(128, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.deconv3_pd3 = ReflectionPadding2D()
        self.deconv3_bn3 = tf.keras.layers.BatchNormalization()
        self.ups4 = ResizeLayer()
        self.deconv4_1 = tf.keras.layers.Conv2D(64, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.deconv4_pd1 = ReflectionPadding2D()
        self.deconv4_bn1 = tf.keras.layers.BatchNormalization()
        self.deconv4_se1 = SqueezeExciteBlock()
        self.concat4 = tf.keras.layers.Concatenate(axis=-1)
        self.deconv4_2 = tf.keras.layers.Conv2D(64, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.deconv4_pd2 = ReflectionPadding2D()
        self.deconv4_bn2 = tf.keras.layers.BatchNormalization()
        self.deconv4_3 = tf.keras.layers.Conv2D(64, (3, 3), padding='valid', activation='relu', kernel_initializer='he_normal')
        self.deconv4_pd3 = ReflectionPadding2D()
        self.deconv4_bn3 = tf.keras.layers.BatchNormalization()
        self.outputs = tf.keras.layers.Conv2D(3, (1, 1), activation='sigmoid')

    def call(self, inputs, training=False, **kwargs):
        # Encoder
        c1_1 = self.conv1_1(inputs)
        c1_pd1 = self.conv1_pd1(c1_1)
        c1_bn1 = self.conv1_bn1(c1_pd1, training=training)
        c1_2 = self.conv1_2(c1_bn1)
        c1_pd2 = self.conv1_pd2(c1_2)
        c1_bn2 = self.conv1_bn2(c1_pd2, training=training)
        mp1 = self.maxpool1(c1_bn2)
        c2_1 = self.conv2_1(mp1)
        c2_pd1 = self.conv2_pd1(c2_1)
        c2_bn1 = self.conv2_bn1(c2_pd1, training=training)
        c2_2 = self.conv2_2(c2_bn1)
        c2_pd2 = self.conv2_pd2(c2_2)
        c2_bn2 = self.conv2_bn2(c2_pd2, training=training)
        mp2 = self.maxpool2(c2_bn2)
        c3_1 = self.conv3_1(mp2)
        c3_pd1 = self.conv3_pd1(c3_1)
        c3_bn1 = self.conv3_bn1(c3_pd1, training=training)
        c3_2 = self.conv3_2(c3_bn1)
        c3_pd2 = self.conv3_pd2(c3_2)
        c3_bn2 = self.conv3_bn2(c3_pd2, training=training)
        mp3 = self.maxpool3(c3_bn2)
        c4_1 = self.conv4_1(mp3)
        c4_pd1 = self.conv4_pd1(c4_1)
        c4_bn1 = self.conv4_bn1(c4_pd1, training=training)
        c4_2 = self.conv4_2(c4_bn1)
        c4_pd2 = self.conv4_pd2(c4_2)
        c4_bn2 = self.conv4_bn2(c4_pd2, training=training)
        mp4 = self.maxpool4(c4_bn2)
        # Bottleneck
        bt1 = self.diconv1(mp4)
        bt1 = self.diconv1_pd1(bt1)
        bt1 = self.diconv1_bn1(bt1, training=training)
        bt2 = self.diconv2(mp4)
        bt2 = self.diconv2_pd1(bt2)
        bt2 = self.diconv2_bn1(bt2, training=training)
        bt3 = self.diconv3(mp4)
        bt3 = self.diconv3_pd1(bt3)
        bt3 = self.diconv3_bn1(bt3, training=training)
        bt4 = self.diconv4(mp4)
        bt4 = self.diconv4_pd1(bt4)
        bt4 = self.diconv4_bn1(bt4, training=training)
        btc = self.diconcat([bt1, bt2, bt3, bt4])
        # Decoder: resize to each skip connection's spatial shape
        x = self.ups1(btc, c4_pd1.shape[1:-1])
        x = self.deconv1_1(x)
        x = self.deconv1_pd1(x)
        x = self.deconv1_bn1(x, training=training)
        x = self.deconv1_se1(x)
        x = self.concat1([x, c4_pd1])
        x = self.deconv1_2(x)
        x = self.deconv1_pd2(x)
        x = self.deconv1_bn2(x, training=training)
        x = self.deconv1_3(x)
        x = self.deconv1_pd3(x)
        x = self.deconv1_bn3(x, training=training)
        x = self.ups2(x, c3_pd1.shape[1:-1])
        x = self.deconv2_1(x)
        x = self.deconv2_pd1(x)
        x = self.deconv2_bn1(x, training=training)
        x = self.deconv2_se1(x)
        x = self.concat2([x, c3_pd1])
        x = self.deconv2_2(x)
        x = self.deconv2_pd2(x)
        x = self.deconv2_bn2(x, training=training)
        x = self.deconv2_3(x)
        x = self.deconv2_pd3(x)
        x = self.deconv2_bn3(x, training=training)
        x = self.ups3(x, c2_pd1.shape[1:-1])
        x = self.deconv3_1(x)
        x = self.deconv3_pd1(x)
        x = self.deconv3_bn1(x, training=training)
        x = self.deconv3_se1(x)
        x = self.concat3([x, c2_pd1])
        x = self.deconv3_2(x)
        x = self.deconv3_pd2(x)
        x = self.deconv3_bn2(x, training=training)
        x = self.deconv3_3(x)
        x = self.deconv3_pd3(x)
        x = self.deconv3_bn3(x, training=training)
        x = self.ups4(x, c1_pd1.shape[1:-1])
        x = self.deconv4_1(x)
        x = self.deconv4_pd1(x)
        x = self.deconv4_bn1(x, training=training)
        x = self.deconv4_se1(x)
        x = self.concat4([x, c1_pd1])
        x = self.deconv4_2(x)
        x = self.deconv4_pd2(x)
        x = self.deconv4_bn2(x, training=training)
        x = self.deconv4_3(x)
        x = self.deconv4_pd3(x)
        x = self.deconv4_bn3(x, training=training)
        outputs = self.outputs(x)
        return outputs
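# Discriminator_wgan below mirrors Discriminator but drops the tanh/sigmoid
# activations: a WGAN critic must emit an unbounded score rather than a
# probability. A minimal sketch of the standard WGAN losses such a critic
# would pair with (an assumption about intended usage; the training loop is
# not part of this file):
def wgan_critic_loss(real_scores, fake_scores):
    # The critic widens the score gap between real and fake samples;
    # minimising this expression achieves exactly that.
    return tf.reduce_mean(fake_scores) - tf.reduce_mean(real_scores)


def wgan_generator_loss(fake_scores):
    # The generator tries to raise the critic's score on its own outputs.
    return -tf.reduce_mean(fake_scores)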
class Discriminator(tf.keras.Model):
    def __init__(self, **kwargs):
        super(Discriminator, self).__init__(**kwargs)
        self.conv1 = tf.keras.layers.Conv2D(32, (3, 3), padding='same', input_shape=[192, 256, 3])
        self.conv1_lr = tf.keras.layers.LeakyReLU(0.2)
        self.conv1_bn = tf.keras.layers.BatchNormalization()
        self.conv1_mp = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self.conv2 = tf.keras.layers.Conv2D(64, (3, 3), padding='same')
        self.conv2_lr = tf.keras.layers.LeakyReLU(0.2)
        self.conv2_bn = tf.keras.layers.BatchNormalization()
        self.conv2_mp = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self.conv3 = tf.keras.layers.Conv2D(64, (3, 3), padding='same')
        self.conv3_lr = tf.keras.layers.LeakyReLU(0.2)
        self.conv3_bn = tf.keras.layers.BatchNormalization()
        self.conv3_mp = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self.conv4 = tf.keras.layers.Conv2D(128, (3, 3), padding='same')
        self.conv4_lr = tf.keras.layers.LeakyReLU(0.2)
        self.conv4_bn = tf.keras.layers.BatchNormalization()
        self.conv4_mp = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self.flatten = tf.keras.layers.Flatten()
        self.dense1 = tf.keras.layers.Dense(100, activation="tanh")
        self.dense2 = tf.keras.layers.Dense(2, activation="tanh")
        self.outputs = tf.keras.layers.Dense(1, activation="sigmoid")

    def call(self, inputs, training=False, **kwargs):
        x = self.conv1(inputs)
        x = self.conv1_lr(x)
        x = self.conv1_bn(x, training=training)
        x = self.conv1_mp(x)
        x = self.conv2(x)
        x = self.conv2_lr(x)
        x = self.conv2_bn(x, training=training)
        x = self.conv2_mp(x)
        x = self.conv3(x)
        x = self.conv3_lr(x)
        x = self.conv3_bn(x, training=training)
        x = self.conv3_mp(x)
        x = self.conv4(x)
        x = self.conv4_lr(x)
        x = self.conv4_bn(x, training=training)
        x = self.conv4_mp(x)
        x = self.flatten(x)
        x = self.dense1(x)
        x = self.dense2(x)
        outputs = self.outputs(x)
        return outputs


class Discriminator_wgan(tf.keras.Model):
    def __init__(self, **kwargs):
        super(Discriminator_wgan, self).__init__(**kwargs)
        self.conv1 = tf.keras.layers.Conv2D(32, (3, 3), padding='same', input_shape=[192, 256, 3])
        self.conv1_lr = tf.keras.layers.LeakyReLU(0.2)
        self.conv1_bn = tf.keras.layers.BatchNormalization()
        self.conv1_mp = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self.conv2 = tf.keras.layers.Conv2D(64, (3, 3), padding='same')
        self.conv2_lr = tf.keras.layers.LeakyReLU(0.2)
        self.conv2_bn = tf.keras.layers.BatchNormalization()
        self.conv2_mp = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self.conv3 = tf.keras.layers.Conv2D(64, (3, 3), padding='same')
        self.conv3_lr = tf.keras.layers.LeakyReLU(0.2)
        self.conv3_bn = tf.keras.layers.BatchNormalization()
        self.conv3_mp = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self.conv4 = tf.keras.layers.Conv2D(64, (3, 3), padding='same')
        self.conv4_lr = tf.keras.layers.LeakyReLU(0.2)
        self.conv4_bn = tf.keras.layers.BatchNormalization()
        self.conv4_mp = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
        self.flatten = tf.keras.layers.Flatten()
        self.dense1 = tf.keras.layers.Dense(100)
        self.dense2 = tf.keras.layers.Dense(2)
        self.outputs = tf.keras.layers.Dense(1)

    def call(self, inputs, training=False, **kwargs):
        x = self.conv1(inputs)
        x = self.conv1_lr(x)
        x = self.conv1_bn(x, training=training)
        x = self.conv1_mp(x)
        x = self.conv2(x)
        x = self.conv2_lr(x)
        x = self.conv2_bn(x, training=training)
        x = self.conv2_mp(x)
        x = self.conv3(x)
        x = self.conv3_lr(x)
        x = self.conv3_bn(x, training=training)
        x = self.conv3_mp(x)
        x = self.conv4(x)
        x = self.conv4_lr(x)
        x = self.conv4_bn(x, training=training)
        x = self.conv4_mp(x)
        x = self.flatten(x)
        x = self.dense1(x)
        x = self.dense2(x)
        outputs = self.outputs(x)
        return outputs
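# Hypothetical smoke test for the models above; the shapes follow the
# input_shape hints in the constructors ([192, 256, 4] in, 3 sigmoid
# channels out). Four 2x poolings give a 12x16 bottleneck, and four 2x
# upsamplings restore the original 192x256 resolution.
if __name__ == '__main__':
    generator = DSR_Base()
    critic = Discriminator()
    rgbd = tf.random.uniform([1, 192, 256, 4])  # e.g. an RGB + depth input
    fake = generator(rgbd, training=False)      # -> (1, 192, 256, 3)
    score = critic(fake, training=False)        # -> (1, 1)
    print(fake.shape, score.shape)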
47.904382
149
0.642673
3,235
24,048
4.609583
0.038331
0.076985
0.139485
0.0739
0.925899
0.918924
0.896929
0.869367
0.832484
0.780378
0
0.071972
0.218272
24,048
501
150
48
0.721262
0.004491
0
0.607229
0
0
0.037656
0
0
0
0
0
0
1
0.019277
false
0
0.007229
0
0.045783
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
abb1c00839feac6bfac896eb4e4751dfdc6a37c8
25,839
py
Python
sdss_catl_utils/models/tests/test_download_manager.py
vcalderon2009/sdss_catl_utils
9bfa3ae062112535aca18967fb5896c29173e3b0
[ "BSD-3-Clause" ]
null
null
null
sdss_catl_utils/models/tests/test_download_manager.py
vcalderon2009/sdss_catl_utils
9bfa3ae062112535aca18967fb5896c29173e3b0
[ "BSD-3-Clause" ]
null
null
null
sdss_catl_utils/models/tests/test_download_manager.py
vcalderon2009/sdss_catl_utils
9bfa3ae062112535aca18967fb5896c29173e3b0
[ "BSD-3-Clause" ]
null
null
null
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Victor Calderon
# Created      : 2018-12-24
# Last Modified: 2018-12-24
# Vanderbilt University
from __future__ import (absolute_import, division, print_function)
__author__     = ['Victor Calderon']
__copyright__  = ["Copyright 2018 Victor Calderon, 2018"]
__email__      = ['victor.calderon@vanderbilt.edu']
__maintainer__ = ['Victor Calderon']
"""
Set of test functions for the `download_manager` functions
"""

import numpy as np
import pytest

from sdss_catl_utils.models.catl_models import DownloadManager
from sdss_catl_utils.custom_exceptions import SDSSCatlUtils_Error

## Functions

#########-------------------------------------------------------------#########
#########-------------------------------------------------------------#########

#### ------------------- Test `DownloadManager` function - Types ----------- ##

catl_kind_arr    = ['data', 'mocks']
hod_n_arr        = [1, 2]
halotype_arr     = ['so', 'fof']
clf_method_arr   = [1, 2, 3]
clf_seed_arr     = [1, 4]
dv_arr           = np.arange(0.5, 2.0, 1.)
sample_arr       = ['19', '20', '21']
type_am_arr      = ['mr', 'mstar']
cosmo_choice_arr = ['Planck', 'LasDamas']
perf_opt_arr     = [True, False]
remove_files_arr = [True, False]
environ_name_arr = ['Env1']
sigma_clf_c_arr  = [0.1, 0.2, 0.3]
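# The type-checking test below stacks one `pytest.mark.parametrize` decorator
# per argument; stacked decorators combine as a cartesian product, so the test
# runs once for every combination of the arrays above. A minimal illustration
# of that pytest behaviour (hypothetical test, unrelated to DownloadManager):
@pytest.mark.parametrize('a', [1, 2])
@pytest.mark.parametrize('b', ['x', 'y'])
def test_parametrize_cartesian_product(a, b):
    # Collected four times: (1, 'x'), (2, 'x'), (1, 'y'), (2, 'y')
    assert a in (1, 2) and b in ('x', 'y')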
@pytest.mark.parametrize('catl_kind', catl_kind_arr)
@pytest.mark.parametrize('hod_n', hod_n_arr)
@pytest.mark.parametrize('halotype', halotype_arr)
@pytest.mark.parametrize('clf_method', clf_method_arr)
@pytest.mark.parametrize('clf_seed', clf_seed_arr)
@pytest.mark.parametrize('dv', dv_arr)
@pytest.mark.parametrize('sigma_clf_c', sigma_clf_c_arr)
@pytest.mark.parametrize('sample', sample_arr)
@pytest.mark.parametrize('type_am', type_am_arr)
@pytest.mark.parametrize('cosmo_choice', cosmo_choice_arr)
@pytest.mark.parametrize('perf_opt', perf_opt_arr)
@pytest.mark.parametrize('remove_files', remove_files_arr)
@pytest.mark.parametrize('environ_name', environ_name_arr)
def test_DownloadManager_inputs_types(catl_kind, hod_n, halotype, clf_method,
        clf_seed, dv, sample, type_am, cosmo_choice, perf_opt, remove_files,
        environ_name, sigma_clf_c):
    """
    Checks the function
    `~sdss_catl_utils.mocks_manager.download_manager.DownloadManager`
    for input parameters.

    Parameters
    ------------
    catl_kind : {``data``, ``mocks``} `str`
        Kind of catalogues to download. This variable is set to ``mocks``
        by default.

        Options:
            - ``data``: Downloads the SDSS DR7 real catalogues.
            - ``mocks``: Downloads the synthetic catalogues of SDSS DR7.

    hod_n : `int`, optional
        Number of the HOD model to use. This value is set to `0` by default.

    halotype : {'so', 'fof'}, `str`, optional
        Type of dark matter definition to use. This value is set to ``so``
        by default.

        Options:
            - ``so``: Spherical Overdensity halo definition.
            - ``fof``: Friends-of-Friends halo definition.

    clf_method : {1, 2, 3}, `int`, optional
        Method for assigning galaxy properties to mock galaxies.
        This variable dictates how galaxies are assigned luminosities or
        stellar masses based on their galaxy type and host halo's mass.
        This variable is set to ``1`` by default.

        Options:
            - ``1``: Independent assignment of (g-r) colour, sersic, and
              specific star formation rate (`logssfr`).
            - ``2``: (g-r) colour dictates active/passive designation and
              draws values independently.
            - ``3``: (g-r) colour dictates active/passive designation, and
              assigns other galaxy properties for that given galaxy.

    clf_seed : `int`, optional
        Value of the random seed used for the conditional luminosity
        function. This variable is set to ``1235`` by default.

    dv : `float`, optional
        Value for the ``velocity bias`` parameter. It is the difference
        between the galaxy and matter velocity profiles.

        .. math::
            dv = \\frac{v_{g} - v_{c}}{v_{m} - v_{c}}

        where :math:`v_g` is the galaxy's velocity; :math:`v_m`, the
        matter velocity.

    sigma_clf_c : `float`, optional
        Value of the scatter in log(L) for central galaxies, when being
        assigned during the `conditional luminosity function` (CLF).
        This variable is set to ``0.1417`` by default.

    sample : {'19', '20', '21'}, `str`, optional
        Luminosity of the SDSS volume-limited sample to analyze.
        This variable is set to ``'19'`` by default.

        Options:
            - ``'19'``: :math:`M_r = 19` volume-limited sample
            - ``'20'``: :math:`M_r = 20` volume-limited sample
            - ``'21'``: :math:`M_r = 21` volume-limited sample

    type_am : {'mr', 'mstar'}, `str`, optional
        Type of Abundance matching used in the catalogue. This variable
        is set to ``'mr'`` by default.

        Options:
            - ``'mr'``: Luminosity-based abundance matching used.
            - ``'mstar'``: Stellar-mass-based abundance matching used.

    cosmo_choice : {``'LasDamas'``, ``'Planck'``} `str`, optional
        Choice of cosmology to use. This variable is set to ``LasDamas``
        by default.

        Options:
            - ``LasDamas``: Uses the cosmological parameters from the
              `LasDamas <http://lss.phy.vanderbilt.edu/lasdamas/simulations.html>`_
              simulations.
            - ``Planck``: Uses the Planck 2015 cosmology.

    perf_opt : `bool`, optional
        If `True`, it chooses to analyze the ``perfect`` version of the
        synthetic galaxy/group galaxy catalogues. Otherwise, it downloads
        the catalogues with group-finding errors included. This variable
        is set to ``False`` by default.

    environ_name : `str`
        Name of the environment variable to assign to ``outdir``.
        This variable is set to the default ``environ_name`` from
        `~sdss_catl_utils.mocks_manager.mocks_default`
    """
    # Creating dictionary
    input_dict = {
        'catl_kind': catl_kind,
        'hod_n': hod_n,
        'halotype': halotype,
        'clf_method': clf_method,
        'clf_seed': clf_seed,
        'dv': dv,
        'sigma_clf_c': sigma_clf_c,
        'sample': sample,
        'type_am': type_am,
        'cosmo_choice': cosmo_choice,
        'perf_opt': perf_opt,
        'remove_files': remove_files,
        'environ_name': environ_name}
    ## Running function
    obj_ii = DownloadManager(**input_dict)

#### ------------- Test `DownloadManager` function - Error - Types --------- ##

input_arr_type = [
    ('data', 1, 'so', 1, 1, 0.6, '19', 'mr', 'Planck', True, True, 1, 0.1),
    ('data', 1, 'so', 1, 1, 0.6, '19', 'mr', 'Planck', True, 'str', 'a', 0.1),
    ('data', 1, 'so', 1, 1, 0.6, '19', 'mr', 'Planck', 'str', True, 'a', 0.1),
    ('data', 1, 'so', 1, 1, 0.6, '19', 'mr', 123, True, True, 'a', 0.1),
    ('data', 1, 'so', 1, 1, 0.6, '19', 1000, 'Planck', True, True, 'a', 0.1),
    ('data', 1, 'so', 1, 1, 0.6, 1, 'mr', 'Planck', True, True, 'a', 0.1),
    ('data', 1, 'so', 1, 1, 'test', '19', 'mr', 'Planck', True, True, 'a', 0.1),
    ('data', 1, 'so', 1, '1', 0.6, '19', 'mr', 'Planck', True, True, 'a', 0.1),
    ('data', 1, 'so', '1', 1, 0.6, '19', 'mr', 'Planck', True, True, 'a', 0.1),
    ('data', 1, 10, 1, 1, 0.6, '19', 'mr', 'Planck', True, True, 'a', 0.1),
    ('data', '2', 'so', 1, 1, 0.6, '19', 'mr', 'Planck', True, True, 'a', 0.1),
    (32, 1, 'so', 1, 1, 0.6, '19', 'mr', 'Planck', True, True, 'a', 0.1),
    ('data', 1, 'so', 1, 1, 0.6, '19', 'mr', 'Planck', True, True, 'a', 'sig')]
input_str_type  = 'catl_kind, hod_n, halotype, clf_method, clf_seed, dv, sample, '
input_str_type += 'type_am, cosmo_choice, perf_opt, remove_files, environ_name, '
input_str_type += 'sigma_clf_c'


@pytest.mark.parametrize(input_str_type, input_arr_type)
def test_DownloadManager_inputs_err_type(catl_kind, hod_n, halotype,
        clf_method, clf_seed, dv, sample, type_am, cosmo_choice, perf_opt,
        remove_files, environ_name, sigma_clf_c):
    """
    Checks that
    `~sdss_catl_utils.mocks_manager.download_manager.DownloadManager`
    raises a `TypeError` when one of its input parameters has an invalid
    type. The parameters are the same as in
    `test_DownloadManager_inputs_types` above.
    """
    # Creating dictionary
    input_dict = {
        'catl_kind': catl_kind,
        'hod_n': hod_n,
        'halotype': halotype,
        'clf_method': clf_method,
        'clf_seed': clf_seed,
        'dv': dv,
        'sigma_clf_c': sigma_clf_c,
        'sample': sample,
        'type_am': type_am,
        'cosmo_choice': cosmo_choice,
        'perf_opt': perf_opt,
        'remove_files': remove_files,
        'environ_name': environ_name}
    ## Running function
    with pytest.raises(TypeError):
        obj_ii = DownloadManager(**input_dict)

#### ------------- Test `DownloadManager` function - Error - Values -------- ##

input_arr_vals = [
    ('data', 1, 'so', 1, 1, 0.6, '19', 'mr', 'LasDamas1', True, True, 'a', 0.1),
    ('data', 1, 'so', 1, 1, 0.6, '19', 'mr', 'Planck1', True, True, 'a', 0.1),
    ('data', 1, 'so', 1, 1, 0.6, '19', 'mr2', 'Planck', True, True, 'a', 0.1),
    ('data', 1, 'so', 1, 1, 0.6, '19', 'mstar2', 'Planck', True, True, 'a', 0.1),
    ('data', 1, 'so', 1, 1, 0.6, '191', 'mr', 'Planck', True, True, 'a', 0.1),
    ('data', 1, 'so', 1, 1, 0.6, 'a', 'mr', 'Planck', True, True, 'a', 0.1),
    ('data', 1, 'so', 0, 1, 0.6, '19', 'mr', 'Planck', True, True, 'a', 0.1),
    ('data', 1, 'so', 5, 1, 0.6, '19', 'mr', 'Planck', True, True, 'a', 0.1),
    ('data', 1, 'so', 4, 1, 0.6, '19', 'mr', 'Planck', True, True, 'a', 0.1),
    ('data', 1, 'so1', 1, 1, 0.6, '19', 'mr', 'Planck', True, True, 'a', 0.1),
    ('data', 1, 'fof2', 1, 1, 0.6, '19', 'mr', 'Planck', True, True, 'a', 0.1),
    ('data', 12, 'so', 1, 1, 0.6, '19', 'mr', 'Planck', True, True, 'a', 0.1),
    ('data', 98, 'so', 1, 1, 0.6, '19', 'mr', 'Planck', True, True, 'a', 0.1),
    ('data_1', 1, 'so', 1, 1, 0.6, '19', 'mr', 'Planck', True, True, 'a', 0.1),
    ('mocks2', 1, 'so', 1, 1, 0.6, '19', 'mr', 'Planck', True, True, 'a', 0.1)]
input_str_vals  = 'catl_kind, hod_n, halotype, clf_method, clf_seed, dv, sample, '
input_str_vals += 'type_am, cosmo_choice, perf_opt, remove_files, environ_name, '
input_str_vals += 'sigma_clf_c'


@pytest.mark.parametrize(input_str_vals, input_arr_vals)
def test_DownloadManager_inputs_err_vals(catl_kind, hod_n, halotype,
        clf_method, clf_seed, dv, sample, type_am, cosmo_choice, perf_opt,
        remove_files, environ_name, sigma_clf_c):
    """
    Checks that
    `~sdss_catl_utils.mocks_manager.download_manager.DownloadManager`
    raises a `ValueError` when an input parameter has the right type but
    an invalid value. The parameters are the same as in
    `test_DownloadManager_inputs_types` above.
    """
    # Creating dictionary
    input_dict = {
        'catl_kind': catl_kind,
        'hod_n': hod_n,
        'halotype': halotype,
        'clf_method': clf_method,
        'clf_seed': clf_seed,
        'dv': dv,
        'sigma_clf_c': sigma_clf_c,
        'sample': sample,
        'type_am': type_am,
        'cosmo_choice': cosmo_choice,
        'perf_opt': perf_opt,
        'remove_files': remove_files,
        'environ_name': environ_name}
    ## Running function
    with pytest.raises(ValueError):
        obj_ii = DownloadManager(**input_dict)

#### ------------ Test `DownloadManager` function - _catl_prefix ----------- ##

prefix_arr = [
    ('data', 0, 'fof', 1, 12, 0.1417, 1.0, '19', 'mr', False, 'memb',
        'data/mr/Mr19/member_galaxy_catalogues'),
    ('data', 0, 'fof', 1, 12, 0.1, 1.0, '20', 'mr', False, 'memb',
        'data/mr/Mr20/member_galaxy_catalogues'),
    ('data', 0, 'fof', 1, 12, 0.1, 1.0, '21', 'mr', False, 'memb',
        'data/mr/Mr21/member_galaxy_catalogues'),
    ('mocks', 0, 'fof', 1, 12, 0.25, 1.0, '19', 'mr', False, 'memb',
        'mocks/halos_fof/dv_1.0/hod_model_0/clf_seed_12/clf_method_1/sigma_c_0.25/mr/Mr19/member_galaxy_catalogues'),
    ('mocks', 0, 'so', 1, 12, 0.1, 1.0, '19', 'mr', False, 'memb',
        'mocks/halos_so/dv_1.0/hod_model_0/clf_seed_12/clf_method_1/sigma_c_0.1/mr/Mr19/member_galaxy_catalogues'),
    ('mocks', 0, 'so', 1, 12, 0.1, 1.0, '19', 'mr', True, 'memb',
        'mocks/halos_so/dv_1.0/hod_model_0/clf_seed_12/clf_method_1/sigma_c_0.1/mr/Mr19/perfect_member_galaxy_catalogues'),
    ('mocks', 0, 'so', 1, 400, 0.1, 1.0, '19', 'mr', True, 'memb',
        'mocks/halos_so/dv_1.0/hod_model_0/clf_seed_400/clf_method_1/sigma_c_0.1/mr/Mr19/perfect_member_galaxy_catalogues'),
    ('mocks', 0, 'so', 1, 400, 0.1, 1.0, '19', 'mr', False, 'group',
        'mocks/halos_so/dv_1.0/hod_model_0/clf_seed_400/clf_method_1/sigma_c_0.1/mr/Mr19/group_galaxy_catalogues'),
    ('mocks', 1, 'so', 1, 400, 0.1, 1.05, '21', 'mstar', False, 'group',
        'mocks/halos_so/dv_1.05/hod_model_1/clf_seed_400/clf_method_1/sigma_c_0.1/mstar/Mr21/group_galaxy_catalogues'),
    ('mocks', 1, 'so', 1, 400, 0.1, 1.05, '21', 'mstar', False, 'gal',
        'mocks/halos_so/dv_1.05/hod_model_1/clf_seed_400/clf_method_1/sigma_c_0.1/mstar/Mr21/galaxy_catalogues'),
    ('mocks', 1, 'so', 1, 400, 0.1, 1.05, '21', 'mstar', True, 'gal',
        'mocks/halos_so/dv_1.05/hod_model_1/clf_seed_400/clf_method_1/sigma_c_0.1/mstar/Mr21/galaxy_catalogues'),
    ('mocks', 1, 'so', 1, 400, 0.1, 1.05, '21', 'mstar', True, 'group',
        'mocks/halos_so/dv_1.05/hod_model_1/clf_seed_400/clf_method_1/sigma_c_0.1/mstar/Mr21/perfect_group_galaxy_catalogues'),
    ('mocks', 1, 'so', 1, 400, 0.1, 1.25, '20', 'mstar', True, 'memb',
        'mocks/halos_so/dv_1.25/hod_model_1/clf_seed_400/clf_method_1/sigma_c_0.1/mstar/Mr20/perfect_member_galaxy_catalogues')]
prefix_str  = 'catl_kind, hod_n, halotype, clf_method, clf_seed, sigma_clf_c, '
prefix_str += 'dv, sample, type_am, perf_opt, catl_type, expected'


@pytest.mark.parametrize(prefix_str, prefix_arr)
def test_DownloadManager_catl_prefix(catl_kind, hod_n, halotype, clf_method,
        clf_seed, sigma_clf_c, dv, sample, type_am, perf_opt, catl_type,
        expected):
    """
    Checks the catalogue prefix strings produced by
    `~sdss_catl_utils.mocks_manager.download_manager.DownloadManager`.
    The parameters are the same as in `test_DownloadManager_inputs_types`
    above, plus ``catl_type`` (the catalogue type: 'memb', 'group' or
    'gal') and ``expected`` (the expected prefix path).
    """
    # Creating dictionary
    input_dict = {
        'catl_kind': catl_kind,
        'hod_n': hod_n,
        'halotype': halotype,
        'clf_method': clf_method,
        'clf_seed': clf_seed,
        'dv': dv,
        'sigma_clf_c': sigma_clf_c,
        'sample': sample,
        'type_am': type_am,
        'perf_opt': perf_opt}
    ## Initializing object
    download_obj = DownloadManager(**input_dict)
    # Catalogue prefix
    download_prefix = download_obj._catl_prefix(catl_type=catl_type,
        catl_kind=catl_kind, perf_opt=perf_opt)
    # Checking that strings are equal
    assert(download_prefix == expected)
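# A single worked example of the prefix scheme exercised above. It mirrors the
# fifth row of `prefix_arr`, so it should hold wherever that test passes:
def test_DownloadManager_catl_prefix_example():
    manager = DownloadManager(catl_kind='mocks', hod_n=0, halotype='so',
        clf_method=1, clf_seed=12, dv=1.0, sigma_clf_c=0.1, sample='19',
        type_am='mr', perf_opt=False)
    prefix = manager._catl_prefix(catl_type='memb', catl_kind='mocks',
        perf_opt=False)
    assert prefix == ('mocks/halos_so/dv_1.0/hod_model_0/clf_seed_12/'
        'clf_method_1/sigma_c_0.1/mr/Mr19/member_galaxy_catalogues')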
45.81383
191
0.59356
3,480
25,839
4.249425
0.077299
0.006762
0.019881
0.039086
0.877739
0.864552
0.861509
0.859278
0.848323
0.834055
0
0.039019
0.255118
25,839
563
192
45.895204
0.729308
0.556755
0
0.354037
0
0.062112
0.288179
0.119787
0
0
0
0
0.006211
1
0.024845
false
0
0.031056
0
0.055901
0.006211
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
abba9b8544636bcc8ba6cfbf7fa5de98fce57410
60,893
py
Python
burpui/api/admin.py
PaliPalo/burp-ui
affbed705f5b35a630ca1a96c01e6dea1bfbeddb
[ "BSD-3-Clause" ]
93
2015-02-10T16:01:46.000Z
2021-12-02T21:21:42.000Z
burpui/api/admin.py
PaliPalo/burp-ui
affbed705f5b35a630ca1a96c01e6dea1bfbeddb
[ "BSD-3-Clause" ]
5
2015-12-18T19:34:46.000Z
2021-09-17T14:18:10.000Z
burpui/api/admin.py
PaliPalo/burp-ui
affbed705f5b35a630ca1a96c01e6dea1bfbeddb
[ "BSD-3-Clause" ]
17
2015-09-21T22:24:05.000Z
2021-10-01T14:28:47.000Z
# -*- coding: utf8 -*-
"""
.. module:: burpui.api.admin
    :platform: Unix
    :synopsis: Burp-UI admin api module.

.. moduleauthor:: Ziirish <hi+burpui@ziirish.me>

"""
from . import api
from ..engines.server import BUIServer  # noqa
from ..sessions import session_manager
from ..misc.acl.meta import meta_grants
from ..utils import NOTIF_OK
from .custom import fields, Resource

# from ..exceptions import BUIserverException
from flask import current_app
from flask_login import current_user
from flask_babel import gettext
import json

bui = current_app  # type: BUIServer
ns = api.namespace("admin", "Admin methods")

user_fields = ns.model(
    "Users",
    {
        "id": fields.String(required=True, description="User id"),
        "name": fields.String(required=True, description="User name"),
        "backend": fields.String(required=True, description="Backend name"),
    },
)
grant_fields = ns.model(
    "Grants",
    {
        "id": fields.String(required=True, description="Grant id"),
        "grant": fields.String(required=True, description="Grant content"),
        "backend": fields.String(required=True, description="Backend name"),
    },
)
group_fields = ns.model(
    "Groups",
    {
        "id": fields.String(required=True, description="Group id"),
        "grant": fields.String(required=True, description="Group grant content"),
        "members": fields.List(
            fields.String, required=True, description="Group members"
        ),
        "backend": fields.String(required=True, description="Backend name"),
    },
)
groups_fields = ns.model(
    "GroupsFields",
    {
        "name": fields.String(required=True, description="Group name"),
        "inherit": fields.List(
            fields.String, required=False, description="This group is inherited by"
        ),
    },
)
groups_list_fields = ns.model(
    "GroupsList",
    {
        "groups": fields.List(
            fields.Nested(groups_fields), required=True, description="Groups list"
        ),
    },
)
group_members_fields = ns.model(
    "GroupMembers",
    {
        "members": fields.List(
            fields.String, required=True, description="Group members"
        ),
        "grant": fields.String(required=True, description="Group grant content"),
    },
)
is_moderator_fields = ns.model(
    "IsModerator",
    {
        "moderator": fields.Boolean(
            required=True, description="Is the member a moderator"
        ),
        "inherit": fields.List(
            fields.String,
            required=False,
            description="What provides this grant if inherited",
        ),
    },
)
moderator_members_fields = ns.model(
    "ModeratorMembers",
    {
        "members": fields.List(
            fields.String, required=True, description="Moderator members"
        ),
        "grant": fields.String(required=True, description="Moderator grant content"),
    },
)
moderators_fields = ns.model(
    "Moderators",
    {
        "members": fields.List(
            fields.String, required=True, description="Moderator members"
        ),
        "grant": fields.String(required=True, description="Moderator grant content"),
        "backend": fields.String(required=True, description="Backend name"),
    },
)
is_admin_fields = ns.model(
    "IsAdmin",
    {
        "admin": fields.Boolean(required=True, description="Is the member an admin"),
        "inherit": fields.List(
            fields.String,
            required=False,
            description="What provides this grant if inherited",
        ),
    },
)
admin_members_fields = ns.model(
    "AdminMembers",
    {
        "members": fields.List(
            fields.String, required=True, description="Admin members"
        ),
    },
)
admins_fields = ns.model(
    "Admins",
    {
        "members": fields.List(
            fields.String, required=True, description="Admin members"
        ),
        "backend": fields.String(required=True, description="Backend name"),
    },
)
session_fields = ns.model(
    "Sessions",
    {
        "uuid": fields.String(description="Session id"),
        "ip": fields.String(description="IP address"),
        "ua": fields.String(description="User-Agent"),
        "permanent": fields.Boolean(description="Remember cookie"),
        "api": fields.Boolean(description="API login"),
        "expire": fields.DateTime(description="Expiration date"),
        "timestamp": fields.DateTime(description="Last seen"),
        "current": fields.Boolean(description="Is current session", default=False),
    },
)
acl_backend_fields = ns.model(
    "AclBackends",
    {
        "name": fields.String(required=True, description="Backend name"),
        "description": fields.String(required=True, description="Backend description"),
        "type": fields.String(required=False, description="Backend type"),
        "priority": fields.Integer(required=False, description="Backend priority"),
        "add_grant": fields.Boolean(
            required=False, default=False, description="Support grant creation"
        ),
        "mod_grant": fields.Boolean(
            required=False, default=False, description="Support grant edition"
        ),
        "del_grant": fields.Boolean(
            required=False, default=False, description="Support grant deletion"
        ),
        "add_group": fields.Boolean(
            required=False, default=False, description="Support group creation"
        ),
        "mod_group": fields.Boolean(
            required=False, default=False, description="Support group edition"
        ),
        "del_group": fields.Boolean(
            required=False, default=False, description="Support group deletion"
        ),
        "add_group_member": fields.Boolean(
            required=False, default=False, description="Support group member addition"
        ),
        "del_group_member": fields.Boolean(
            required=False, default=False, description="Support group member deletion"
        ),
        "add_moderator": fields.Boolean(
            required=False, default=False, description="Support moderator creation"
        ),
        "mod_moderator": fields.Boolean(
            required=False, default=False, description="Support moderator edition"
        ),
        "del_moderator": fields.Boolean(
            required=False, default=False, description="Support moderator deletion"
        ),
        "add_admin": fields.Boolean(
            required=False, default=False, description="Support admin creation"
        ),
        "del_admin": fields.Boolean(
            required=False, default=False, description="Support admin deletion"
        ),
    },
)
auth_backend_fields = ns.model(
    "Backends",
    {
        "name": fields.String(required=True, description="Backend name"),
        "description": fields.String(required=True, description="Backend description"),
        "type": fields.String(required=False, description="Backend type"),
        "priority": fields.Integer(required=False, description="Backend priority"),
        "add": fields.Boolean(
            required=False, default=False, description="Support user creation"
        ),
        "mod": fields.Boolean(
            required=False, default=False, description="Support user edition"
        ),
        "del": fields.Boolean(
            required=False, default=False, description="Support user deletion"
        ),
    },
)


@ns.route("/me", endpoint="admin_me")
class AdminMe(Resource):
    """The :class:`burpui.api.admin.AdminMe` resource allows you to
    retrieve information about your currently logged in user.

    This resource is part of the :mod:`burpui.api.admin` module.
    """

    @ns.marshal_with(user_fields)
    def get(self):
        """Returns the current user information

        **GET** method provided by the webservice.

        :returns: User
        """
        ret = getattr(current_user, "real", current_user)
        return ret


@ns.route(
    "/acl/isAdmin/<member>", "/acl/<backend>/isAdmin/<member>", endpoint="acl_is_admin"
)
@ns.doc(
    params={
        "member": "Username",
    }
)
class AclIsAdmin(Resource):
    """The :class:`burpui.api.admin.AclIsAdmin` resource allows you to
    check whether a given member is an admin or not.

    This resource is part of the :mod:`burpui.api.admin` module.
    """

    @api.acl_admin_or_moderator_required(message="Not allowed to view admins list")
    @ns.marshal_with(is_admin_fields)
    @ns.doc(
        responses={
            403: "Insufficient permissions",
            404: "No backend found",
        },
    )
    def get(self, member, backend=None):
        """Checks if a given member is an admin"""
        if not backend:
            (ret, inh) = meta_grants.is_admin(member)
            return {"admin": ret, "inherit": inh}
        try:
            handler = getattr(bui, "acl_handler")
        except AttributeError:
            handler = None
        if not handler or len(handler.backends) == 0:
            self.abort(404, "No acl backend found")
        if backend not in handler.backends:
            self.abort(404, "No acl backend '{}' found".format(backend))
        loader = handler.backends[backend]
        return {"admin": member in loader.admins}


@ns.route("/acl/admins", "/acl/<backend>/admins", endpoint="acl_admins")
@ns.doc(
    params={
        "backend": "Backend name",
    }
)
class AclAdmins(Resource):
    """The :class:`burpui.api.admin.AclAdmins` resource allows you to
    retrieve a list of admins.

    This resource is part of the :mod:`burpui.api.admin` module.
    """

    @api.acl_admin_or_moderator_required(message="Not allowed to view admins list")
    @ns.marshal_list_with(admins_fields, code=200, description="Success")
    @ns.doc(
        responses={
            403: "Insufficient permissions",
            404: "No backend found",
        },
    )
    def get(self, name=None, backend=None):
        """Returns a list of admins

        **GET** method provided by the webservice.

        :returns: Admins
        """
        try:
            handler = getattr(bui, "acl_handler")
        except AttributeError:
            handler = None
        if not handler or len(handler.backends) == 0:
            self.abort(404, "No acl backend found")
        ret = []
        for _, loader in handler.backends.items():
            append = {"members": loader.admins, "backend": loader.name}
            if (backend and backend == append["backend"]) or backend is None:
                return [append]
            ret.append(append)
        return ret


@ns.route(
    "/acl/admin",
    "/acl/<backend>/admin",
    "/acl/<backend>/admin/<member>",
    endpoint="acl_admin",
)
@ns.doc(
    params={
        "backend": "ACL backend",
        "member": "Admin member",
    }
)
class AclAdmin(Resource):
    """The :class:`burpui.api.admin.AclAdmin` resource allows you to
    retrieve a list of admins and add/delete them if your acl backend
    supports those actions.

    This resource is part of the :mod:`burpui.api.admin` module.
    """

    parser = ns.parser()
    parser.add_argument(
        "memberNames", required=False, help="Admin members", action="append"
    )
    parser.add_argument("backendName", required=False, help="Backend name")

    @api.acl_admin_or_moderator_required(message="Not allowed to view admins list")
    @ns.marshal_with(admin_members_fields, code=200, description="Success")
    @ns.doc(
        responses={
            403: "Insufficient permissions",
            404: "No backend found",
        },
    )
    def get(self, backend):
        """Returns a list of admin users

        **GET** method provided by the webservice.

        :returns: Members
        """
        try:
            handler = getattr(bui, "acl_handler")
        except AttributeError:
            handler = None
        if not handler or len(handler.backends) == 0:
            self.abort(404, "No acl backend found")
        if backend not in handler.backends:
            self.abort(404, "No acl backend '{}' found".format(backend))
        ret = {}
        loader = handler.backends[backend]
        ret = {"members": loader.admins}
        return ret

    @api.disabled_on_demo()
    @api.acl_admin_required(message="Not allowed to add admin members")
    @ns.expect(parser)
    @ns.doc(
        responses={
            403: "Insufficient permissions",
            404: "No backend found",
        },
    )
    def put(self, backend=None, member=None):
        """Add a member as admin

        **PUT** method provided by the webservice.
        """
        args = self.parser.parse_args()
        backend = backend or args["backendName"]
        try:
            handler = getattr(bui, "acl_handler")
        except AttributeError:
            handler = None
        if not handler or len(handler.backends) == 0:
            self.abort(404, "No acl backend found")
        if backend not in handler.backends:
            self.abort(404, "No acl backend '{}' found".format(backend))
        loader = handler.backends[backend]
        members = [member] if member else (args["memberNames"] or [])
        if loader.add_admin is False:
            self.abort(
                500,
                "The '{}' backend does not support admin member addition"
                "".format(backend),
            )
        ret = []
        status = 200
        for member in members:
            success, message, code = loader.add_admin(member)
            status = 201 if success else 200
            ret.append([code, message])
        bui.audit.logger.info(f"granted {members} as admin")
        return ret, status

    @api.disabled_on_demo()
    @api.acl_admin_required(message="Not allowed to remove admin members")
    @ns.expect(parser)
    @ns.doc(
        responses={
            403: "Insufficient permissions",
            404: "No backend found",
        },
    )
    def delete(self, backend=None, member=None):
        """Remove an admin member

        **DELETE** method provided by the webservice.
        """
        args = self.parser.parse_args()
        backend = backend or args["backendName"]
        try:
            handler = getattr(bui, "acl_handler")
        except AttributeError:
            handler = None
        if not handler or len(handler.backends) == 0:
            self.abort(404, "No acl backend found")
        if backend not in handler.backends:
            self.abort(404, "No acl backend '{}' found".format(backend))
        loader = handler.backends[backend]
        members = [member] if member else (args["memberNames"] or [])
        if loader.del_admin is False:
            self.abort(
                500,
                "The '{}' backend does not support admin member deletion"
                "".format(backend),
            )
        ret = []
        status = 200
        for member in members:
            success, message, code = loader.del_admin(member)
            status = 201 if success else 200
            ret.append([code, message])
        bui.audit.logger.info(f"removed admin grants of {members}")
        return ret, status
""" @api.acl_admin_or_moderator_required(message="Not allowed to view groups list") @ns.marshal_list_with(moderators_fields, code=200, description="Success") @ns.doc( responses={ 403: "Insufficient permissions", 404: "No backend found", }, ) def get(self, name=None, backend=None): """Returns a list of moderators **GET** method provided by the webservice. :returns: Moderators """ try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0: self.abort(404, "No acl backend found") ret = [] for _, loader in handler.backends.items(): append = { "grant": loader.moderator, "members": loader.moderators, "backend": loader.name, } if (backend and backend == append["backend"]) or backend is None: return [append] ret.append(append) return ret @ns.route( "/acl/moderator", "/acl/<backend>/moderator", "/acl/<backend>/moderator/<member>", endpoint="acl_moderator", ) @ns.doc( params={ "backend": "ACL backend", "member": "Moderator member", } ) class AclModerator(Resource): """The :class:`burpui.api.admin.AclModerator` resource allows you to retrieve a list of moderators and add/delete them if your acl backend support those actions. This resource is part of the :mod:`burpui.api.admin` module. """ parser = ns.parser() parser.add_argument( "memberNames", required=False, help="Moderator members", action="append" ) parser.add_argument("backendName", required=False, help="Backend name") parser_mod = ns.parser() parser_mod.add_argument("grant", required=False, help="Moderator grants") @api.acl_admin_or_moderator_required(message="Not allowed to view moderators list") @ns.marshal_with(moderator_members_fields, code=200, description="Success") @ns.doc( responses={ 403: "Insufficient permissions", 404: "No backend found", }, ) def get(self, backend): """Returns a list of moderator users **GET** method provided by the webservice. :returns: Members """ try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0: self.abort(404, "No acl backend found") if backend not in handler.backends: self.abort(404, "No acl backend '{}' found".format(backend)) ret = {} loader = handler.backends[backend] ret = {"members": loader.moderators, "grant": loader.moderator} return ret @api.disabled_on_demo() @api.acl_admin_or_moderator_required(message="Not allowed to add moderator members") @ns.expect(parser) @ns.doc( responses={ 403: "Insufficient permissions", 404: "No backend found", }, ) def put(self, backend=None, member=None): """Add a member as moderator **PUT** method provided by the webservice. 
""" args = self.parser.parse_args() backend = backend or args["backendName"] try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0: self.abort(404, "No acl backend found") if backend not in handler.backends: self.abort(404, "No acl backend '{}' found".format(backend)) loader = handler.backends[backend] members = [member] if member else (args["memberNames"] or []) if loader.add_moderator is False: self.abort( 500, "The '{}' backend does not support moderator member addition" "".format(backend), ) ret = [] status = 200 for member in members: success, message, code = loader.add_moderator(member) ret.append([code, message]) status = 201 if success else 200 bui.audit.logger.info(f"granted {members} as moderator") return ret, status @api.disabled_on_demo() @api.acl_admin_or_moderator_required( message="Not allowed to remove moderator members" ) @ns.expect(parser) @ns.doc( responses={ 403: "Insufficient permissions", 404: "No backend found", }, ) def delete(self, backend=None, member=None): """Remove a moderator member **DELETE** method provided by the webservice. """ args = self.parser.parse_args() backend = backend or args["backendName"] try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0: self.abort(404, "No acl backend found") if backend not in handler.backends: self.abort(404, "No acl backend '{}' found".format(backend)) loader = handler.backends[backend] members = [member] if member else (args["memberNames"] or []) if loader.del_moderator is False: self.abort( 500, "The '{}' backend does not support moderator member deletion" "".format(backend), ) ret = [] status = 200 for member in members: success, message, code = loader.del_moderator(member) ret.append([code, message]) status = 201 if success else 200 bui.audit.logger.info(f"removed moderator grant of {members}") return ret, status @api.disabled_on_demo() @api.acl_admin_or_moderator_required( message="Not allowed to update moderator grants" ) @ns.expect(parser_mod) @ns.doc( responses={ 403: "Insufficient permissions", 404: "No backend found", }, ) def post(self, backend): """Update moderator grants **POST** method provided by the webservice. """ args = self.parser_mod.parse_args() try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0: self.abort(404, "No acl backend found") if backend not in handler.backends: self.abort(404, "No acl backend '{}' found".format(backend)) loader = handler.backends[backend] grants = args["grant"] if loader.mod_moderator is False: self.abort( 500, "The '{}' backend does not support moderator grants edition" "".format(backend), ) success, message, code = loader.mod_moderator(grants) status = 201 if success else 200 bui.audit.logger.info(f"updated moderator grants to: {grants}") return [[code, message]], status @ns.route( "/acl/group", "/acl/<backend>/group/<name>", "/acl/<backend>/group/<name>/<member>", endpoint="acl_group_members", ) @ns.doc( params={ "name": "Group name", "backend": "ACL backend", "member": "Group member", } ) class AclGroup(Resource): """The :class:`burpui.api.admin.AclGroup` resource allows you to retrieve a list of members in a given group and add/delete them if your acl backend support those actions. This resource is part of the :mod:`burpui.api.admin` module. 
""" parser = ns.parser() parser.add_argument( "memberNames", required=False, help="Group members", action="append" ) parser.add_argument("groupName", required=False, help="Group name") parser.add_argument("backendName", required=False, help="Backend name") @api.acl_admin_or_moderator_required(message="Not allowed to view groups list") @ns.marshal_with(group_members_fields, code=200, description="Success") @ns.doc( responses={ 403: "Insufficient permissions", 404: "No backend found", }, ) def get(self, name, backend): """Returns a list of users in a giver group **GET** method provided by the webservice. :returns: Members """ try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0: self.abort(404, "No acl backend found") if backend not in handler.backends: self.abort(404, "No acl backend '{}' found".format(backend)) ret = {} loader = handler.backends[backend] groups = loader.groups gname = "@{}".format(name) if groups and gname in groups: ret = { "members": groups[gname].get("members", []), "grant": groups[gname].get("grants", ""), } return ret @api.disabled_on_demo() @api.acl_admin_or_moderator_required(message="Not allowed to add member in group") @ns.expect(parser) @ns.doc( responses={ 403: "Insufficient permissions", 404: "No backend found", }, ) def put(self, name=None, backend=None, member=None): """Add a member in a given group **PUT** method provided by the webservice. """ args = self.parser.parse_args() name = name or args["groupName"] backend = backend or args["backendName"] try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0: self.abort(404, "No acl backend found") if backend not in handler.backends: self.abort(404, "No acl backend '{}' found".format(backend)) loader = handler.backends[backend] members = [member] if member else (args["memberNames"] or []) if loader.add_group_member is False: self.abort( 500, "The '{}' backend does not support group member addition" "".format(backend), ) ret = [] status = 200 for member in members: success, message, code = loader.add_group_member(name, member) ret.append([code, message]) status = 201 if success else 200 bui.audit.logger.info(f"added {members} in group {name}") return ret, status @api.disabled_on_demo() @api.acl_admin_or_moderator_required( message="Not allowed to remove member in group" ) @ns.expect(parser) @ns.doc( responses={ 403: "Insufficient permissions", 404: "No backend found", }, ) def delete(self, name, backend, member=None): """Remove a member from a given group **DELETE** method provided by the webservice. 
""" args = self.parser.parse_args() try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0: self.abort(404, "No acl backend found") if backend not in handler.backends: self.abort(404, "No acl backend '{}' found".format(backend)) loader = handler.backends[backend] members = [member] if member else (args["memberNames"] or []) if loader.del_group_member is False: self.abort( 500, "The '{}' backend does not support group member deletion" "".format(backend), ) ret = [] status = 200 for member in members: success, message, code = loader.del_group_member(name, member) ret.append([code, message]) status = 201 if success else 200 bui.audit.logger.info(f"removed {members} from group {name}") return ret, status @ns.route("/acl/groupsOf/<member>", endpoint="acl_groups_of") @ns.doc( params={ "member": "Username", } ) class AclGroupsOf(Resource): """The :class:`burpui.api.admin.AclGroupsOf` resource allows you to retrieve a list of groups of a given user. This resource is part of the :mod:`burpui.api.admin` module. """ @api.acl_admin_or_moderator_required(message="Not allowed to view groups list") @ns.marshal_with(groups_list_fields, code=200, description="Success") @ns.doc( responses={ 403: "Insufficient permissions", 404: "No backend found", }, ) def get(self, member): """Returns a list of group **GET** method provided by the webservice. :returns: Groups """ return { "groups": [ {"name": name, "inherit": inherit} for name, inherit in meta_grants.get_member_groups(member) ] } @ns.route( "/acl/groups", "/acl/<backend>/groups", "/acl/groups/<name>", "/acl/<backend>/groups/<name>", endpoint="acl_groups", ) @ns.doc( params={ "name": "Group name", "backend": "Backend name", } ) class AclGroups(Resource): """The :class:`burpui.api.admin.AclGroups` resource allows you to retrieve a list of groups and to add/update/delete them if your acl backend support those actions. This resource is part of the :mod:`burpui.api.admin` module. """ parser_add = ns.parser() parser_add.add_argument( "group", required=True, help="Group name", location="values" ) parser_add.add_argument("grant", required=True, help="Group grant content") parser_add.add_argument("backend", help="Backend", location="values") parser_mod = ns.parser() parser_mod.add_argument("grant", required=True, help="Group grant content") parser_mod.add_argument("backend", help="Backend", location="values") parser_del = ns.parser() parser_del.add_argument("backend", help="Backend", location="values") @api.acl_admin_or_moderator_required(message="Not allowed to view groups list") @ns.marshal_list_with(group_fields, code=200, description="Success") @ns.doc( responses={ 403: "Insufficient permissions", 404: "No backend found", }, ) def get(self, name=None, backend=None): """Returns a list of group **GET** method provided by the webservice. 
:returns: Groups """ try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0: self.abort(404, "No acl backend found") ret = [] for _, loader in handler.backends.items(): groups = loader.groups if groups: for _id, group in groups.items(): append = { "id": _id.lstrip("@"), "grant": group.get("grants", ""), "members": group.get("members", []), "backend": loader.name, } if name and name == append["id"]: if ( backend and backend == append["backend"] ) or backend is None: return [append] ret.append(append) return ret @api.disabled_on_demo() @api.acl_admin_or_moderator_required(message="Not allowed to create groups") @ns.expect(parser_add) @ns.doc( responses={ 200: "Request performed with errors", 201: "Success", 403: "Not allowed", 400: "Missing parameters", 404: "Backend not found", 500: "Backend does not support this operation", }, ) def put(self, backend=None): """Create a new group""" args = self.parser_add.parse_args() backend = backend or args["backend"] try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0 or backend not in handler.backends: self.abort(404, "No acl backend found") loader = handler.backends[backend] if loader.add_group is False: self.abort( 500, "The '{}' backend does not support group creation" "".format(backend), ) success, message, code = loader.add_group(args["group"], args["grant"]) status = 201 if success else 200 bui.audit.logger.info( f'created new group {args["group"]} with grants: {args["grant"]}' ) return [[code, message]], status @api.disabled_on_demo() @api.acl_admin_or_moderator_required(message="Not allowed to delete this group") @ns.expect(parser_del) @ns.doc( responses={ 200: "Request performed with errors", 201: "Success", 403: "Not allowed", 400: "Missing parameters", 404: "Backend not found", 500: "Backend does not support this operation", }, ) def delete(self, name, backend=None): """Delete a group""" args = self.parser_del.parse_args() backend = backend or args["backend"] try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0 or backend not in handler.backends: self.abort(404, "No acl backend found") loader = handler.backends[backend] if loader.del_group is False: self.abort( 500, "The '{}' backend does not support group deletion" "".format(backend), ) success, message, code = loader.del_group(name) status = 201 if success else 200 bui.audit.logger.info(f"removed group {name}") return [[code, message]], status @api.disabled_on_demo() @api.acl_admin_or_moderator_required(message="Not allowed to modify this group") @ns.expect(parser_mod) @ns.doc( responses={ 200: "Request performed with errors", 201: "Success", 403: "Not allowed", 400: "Missing parameters", 404: "Backend not found", 500: "Backend does not support this operation", }, ) def post(self, name, backend=None): """Change a group""" args = self.parser_mod.parse_args() backend = backend or args["backend"] try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0 or backend not in handler.backends: self.abort(404, "No acl backend found") loader = handler.backends[backend] if loader.mod_group is False: self.abort( 500, "The '{}' backend does not support group modification" "".format(backend), ) success, message, code = loader.mod_group(name, args["grant"]) status = 201 if success else 200 bui.audit.logger.info(f'updated 
group {name} with: {args["grant"]}') return [[code, message]], status @ns.route( "/acl/grants", "/acl/<backend>/grants", "/acl/grants/<name>", "/acl/<backend>/grants/<name>", endpoint="acl_grants", ) @ns.doc( params={ "name": "Grant name", "backend": "Backend name", } ) class AclGrants(Resource): """The :class:`burpui.api.admin.AclGrants` resource allows you to retrieve a list of grants and to add/update/delete them if your acl backend support those actions. This resource is part of the :mod:`burpui.api.admin` module. """ parser_add = ns.parser() parser_add.add_argument("grant", required=True, help="Grant name") parser_add.add_argument("content", required=True, help="Grant content") parser_add.add_argument("backend", help="Backend") parser_mod = ns.parser() parser_mod.add_argument("content", required=True, help="Grant content") parser_mod.add_argument("backend", help="Backend") parser_del = ns.parser() parser_del.add_argument("backend", help="Backend", location="values") @api.acl_admin_or_moderator_required(message="Not allowed to view grants list") @ns.marshal_list_with(grant_fields, code=200, description="Success") @ns.doc( responses={ 403: "Insufficient permissions", 404: "No backend found", }, ) def get(self, name=None, backend=None): """Returns a list of grants **GET** method provided by the webservice. :returns: Grants """ try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0: self.abort(404, "No acl backend found") ret = [] for _, loader in handler.backends.items(): grants = loader.grants if grants: for _id, grant in grants.items(): append = { "id": _id, "grant": json.dumps(grant), "backend": loader.name, } if name and name == _id: if (backend and backend == loader.name) or backend is None: return [append] ret.append(append) return ret @api.disabled_on_demo() @api.acl_admin_or_moderator_required(message="Not allowed to create grants") @ns.expect(parser_add) @ns.doc( responses={ 200: "Request performed with errors", 201: "Success", 403: "Not allowed", 400: "Missing parameters", 404: "Backend not found", 500: "Backend does not support this operation", }, ) def put(self, backend=None): """Create a new grant""" args = self.parser_add.parse_args() backend = backend or args["backend"] try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0 or backend not in handler.backends: self.abort(404, "No acl backend found") loader = handler.backends[backend] if loader.add_grant is False: self.abort( 500, "The '{}' backend does not support grant creation" "".format(backend), ) success, message, code = loader.add_grant(args["grant"], args["content"]) status = 201 if success else 200 bui.audit.logger.info(f'added grant {args["grant"]} with: {args["content"]}') return [[code, message]], status @api.disabled_on_demo() @api.acl_admin_or_moderator_required(message="Not allowed to delete this grant") @ns.expect(parser_del) @ns.doc( responses={ 200: "Request performed with errors", 201: "Success", 403: "Not allowed", 400: "Missing parameters", 404: "Backend not found", 500: "Backend does not support this operation", }, ) def delete(self, name, backend=None): """Delete a grant""" args = self.parser_del.parse_args() backend = backend or args["backend"] try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0 or backend not in handler.backends: self.abort(404, "No acl backend found") loader = 
handler.backends[backend] if loader.del_grant is False: self.abort( 500, "The '{}' backend does not support grant deletion" "".format(backend), ) success, message, code = loader.del_grant(name) status = 201 if success else 200 bui.audit.logger.info(f"removed grant {name}") return [[code, message]], status @api.disabled_on_demo() @api.acl_admin_or_moderator_required(message="Not allowed to modify this grant") @ns.expect(parser_mod) @ns.doc( responses={ 200: "Request performed with errors", 201: "Success", 403: "Not allowed", 400: "Missing parameters", 404: "Backend not found", 500: "Backend does not support this operation", }, ) def post(self, name, backend=None): """Change a grant""" args = self.parser_mod.parse_args() backend = backend or args["backend"] try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0 or backend not in handler.backends: self.abort(404, "No acl backend found") loader = handler.backends[backend] if loader.mod_grant is False: self.abort( 500, "The '{}' backend does not support grant modification" "".format(backend), ) success, message, code = loader.mod_grant(name, args["content"]) status = 201 if success else 200 bui.audit.logger.info(f'updated grant {name} with: {args["content"]}') return [[code, message]], status @ns.route("/acl/backend/<backend>", endpoint="acl_backend") class AclBackend(Resource): """The :class:`burpui.api.admin.AclBackend` resource allows you to retrieve a given ACL backend with its capabilities. This resource is part of the :mod:`burpui.api.admin` module. """ @api.acl_admin_or_moderator_required(message="Not allowed to view backends list") @ns.marshal_with(acl_backend_fields, code=200, description="Success") @ns.doc( responses={ 403: "Insufficient permissions", 404: "No backend found", }, ) def get(self, backend): """Returns a given ACL backend **GET** method provided by the webservice. :returns: Backend """ try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0: self.abort(404, "No authentication backend found") if backend not in handler.backends: self.abort(404, "ACL backend {} not found".format(backend)) loader = handler.backends[backend] back = {} back["name"] = backend back["description"] = gettext(loader.__doc__) back["type"] = "authorization" back["priority"] = getattr(loader, "priority", -1) for method in [ "add_grant", "del_grant", "mod_grant", "add_group", "del_group", "mod_group", "add_group_member", "del_group_member", "add_moderator", "del_moderator", "mod_moderator", "add_admin", "del_admin", ]: back[method] = getattr(loader, method, False) is not False return back @ns.route("/acl/backends", endpoint="acl_backends") class AclBackends(Resource): """The :class:`burpui.api.admin.AclBackends` resource allows you to retrieve a list of ACL backends with their capabilities. This resource is part of the :mod:`burpui.api.admin` module. """ @api.acl_admin_or_moderator_required(message="Not allowed to view backends list") @ns.marshal_list_with(acl_backend_fields, code=200, description="Success") @ns.doc( responses={ 403: "Insufficient permissions", 404: "No backend found", }, ) def get(self): """Returns a list of backends **GET** method provided by the webservice. 
:returns: Backends """ try: handler = getattr(bui, "acl_handler") except AttributeError: handler = None if not handler or len(handler.backends) == 0: self.abort(404, "No authentication backend found") ret = [] for name, backend in handler.backends.items(): back = {} back["name"] = name back["description"] = gettext(backend.__doc__) back["type"] = "authorization" back["priority"] = getattr(backend, "priority", -1) for method in [ "add_grant", "del_grant", "mod_grant", "add_group", "del_group", "mod_group", "add_group_member", "del_group_member", "add_moderator", "del_moderator", "mod_moderator", "add_admin", "del_admin", ]: back[method] = getattr(backend, method, False) is not False ret.append(back) return ret @ns.route( "/auth/users", "/auth/<backend>/users", "/auth/users/<name>", "/auth/<backend>/users/<name>", endpoint="auth_users", ) @ns.doc( params={ "name": "Username", "backend": "Authentication backend", } ) class AuthUsers(Resource): """The :class:`burpui.api.admin.AuthUsers` resource allows you to retrieve a list of users and to add/update/delete them if your authentication backend support those actions. This resource is part of the :mod:`burpui.api.admin` module. """ parser_add = ns.parser() parser_add.add_argument("username", required=True, help="Username") parser_add.add_argument("password", required=True, help="Password") parser_add.add_argument("backend", help="Backend") parser_mod = ns.parser() parser_mod.add_argument("password", required=True, help="Password") parser_mod.add_argument("backend", help="Backend") parser_mod.add_argument("old_password", required=False, help="Old password") parser_del = ns.parser() parser_del.add_argument("backend", help="Backend") @api.acl_admin_or_moderator_required(message="Not allowed to view users list") @ns.marshal_list_with(user_fields, code=200, description="Success") @ns.doc( responses={ 403: "Insufficient permissions", 404: "No backend found", }, ) def get(self, name=None, backend=None): """Returns a list of users **GET** method provided by the webservice. 
:returns: Users """ try: handler = getattr(bui, "uhandler") except AttributeError: handler = None if not handler or len(handler.backends) == 0: self.abort(404, "No authentication backend found") ret = [] for _, backend in handler.backends.items(): loader = backend.loader preload_users = getattr(backend, "preload_users", True) try: users = getattr(loader, "users") except AttributeError: continue if users: if isinstance(users, list): for user in users: append = { "id": backend.user(user).get_id() if preload_users else user, "name": user, "backend": backend.name, } if name and name == append["id"]: if ( backend and backend == append["backend"] ) or backend is None: return append ret.append(append) elif isinstance(users, dict): for user, _ in users.items(): append = { "id": backend.user(user).get_id(), "name": user, "backend": backend.name, } if name and name == append["id"]: if ( backend and backend == append["backend"] ) or backend is None: return append ret.append(append) return ret @api.disabled_on_demo() @api.acl_admin_or_moderator_required(message="Not allowed to create users") @ns.expect(parser_add) @ns.doc( responses={ 200: "Request performed with errors", 201: "Success", 403: "Not allowed", 400: "Missing parameters", 404: "Backend not found", 500: "Backend does not support this operation", }, ) def put(self, name=None, backend=None): """Create a new user""" args = self.parser_add.parse_args() username = name or args["username"] backend = backend or args["backend"] try: handler = getattr(bui, "uhandler") except AttributeError: handler = None if not handler or len(handler.backends) == 0 or backend not in handler.backends: self.abort(404, "No authentication backend found") backend = handler.backends[backend] if backend.add_user is False: self.abort( 500, "The '{}' backend does not support user creation" "".format(backend), ) success, message, code = backend.add_user(username, args["password"]) status = 201 if success else 200 bui.audit.logger.info(f"created new user: {username}") return [[code, message]], status @api.disabled_on_demo() @api.acl_admin_or_moderator_required(message="Not allowed to delete this user") @ns.expect(parser_del) @ns.doc( responses={ 200: "Request performed with errors", 201: "Success", 403: "Not allowed", 400: "Missing parameters", 404: "Backend not found", 500: "Backend does not support this operation", }, ) def delete(self, name, backend=None): """Delete a user""" args = self.parser_del.parse_args() backend = backend or args["backend"] try: handler = getattr(bui, "uhandler") except AttributeError: handler = None if not handler or len(handler.backends) == 0 or backend not in handler.backends: self.abort(404, "No authentication backend found") backend = handler.backends[backend] if backend.del_user is False: self.abort( 500, "The '{}' backend does not support user deletion" "".format(backend), ) success, message, code = backend.del_user(name) status = 201 if success else 200 bui.audit.logger.info(f"removed user: {name}") return [[code, message]], status @api.disabled_on_demo() @api.acl_own_or_admin_or_moderator( key="name", message="Not allowed to modify this user" ) @ns.expect(parser_mod) @ns.doc( responses={ 200: "Request performed with errors", 201: "Success", 403: "Not allowed", 400: "Missing parameters", 404: "Backend not found", 500: "Backend does not support this operation", }, ) def post(self, name, backend=None): """Change user password""" args = self.parser_mod.parse_args() backend = backend or args["backend"] is_moderator = True if not 
current_user.is_anonymous: is_moderator = ( current_user.acl.is_admin() or current_user.acl.is_moderator() ) if not is_moderator and not args["old_password"]: self.abort(400, "Old password required") try: handler = getattr(bui, "uhandler") except AttributeError: handler = None if not handler or len(handler.backends) == 0 or backend not in handler.backends: self.abort(404, "No authentication backend found") backend = handler.backends[backend] if backend.change_password is False: self.abort( 500, "The '{}' backend does not support user modification" "".format(backend), ) success, message, code = backend.change_password( name, args["password"], args.get("old_password") ) status = 201 if success else 200 bui.audit.logger.info(f"changed password of user {name}") return [[code, message]], status @ns.route("/auth/backend/<backend>", endpoint="auth_backend") class AuthBackend(Resource): """The :class:`burpui.api.admin.AuthBackend` resource allows you to retrieve a given authentication backend and its capabilities. This resource is part of the :mod:`burpui.api.admin` module. """ @api.acl_admin_or_moderator_required(message="Not allowed to view backends list") @ns.marshal_with(auth_backend_fields, code=200, description="Success") @ns.doc( responses={ 403: "Insufficient permissions", 404: "No backend found", }, ) def get(self, backend): """Returns a given authentication backend **GET** method provided by the webservice. :returns: Backend """ try: handler = getattr(bui, "uhandler") except AttributeError: handler = None if not handler or len(handler.backends) == 0: self.abort(404, "No authentication backend found") if backend not in handler.backends: self.abort(404, "No authentication backend {} found".format(backend)) back = handler.backends[backend] ret = { "name": backend, "description": gettext(back.__doc__), "type": "authentication", "priority": getattr(back, "priority", -1), "add": getattr(back, "add_user", False) is not False, "del": getattr(back, "del_user", False) is not False, "mod": getattr(back, "change_password", False) is not False, } return ret @ns.route("/auth/backends", endpoint="auth_backends") class AuthBackends(Resource): """The :class:`burpui.api.admin.AuthBackends` resource allows you to retrieve a list of backends and to add/update/delete users if your authentication backend support those actions. This resource is part of the :mod:`burpui.api.admin` module. """ @api.acl_admin_or_moderator_required(message="Not allowed to view backends list") @ns.marshal_list_with(auth_backend_fields, code=200, description="Success") @ns.doc( responses={ 403: "Insufficient permissions", 404: "No backend found", }, ) def get(self): """Returns a list of backends **GET** method provided by the webservice. 
        :returns: Backends
        """
        try:
            handler = getattr(bui, "uhandler")
        except AttributeError:
            handler = None
        if not handler or len(handler.backends) == 0:
            self.abort(404, "No authentication backend found")
        ret = []
        for name, backend in handler.backends.items():
            ret.append(
                {
                    "name": name,
                    "description": gettext(backend.__doc__),
                    "type": "authentication",
                    "priority": backend.priority,
                    "add": backend.add_user is not False,
                    "del": backend.del_user is not False,
                    "mod": backend.change_password is not False,
                }
            )
        return ret


@ns.route("/session/<user>", "/session/<user>/<uuid:id>", endpoint="other_sessions")
@ns.doc(
    params={
        "user": "User to get sessions from",
        "id": "Session id",
    }
)
class OtherSessions(Resource):
    """The :class:`burpui.api.admin.OtherSessions` resource allows you to
    retrieve a list of sessions for a given user.

    This resource is part of the :mod:`burpui.api.admin` module.
    """

    @ns.marshal_list_with(session_fields, code=200, description="Success")
    @ns.doc(
        responses={
            403: "Insufficient permissions",
            404: "User not found",
        },
    )
    def get(self, user=None, id=None):
        """Returns a list of sessions

        **GET** method provided by the webservice.

        :returns: Sessions
        """
        if id:
            return session_manager.get_session_by_id(str(id))
        if not user:
            self.abort(404, "User not found")
        return session_manager.get_user_sessions(user)


@ns.route("/me/session", "/me/session/<uuid:id>", endpoint="user_sessions")
@ns.doc(
    params={
        "id": "Session id",
    }
)
class MySessions(Resource):
    """The :class:`burpui.api.admin.MySessions` resource allows you to
    retrieve a list of sessions and invalidate them for the current user.

    This resource is part of the :mod:`burpui.api.admin` module.
    """

    @ns.marshal_list_with(session_fields, code=200, description="Success")
    @ns.doc(
        responses={
            403: "Insufficient permissions",
            404: "User not found",
        },
    )
    def get(self, id=None):
        """Returns a list of sessions

        **GET** method provided by the webservice.

        :returns: Sessions
        """
        if id:
            return session_manager.get_session_by_id(str(id))
        user = getattr(current_user, "name", None)
        if not user:
            self.abort(404, "User not found")
        return session_manager.get_user_sessions(user)

    @api.disabled_on_demo()
    @ns.doc(
        responses={
            201: "Success",
            403: "Insufficient permissions",
            404: "User or session not found",
            400: "Wrong request",
        }
    )
    def delete(self, id=None):
        """Delete a given session

        Note: ``id`` is mandatory
        """
        if not id:
            self.abort(400, "Missing id")
        user = getattr(current_user, "name", None)
        if not user:
            self.abort(404, "User not found")
        store = session_manager.get_session_by_id(str(id))
        if not store:
            self.abort(404, "Session not found")
        if store.user != user:
            if (
                not current_user.is_anonymous
                and not current_user.acl.is_admin()
                and not current_user.acl.is_moderator()
            ):
                self.abort(403, "Insufficient permissions")
            if current_user.acl.is_moderator() and meta_grants.is_admin(store.user):
                self.abort(403, "Insufficient permissions")
        if session_manager.invalidate_session_by_id(store.uuid):
            session_manager.delete_session_by_id(store.uuid)
        bui.audit.logger.info(f"removed session {store.id} of {store.user}")
        return [NOTIF_OK, "Session {} successfully revoked".format(id)], 201
31.981618
88
0.574549
6,699
60,893
5.138976
0.042544
0.036165
0.016034
0.01708
0.819032
0.801458
0.759629
0.733022
0.702492
0.675652
0
0.01705
0.309412
60,893
1,903
89
31.998424
0.801603
0.098583
0
0.604555
0
0
0.200279
0.010536
0
0
0
0
0
1
0.024155
false
0.008282
0.006901
0
0.083506
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
f9fdd562e1f3744ccd1957ac05a22721315aee90
51
py
Python
src/deep_dialog/usersims/__init__.py
Ambitioner-c/UserSimulator
9e32bd04e93464c02d86e8e3afb6998cd70ac57f
[ "MIT" ]
1
2020-10-13T01:15:58.000Z
2020-10-13T01:15:58.000Z
src/deep_dialog/usersims/__init__.py
Ambitioner-c/UserSimulator
9e32bd04e93464c02d86e8e3afb6998cd70ac57f
[ "MIT" ]
null
null
null
src/deep_dialog/usersims/__init__.py
Ambitioner-c/UserSimulator
9e32bd04e93464c02d86e8e3afb6998cd70ac57f
[ "MIT" ]
null
null
null
from .usersim_rule import *
from .usersim import *
17
27
0.764706
7
51
5.428571
0.571429
0.578947
0
0
0
0
0
0
0
0
0
0
0.156863
51
2
28
25.5
0.883721
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
e64dcc220d653ea686e027e8ea5ddd155b5a8293
247
py
Python
Functions/SimpleFunctions.py
fawq/NeuralNetworks
67342cc2ae2311c490d83e51053b303a0075cc62
[ "MIT" ]
1
2019-03-05T12:36:59.000Z
2019-03-05T12:36:59.000Z
Functions/SimpleFunctions.py
fawq/NeuralNetworks
67342cc2ae2311c490d83e51053b303a0075cc62
[ "MIT" ]
null
null
null
Functions/SimpleFunctions.py
fawq/NeuralNetworks
67342cc2ae2311c490d83e51053b303a0075cc62
[ "MIT" ]
null
null
null
import numpy as np


def sigmoid(x, derivative=False):
    return (1 / (1 + np.exp(-x))) if derivative is False else sigmoid(x) * (1 - sigmoid(x))


def relu(x, derivative=False):
    return max(0, x) if derivative is False else 1 if x > 0 else 0
24.7
91
0.65587
45
247
3.6
0.4
0.148148
0.197531
0.271605
0.296296
0.296296
0
0
0
0
0
0.036082
0.214575
247
9
92
27.444444
0.798969
0
0
0
0
0
0
0
0
0
0
0
0
1
0.4
false
0
0.2
0.4
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
e660a7424a8a89a3339f26c9648447f6ff6b22dd
23
py
Python
prestic/__init__.py
ducalex/prestic
ff6436f5eaccb74863337ae75829ff75b89dc360
[ "MIT" ]
6
2020-10-27T07:23:47.000Z
2022-01-15T10:13:22.000Z
prestic/__init__.py
ducalex/prestic
ff6436f5eaccb74863337ae75829ff75b89dc360
[ "MIT" ]
1
2021-01-04T20:53:24.000Z
2021-01-04T20:53:24.000Z
prestic/__init__.py
ducalex/prestic
ff6436f5eaccb74863337ae75829ff75b89dc360
[ "MIT" ]
2
2021-01-04T15:23:08.000Z
2022-01-15T11:34:44.000Z
from .prestic import *
11.5
22
0.73913
3
23
5.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.173913
23
1
23
23
0.894737
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
0511536eb1de1528acb3c5dacd15d83ce85721a3
267
py
Python
hivemind/client/__init__.py
ploshkin/hivemind
7bb656567417895e9f1d8684a0c0e9ef4e4de25d
[ "MIT" ]
null
null
null
hivemind/client/__init__.py
ploshkin/hivemind
7bb656567417895e9f1d8684a0c0e9ef4e4de25d
[ "MIT" ]
null
null
null
hivemind/client/__init__.py
ploshkin/hivemind
7bb656567417895e9f1d8684a0c0e9ef4e4de25d
[ "MIT" ]
null
null
null
from hivemind.client.expert import RemoteExpert
from hivemind.client.moe import RemoteMixtureOfExperts
from hivemind.client.averaging import DecentralizedAverager
from hivemind.client.optim import ParameterAveragingOptimizer, DecentralizedSGD, CollaborativeOptimizer
53.4
103
0.895131
26
267
9.192308
0.538462
0.200837
0.301255
0
0
0
0
0
0
0
0
0
0.067416
267
4
104
66.75
0.959839
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
05843a3006b0e6b96d5b6c8e393225ae739a55da
132
py
Python
src/python/basics/basics1.py
ptyagicodecamp/allgorythms
8d92d20110d273ee69651a3da3f442c96e165158
[ "MIT" ]
3
2020-10-01T16:55:21.000Z
2021-07-07T10:42:56.000Z
src/python/basics/basics1.py
ptyagicodecamp/allgorythms
8d92d20110d273ee69651a3da3f442c96e165158
[ "MIT" ]
null
null
null
src/python/basics/basics1.py
ptyagicodecamp/allgorythms
8d92d20110d273ee69651a3da3f442c96e165158
[ "MIT" ]
3
2020-12-09T23:44:05.000Z
2022-02-12T07:04:39.000Z
'''
print("Hello Programming !")
help("keywords")
'''

print("Hello Programming !");
print("Hello World !");print("Hello Python !")
18.857143
76
0.643939
14
132
6.071429
0.5
0.470588
0.494118
0
0
0
0
0
0
0
0
0
0.113636
132
7
76
18.857143
0.726496
0.348485
0
0
0
0
0.582278
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
5593fa9cbcfd6742e1f494f16c3da8d489f5240b
199
py
Python
Lesson5/main5.py
NeuPasha/PythonBasics
f4642b6fb7fbac9121c58bd8f65bc520c39ecafb
[ "MIT" ]
null
null
null
Lesson5/main5.py
NeuPasha/PythonBasics
f4642b6fb7fbac9121c58bd8f65bc520c39ecafb
[ "MIT" ]
null
null
null
Lesson5/main5.py
NeuPasha/PythonBasics
f4642b6fb7fbac9121c58bd8f65bc520c39ecafb
[ "MIT" ]
null
null
null
import divisor_master

print(divisor_master.if_simple(7))
print(divisor_master.divisors(1000))
print(divisor_master.biggest(70))
print(divisor_master.simple_m(91))
print(divisor_master.biggest_d(93))
28.428571
36
0.839196
31
199
5.096774
0.483871
0.493671
0.56962
0.316456
0
0
0
0
0
0
0
0.057292
0.035176
199
7
37
28.428571
0.765625
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.166667
0
0.166667
0.833333
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
55a7d2c2ca0f1ef60f335e732286ff1fca52fad4
1,643
py
Python
src/utils/responseUtils.py
Ordergoras/backend
e2f7681628e71e86643e6080df39e1a72d6fc355
[ "MIT" ]
null
null
null
src/utils/responseUtils.py
Ordergoras/backend
e2f7681628e71e86643e6080df39e1a72d6fc355
[ "MIT" ]
null
null
null
src/utils/responseUtils.py
Ordergoras/backend
e2f7681628e71e86643e6080df39e1a72d6fc355
[ "MIT" ]
null
null
null
import json
from typing import Dict, List

from flask import Response, jsonify

from src.utils.globals import ACCESS_TOKEN_LIFETIME


def create200Response(message: str, newAccessToken: str = None) -> Response:
    response = Response(json.dumps({'message': message}), status=200)
    if newAccessToken is not None:
        response.set_cookie('accessToken', newAccessToken, max_age=ACCESS_TOKEN_LIFETIME, httponly=True)
    return response


def create200ResponseData(body: Dict | List, newAccessToken: str = None) -> Response:
    response = jsonify(body)
    if newAccessToken is not None:
        response.set_cookie('accessToken', newAccessToken, max_age=ACCESS_TOKEN_LIFETIME, httponly=True)
    return response


def create400Response(message: str, newAccessToken: str = None) -> Response:
    response = Response(json.dumps({'message': message}), status=400)
    if newAccessToken is not None:
        response.set_cookie('accessToken', newAccessToken, max_age=ACCESS_TOKEN_LIFETIME, httponly=True)
    return response


def create401Response(message: str, newAccessToken: str = None) -> Response:
    response = Response(json.dumps({'message': message}), status=401)
    if newAccessToken is not None:
        response.set_cookie('accessToken', newAccessToken, max_age=ACCESS_TOKEN_LIFETIME, httponly=True)
    return response


def create409Response(message: str, newAccessToken: str = None) -> Response:
    response = Response(json.dumps({'message': message}), status=409)
    if newAccessToken is not None:
        response.set_cookie('accessToken', newAccessToken, max_age=ACCESS_TOKEN_LIFETIME, httponly=True)
    return response
41.075
104
0.749848
191
1,643
6.335079
0.225131
0.099174
0.094215
0.119835
0.805785
0.775207
0.775207
0.775207
0.775207
0.775207
0
0.019397
0.152769
1,643
39
105
42.128205
0.849856
0
0
0.517241
0
0
0.050517
0
0
0
0
0
0
1
0.172414
false
0
0.137931
0
0.482759
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e976ec6f47cf9b983a8ac13461c4deff248fbe4c
2,398
py
Python
tests/scripts/test_clickgen_script.py
KaizIqbal/clickgen
cab0d0c005c7714cb0271809745a2dae321aa7eb
[ "MIT" ]
2
2020-06-06T03:34:29.000Z
2020-07-29T06:47:23.000Z
tests/scripts/test_clickgen_script.py
KaizIqbal/clickgen
cab0d0c005c7714cb0271809745a2dae321aa7eb
[ "MIT" ]
null
null
null
tests/scripts/test_clickgen_script.py
KaizIqbal/clickgen
cab0d0c005c7714cb0271809745a2dae321aa7eb
[ "MIT" ]
null
null
null
import argparse
from unittest import mock

from clickgen.parser.png import DELAY, SIZES
from clickgen.scripts.clickgen import main


def test_clickgen_all_cursor_build(samples_dir, x11_tmp_dir, hotspot):
    fp = samples_dir / "pngs/pointer.png"
    with open(fp, "rb") as f:
        with mock.patch(
            "argparse.ArgumentParser.parse_args",
            return_value=argparse.Namespace(
                files=[f],
                output=x11_tmp_dir,
                hotspot_x=hotspot[0],
                hotspot_y=hotspot[1],
                sizes=SIZES,
                delay=DELAY,
                platform="all",
            ),
        ):
            main()


def test_clickgen_x11_build(samples_dir, x11_tmp_dir, hotspot):
    fp = samples_dir / "pngs/pointer.png"
    with open(fp, "rb") as f:
        with mock.patch(
            "argparse.ArgumentParser.parse_args",
            return_value=argparse.Namespace(
                files=[f],
                output=x11_tmp_dir,
                hotspot_x=hotspot[0],
                hotspot_y=hotspot[1],
                sizes=SIZES,
                delay=DELAY,
                platform="x11",
            ),
        ):
            main()


def test_clickgen_windows_build(samples_dir, x11_tmp_dir, hotspot):
    fp = samples_dir / "pngs/pointer.png"
    with open(fp, "rb") as f:
        with mock.patch(
            "argparse.ArgumentParser.parse_args",
            return_value=argparse.Namespace(
                files=[f],
                output=x11_tmp_dir,
                hotspot_x=hotspot[0],
                hotspot_y=hotspot[1],
                sizes=SIZES,
                delay=DELAY,
                platform="windows",
            ),
        ):
            main()


def test_clickgen_raises(capsys, samples_dir, x11_tmp_dir, hotspot):
    fp = samples_dir / "sample.toml"
    with open(fp, "rb") as f:
        with mock.patch(
            "argparse.ArgumentParser.parse_args",
            return_value=argparse.Namespace(
                files=[f],
                output=x11_tmp_dir,
                hotspot_x=hotspot[0],
                hotspot_y=hotspot[1],
                sizes=SIZES,
                delay=DELAY,
                platform="all",
            ),
        ):
            main()
    captured = capsys.readouterr()
    assert "Error occurred while processing sample.toml" in captured.err
29.975
80
0.524187
252
2,398
4.781746
0.230159
0.06639
0.059751
0.106224
0.742739
0.742739
0.742739
0.742739
0.742739
0.711203
0
0.018792
0.378649
2,398
79
81
30.35443
0.789933
0
0
0.814286
0
0
0.109258
0.056714
0
0
0
0
0.014286
1
0.057143
false
0
0.057143
0
0.114286
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e994349285b23ce6c5e068c27b0e098c4f4c1cfd
1,352
py
Python
niimpy/EDA/setup_dataframe.py
niima-project/niimpy
975470507b1f8836d9e29d43601e345612b06a62
[ "MIT" ]
5
2021-11-23T12:05:23.000Z
2022-02-11T12:57:50.000Z
niimpy/EDA/setup_dataframe.py
niima-project/niimpy
975470507b1f8836d9e29d43601e345612b06a62
[ "MIT" ]
62
2021-07-16T09:17:18.000Z
2022-03-16T11:27:50.000Z
niimpy/EDA/setup_dataframe.py
niima-project/niimpy
975470507b1f8836d9e29d43601e345612b06a62
[ "MIT" ]
6
2021-09-07T13:06:57.000Z
2022-03-14T11:26:30.000Z
import pandas as pd


def create_dataframe():
    """Create a sample Pandas dataframe used by the test functions.

    Returns
    -------
    df : pandas.DataFrame
        Pandas dataframe containing sample data.
    """
    dti = pd.date_range("2018-01-01", periods=9, freq="H")
    d = {'user': ['user_1','user_2','user_3','user_4','user_5','user_6','user_7','user_8','user_9'],
         'group': ['group_1','group_1','group_1','group_2','group_2','group_2','group_3','group_3','group_3'],
         'col_1': [1, 2, 3, 4, 5, 6, 7, 8, 9],
         'col_2': [10, 11, 12, 13, 14, 15, 16, 17, 18]}
    df = pd.DataFrame(data=d, index=dti)
    return df


def create_categorical_dataframe():
    """Create a sample Pandas dataframe used by the test functions.

    Returns
    -------
    df : pandas.DataFrame
        Pandas dataframe containing sample data.
    """
    dti = pd.date_range("2018-01-01", periods=9, freq="H")
    d = {'user': ['user_1','user_2','user_3','user_4','user_5','user_6','user_7','user_8','user_9'],
         'group': ['group_1','group_1','group_1','group_2','group_2','group_2','group_3','group_3','group_3'],
         'question': [1, 2, 3, 4, 5, 6, 7, 8, 9],
         'answer': [10, 11, 12, 13, 14, 15, 16, 17, 18]}
    df = pd.DataFrame(data=d, index=dti)
    return df
30.727273
110
0.559172
209
1,352
3.411483
0.253589
0.126227
0.092567
0.067321
0.906031
0.906031
0.906031
0.906031
0.906031
0.880785
0
0.107212
0.241124
1,352
43
111
31.44186
0.587719
0.217456
0
0.588235
0
0
0.302846
0
0
0
0
0
0
1
0.117647
false
0
0.058824
0
0.294118
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
7578d78059e784c2dfa85058ce711ca16c433e89
10,845
py
Python
sedinet_models.py
ericslevenson/SediNet
666ffaa5edc9b83d860aecaab309b12fc55600e9
[ "MIT" ]
null
null
null
sedinet_models.py
ericslevenson/SediNet
666ffaa5edc9b83d860aecaab309b12fc55600e9
[ "MIT" ]
null
null
null
sedinet_models.py
ericslevenson/SediNet
666ffaa5edc9b83d860aecaab309b12fc55600e9
[ "MIT" ]
null
null
null
## Written by Daniel Buscombe, ## MARDA Science ## daniel@mardascience.com ##> Release v1.3 (July 2020) ###=================================================== # import libraries from sedinet_utils import * ###=================================================== def conv_block2(inp, filters=32, bn=True, pool=True, drop=True): """ This function generates a SediNet convolutional block """ # _ = Conv2D(filters=filters, kernel_size=3, activation='relu', # kernel_initializer='he_uniform')(inp) _ = SeparableConv2D(filters=filters, kernel_size=3, activation='relu')(inp) #kernel_initializer='he_uniform' if bn: _ = BatchNormalization()(_) if pool: _ = MaxPool2D()(_) if drop: _ = Dropout(0.2)(_) return _ ###=================================================== def make_cat_sedinet(ID_MAP, dropout, greyscale): """ This function creates an implementation of SediNet for estimating sediment category """ base = BASE_CAT ##30 if greyscale==True: input_layer = Input(shape=(IM_HEIGHT, IM_WIDTH, 1)) else: input_layer = Input(shape=(IM_HEIGHT, IM_WIDTH, 3)) _ = conv_block2(input_layer, filters=base, bn=False, pool=False, drop=False) #x # _ = conv_block2(_, filters=base*2, bn=False, pool=True,drop=False) _ = conv_block2(_, filters=base*3, bn=False, pool=True,drop=False) _ = conv_block2(_, filters=base*4, bn=False, pool=True,drop=False) if not SHALLOW: _ = conv_block2(_, filters=base*5, bn=False, pool=True,drop=False) _ = conv_block2(_, filters=base*6, bn=False, pool=True,drop=False) bottleneck = GlobalMaxPool2D()(_) bottleneck = Dropout(dropout)(bottleneck) # for class prediction _ = Dense(units=CAT_DENSE_UNITS, activation='relu')(bottleneck) ##128 output = Dense(units=len(ID_MAP), activation='softmax', name='output')(_) model = Model(inputs=input_layer, outputs=[output]) if CAT_LOSS == 'focal': model.compile(optimizer=OPT, loss={'output': tfa.losses.SigmoidFocalCrossEntropy() }, metrics={'output': 'accuracy'}) else: model.compile(optimizer=OPT, #'adam', loss={'output': CAT_LOSS}, #'categorical_crossentropy' metrics={'output': 'accuracy'}) print("==========================================") print('[INFORMATION] Model summary:') model.summary() return model ###=================================================== def make_sedinet_siso_simo(vars, greyscale, dropout): """ This function creates an implementation of SediNet for estimating sediment metric on a continuous scale """ base = BASE_CONT ##30 ## suggested range = 20 -- 40 if greyscale==True: input_layer = Input(shape=(IM_HEIGHT, IM_WIDTH, 1)) else: input_layer = Input(shape=(IM_HEIGHT, IM_WIDTH, 3)) _ = conv_block2(input_layer, filters=base, bn=False, pool=False, drop=False) #x # _ = conv_block2(_, filters=base*2, bn=False, pool=True,drop=False) _ = conv_block2(_, filters=base*3, bn=False, pool=True,drop=False) _ = conv_block2(_, filters=base*4, bn=False, pool=True,drop=False) _ = conv_block2(_, filters=base*5, bn=False, pool=True,drop=False) if not SHALLOW: _ = conv_block2(_, filters=base*6, bn=False, pool=True,drop=False) _ = conv_block2(_, filters=base*7, bn=False, pool=True,drop=False) _ = BatchNormalization(axis=-1)(_) bottleneck = GlobalMaxPool2D()(_) bottleneck = Dropout(dropout)(bottleneck) units = CONT_DENSE_UNITS ## suggested range 512 -- 1024 _ = Dense(units=units, activation='relu')(bottleneck) outputs = [] for var in vars: outputs.append(Dense(units=1, activation='linear', name=var+'_output')(_) ) if CONT_LOSS == 'pinball': loss = dict(zip([k+"_output" for k in vars], [tfa.losses.PinballLoss(tau=.5) for k in vars])) else: ## 'mse' loss = 
dict(zip([k+"_output" for k in vars], ['mse' for k in vars])) #loss = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE) # Sum of squared error metrics = dict(zip([k+"_output" for k in vars], ['mae' for k in vars])) model = Model(inputs=input_layer, outputs=outputs) model.compile(optimizer=OPT,loss=loss, metrics=metrics) #print("==========================================") #print('[INFORMATION] Model summary:') #model.summary() return model # ###=================================================== # def conv_block_mbn(x, filters=32, alpha=1): # """ # This function generates a sedinet convolutional block based on a # mobilenet base model # """ # x = DepthwiseConv2D((3, 3), strides=(1, 1), padding='same', use_bias=False)(x) # x = BatchNormalization()(x) # x = Activation('relu')(x) # x = Conv2D(int(filters * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x) # x = BatchNormalization()(x) # x = Activation('relu')(x) # return x # ###=================================================== # def make_mlp(dim): #dense_neurons # # define our MLP network # dense_neurons = 4 # mlp = Sequential() # mlp.add(Dense(8, input_dim=dim, activation="relu")) # mlp.add(Dense(dense_neurons, activation="relu")) # return mlp # ###=================================================== # def conv_block(x, filters=32): # """ # This function generates a custom sedinet convolutional block # """ # x = Conv2D(filters=filters, kernel_size=3, activation='relu', # kernel_initializer='he_uniform')(x) # #x = BatchNormalization()(x) # x = MaxPool2D()(x) # #x = Dropout(0.2)(x) # return x # # ###=================================================== # def make_sedinet_miso_mimo(greyscale, dropout): # """ # This function creates a mobilenetv1 style implementation of sedinet # for estimating metric on a continuous scale # """ # # # create the sedinet model # if greyscale==True: # input_layer = Input(shape=(IM_HEIGHT, IM_WIDTH, 1)) # else: # input_layer = Input(shape=(IM_HEIGHT, IM_WIDTH, 3)) # # img_input = BatchNormalization(axis=-1)(input_layer) #x # # alpha=1 # # x = Conv2D(int(32 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=False)(img_input) # x = BatchNormalization()(x) # x = Activation('relu')(x) # # for k in [64,128,128,256,256,512]: # x = conv_block_mbn(x, filters=k, alpha=alpha) # # if not SHALLOW: # for i in range(5): # x = conv_block_mbn(x, filters=512, alpha=alpha) # # for k in [1024,1024]: # x = conv_block_mbn(x, filters=k, alpha=alpha) # # x = MaxPool2D()(x) # # x = BatchNormalization(axis=-1)(x) # bottleneck = GlobalMaxPool2D()(x) # bottleneck = Dropout(dropout)(bottleneck) # # model = Model(input_layer, bottleneck) # # return model # ######### ####=================================================== #def make_sedinet_custom_siso_simo(vars, greyscale): # """ # This function creates a custom implementation of sedinet # for estimating metric on a continuous scale # """ # # base = 16 # if greyscale==True: # input_layer = Input(shape=(IM_HEIGHT, IM_WIDTH, 1)) # else: # input_layer = Input(shape=(IM_HEIGHT, IM_WIDTH, 3)) # input_layer = BatchNormalization(axis=-1)(input_layer) # # x = conv_block(input_layer, filters=base) # x = conv_block(x, filters=base*2) # x = conv_block(x, filters=base*3) # x = conv_block(x, filters=base*4) # # x = BatchNormalization(axis=-1)(x) # bottleneck = GlobalMaxPool2D()(x) # bottleneck = Dropout(dropout)(bottleneck) # units = 1024 # x = Dense(units=units, activation='relu')(bottleneck) # outputs = [] # for var in vars: # outputs.append(Dense(units=1, 
activation='linear', name=var+'_output')(x) ) # loss = dict(zip([k+"_output" for k in vars], ['mse' for k in vars])) # metrics = dict(zip([k+"_output" for k in vars], ['mae' for k in vars])) # model = Model(inputs=input_layer, outputs=outputs) # model.compile(optimizer=opt, loss=loss, metrics=metrics) # #print("==========================================") # #print('[INFORMATION] Model summary:') # #model.summary() # return model ####=================================================== #def make_sedinet_siso_simo(vars, greyscale, dropout): # """ # This function creates a mobilenetv1 style implementation of sedinet # for estimating metric on a continuous scale # """ # if greyscale==True: # input_layer = Input(shape=(IM_HEIGHT, IM_WIDTH, 1)) # else: # input_layer = Input(shape=(IM_HEIGHT, IM_WIDTH, 3)) # # img_input = BatchNormalization(axis=-1)(input_layer) # alpha=1 # # x = Conv2D(int(32 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=False)(img_input) # x = BatchNormalization()(x) # x = Activation('relu')(x) # # for k in [64,128,128,256,256,512]: # x = conv_block_mbn(x, filters=k, alpha=alpha) # if not shallow: # for i in range(5): # x = conv_block_mbn(x, filters=512, alpha=alpha) # for k in [1024,1024]: # x = conv_block_mbn(x, filters=k, alpha=alpha) # # x = MaxPool2D()(x) # # x = BatchNormalization(axis=-1)(x) # bottleneck = GlobalMaxPool2D()(x) # bottleneck = Dropout(dropout)(bottleneck) # units = 1024 # x = Dense(units=units, activation='relu')(bottleneck) # outputs = [] # for var in vars: # outputs.append(Dense(units=1, activation='linear', name=var+'_output')(x) ) # loss = dict(zip([k+"_output" for k in vars], ['mse' for k in vars])) # metrics = dict(zip([k+"_output" for k in vars], ['mae' for k in vars])) # model = Model(inputs=input_layer, outputs=outputs) # model.compile(optimizer=opt, loss=loss, metrics=metrics) # #print("==========================================") # #print('[INFORMATION] Model summary:') # #model.summary() # return model ####=================================================== #def make_sedinet_custom_miso_mimo(vars, greyscale): # """ # This function creates a custom implementation of sedinet for estimating metric on a continuous scale # """ # # base = 16 # if greyscale==True: # input_layer = Input(shape=(IM_HEIGHT, IM_WIDTH, 1)) # else: # input_layer = Input(shape=(IM_HEIGHT, IM_WIDTH, 3)) # # input_layer = BatchNormalization(axis=-1)(input_layer) # # x = conv_block(input_layer, filters=base) # x = conv_block(x, filters=base*2) # x = conv_block(x, filters=base*3) # x = conv_block(x, filters=base*4) # # x = BatchNormalization(axis=-1)(x) # bottleneck = GlobalMaxPool2D()(x) # bottleneck = Dropout(dropout)(bottleneck) # model = Model(input_layer, bottleneck) # return model #
33.266871
182
0.59207
1,319
10,845
4.721001
0.134951
0.044965
0.017344
0.022483
0.805043
0.788341
0.751405
0.730047
0.730047
0.72555
0
0.0237
0.194652
10,845
325
183
33.369231
0.68926
0.644444
0
0.485714
0
0
0.052676
0.011831
0
0
0
0
0
1
0.042857
false
0
0.014286
0
0.1
0.028571
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
757eef646153897c5eb1e3d36382831f9528d6fc
201
py
Python
util.py
baumartig/paperboy
01659cda235508eac66a50a9c16c4a6c531015bd
[ "Apache-2.0" ]
3
2015-02-26T06:39:40.000Z
2017-07-04T14:56:18.000Z
util.py
baumartig/paperboy
01659cda235508eac66a50a9c16c4a6c531015bd
[ "Apache-2.0" ]
null
null
null
util.py
baumartig/paperboy
01659cda235508eac66a50a9c16c4a6c531015bd
[ "Apache-2.0" ]
1
2018-02-21T00:12:06.000Z
2018-02-21T00:12:06.000Z
from datetime import datetime

TIME_FORMAT = "%H:%M"


def parseTime(time_str):
    return datetime.strptime(time_str, TIME_FORMAT)


def formatTime(time):
    return datetime.strftime(time, TIME_FORMAT)
22.333333
51
0.766169
28
201
5.321429
0.5
0.201342
0
0
0
0
0
0
0
0
0
0
0.134328
201
9
52
22.333333
0.856322
0
0
0
0
0
0.024752
0
0
0
0
0
0
1
0.333333
false
0
0.166667
0.333333
0.833333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
75a14cfe275cfd7b52e98aee717ef95c74694825
305
py
Python
configs/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_ycbvPbr_SO/FlowNet512_1.5AugCosyAAEGray_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_Pbr_14_25Mug.py
THU-DA-6D-Pose-Group/self6dpp
c267cfa55e440e212136a5e9940598720fa21d16
[ "Apache-2.0" ]
33
2021-12-15T07:11:47.000Z
2022-03-29T08:58:32.000Z
configs/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_ycbvPbr_SO/FlowNet512_1.5AugCosyAAEGray_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_Pbr_14_25Mug.py
THU-DA-6D-Pose-Group/self6dpp
c267cfa55e440e212136a5e9940598720fa21d16
[ "Apache-2.0" ]
3
2021-12-15T11:39:54.000Z
2022-03-29T07:24:23.000Z
configs/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_ycbvPbr_SO/FlowNet512_1.5AugCosyAAEGray_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_Pbr_14_25Mug.py
THU-DA-6D-Pose-Group/self6dpp
c267cfa55e440e212136a5e9940598720fa21d16
[ "Apache-2.0" ]
null
null
null
_base_ = "./FlowNet512_1.5AugCosyAAEGray_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_Pbr_01_02MasterChefCan.py" OUTPUT_DIR = "output/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_ycbvPbr_SO/14_25Mug" DATASETS = dict(TRAIN=("ycbv_025_mug_train_pbr",))
76.25
136
0.891803
42
305
5.785714
0.690476
0.090535
0.213992
0.304527
0.559671
0.559671
0.559671
0.559671
0.559671
0.559671
0
0.10473
0.029508
305
3
137
101.666667
0.716216
0
0
0
0
0
0.813115
0.813115
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
75ab975cf247b8f45dd1a2564d8b207512ff9472
96
py
Python
venv/lib/python3.8/site-packages/rope/base/utils/__init__.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
2
2022-03-13T01:58:52.000Z
2022-03-31T06:07:54.000Z
venv/lib/python3.8/site-packages/rope/base/utils/__init__.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
19
2021-11-20T04:09:18.000Z
2022-03-23T15:05:55.000Z
venv/lib/python3.8/site-packages/rope/base/utils/__init__.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
null
null
null
/home/runner/.cache/pip/pool/69/aa/eb/d77b3fbb320d936e4d512a773780cc6e095a21ea982eba91b630762c30
96
96
0.895833
9
96
9.555556
1
0
0
0
0
0
0
0
0
0
0
0.416667
0
96
1
96
96
0.479167
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
6
75d372d222449289161b9bc5a3c26c7765d62ab4
306
py
Python
bert/example/exceptions.py
jbcurtin/bert
956e1647b590ac13b679579231b085895778d807
[ "MIT" ]
2
2019-08-28T21:39:50.000Z
2019-12-17T10:53:28.000Z
bert/example/exceptions.py
jbcurtin/bert
956e1647b590ac13b679579231b085895778d807
[ "MIT" ]
19
2019-09-04T21:19:12.000Z
2021-03-28T22:10:32.000Z
bert/example/exceptions.py
jbcurtin/bert
956e1647b590ac13b679579231b085895778d807
[ "MIT" ]
1
2019-08-28T21:39:53.000Z
2019-08-28T21:39:53.000Z
from bert import exceptions as bert_exceptions


class ExampleException(bert_exceptions.BertException):
    pass


class ProjectNameRequiredException(ExampleException):
    pass


class DirectoryExistsException(ExampleException):
    pass


class ProjectRepoInvalidFormatException(ExampleException):
    pass
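The module above defines an exception hierarchy rooted in `bert_exceptions.BertException`, so callers can catch the whole family at one level. A self-contained usage sketch; the `BertException` stand-in below replaces the import and is assumed to subclass Exception:

# Stand-in for bert_exceptions.BertException, assumed to subclass Exception.
class BertException(Exception):
    pass

class ExampleException(BertException):
    pass

class DirectoryExistsException(ExampleException):
    pass

def scaffold_project(target_exists):
    if target_exists:
        raise DirectoryExistsException("target directory already exists")

try:
    scaffold_project(target_exists=True)
except ExampleException as err:          # one handler catches every subclass above
    print(type(err).__name__, "-", err)  # DirectoryExistsException - target directory already exists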
20.4
58
0.830065
25
306
10.08
0.48
0.107143
0.198413
0
0
0
0
0
0
0
0
0
0.127451
306
14
59
21.857143
0.94382
0
0
0.444444
0
0
0
0
0
0
0
0
0
1
0
true
0.444444
0.111111
0
0.555556
0
1
0
1
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
1
0
0
6
75d5c58285aa5d40ee07f4c0248e61beeeebcaf8
170
py
Python
consumer_credit_card.py
muhammeedsari/filter_project
2de182711851e44d91a91d69dd453020b4a4ca65
[ "MIT" ]
null
null
null
consumer_credit_card.py
muhammeedsari/filter_project
2de182711851e44d91a91d69dd453020b4a4ca65
[ "MIT" ]
null
null
null
consumer_credit_card.py
muhammeedsari/filter_project
2de182711851e44d91a91d69dd453020b4a4ca65
[ "MIT" ]
null
null
null
from controller.credit_card_controller import CreditCardController

credit_card_controller = CreditCardController()
credit_card_controller.create_credit_card_customer()
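The script above is a three-line entry point: import a controller, instantiate it, call one method. The controller itself is not part of this record; the sketch below is a hypothetical shape of that class, for illustration only:

# Hypothetical shape of the imported class; the real CreditCardController
# lives in controller/credit_card_controller.py and is not shown in this record.
class CreditCardController:
    def __init__(self):
        self.customers = []

    def create_credit_card_customer(self):
        # Collect (here: hard-coded) customer data and store it.
        customer = {"name": "example customer", "card_limit": 1000}
        self.customers.append(customer)
        return customer

controller = CreditCardController()
print(controller.create_credit_card_customer())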
28.333333
66
0.9
18
170
8
0.444444
0.277778
0.416667
0.555556
0
0
0
0
0
0
0
0
0.058824
170
5
67
34
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
75fc0082314197d65e6f500ebdf50f159f5ab530
165816
py
Python
QChemTool/Development/polarization.py
slamavl/QChemTool
b6b17adf6cfa8ac1db47acba93aab1ee49c1be47
[ "MIT" ]
null
null
null
QChemTool/Development/polarization.py
slamavl/QChemTool
b6b17adf6cfa8ac1db47acba93aab1ee49c1be47
[ "MIT" ]
1
2018-01-03T12:08:41.000Z
2018-01-03T12:08:41.000Z
QChemTool/Development/polarization.py
slamavl/QChemTool
b6b17adf6cfa8ac1db47acba93aab1ee49c1be47
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 31 14:33:56 2017

@author: Vladislav Sláma
"""
import numpy as np
from copy import deepcopy
from scipy.spatial.distance import pdist, squareform
import os

from ..QuantumChem.Classes.structure import Structure
from ..QuantumChem.calc import identify_molecule
from ..QuantumChem.read_mine import read_TrEsp_charges
from ..QuantumChem.interaction import charge_charge
from ..QuantumChem.positioningTools import project_on_plane, CenterMolecule, fit_plane
from ..General.units import conversion_facs_energy, conversion_facs_mass
from .electrostatic import PrepareMolecule_1Def as ElStat_PrepareMolecule_1Def
from .electrostatic import PrepareMolecule_2Def as ElStat_PrepareMolecule_2Def
from ..General.Potential import potential_charge, potential_dipole
from ..QuantumChem.Classes.general import Energy as EnergyClass
from ..General.UnitsManager import energy_units
from ..QuantumChem.calc import GuessBonds
from ..QuantumChem.output import OutputMathematica

debug = False

#==============================================================================
# Definition of class for polarizable environment
#==============================================================================

class Dielectric:
    ''' Class managing dielectric properties of the material

    Parameters
    ----------
    coor : numpy.array of real (dimension Nx3) where N is number of atoms
        origin of density grid
    polar : numpy.array or list of real (dimension N)
        Polarizabilities for every atom
    charge : numpy.array or list of real (dimension N)
        charges on individual atoms (initial charges)
    dipole : numpy.array of real (dimension Nx3)
        dipole on individual atoms (initial dipole)
    '''

    def __init__(self, coor, pol_type, charge, dipole, AlphaE, Alpha_E, Alpha_st, BetaEE, V, CoarseGrain=None):
        self.coor = np.copy(coor)
        self.polar = {}
        self.polar['AlphaE'] = AlphaE
        self.polar['Alpha_E'] = Alpha_E
        self.polar['BetaEE'] = BetaEE
        self.polar['Alpha_st'] = Alpha_st
        self.VinterFG = V
        self.charge = np.copy(charge)
        self.dipole = np.copy(dipole)
        self.at_type = pol_type
        self.coarse_grain = CoarseGrain
        self.Nat = len(coor)

    def assign_polar(self, **kwargs):
        ''' For now assignment is working only for fluorographene carbons with
        type 'CF' and defect carbons with type 'CD'

        Parameters
        ----------
        pol_type : numpy.array or list of str (dimension N)
            Polarization atomic types for assignment of polarizabilities -
            different from atomic types - for example group C-F will be
            treated as a single atom and type will be pol_type='CF'.
        **kwargs : dict
            dictionary with three matrixes for every polarizable atom type.
            For example: kwargs['PolValues']['CF'][0] is the Alpha(E)
            polarizability matrix for atom type 'CF'. [1] corresponds to the
            Alpha(-E) matrix and [2] to Beta(E,E)

        Returns
        -------
        polar : numpy.array or list of real (dimension N)
            Polarizabilities for every atom.
'CF'=1.03595 and 'CD'=1.4 ''' ZeroM=np.zeros((3,3),dtype='f8') PolValues={'CF': [ZeroM,ZeroM,ZeroM,ZeroM], 'CD': [ZeroM,ZeroM,ZeroM,ZeroM],'C': [ZeroM,ZeroM,ZeroM,ZeroM]} for key in list(kwargs.keys()): if key=='PolValues': PolValues=kwargs['PolValues'] #print(PolValues) pol_type = self.at_type if self.Nat!=len(pol_type): raise IOError('Polarization type vector must have the same length as number of atoms') polar={} polar['AlphaE']=np.zeros((self.Nat,3,3),dtype='f8') polar['Alpha_E']=np.zeros((self.Nat,3,3),dtype='f8') polar['BetaEE']=np.zeros((self.Nat,3,3),dtype='f8') polar['Alpha_st']=np.zeros((self.Nat,3,3),dtype='f8') for ii in range(len(pol_type)): polar['AlphaE'][ii,:,:]=PolValues[pol_type[ii]][0] polar['Alpha_E'][ii,:,:]=PolValues[pol_type[ii]][1] polar['BetaEE'][ii,:,:]=PolValues[pol_type[ii]][2] polar['Alpha_st'][ii,:,:]=PolValues[pol_type[ii]][3] return polar def get_distance_matrixes(self): # calculation of tensors with interatomic distances R=np.zeros((self.Nat,self.Nat,3),dtype='f8') # mutual distance vectors for ii in range(self.Nat): for jj in range(ii+1,self.Nat): R[ii,jj,:]=self.coor[ii]-self.coor[jj] R[jj,ii,:]=-R[ii,jj,:] RR=np.sqrt(np.power(R[:,:,0],2)+np.power(R[:,:,1],2)+np.power(R[:,:,2],2)) # mutual distances return R,RR def get_T_tensor(self,R=None,RR=None,RR3=None,RR5=None): if R is None: R,RR = self.get_distance_matrixes(self) RR=RR+np.identity(self.Nat) # only for avoiding ddivision by 0 for diagonal elements RR3=np.power(RR,3) RR5=np.power(RR,5) T=np.zeros((self.Nat,self.Nat,3,3),dtype='f8') # mutual distance vectors for ii in range(3): T[:,:,ii,ii]=1/RR3[:,:]-3*np.power(R[:,:,ii],2)/RR5 for jj in range(ii+1,3): T[:,:,ii,jj] = -3*R[:,:,ii]*R[:,:,jj]/RR5 T[:,:,jj,ii] = T[:,:,ii,jj] for ii in range(self.Nat): T[ii,ii,:,:]=0.0 # no self interaction of atom i with atom i return T def get_S_tensor(self,R=None,RR=None,RR5=None): if R is None: R,RR = self.get_distance_matrixes(self) RR=RR+np.identity(self.Nat) # only for avoiding ddivision by 0 for diagonal elements RR5=np.power(RR,5) RR7=np.power(RR,7) # definition of S tensor S=np.zeros((self.Nat,self.Nat,3,3,3),dtype='f8') # mutual distance vectors for ii in range(3): for jj in range(3): for kk in range(3): S[:,:,ii,jj,kk]=-5*R[:,:,ii]*R[:,:,jj]*R[:,:,kk]/RR7 for ii in range(3): for jj in range(3): S[:,:,ii,ii,jj]+=R[:,:,jj]/RR5 S[:,:,ii,jj,ii]+=R[:,:,jj]/RR5 S[:,:,jj,ii,ii]+=R[:,:,jj]/RR5 for ii in range(self.Nat): S[ii,ii,:,:,:]=0.0 # no self interaction of atom i with atom i return S def _test_2nd_order(self,typ,Estatic=np.zeros(3,dtype='f8'),eps=1): ''' Function for testing of calculation with induced dipoles. Calculate induced dipoles in second order (by induced dipoles). Combined with calc_dipoles_All(typ,NN=1) we should obtain the same dipoles as with calc_dipoles_All(typ,NN=2) Parameters ---------- typ : str ('AlphaE','Alpha_E','BetaEE') Specifies which polarizability is used for calculation of induced atomic dipoles Estatic : numpy.array of real (dimension 3) (optional - init=np.zeros(3,dtype='f8')) External homogeneous electric fiel vectord (orientation and strength) in ATOMIC UNITS. By default there is no electric field eps : real (optional - init=1.0) Relative dielectric polarizability of medium where the dipoles and molecule is present ( by default vacuum with relative permitivity 1.0) Notes ---------- **OK. 
Definition of Tensor T is right** ''' debug=False R=np.zeros((self.Nat,self.Nat,3),dtype='f8') # mutual distance vectors P=np.zeros((self.Nat,3),dtype='f8') for ii in range(self.Nat): for jj in range(ii+1,self.Nat): R[ii,jj,:]=self.coor[ii]-self.coor[jj] R[jj,ii,:]=-R[ii,jj,:] RR=np.sqrt(np.power(R[:,:,0],2)+np.power(R[:,:,1],2)+np.power(R[:,:,2],2)) # mutual distances unit=np.diag([1]*self.Nat) RR=RR+unit # only for avoiding ddivision by 0 for diagonal elements RR3=np.power(RR,3) RR5=np.power(RR,5) # definition of T tensor T=np.zeros((self.Nat,self.Nat,3,3),dtype='f8') # mutual distance vectors for ii in range(3): T[:,:,ii,ii]=1/RR3[:,:]-3*np.power(R[:,:,ii],2)/RR5 for jj in range(ii+1,3): T[:,:,ii,jj] = -3*R[:,:,ii]*R[:,:,jj]/RR5 T[:,:,jj,ii] = T[:,:,ii,jj] for ii in range(self.Nat): T[ii,ii,:,:]=0.0 # no self interaction of atom i with atom i # calculating induced dipoles in second order Q=np.meshgrid(self.charge,self.charge)[0] # in columns same charges ELF=np.zeros((self.Nat,self.Nat,3),dtype='f8') for jj in range(3): ELF[:,:,jj]=(Q/RR3)*R[:,:,jj] # ELF[i,j,:] is electric field at position i generated by atom j for ii in range(self.Nat): ELF[ii,ii,:]=np.zeros(3,dtype='f8') ELFV=np.array(np.sum(ELF,axis=1),dtype='f8') # ELFV[i,:] is electric field at position of atom i for ii in range(self.Nat): P[ii,:]=np.dot(self.polar[typ][ii],ELFV[ii,:]) if debug and typ=='AlphaE': from ..General.Potential import ElField_dipole # Test first order induced dipoles self.dipole=np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('AlphaE',NN=1) if np.allclose(P,self.dipole): print('First order dipoles are the same.') else: print('Problem with first order induced dipoles.') # test induced electric field Elfield=np.zeros(3,dtype='f8') for ii in range(3): Elfield[ii]=np.dot(-T[0,1,ii,:],P[1,:]) print('Electric field at atom 0 induced by dipole at position 1 wT:',Elfield) Elfield=np.zeros(3,dtype='f8') Elfield=ElField_dipole(P[1,:],R[0,1,:]) print('Electric field at atom 0 induced by dipole at position 1 woT:',Elfield) ELFV=np.zeros((self.Nat,3),dtype='f8') for ii in range(3): for jj in range(3): ELFV[:,ii]+=np.dot(T[:,:,ii,jj],P[:,jj]) for ii in range(self.Nat): P[ii,:]=np.dot(self.polar[typ][ii],ELFV[ii,:]) # -P should be 2nd order induced dipoles self.dipole+=(-P) if debug: print('Dipole sum:',np.sum(self.dipole,axis=0)) # TODO: Add possibility for NN = -err to calculate dipoles until convergence is reached def _calc_dipoles_All(self,typ,Estatic=np.zeros(3,dtype='f8'),NN=60,eps=1,debug=False): ''' Function for calculation induced dipoles of SCF procedure for interaction of molecule with environment. It calculates induced dipoles on individual atoms by static charge distribution and homogeneous electric field. Parameters ---------- typ : str ('AlphaE','Alpha_E','BetaEE') Specifies which polarizability is used for calculation of induced atomic dipoles Estatic : numpy.array of real (dimension 3) (optional - init=np.zeros(3,dtype='f8')) External homogeneous electric fiel vectord (orientation and strength) in ATOMIC UNITS. 
By default there is no electric field NN : integer (optional - init=60) Number of SCF steps for calculation of induced dipole eps : real (optional - init=1.0) Relative dielectric polarizability of medium where the dipoles and molecule is present ( by default vacuum with relative permitivity 1.0) ''' if debug: import timeit time0 = timeit.default_timer() #R=np.zeros((self.Nat,self.Nat,3),dtype='f8') # mutual distance vectors #P=np.zeros((self.Nat,self.Nat,3),dtype='f8') #for ii in range(self.Nat): # for jj in range(ii+1,self.Nat): # R[ii,jj,:]=self.coor[ii]-self.coor[jj] # R[jj,ii,:]=-R[ii,jj,:] #if debug: # time01 = timeit.default_timer() #RR=np.sqrt(np.power(R[:,:,0],2)+np.power(R[:,:,1],2)+np.power(R[:,:,2],2)) # mutual distances R = np.tile(self.coor,(self.Nat,1,1)) R = (np.swapaxes(R,0,1) - R) RR=squareform(pdist(self.coor)) if 0: RR=np.sqrt(np.power(R[:,:,0],2)+np.power(R[:,:,1],2)+np.power(R[:,:,2],2)) RR2=squareform(pdist(self.coor)) print((RR2==RR).all()) # False print(np.allclose(RR2,RR)) # True if not (RR2==RR).all(): print(RR[0,1]) print(pdist(self.coor)[0]) print(RR[0,2]) print(pdist(self.coor)[1]) if debug: time01 = timeit.default_timer() unit=np.diag([1]*self.Nat) RR=RR+unit # only for avoiding ddivision by 0 for diagonal elements RR3=np.power(RR,3) RR5=np.power(RR,5) #mask=[] #for ii in range(len(self.charge)): # if abs(self.charge[ii])>1e-8: # mask.append(ii) mask=(np.abs(self.charge)>1e-8) mask=np.expand_dims(mask, axis=0) MASK=np.dot(mask.T,mask) MASK=np.tile(MASK,(3,1,1)) # np.shape(mask)=(3,N,N) True all indexes where are both non-zero charges MASK=np.rollaxis(MASK,0,3) MASK2=np.diag(np.ones(self.Nat,dtype='bool')) MASK2=np.tile(MASK2,(3,1,1)) MASK2=np.rollaxis(MASK2,0,3) Q=np.meshgrid(self.charge,self.charge)[0] # in columns same charges #ELF=np.zeros((self.Nat,self.Nat,3),dtype='f8') #ELF_Q=(Q/RR3)*np.rollaxis(R,2) #ELF_Q=np.rollaxis(ELF,0,3) if debug: time1 = timeit.default_timer() print('Time spend on preparation of variables in calc_dipoles_All:',time1-time0,'s') for kk in range(NN): # point charge electric field ELF=(Q/RR3)*np.rollaxis(R,2) ELF=np.rollaxis(ELF,0,3) #for jj in range(3): # ELF[:,:,jj]=(Q/RR3)*R[:,:,jj] # ELF[i,j,:] is electric field at position i generated by atom j - on diagonal there are zeros # TODO: Change this procedure because atoms with a charges could be polarized by all atoms with charges - but imput defect charges should be fitted accordingly with polarizable atoms # polarization by static charges only in area without charges: #for ii in mask: # ELF[ii,mask,:]=0.0 ELF[MASK]=0.0 # dipole electric field #for ii in range(self.Nat): # P[ii,:,:]=self.dipole[:,:] P=np.tile(self.dipole[:,:],(self.Nat,1,1)) # P[ii,:,:]=self.dipole[:,:] for ii going through all atoms PR=np.sum(np.multiply(P,R),axis=2) # TODO: This takes One second - make it faster for jj in range(3): ELF[:,:,jj]+=(3*PR/RR5)*R[:,:,jj] ELF[:,:,jj]-=P[:,:,jj]/RR3 #for ii in range(self.Nat): # ELF[ii,ii,:]=np.zeros(3,dtype='f8') ELF[MASK2]=0.0 elf=np.sum(ELF,axis=1)/eps # TODO: Think if this could be done in some efficient way for ii in range(self.Nat): self.dipole[ii,:]=np.dot(self.polar[typ][ii],elf[ii]+Estatic) if debug: print('Dipole sum:',np.sum(self.dipole,axis=0)) if debug: time2 = timeit.default_timer() print('Time spend on calculation in calc_dipoles_All:',time2-time1,'s') print('Calculation vs preparation ratio:',(time2-time1)/(time1-time0)) print('Time for filling coordinate matrix vs all the rest:',(time01-time0)/(time1-time01)) def 
_get_interaction_energy(self,index,charge=None,debug=False): ''' Function calculates interaction energy between atoms defined in index and the rest of the atoms Parameters ---------- index : list of int (dimension N) List of atoms where we would like to calculate potential and for which we would like to calculate interaction energy with the rest of the system charge : numpy.array of real (dimension Natoms_of_defect) Atomic trasition charges (TrEsp charges) for every atom of one defect defined by `index` Returns ------- InterE : real Interaction energies in atomic units (Hartree) ''' if isinstance(charge,np.ndarray) or isinstance(charge,list): use_orig_charges=False else: if charge==None: use_orig_charges=True else: raise IOError('Unable to determine charges') if use_orig_charges: charge=np.zeros(len(index),dtype='f8') # coppy charges and assign zero charges to those in index AllCharge=np.copy(self.charge) AllDipole=np.copy(self.dipole) for ii in range(self.Nat): if ii in index: if use_orig_charges: charge[np.where(index==ii)[0][0]]=AllCharge[ii] AllCharge[ii]=0.0 AllDipole[ii,:]=np.zeros(3,dtype='f8') InterE=0.0 # TODO: This distance matrix R is calculated many times - it would be faster to have it as global variable # TODO: Check if this filling of whole matrix and then taking only small slice is not slower than two for cycles only through relevant pairs # Fill matrix of interatomic vectors: R = np.tile(self.coor,(self.Nat,1,1)) R = (R - np.swapaxes(R,0,1)) # R[ii,jj,:]=self.coor[jj]-self.coor[ii] # Correct regions with zero distance if (AllCharge[index]==0.0).all(): R[index,index,0]=1.0 # it is small distance but it will be always multiplied by zero and therefore it wont influent total potential else: R[index,index,0]=1e20 # large distance to have a small norm in order not ti influent the total potential (these atoms should be excluded) # Take only slice of the matrix R[:,jj,:] where jj corespond to indexes R=R[:,index,:] pot_charge=potential_charge(AllCharge,R) pot_dipole=potential_dipole(AllDipole,R) # TODO: Move to test part if debug: print('Length of index list:',len(index)) print('Shape of coor matrix:',R.shape) #print('Coor 0,0:',R[0,0]) #print('Coor 0,1:',R[0,1]) #print('Coor 0,2:',R[0,2]) #print('Coor 2,3:',R[2,3]) potential_charge_test=np.zeros(len(index),dtype='f8') potential_dipole_test=np.zeros(len(index),dtype='f8') #print(pot_charge) for jj in range(len(index)): for ii in range(self.Nat): if ii!=index[jj]: R=self.coor[index[jj]]-self.coor[ii] #if jj==0 and ii==0: # print('Coor 0,0:',R) #if jj==1 and ii==0: # print('Coor 0,1:',R) #if jj==2 and ii==0: # print('Coor 0,2:',R) #if jj==3 and ii==2: # print('Coor 2,3:',R) potential_charge_test[jj]+=potential_charge(AllCharge[ii],R) potential_dipole_test[jj]+=potential_dipole(AllDipole[ii],R) #print(potential_test) print(pot_dipole) print(potential_dipole_test) if np.allclose(potential_charge_test,pot_charge): print('Potential generated by charges is the same for old and new calculation') else: raise Warning('Potentials generated by charges are different for both methods') if np.allclose(potential_dipole_test,pot_dipole): print('Potential generated by dipoles is the same for old and new calculation') else: raise Warning('Potentials generated by dipoles are different for both methods') for jj in range(len(index)): potential=0.0 for ii in range(self.Nat): if ii!=index[jj]: R=self.coor[index[jj]]-self.coor[ii] potential+=potential_charge(AllCharge[ii],R) potential+=potential_dipole(AllDipole[ii],R) InterE+=potential*charge[jj] if 
np.allclose(InterE,np.dot(charge,pot_charge+pot_dipole)): print('Interaction energy is calculated correctly') else: raise Warning('Interaction energy for both methods is different') InterE = np.dot(charge, pot_charge+pot_dipole) return InterE def _fill_Polar_matrix(self,index1,index2,typ='AlphaE',order=80,debug=False): """ Calculate polarization matrix representation for interaction energy calculation. Parameters --------- index1 : list of integer (dimension Natoms_defect1) Indexes of all atoms from the first defect (starting from 0) index2 : list of integer (dimension Natoms_defect2) Indexes of all atoms from the second defect (starting from 0) typ : string (optional init = 'AlphaE') Which polarizability should be used for calculation of induced dipoles. Supported types are: ``'AlphaE'``, ``'Alpha_E'`` and ``'BetaEE'`` order : integer (optional - init=80) Specify how many SCF steps shoudl be used in calculation of induced dipoles - according to the used model it should be 2 Returns ------- PolMAT : numpy array of float (dimension 2x2) Polarizability matrix representation. For ``typ='AlphaE'`` or ``typ='BetaEE': PolMAT[0,0] = -E(1)*induced_dipole(1), PolMAT[0,1] = PolMAT[1,0] = -E(1)*induced_dipole(2) and PolMAT[1,1] = -E(2)*induced_dipole(2). For ``typ='Alpha_E'`` diagonal elements are swapped: PolMAT[0,0] = -E(2)*induced_dipole(2), PolMAT[0,1] = PolMAT[1,0] = -E(1)*induced_dipole(2) and PolMAT[1,1] = -E(1)*induced_dipole(1) dipolesA : numpy array of float (dimension 3) Total induced dipole moment in the environment by the first defect. dipolesB : numpy array of float (dimension 3) Total induced dipole moment in the environment by the second defect. dipoles_polA : numpy array of float (dimension Natoms x 3) Induced atomic dipole moments for all atoms in the environment by the first defect """ if typ=='BetaEE' and order>1: raise IOError('For calculation with beta polarization maximal order is 1') elif typ=='BetaEE' and order<1: return np.zeros((2,2),dtype='f8') defA_charge=self.charge[index1] defB_charge=self.charge[index2] defA_indx=deepcopy(index1) defB_indx=deepcopy(index2) PolMAT=np.zeros((2,2),dtype='f8') E_TrEsp=self.get_TrEsp_Eng(index1, index2) if debug: print(typ,order) # Polarization by molecule B self.charge[defA_indx]=0.0 self._calc_dipoles_All(typ,NN=order,eps=1,debug=False) dipolesB=np.sum(self.dipole,axis=0) # induced dipoles by second defect (defect B) self.charge[defA_indx]=defA_charge PolMAT[1,1] = self._get_interaction_energy(defB_indx,charge=defB_charge,debug=False) - E_TrEsp PolMAT[0,1] = self._get_interaction_energy(defA_indx,charge=defA_charge,debug=False) - E_TrEsp PolMAT[1,0] = PolMAT[0,1] dipoles_polB = self.dipole.copy() self.dipole=np.zeros((self.Nat,3),dtype='f8') # Polarization by molecule A self.charge[defB_indx]=0.0 self._calc_dipoles_All(typ,NN=order,eps=1,debug=False) dipolesA=np.sum(self.dipole,axis=0) self.charge[defB_indx]=defB_charge PolMAT[0,0] = self._get_interaction_energy(defA_indx,charge=defA_charge,debug=False) - E_TrEsp if debug: print(PolMAT*conversion_facs_energy["1/cm"]) if np.isclose(self._get_interaction_energy(defB_indx,charge=defB_charge,debug=False)-E_TrEsp,PolMAT[1,0]): print('ApB = BpA') else: raise Warning('ApB != BpA') dipoles_polA = self.dipole.copy() self.dipole=np.zeros((self.Nat,3),dtype='f8') if typ=='AlphaE' or typ=='BetaEE' or typ=='Alpha_st': return PolMAT,dipolesA,dipolesB,dipoles_polA,dipoles_polB elif typ=='Alpha_E': PolMAT[[0,1],[0,1]] = PolMAT[[1,0],[1,0]] # Swap AlphaMAT[0,0] with AlphaMAT[1,1] return 
PolMAT,dipolesA,dipolesB,dipoles_polA,dipoles_polB def _TEST_fill_Polar_matrix(self,index1,index2,typ='AlphaE',order=80,debug=False, out_pot=False): """ Calculate polarization matrix representation for interaction energy calculation. Parameters --------- index1 : list of integer (dimension Natoms_defect1) Indexes of all atoms from the first defect (starting from 0) index2 : list of integer (dimension Natoms_defect2) Indexes of all atoms from the second defect (starting from 0) typ : string (optional init = 'AlphaE') Which polarizability should be used for calculation of induced dipoles. Supported types are: ``'AlphaE'``, ``'Alpha_E'`` and ``'BetaEE'`` order : integer (optional - init=80) Specify how many SCF steps shoudl be used in calculation of induced dipoles - according to the used model it should be 2 Returns ------- PolMAT : numpy array of float (dimension 2x2) Polarizability matrix representation. For ``typ='AlphaE'`` or ``typ='BetaEE': PolMAT[0,0] = -E(1)*induced_dipole(1), PolMAT[0,1] = PolMAT[1,0] = -E(1)*induced_dipole(2) and PolMAT[1,1] = -E(2)*induced_dipole(2). For ``typ='Alpha_E'`` diagonal elements are swapped: PolMAT[0,0] = -E(2)*induced_dipole(2), PolMAT[0,1] = PolMAT[1,0] = -E(1)*induced_dipole(2) and PolMAT[1,1] = -E(1)*induced_dipole(1) dipolesA : numpy array of float (dimension 3) Total induced dipole moment in the environment by the first defect. dipolesB : numpy array of float (dimension 3) Total induced dipole moment in the environment by the second defect. dipoles_polA : numpy array of float (dimension Natoms x 3) Induced atomic dipole moments for all atoms in the environment by the first defect """ if typ=='BetaEE' and order>1: raise IOError('For calculation with beta polarization maximal order is 1') elif typ=='BetaEE' and order<1: return np.zeros((2,2),dtype='f8') defA_charge=self.charge[index1] defB_charge=self.charge[index2] defA_indx=deepcopy(index1) defB_indx=deepcopy(index2) PolMAT=np.zeros((2,2),dtype='f8') E_TrEsp=self.get_TrEsp_Eng(index1, index2) if debug: print(typ,order) # Polarization by molecule B self.charge[defA_indx]=0.0 self._calc_dipoles_All(typ,NN=order,eps=1,debug=False) dipolesB=np.sum(self.dipole,axis=0) # induced dipoles by second defect (defect B) self.charge[defA_indx]=defA_charge PolMAT[1,1] = self._get_interaction_energy(defB_indx,charge=defB_charge,debug=False) - E_TrEsp PolMAT[0,1] = self._get_interaction_energy(defA_indx,charge=defA_charge,debug=False) - E_TrEsp PolMAT[1,0] = PolMAT[0,1] self.dipole=np.zeros((self.Nat,3),dtype='f8') # Polarization by molecule A self.charge[defB_indx]=0.0 self._calc_dipoles_All(typ,NN=order,eps=1,debug=False) dipolesA=np.sum(self.dipole,axis=0) self.charge[defB_indx]=defB_charge PolMAT[0,0] = self._get_interaction_energy(defA_indx,charge=defA_charge,debug=False) - E_TrEsp if debug: print(PolMAT*conversion_facs_energy["1/cm"]) if np.isclose(self._get_interaction_energy(defB_indx,charge=defB_charge,debug=False)-E_TrEsp,PolMAT[1,0]): print('ApB = BpA') else: raise Warning('ApB != BpA') dipoles_polA = self.dipole.copy() self.dipole=np.zeros((self.Nat,3),dtype='f8') if typ=='AlphaE' or typ=='BetaEE' or typ=='Alpha_st': return PolMAT,dipolesA,dipolesB,dipoles_polA elif typ=='Alpha_E': PolMAT[[0,1],[0,1]] = PolMAT[[1,0],[1,0]] # Swap AlphaMAT[0,0] with AlphaMAT[1,1] return PolMAT,dipolesA,dipolesB,dipoles_polA def get_TrEsp_Eng(self, index1, index2): """ Calculate TrEsp interaction energy for defects (defect-like molecules) in vacuum. 
Parameters -------- index1 : list of integer (dimension Natoms_defect1) Indexes of all atoms from the first defect (starting from 0) index2 : list of integer (dimension Natoms_defect2) Indexes of all atoms from the second defect (starting from 0) Returns -------- E_TrEsp : float TrEsp interaction energy in ATOMIC UNITS (Hartree) between defect in vacuum. """ defA_coor = self.coor[index1] defB_coor = self.coor[index2] defA_charge = self.charge[index1] defB_charge = self.charge[index2] E_TrEsp = charge_charge(defA_coor,defA_charge,defB_coor,defB_charge)[0] return E_TrEsp # in hartree def get_TrEsp_Dipole(self, index): """ Calculate vacuum transition dipole moment for single defect (from TrEsp charges). Parameters ---------- index : list of integer (dimension Natoms_defect) Indexes of all atoms from the defect (starting from 0) of which transition dipole is calculated Returns -------- Dip_TrEsp : numpy array of float (dimension 3) Transition dipole in ATOMIC UNITS for specified defect (by index) calculated from TrEsp charges """ def_coor = self.coor[index] def_charge = self.charge[index] Dip_TrEsp = np.dot(def_charge,def_coor) return Dip_TrEsp # in AU def _TEST_Compare_SingleDefectProperties(self, tr_charge, gr_charge, ex_charge, struc, index, dAVA=0.0, order=80, approx=1.1): ''' Calculate effects of environment such as transition energy shift and transition dipole change for single defect. Parameters ---------- index : list of integer (dimension Natoms_defect) Indexes of all atoms from the defect (starting from 0) for which transition energy and transition dipole is calculated dAVA : float **dAVA = <A|V|A> - <G|V|G>** Difference in electrostatic interaction energy between defect and environment for defect in excited state <A|V|A> and in ground state <G|V|G>. order : integer (optional - init = 80) Specify how many SCF steps shoudl be used in calculation of induced dipoles - according to the used model it should be 2 approx : real (optional - init=1.1) Specifies which approximation should be used. * **Approximation 1.1**: Neglect of `Beta(-E,-E)` and `Beta(-E,E)` and `Alpha(-E)`. * **Approximation 1.2**: Neglect of `Beta(-E,-E)` and `tilde{Beta(E)}`. * **Approximation 1.3**: `Beta(E,E)=Beta(-E,E)=Beta(-E,-E)` and also `Alpha(E)=Alpha(-E)`, however the second one is not condition Returns ------- Eshift : Energy class Transition energy shift for the defect due to the fluorographene environment calculated from structure with single defect. Units are energy managed TrDip : numpy array of real (dimension 3) Total transition dipole for the defect with environment effects included calculated from structure with single defect (in ATOMIC UNITS) **Neglecting `tilde{Beta(E)}` is not valid approximation. 
It shoudl be better to neglect Beta(E,-E) to be consistent with approximation for interaction energy** Notes ---------- dip = Alpha(E)*El_field_TrCharge + Alpha(-E)*El_field_TrCharge Then final transition dipole of molecule with environment is calculated according to the approximation: **Approximation 1.1:** dip_fin = dip - (Vinter-DE)*Beta(E,E)*El_field_TrCharge + dip_init(1-1/4*Ind_dip_Beta(E,E)*El_field_TrCharge) **Approximation 1.2:** dip_fin = dip - (Vinter-DE)*Beta(E,E)*El_field_TrCharge + dip_init **Approximation 1.3:** dip_fin = dip - 2*Vinter*Beta(E,E)*El_field_TrCharge + dip_init ''' # Get TrEsp Transition dipole TrDip_TrEsp = np.dot(self.charge[index],self.coor[index,:]) # vacuum transition dipole for single defect # Get energy contribution from polarization by transition density self.charge[index] = tr_charge charge = self.charge[index] # Set distance matrix R_elst = np.tile(struc.coor._value,(self.Nat,1,1)) R_pol = np.tile(self.coor,(struc.nat,1,1)) R = (R_elst - np.swapaxes(R_pol,0,1)) # R[ii,jj,:]=self.coor[jj]-self.coor[ii] # Calculate polarization matrixes # TODO: Shift this block to separate function self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('AlphaE',NN=1,eps=1,debug=False) Polar1_AlphaE = self._get_interaction_energy(index,charge=charge,debug=False) pot1_dipole_AlphaE_tr = potential_dipole(self.dipole,R) self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('AlphaE',NN=2,eps=1,debug=False) Polar2_AlphaE = self._get_interaction_energy(index,charge=charge,debug=False) Polar2_AlphaE = Polar2_AlphaE - Polar1_AlphaE dip_AlphaE = np.sum(self.dipole,axis=0) self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('Alpha_E',NN=1,eps=1,debug=False) Polar1_Alpha_E = self._get_interaction_energy(index,charge=charge,debug=False) pot1_dipole_Alpha_E_tr = potential_dipole(self.dipole,R) self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('Alpha_E',NN=2,eps=1,debug=False) dip_Alpha_E = np.sum(self.dipole,axis=0) dip_Alpha_E = np.sum(self.dipole,axis=0) Polar2_Alpha_E = self._get_interaction_energy(index,charge=charge,debug=False) Polar2_Alpha_E = Polar2_Alpha_E - Polar1_Alpha_E self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('BetaEE',NN=1,eps=1,debug=False) dip_Beta = np.sum(self.dipole,axis=0) Polar1_Beta_EE = self._get_interaction_energy(index,charge=charge,debug=False) pot1_dipole_betaEE_tr = potential_dipole(self.dipole,R) self.charge[index] = ex_charge charge = self.charge[index] Polar1_Beta_EE_tr_ex = self._get_interaction_energy(index,charge=charge,debug=False) self.charge[index] = gr_charge charge = self.charge[index] Polar1_Beta_EE_tr_gr = self._get_interaction_energy(index,charge=charge,debug=False) self.dipole = np.zeros((self.Nat,3),dtype='f8') # Calculate polarization by ground state charge distribution self.charge[index] = gr_charge charge = self.charge[index] self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('Alpha_st',NN=1,eps=1,debug=False) Polar1_static_gr = self._get_interaction_energy(index,charge=charge,debug=False) self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('Alpha_st',NN=2,eps=1,debug=False) Polar2_static_gr = self._get_interaction_energy(index,charge=charge,debug=False) Polar2_static_gr = Polar2_static_gr - Polar1_static_gr self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('BetaEE',NN=1,eps=1,debug=False) Polar1_Beta_EE_gr = self._get_interaction_energy(index,charge=charge,debug=False) 
self.dipole = np.zeros((self.Nat,3),dtype='f8') # Calculate polarization by excited state charge distribution self.charge[index] = ex_charge charge = self.charge[index] self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('Alpha_st',NN=1,eps=1,debug=False) Polar1_static_ex = self._get_interaction_energy(index,charge=charge,debug=False) self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('Alpha_st',NN=2,eps=1,debug=False) Polar2_static_ex = self._get_interaction_energy(index,charge=charge,debug=False) Polar2_static_ex = Polar2_static_ex - Polar1_static_ex self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('BetaEE',NN=1,eps=1,debug=False) Polar1_Beta_EE_ex = self._get_interaction_energy(index,charge=charge,debug=False) self.dipole = np.zeros((self.Nat,3),dtype='f8') # Calculate indiced dipole by charge difference between ground and excited state self.charge[index] = ex_charge - gr_charge charge = self.charge[index] self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('Alpha_st',NN=1,eps=1,debug=False) pot1_dipole_ex_gr = potential_dipole(self.dipole,R) self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('Alpha_st',NN=2,eps=1,debug=False) pot2_dipole_ex_gr = potential_dipole(self.dipole,R) pot2_dipole_ex_gr = pot2_dipole_ex_gr - pot1_dipole_ex_gr self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('BetaEE',NN=1,eps=1,debug=False) pot1_dipole_betaEE_ex_gr = potential_dipole(self.dipole,R) self.dipole = np.zeros((self.Nat,3),dtype='f8') # calculate interaction between induced dipoles by transition density with ground and excited charges of the chromophore self.charge[index] = tr_charge self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('Alpha_st',NN=1,eps=1,debug=False) pot1_dipole_static_tr = potential_dipole(self.dipole,R) self.charge[index] = ex_charge charge = self.charge[index] Polar1_static_tr_ex = self._get_interaction_energy(index,charge=charge,debug=False) self.charge[index] = gr_charge charge = self.charge[index] Polar1_static_tr_gr = self._get_interaction_energy(index,charge=charge,debug=False) self.charge[index] = tr_charge self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('AlphaE',NN=1,eps=1,debug=False) self.charge[index] = gr_charge charge = self.charge[index] Polar1_AlphaE_tr_gr = self._get_interaction_energy(index,charge=charge,debug=False) self.dipole = np.zeros((self.Nat,3),dtype='f8') self.charge[index] = tr_charge self._calc_dipoles_All('Alpha_E',NN=1,eps=1,debug=False) self.charge[index] = ex_charge charge = self.charge[index] Polar1_Alpha_E_tr_ex = self._get_interaction_energy(index,charge=charge,debug=False) # Set the variables to initial state self.dipole = np.zeros((self.Nat,3),dtype='f8') self.charge[index] = tr_charge if approx==1.1: # Calculate transition energy shift Eshift = dAVA + Polar1_AlphaE + Polar2_AlphaE - Polar1_Alpha_E - Polar2_Alpha_E Eshift -= (self.VinterFG - dAVA)*Polar1_Beta_EE # Calculate transition dipoles for every defect TrDip = TrDip_TrEsp*(1 + Polar1_Beta_EE/4) + dip_AlphaE + dip_Alpha_E TrDip -= (self.VinterFG - dAVA)*dip_Beta # Change to energy class with energy_units('AU'): Eshift = EnergyClass(Eshift) dAVA = EnergyClass(dAVA) Polar1_AlphaE = EnergyClass(Polar1_AlphaE) Polar2_AlphaE = EnergyClass(Polar2_AlphaE) Polar1_Alpha_E = EnergyClass(Polar1_Alpha_E) Polar2_Alpha_E = EnergyClass(Polar2_Alpha_E) Polar1_Beta_EE = EnergyClass(Polar1_Beta_EE) Polar1_static_ex_gr = 
EnergyClass(Polar1_static_ex - Polar1_static_gr) Polar2_static_ex_gr = EnergyClass(Polar2_static_ex - Polar2_static_gr) Polar1_Beta_EE_ex_gr = EnergyClass(Polar1_Beta_EE_ex - Polar1_Beta_EE_gr) Polar1_static_tr_ex = EnergyClass(Polar1_static_tr_ex) Polar1_static_tr_gr = EnergyClass(Polar1_static_tr_gr) Polar1_AlphaE_tr_gr = EnergyClass(Polar1_AlphaE_tr_gr) Polar1_Alpha_E_tr_ex = EnergyClass(Polar1_Alpha_E_tr_ex) Polar1_Beta_EE_tr_ex = EnergyClass(Polar1_Beta_EE_tr_ex) Polar1_Beta_EE_tr_gr = EnergyClass(Polar1_Beta_EE_tr_gr) res_Energy = {'dE_0-1': Eshift, 'dE_elstat(exct-grnd)': dAVA} res_Energy['E_pol1_Alpha(E)'] = Polar1_AlphaE res_Energy['E_pol2_Alpha(E)'] = Polar2_AlphaE res_Energy['E_pol1_Alpha(-E)'] = Polar1_Alpha_E res_Energy['E_pol2_Alpha(-E)'] = Polar2_Alpha_E res_Energy['E_pol1_Beta(E,E)'] = Polar1_Beta_EE res_Energy['E_pol1_static_(exct-grnd)'] = Polar1_static_ex_gr res_Energy['E_pol2_static_(exct-grnd)'] = Polar2_static_ex_gr res_Energy['E_pol1_Beta(E,E)_(exct-grnd)'] = Polar1_Beta_EE_ex_gr res_Energy['E_pol1_static_(trans)_(exct)'] = Polar1_static_tr_ex res_Energy['E_pol1_static_(trans)_(grnd)'] = Polar1_static_tr_gr res_Energy['E_pol1_Alpha(E)_(trans)_(grnd)'] = Polar1_AlphaE_tr_gr res_Energy['E_pol1_Alpha(-E)_(trans)_(exct)'] = Polar1_Alpha_E_tr_ex res_Energy['E_pol1_Beta(E,E)_(trans)_(exct)'] = Polar1_Beta_EE_tr_ex res_Energy['E_pol1_Beta(E,E)_(trans)_(grnd)'] = Polar1_Beta_EE_tr_gr res_Pot = {'Pol2-env_static_(exct-grnd)': pot2_dipole_ex_gr} res_Pot['Pol1-env_static_(exct-grnd)'] = pot1_dipole_ex_gr res_Pot['Pol1-env_Beta(E,E)_(exct-grnd)'] = pot1_dipole_betaEE_ex_gr res_Pot['Pol1-env_Beta(E,E)_(trans)'] = pot1_dipole_betaEE_tr res_Pot['Pol1-env_Alpha(E)_(trans)'] = pot1_dipole_AlphaE_tr res_Pot['Pol1-env_Alpha(-E)_(trans)'] = pot1_dipole_Alpha_E_tr res_Pot['Pol1-env_static_(trans)'] = pot1_dipole_static_tr # with energy_units('1/cm'): # print(Eshift.value,dAVA.value,Polar1_AlphaE.value,Polar2_AlphaE.value,Polar1_AlphaE.value+Polar2_AlphaE.value,Polar1_Alpha_E.value,Polar2_Alpha_E.value,Polar1_Alpha_E.value+Polar2_Alpha_E.value) # return res_Energy, res_Pot, TrDip else: raise IOError('Unsupported approximation') def _TEST_HeterodimerProperties(self, gr_charge1, ex_charge1, gr_charge2, ex_charge2, FG_charge, struc, index1, index2, Eng1, Eng2, dAVA=0.0, dBVB=0.0, order=80, approx=1.1): ''' Calculate effects of the environment for structure with two different defects such as interaction energy, site transition energy shifts and changes in transition dipoles Parameters ---------- index1 : list of integer (dimension Natoms_defect1) Indexes of all atoms from the first defect (starting from 0) index2 : list of integer (dimension Natoms_defect2) Indexes of all atoms from the second defect (starting from 0) Eng1 : float Vacuum transition energy of the first defect in ATOMIC UNITS (Hartree) Eng2 : float Vacuum transition energy of the second defect in ATOMIC UNITS (Hartree) dAVA : float **dAVA = <A|V|A> - <G|V|G>** Difference in electrostatic interaction energy between first defect the and environment for the defect in excited state <A|V|A> and in ground state <G|V|G>. dBVB : float **dBVB = <B|V|B> - <G|V|G>** Difference in electrostatic interaction energy between second defect and the environment for the defect in excited state <B|V|B> and in ground state <G|V|G>. 
order : integer (optional - init = 80) Specify how many SCF steps shoudl be used in calculation of induced dipoles - according to the used model it should be 2 approx : real (optional - init=1.1) Specifies which approximation should be used. * **Approximation 1.1**: Neglect of `Beta(-E,-E)` and `Beta(-E,E)` and `Alpha(-E)`. * **Approximation 1.2**: Neglect of `Beta(-E,-E)` and `tilde{Beta(E)}`. * **Approximation 1.3**: `Beta(E,E)=Beta(-E,E)=Beta(-E,-E)` and also `Alpha(E)=Alpha(-E)`, however the second one is not condition Returns ------- J_inter : Energy class Interaction energy with effects of environment included. Units are energy managed Eshift1 : Energy class Transition energy shift for the first defect due to fluorographene environment calculated from heterodymer structure. Units are energy managed Eshift2 : Energy class Transition energy shift for the second defect due to fluorographene environment calculated from heterodymer structure. Units are energy managed TrDip1 : numpy array of real (dimension 3) Total transition dipole for the first defect with environment effects included calculated from heterodimer structure (in ATOMIC UNITS) TrDip2 : numpy array of real (dimension 3) Total transition dipole for the first defect with environment effects included calculated from heterodimer structure (in ATOMIC UNITS) AllDipAE : numpy array of float (dimension Natoms x 3) Induced atomic dipole moments for all atoms in the environment by the first defect with Alpha(E) atomic polarizability AllDipA_E : numpy array of float (dimension Natoms x 3) Induced atomic dipole moments for all atoms in the environment by the first defect with Alpha(-E) atomic polarizability AllDipBE : numpy array of float (dimension Natoms x 3) Induced atomic dipole moments for all atoms in the environment by the first defect with Beta(E,E) atomic polarizability ''' res = {} # Get TrEsp interaction energy E_TrEsp = self.get_TrEsp_Eng(index1, index2) # Calculate polarization matrixes (1-2) PolarMat1_AlphaE, dip_AlphaE1, dip_AlphaE2, AllDipAE1, AllDipAE2 = self._fill_Polar_matrix(index1,index2,typ='AlphaE',order=1) PolarMat1_Alpha_E, dip_Alpha_E1, dip_Alpha_E2, AllDipA_E1, AllDipA_E2 = self._fill_Polar_matrix(index1,index2,typ='Alpha_E',order=1) PolarMat_AlphaE, dip_AlphaE1, dip_AlphaE2, AllDipAE1, AllDipAE2 = self._fill_Polar_matrix(index1,index2,typ='AlphaE',order=2) PolarMat_Alpha_E, dip_Alpha_E1, dip_Alpha_E2, AllDipA_E1, AllDipA_E2 = self._fill_Polar_matrix(index1,index2,typ='Alpha_E',order=2) PolarMat_Beta, dip_Beta1, dip_Beta2, AllDipBE1, AllDipBE2 = self._fill_Polar_matrix(index1,index2,typ='BetaEE',order=order//2) res["E_pol2_A(E)"] = (PolarMat_AlphaE - PolarMat1_AlphaE) * conversion_facs_energy["1/cm"] res["E_pol2_A(-E)"] = (PolarMat_Alpha_E - PolarMat1_Alpha_E) * conversion_facs_energy["1/cm"] res["E_pol2_B(E,E)"] = PolarMat_Beta """ Aditional first order contribution """ # gr_charge1, ex_charge1, gr_charge2, ex_charge2 tr_charge1 = self.charge[index1] tr_charge2 = self.charge[index2] self.charge[index1] = gr_charge1 self.charge[index2] = ex_charge2 PolarMat_Alpha_st_gr_ex, dip_Alpha_st1_gr, dip_Alpha_st2_ex, AllDipA_st1_gr, AllDipA_st2_ex = self._fill_Polar_matrix(index1,index2,typ='Alpha_st',order=1) self.charge[index1] = ex_charge1 self.charge[index2] = gr_charge2 PolarMat_Alpha_st_ex_gr, dip_Alpha_st1_ex, dip_Alpha_st2_gr, AllDipA_st1_ex, AllDipA_st2_gr = self._fill_Polar_matrix(index1,index2,typ='Alpha_st',order=1) # charges for the ground state and excited state are the same => correct # difference 
between first and second defect is in non symetrical charges - repeat the fit with symmetry constrains PolarMat_Alpha_st = np.zeros((2,2),dtype='f8') PolarMat_Alpha_st[0,0] = np.sum(PolarMat_Alpha_st_ex_gr) # PolarMat_Alpha_st_ex_gr[0,0] + PolarMat_Alpha_st_ex_gr[1,1] + 2*PolarMat_Alpha_st_ex_gr[0,1] PolarMat_Alpha_st[1,1] = np.sum(PolarMat_Alpha_st_gr_ex) # PolarMat_Alpha_st_gr_ex[0,0] + PolarMat_Alpha_st_gr_ex[1,1] + 2*PolarMat_Alpha_st_gr_ex[0,1] # pol1-env #----------------------------------- # Set distance matrix R_elst = np.tile(struc.coor._value,(self.Nat,1,1)) R_pol = np.tile(self.coor,(struc.nat,1,1)) R = (R_elst - np.swapaxes(R_pol,0,1)) # R[ii,jj,:]=self.coor[jj]-self.coor[ii] # if normaly ordered first are carbon atoms and then are fluorine atoms - for carbon atoms same indexes in pol_mol as in struc for ii in range(self.Nat): R[ii,ii,:] = 0.0 # self interaction is not permited in potential calculation # TODO: Maybe also exclude connected fluorinesto atoms ii # Calculate potential of induced dipoles pot1_dipole_Alpha_st1_gr = potential_dipole(AllDipA_st1_gr,R) pot1_dipole_Alpha_st1_ex = potential_dipole(AllDipA_st1_ex,R) pot1_dipole_Alpha_st2_gr = potential_dipole(AllDipA_st2_gr,R) pot1_dipole_Alpha_st2_ex = potential_dipole(AllDipA_st2_ex,R) # calculate interaction energies with environment FG_charge_tmp = FG_charge.charge.copy() FG_charge_tmp[index1] = 0.0 FG_charge_tmp[index2] = 0.0 E_Pol1_env_static_gr1_FG = np.dot(FG_charge_tmp,pot1_dipole_Alpha_st1_gr) E_Pol1_env_static_ex1_FG = np.dot(FG_charge_tmp,pot1_dipole_Alpha_st1_ex) E_Pol1_env_static_gr2_FG = np.dot(FG_charge_tmp,pot1_dipole_Alpha_st2_gr) E_Pol1_env_static_ex2_FG = np.dot(FG_charge_tmp,pot1_dipole_Alpha_st2_ex) PolarMat_Alpha_st[0,0] = 2*( E_Pol1_env_static_ex1_FG + E_Pol1_env_static_gr2_FG ) PolarMat_Alpha_st[1,1] = 2*( E_Pol1_env_static_gr1_FG + E_Pol1_env_static_ex2_FG ) # return transition charges back self.charge[index1] = tr_charge1 self.charge[index2] = tr_charge2 """ Aditional second order contribution - Comparison of magnitudes """ # Calculate polarization matrix A_grnd B_exct self.charge[index1] = gr_charge1 self.charge[index2] = ex_charge2 PolarMat_Beta_gr_ex, dip_Beta1_gr, dip_Beta2_ex, AllDipBE1_gr, AllDipBE2_ex = self._fill_Polar_matrix(index1,index2,typ='BetaEE',order=1) # Calculate polarization matrix A_exct B_grnd self.charge[index1] = ex_charge1 self.charge[index2] = gr_charge2 PolarMat_Beta_ex_gr, dip_Beta1_ex, dip_Beta2_gr, AllDipBE1_ex, AllDipBE2_gr = self._fill_Polar_matrix(index1,index2,typ='BetaEE',order=1) res["E_pol1_B(E,E)_(A_exct,B_grnd)"] = PolarMat_Beta_ex_gr res["E_pol1_B(E,E)_(A_grnd,B_exct)"] = PolarMat_Beta_gr_ex # calculate pol-env for previous: pot1A_dipole_BEE_gr = potential_dipole(AllDipBE1_gr,R) pot1A_dipole_BEE_ex = potential_dipole(AllDipBE1_ex,R) pot1B_dipole_BEE_gr = potential_dipole(AllDipBE2_gr,R) pot1B_dipole_BEE_ex = potential_dipole(AllDipBE2_ex,R) PolarMat_env_Beta_ex = np.zeros((2,2),dtype="f8") PolarMat_env_Beta_gr = np.zeros((2,2),dtype="f8") PolarMat_env_Beta_ex[0,0] = np.dot(FG_charge_tmp,pot1A_dipole_BEE_ex) PolarMat_env_Beta_ex[1,1] = np.dot(FG_charge_tmp,pot1B_dipole_BEE_ex) PolarMat_env_Beta_gr[0,0] = np.dot(FG_charge_tmp,pot1B_dipole_BEE_gr) PolarMat_env_Beta_gr[1,1] = np.dot(FG_charge_tmp,pot1A_dipole_BEE_gr) res["E_pol1-env_B(E,E)_grnd"] = PolarMat_env_Beta_gr res["E_pol1-env_B(E,E)_exct"] = PolarMat_env_Beta_ex # Calculate secon order contribution to the first order quantities self.charge[index1] = gr_charge1 self.charge[index2] = ex_charge2 
PolarMat2_Alpha_st_gr_ex, dumm, dumm, AllDipA2_st1_gr, AllDipA2_st2_ex = self._fill_Polar_matrix(index1,index2,typ='Alpha_st',order=2) PolarMat2_Alpha_st_gr_ex = PolarMat2_Alpha_st_gr_ex - PolarMat_Alpha_st_gr_ex self.charge[index1] = ex_charge1 self.charge[index2] = gr_charge2 PolarMat2_Alpha_st_ex_gr, dumm, dumm, AllDipA2_st1_ex, AllDipA2_st2_gr = self._fill_Polar_matrix(index1,index2,typ='Alpha_st',order=2) PolarMat2_Alpha_st_ex_gr = PolarMat2_Alpha_st_ex_gr - PolarMat_Alpha_st_ex_gr res["E_pol2_st_(A_exct,B_grnd)"] = PolarMat2_Alpha_st_ex_gr * conversion_facs_energy["1/cm"] res["E_pol2_st_(A_grnd,B_exct)"] = PolarMat2_Alpha_st_gr_ex * conversion_facs_energy["1/cm"] pot2A_dipole_st_gr = potential_dipole(AllDipA2_st1_gr - AllDipA_st1_gr,R) pot2A_dipole_st_ex = potential_dipole(AllDipA2_st1_ex - AllDipA_st1_ex,R) pot2B_dipole_st_gr = potential_dipole(AllDipA2_st2_gr - AllDipA_st2_gr,R) pot2B_dipole_st_ex = potential_dipole(AllDipA2_st2_ex - AllDipA_st2_ex,R) PolarMat2_env_st_ex = np.zeros((2,2),dtype="f8") PolarMat2_env_st_gr = np.zeros((2,2),dtype="f8") PolarMat2_env_st_ex[0,0] = np.dot(FG_charge_tmp,pot2A_dipole_st_ex) PolarMat2_env_st_ex[1,1] = np.dot(FG_charge_tmp,pot2B_dipole_st_ex) PolarMat2_env_st_gr[0,0] = np.dot(FG_charge_tmp,pot2B_dipole_st_gr) PolarMat2_env_st_gr[1,1] = np.dot(FG_charge_tmp,pot2A_dipole_st_gr) res["E_pol2-env_st_grnd"] = PolarMat2_env_st_gr * conversion_facs_energy["1/cm"] res["E_pol2-env_st_exct"] = PolarMat2_env_st_ex * conversion_facs_energy["1/cm"] # Calculate polarization matrixes A_grnd B_0->1 self.charge[index1] = tr_charge1 self.charge[index2] = np.zeros(len(index2),dtype='f8') self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('AlphaE',NN=1,eps=1,debug=False) self.charge[index1] = np.zeros(len(index1),dtype='f8') E_AB_pol1_tr_gr_1 = self._get_interaction_energy(index2,charge=gr_charge2,debug=False) E_A_pol1_tr_gr = self._get_interaction_energy(index1,charge=gr_charge1,debug=False) self.dipole = np.zeros((self.Nat,3),dtype='f8') self.charge[index1] = np.zeros(len(index1),dtype='f8') self.charge[index2] = tr_charge2 self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('AlphaE',NN=1,eps=1,debug=False) self.charge[index2] = np.zeros(len(index2),dtype='f8') E_AB_pol1_gr_tr_1 = self._get_interaction_energy(index1,charge=gr_charge1,debug=False) E_B_pol1_tr_gr = self._get_interaction_energy(index2,charge=gr_charge2,debug=False) self.charge[index1] = gr_charge1 self.charge[index2] = np.zeros(len(index2),dtype='f8') self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('AlphaE',NN=1,eps=1,debug=False) self.charge[index1] = np.zeros(len(index1),dtype='f8') E_AB_pol1_gr_tr_2 = self._get_interaction_energy(index2,charge=tr_charge2,debug=False) self.charge[index1] = np.zeros(len(index1),dtype='f8') self.charge[index2] = gr_charge2 self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('AlphaE',NN=1,eps=1,debug=False) self.charge[index2] = np.zeros(len(index2),dtype='f8') E_AB_pol1_tr_gr_2 = self._get_interaction_energy(index1,charge=tr_charge1,debug=False) self.dipole = np.zeros((self.Nat,3),dtype='f8') # return transition charges back if (gr_charge1!=gr_charge2).any() : raise IOError("Heterodimer should have the same ground state charges") # return transition charges back if (tr_charge1!=tr_charge2).any() : raise IOError("Heterodimer should have the same transition charges") self.charge[index1] = gr_charge1 self.charge[index2] = tr_charge2 PolarMat_AlphaE_gr_tr, dip_AlphaE1_gr, dip_AlphaE2_tr, 
AllDipAE1_gr, AllDipAE2_tr = self._fill_Polar_matrix(index1,index2,typ='AlphaE',order=1) E_AB_pol1_gr_tr = PolarMat_AlphaE_gr_tr[0,1] self.charge[index1] = tr_charge1 self.charge[index2] = gr_charge2 PolarMat_AlphaE_gr_tr, dip_AlphaE1_gr, dip_AlphaE2_tr, AllDipAE1_gr, AllDipAE2_tr = self._fill_Polar_matrix(index1,index2,typ='AlphaE',order=1) E_AB_pol1_tr_gr = PolarMat_AlphaE_gr_tr[0,1] res["E_pol1_B(E,E)_(tr_gr,ex)"] = np.zeros((2,2),dtype="f8") self.charge[index1] = tr_charge1 self.charge[index2] = np.zeros(len(index2),dtype='f8') self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('BetaEE',NN=1,eps=1,debug=False) self.charge[index1] = np.zeros(len(index1),dtype='f8') res["E_pol1_B(E,E)_(tr_gr,ex)"][0,0] = self._get_interaction_energy(index1,charge=gr_charge1,debug=False) res["E_pol1_B(E,E)_(tr_gr,ex)"][0,1] = self._get_interaction_energy(index1,charge=ex_charge1,debug=False) self.charge[index1] = np.zeros(len(index2),dtype='f8') self.charge[index2] = tr_charge2 self.dipole = np.zeros((self.Nat,3),dtype='f8') self._calc_dipoles_All('BetaEE',NN=1,eps=1,debug=False) self.charge[index2] = np.zeros(len(index2),dtype='f8') res["E_pol1_B(E,E)_(tr_gr,ex)"][1,0] = self._get_interaction_energy(index2,charge=gr_charge2,debug=False) res["E_pol1_B(E,E)_(tr_gr,ex)"][1,1] = self._get_interaction_energy(index2,charge=ex_charge2,debug=False) # return transition charges back self.charge[index1] = tr_charge1 self.charge[index2] = tr_charge2 # compare electrostatic energies - TEST VAB_0101 = self.get_TrEsp_Eng(index1, index2) self.charge[index1] = ex_charge1 VAB_1101 = self.get_TrEsp_Eng(index1, index2) self.charge[index1] = gr_charge1 VAB_0001 = self.get_TrEsp_Eng(index1, index2) self.charge[index2] = gr_charge2 VAB_0000 = self.get_TrEsp_Eng(index1, index2) self.charge[index1] = ex_charge1 self.charge[index2] = ex_charge2 VAB_1111 = self.get_TrEsp_Eng(index1, index2) self.charge[index2] = gr_charge2 VAB_1100 = self.get_TrEsp_Eng(index1, index2) charge_orig1 = FG_charge.charge[index1] charge_orig2 = FG_charge.charge[index2] FG_charge.charge[index1] = gr_charge1 FG_charge.charge[index2] = 0.0 E_grnd=FG_charge.get_EnergyShift() FG_charge.charge[index1] = ex_charge1 FG_charge.charge[index2] = 0.0 E_exct=FG_charge.get_EnergyShift() FG_charge.charge[index1] = tr_charge1 FG_charge.charge[index2] = 0.0 E_trans=FG_charge.get_EnergyShift() FG_charge.charge[index1] = charge_orig1 FG_charge.charge[index2] = charge_orig2 self.charge[index1] = tr_charge1 self.charge[index2] = tr_charge2 # calculate new eigenstates and energies HH=np.zeros((2,2),dtype='f8') if Eng1<Eng2: HH[0,0] = Eng1+dAVA HH[1,1] = Eng2+dBVB else: HH[1,1] = Eng1+dAVA HH[0,0] = Eng2+dBVB HH[0,1] = E_TrEsp HH[1,0] = HH[0,1] Energy,Coeff=np.linalg.eigh(HH) d_esp=np.sqrt( E_TrEsp**2 + ((Eng2-Eng1+dBVB-dAVA)/2)**2 ) # sqrt( (<A|V|B>)**2 + ((Eng2-Eng1+dBVB-dAVA)/2)**2 ) # Calculate interaction energies if approx==1.1: # Calculate Total polarizability matrix PolarMat = PolarMat_AlphaE + PolarMat_Alpha_E + PolarMat_Alpha_st + PolarMat_Beta*(dAVA/2 + dBVB/2 - self.VinterFG) # Calculate interaction energies C1 = Coeff.T[0] E1 = Energy[0] + np.dot(C1, np.dot(PolarMat - d_esp*PolarMat_Beta, C1.T)) C2 = Coeff.T[1] E2 = Energy[1] + np.dot(C2, np.dot(PolarMat + d_esp*PolarMat_Beta, C2.T)) J_inter = np.sqrt( (E2 - E1)**2 - (Eng2 - Eng1)**2 )/2*np.sign(E_TrEsp) # Calculate energy shifts for every defect Eshift1 = dAVA + PolarMat_AlphaE[0,0] - PolarMat_Alpha_E[1,1] Eshift1 -= (self.VinterFG - dAVA)*PolarMat_Beta[0,0] Eshift2 = dBVB + 
PolarMat_AlphaE[1,1] - PolarMat_Alpha_E[0,0] Eshift2 -= (self.VinterFG - dBVB)*PolarMat_Beta[1,1] # Calculate transition dipoles for every defect TrDip1 = np.dot(self.charge[index1],self.coor[index1,:]) # vacuum transition dipole for single defect TrDip1 = TrDip1*(1 + PolarMat_Beta[0,0]/4) + dip_AlphaE1 + dip_Alpha_E1 TrDip1 -= (self.VinterFG - dAVA)*dip_Beta1 TrDip2 = np.dot(self.charge[index2],self.coor[index2,:]) # vacuum transition dipole for single defect TrDip2 = TrDip2*(1 + PolarMat_Beta[1,1]/4) + dip_AlphaE2 + dip_Alpha_E2 TrDip2 -= (self.VinterFG - dBVB)*dip_Beta2 # Change to energy class with energy_units('AU'): J_inter = EnergyClass(J_inter) Eshift1 = EnergyClass(Eshift1) Eshift2 = EnergyClass(Eshift2) E_pol_static1_ex_gr = EnergyClass(PolarMat_Alpha_st_ex_gr[0,0]-PolarMat_Alpha_st_gr_ex[0,0]) E_pol_static2_ex_gr = EnergyClass(PolarMat_Alpha_st_gr_ex[1,1]-PolarMat_Alpha_st_ex_gr[1,1]) E_pol_env_static1_ex_gr = EnergyClass(E_Pol1_env_static_ex1_FG - E_Pol1_env_static_gr1_FG) E_pol_env_static2_ex_gr = EnergyClass(E_Pol1_env_static_ex2_FG - E_Pol1_env_static_gr2_FG) VAB_0101 = EnergyClass(VAB_0101) VAB_1101 = EnergyClass(VAB_1101) VAB_0001 = EnergyClass(VAB_0001) VAB_0000 = EnergyClass(VAB_0000) VAB_1111 = EnergyClass(VAB_1111) VAB_1100 = EnergyClass(VAB_1100) E_grnd = EnergyClass(E_grnd) E_exct = EnergyClass(E_exct) E_trans = EnergyClass(E_trans) E_AB_pol1_gr_tr = EnergyClass(E_AB_pol1_gr_tr) E_AB_pol1_tr_gr = EnergyClass(E_AB_pol1_tr_gr) E_AB_pol1_gr_tr_1 = EnergyClass(E_AB_pol1_gr_tr_1) E_AB_pol1_tr_gr_1 = EnergyClass(E_AB_pol1_tr_gr_1) E_AB_pol1_gr_tr_2 = EnergyClass(E_AB_pol1_gr_tr_2) E_AB_pol1_tr_gr_2 = EnergyClass(E_AB_pol1_tr_gr_2) E_A_pol1_tr_gr = EnergyClass(E_A_pol1_tr_gr) E_B_pol1_tr_gr = EnergyClass(E_B_pol1_tr_gr) with energy_units("1/cm"): print("EA_pol1_s_ex_gr EA_pol1_env_s_ex_gr EAB_pol1_tr_gr EA_pol1_tr_gr") print(" {:9.4f} {:9.4f} {:9.4f} {:9.4f}".format( E_pol_static1_ex_gr.value, E_pol_env_static1_ex_gr.value, E_AB_pol1_tr_gr.value, E_A_pol1_tr_gr.value)) print(" VAB_0101 VAB_1101 VAB_0001 VAB_0000 VAB_1111 VAB_1100 E_grnd E_exct E_trans") print(VAB_0101.value, VAB_1101.value, VAB_0001.value, VAB_0000.value, VAB_1111.value, VAB_1100.value, E_grnd.value, E_exct.value, E_trans.value) # res["E_pol2_A(E)"] # res["E_pol2_A(-E)"] # res["E_pol2_B(E,E)"] # res["E_pol1_B(E,E)_(A_exct,B_grnd)"] # res["E_pol1_B(E,E)_(A_grnd,B_exct)"] # res["E_pol1-env_B(E,E)_grnd"] # res["E_pol1-env_B(E,E)_exct"] # res["E_pol2_st_(A_exct,B_grnd)"] # res["E_pol2_st_(A_grnd,B_exct)"] # res["E_pol2-env_st_grnd"] # res["E_pol2-env_st_exct"] return J_inter, Eshift1, Eshift2, TrDip1, TrDip2, AllDipAE1, AllDipA_E1, AllDipBE1, res else: raise IOError('Unsupported approximation') #============================================================================== # Definition of fuction for allocation of polarized molecules #============================================================================== def prepare_molecule_1Def(filenames,indx,AlphaE,Alpha_E,BetaE,VinterFG,verbose=False,CoarseGrain="plane",**kwargs): ''' Read all informations needed for Dielectric class and transform system with single defect into this class. Useful for calculation of interaction energies, transition site energy shifts and dipole changes. Parameters ---------- filenames : list of dictionary (dimension Nsystems) In the dictionaries there are specified all needed files which contains nessesary information for transformig the system into Dielectric class. 
keys: `'1def_structure'`: xyz file with system geometry and atom types `'charge_structure'`: xyz file with defect like molecule geometry for which transition charges were calculated `charge_grnd`: file with ground state charges for the defect `'charge_exct'`: file with excited state charges for the defect `'charge'`: file with transition charges for the defect indx : list of integers (dimension Nsystems x 6) For every system there are specified indexes neded for asignment of defect atoms. First three indexes correspond to center and two main axes of reference structure (structure which was used for charges calculation) and the remaining three indexes are corresponding atoms of the defect on fluorographene system. AlphaE : numpy.array of real (dimension 2x2) Atomic polarizability Alpha(E) for C-F corse grained atoms of fluorographene in ATOMIC UNITS (Bohr^2 - because 2D) Alpha_E : numpy.array of real (dimension 2x2) Atomic polarizability Alpha(-E) for C-F corse grained atoms of fluorographene in ATOMIC UNITS (Bohr^2 - because 2D) BetaE : numpy.array of real (dimension 2x2) Atomic polarizability Beta(E,E) for C-F corse grained atoms of fluorographene in ATOMIC UNITS (Bohr^2 - because 2D) VinterFG : real Difference in electrostatic interaction energy between interaction of excited C-F corse grained atom of fluorographene with all others fluorographene corse grained atoms in ground state and interaction of ground state C-F corse grained atom of fluorographene with all others fluorographene corse grained atoms in ground state. Units are ATOMIC UNITS (Hartree) CoarseGrain : string (optional init = "plane") Possible values are: "plane","C","CF". Define which level of coarse grained model should be used. If ``CoarseGrain="plane"`` then all atoms are projected on plane defined by nvec and C-F atoms re treated as single atom - for this case polarizabilities defined only in 2D by two numbers. If ``CoarseGrain="C"`` then carbon atoms are center for atomic polarizability tensor and again C-F are treated as a single atom. If ``CoarseGrain="CF"`` then center of C-F bonds are used as center for atomic polarizability tensor and again C-F are treated as a single atom. verbose : logical (optional - init=False) If `True` aditional information about whole proces will be printed **kwargs : dictionary (optional) Definition of polarizabitity matrixes for defect atoms (if nonzero polarizability is used) Returns ------- mol_polar : Dielectric class Fluorographene with defect in Dielectric class which contains all information needed for calculation of energy shifts and dipole changes for defect embeded in fluorographene index1 : list of integer (dimension Ndefect_atoms) Atom indexes of defect atoms charge : numpy.array of real (dimension Ndefect_atoms) Transition charges for every defect atom. First charge correspond to atom defined by first index in index1 list and so on. 
    struc : Structure class
        Structure of the fluorographene system with a single defect
    '''

    if verbose:
        print(indx)
    indx_center_test=indx[0]
    indx_x_test=indx[1]
    indx_y_test=indx[2]
    indx_center1=indx[3]
    indx_x1=indx[4]
    indx_y1=indx[5]

    # Specify files:
    xyzfile2=filenames['charge_structure']
    filenameESP=filenames['charge']
    xyzfile=filenames['1def_structure']

    if verbose:
        print('        Reading charges and format to polarization format...')
    struc_test=Structure()
    struc_test.load_xyz(xyzfile2)   # Structure of molecule used for fitting charges
    if verbose:
        print('        Loading molecule...')
    struc=Structure()
    struc.load_xyz(xyzfile)         # Fluorographene with single defect

    coor,charge,at_type=read_TrEsp_charges(filenameESP,verbose=False)

    if verbose:
        print('        Centering molecule...')
    struc.center(indx_center1,indx_x1,indx_y1)

    index1=identify_molecule(struc,struc_test,indx_center1,indx_x1,indx_y1,indx_center_test,indx_x_test,indx_y_test,onlyC=True)
    if len(index1)!=len(np.unique(index1)):
        raise IOError('There are repeating elements in index file')

    # Assign polarizability types and charges
    # (the commented-out inline assignment and plane projection that used to
    # live here are superseded by _prepare_polar_structure_1def below)
    PolCoor,Polcharge,PolType = _prepare_polar_structure_1def(struc,index1,charge,CoarseGrain,verbose=False)

    polar={}
    polar['AlphaE']=np.zeros((len(PolCoor),3,3),dtype='f8')
    polar['Alpha_E']=np.zeros((len(PolCoor),3,3),dtype='f8')
    polar['BetaE']=np.zeros((len(PolCoor),3,3),dtype='f8')

    mol_polar=Dielectric(PolCoor,Polcharge,np.zeros((len(PolCoor),3),dtype='f8'),
                         polar['AlphaE'],polar['Alpha_E'],polar['BetaE'],VinterFG)

    ZeroM=np.zeros((3,3),dtype='f8')
    Polarizability = {'CF': [AlphaE,Alpha_E,BetaE], 'CD': [AlphaE,Alpha_E,BetaE]}
    if "Alpha(E)" in kwargs.keys():
        AlphaE_def=kwargs['Alpha(E)']
        Alpha_E_def=kwargs['Alpha(-E)']
        BetaE_def=kwargs['Beta(E,E)']
        Polarizability['C'] = [AlphaE_def,Alpha_E_def,BetaE_def]
    else:
        Polarizability['C'] = [ZeroM,ZeroM,ZeroM]
    if "Fpolar" in kwargs.keys():
        Polarizability['FC'] = kwargs['Fpolar']
    else:
        Polarizability['FC'] = [ZeroM,ZeroM,ZeroM]
    mol_polar.polar=mol_polar.assign_polar(PolType,**{'PolValues': Polarizability})

    if "Alpha_static" in kwargs.keys():
        mol_polar.polar['Alpha_st'] = np.zeros((len(PolCoor),3,3),dtype='f8')
        if CoarseGrain=="all_atom":
            Alpha_static=kwargs["Alpha_static"]
            AlphaF_static=kwargs["AlphaF_static"]
        else:
            Alpha_static=kwargs["Alpha_static"]
            AlphaF_static=ZeroM
        for ii in range(len(PolType)):
            if PolType[ii]=='CF':
                mol_polar.polar['Alpha_st'][ii]=Alpha_static
            elif PolType[ii]=='FC':
                mol_polar.polar['Alpha_st'][ii]=AlphaF_static

    return mol_polar,index1,charge,struc
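# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module): how
# prepare_molecule_1Def is typically wired together. Every file name, index
# and numerical value below is a hypothetical placeholder, not data shipped
# with this code.
# ---------------------------------------------------------------------------
def _example_prepare_molecule_1Def():
    """ Minimal sketch (assumed inputs) of a prepare_molecule_1Def call. """
    filenames = {'1def_structure': 'FGrph_1def.xyz',       # hypothetical path
                 'charge_structure': 'defect.xyz',         # hypothetical path
                 'charge': 'defect_TrEsp.out',             # hypothetical path
                 'charge_grnd': 'defect_grnd.out',         # hypothetical path
                 'charge_exct': 'defect_exct.out'}         # hypothetical path
    # reference center/x/y indexes followed by defect center/x/y (placeholders)
    indx = [0, 1, 2, 100, 101, 102]
    # placeholder polarizability matrices (2D model: zero z-components)
    AlphaE = np.diag([5.0, 5.0, 0.0])
    Alpha_E = np.diag([4.0, 4.0, 0.0])
    BetaE = np.diag([1.0, 1.0, 0.0])
    VinterFG = 0.01                                        # placeholder (Hartree)
    mol_polar, index1, charge, struc = prepare_molecule_1Def(
            filenames, indx, AlphaE, Alpha_E, BetaE, VinterFG,
            CoarseGrain="plane")
    return mol_polar, index1, charge, struc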
def prepare_molecule_2Def(filenames,indx,AlphaE,Alpha_E,BetaE,VinterFG,verbose=False,
                          def2_charge=True,CoarseGrain="plane",**kwargs):
    ''' Read all information needed for the Dielectric class and transform a
    system with two identical defects into this class. Useful for calculation
    of interaction energies, transition site energy shifts and dipole changes.

    Parameters
    ----------
    filenames : dictionary
        In the dictionary there are specified all files which contain the
        information necessary for transforming the system into the Dielectric
        class.
        keys:
        * ``'2def_structure'``: xyz file with the FG system with two defects
          (geometry and atom types)
        * ``'charge1_structure'``: xyz file with the geometry of the defect-like
          molecule for which transition charges were calculated, corresponding
          to the first defect
        * ``'charge1'``: file with transition charges for the first defect
          (from TrEsp charge fitting)
        * ``'charge2_structure'``: xyz file with the geometry of the defect-like
          molecule for which transition charges were calculated, corresponding
          to the second defect
        * ``'charge2'``: file with transition charges for the second defect
          (from TrEsp charge fitting)
    indx : list of integers (dimension 9)
        Indexes needed for assignment of defect atoms. The first three indexes
        correspond to the center and the two main axes of the reference
        structure (the structure which was used for calculation of the charges)
        and the remaining six indexes are the corresponding atoms of the
        defects in the fluorographene system (three for the first defect and
        the last three for the second one).
    AlphaE : numpy.array of real (dimension 2x2)
        Atomic polarizability Alpha(E) for C-F coarse-grained atoms of
        fluorographene in ATOMIC UNITS (Bohr^2 - because 2D)
    Alpha_E : numpy.array of real (dimension 2x2)
        Atomic polarizability Alpha(-E) for C-F coarse-grained atoms of
        fluorographene in ATOMIC UNITS (Bohr^2 - because 2D)
    BetaE : numpy.array of real (dimension 2x2)
        Atomic polarizability Beta(E,E) for C-F coarse-grained atoms of
        fluorographene in ATOMIC UNITS (Bohr^2 - because 2D)
    VinterFG : real
        Difference between the electrostatic interaction energy of an excited
        C-F coarse-grained fluorographene atom with all other fluorographene
        coarse-grained atoms in the ground state, and the same interaction for
        a ground state C-F coarse-grained atom. Units are ATOMIC UNITS (Hartree)
    def2_charge : logical (init = True)
        Specifies if transition charges should also be placed on the second defect
    CoarseGrain : string (optional init = "plane")
        Possible values are: "plane","C","CF". Defines which level of the
        coarse-grained model should be used. If ``CoarseGrain="plane"`` all
        atoms are projected onto the plane defined by nvec and C-F atoms are
        treated as a single atom - in this case the polarizabilities are
        defined only in 2D by two numbers. If ``CoarseGrain="C"`` the carbon
        atoms are the centers of the atomic polarizability tensors and C-F is
        again treated as a single atom. If ``CoarseGrain="CF"`` the centers of
        the C-F bonds are used as centers of the atomic polarizability tensors
        and C-F is again treated as a single atom.
    verbose : logical (optional - init=False)
        If `True` additional information about the whole process will be printed
    **kwargs : dictionary (optional)
        Definition of polarizability matrices for defect atoms (if nonzero
        polarizability is used)

    Returns
    -------
    mol_polar : Dielectric class
        Fluorographene with two defects in the Dielectric class, which contains
        all information needed for calculation of energy shifts, dipole changes
        and interaction energies for a defect homodimer embedded in fluorographene
    index1 : list of integer (dimension Ndefect_atoms)
        Atom indexes of the first defect's atoms
    index2 : list of integer (dimension Ndefect_atoms)
        Atom indexes of the second defect's atoms
    charge1 : numpy.array of real (dimension Ndefect1_atoms)
        Transition charges for every atom of the first defect.
        The first charge corresponds to the atom defined by the first index in
        the index1 list and so on.
    charge2 : numpy.array of real (dimension Ndefect2_atoms)
        Transition charges for every atom of the second defect. The first
        charge corresponds to the atom defined by the first index in the
        index2 list and so on.
    struc : Structure class
        Structure of the fluorographene system with two defects
    '''

    indx_center_test=indx[0]
    indx_x_test=indx[1]
    indx_y_test=indx[2]
    indx_center1=indx[3]
    indx_x1=indx[4]
    indx_y1=indx[5]
    indx_center2=indx[6]
    indx_x2=indx[7]
    indx_y2=indx[8]

    # Specify files:
    xyzfile_chrg1=filenames['charge1_structure']
    filenameESP_chrg1=filenames['charge1']
    xyzfile_chrg2=filenames['charge2_structure']
    filenameESP_chrg2=filenames['charge2']
    xyzfile=filenames['2def_structure']

    # Read transition charges
    if verbose:
        print('        Reading charges and format to polarization format...')
    struc1_test=Structure()
    struc2_test=Structure()
    struc1_test.load_xyz(xyzfile_chrg1)     # Structure of molecule used for fitting charges
    struc2_test.load_xyz(xyzfile_chrg2)     # Structure of molecule used for fitting charges
    coor,charge1,at_type=read_TrEsp_charges(filenameESP_chrg1,verbose=False)
    coor,charge2,at_type=read_TrEsp_charges(filenameESP_chrg2,verbose=False)

    # Load molecule - fluorographene with 2 defects
    if verbose:
        print('        Loading molecule...')
    struc=Structure()
    struc.load_xyz(xyzfile)                 # Fluorographene with two defects

    index1=identify_molecule(struc,struc1_test,indx_center1,indx_x1,indx_y1,indx_center_test,indx_x_test,indx_y_test,onlyC=True)
    index2=identify_molecule(struc,struc2_test,indx_center2,indx_x2,indx_y2,indx_center_test,indx_x_test,indx_y_test,onlyC=True)
    if len(index1)!=len(np.unique(index1)) or len(index2)!=len(np.unique(index2)):
        print('index1:')
        print(index1)
        print('index2:')
        print(index2)
        raise IOError('There are repeating elements in index file')

    # Assign polarizability types
    # NOTE: the def2_charge switch (zeroing the second defect's transition
    # charges) was only handled by the commented-out inline assignment that
    # used to live here; _prepare_polar_structure_2def always places the
    # charges on both defects.
    PolCoor,Polcharge,PolType = _prepare_polar_structure_2def(struc,index1,charge1,index2,charge2,CoarseGrain)

    # Center the projected molecule on the plane
    if verbose:
        print('        Centering molecule...')
    PolCoor,Phi,Psi,Chi,center=CenterMolecule(PolCoor,indx_center1,[indx_center1,indx_x1,indx_center2,indx_x2],
                                              [indx_center1,indx_y1,indx_center2,indx_y2],print_angles=True)
    # Apply the same transformation to the structure
    struc.move(-center[0],-center[1],-center[2])
    struc.rotate(Phi,Psi,Chi)

    polar={}
    polar['AlphaE']=np.zeros((len(PolCoor),3,3),dtype='f8')
    polar['Alpha_E']=np.zeros((len(PolCoor),3,3),dtype='f8')
    polar['BetaE']=np.zeros((len(PolCoor),3,3),dtype='f8')
    mol_polar=Dielectric(PolCoor,Polcharge,np.zeros((len(PolCoor),3),dtype='f8'),
                         polar['AlphaE'],polar['Alpha_E'],polar['BetaE'],VinterFG)

    ZeroM=np.zeros((3,3),dtype='f8')
    Polarizability = {'CF': [AlphaE,Alpha_E,BetaE], 'CD': [AlphaE,Alpha_E,BetaE]}
    if "Alpha(E)" in kwargs.keys():
        AlphaE_def=kwargs['Alpha(E)']
        Alpha_E_def=kwargs['Alpha(-E)']
        BetaE_def=kwargs['Beta(E,E)']
        Polarizability['C'] = [AlphaE_def,Alpha_E_def,BetaE_def]
    else:
        Polarizability['C'] = [ZeroM,ZeroM,ZeroM]
    if "Fpolar" in kwargs.keys():
        Polarizability['FC'] = kwargs['Fpolar']
    else:
        Polarizability['FC'] = [ZeroM,ZeroM,ZeroM]
    mol_polar.polar=mol_polar.assign_polar(PolType,**{'PolValues': Polarizability})

    if "Alpha_static" in kwargs.keys():
        mol_polar.polar['Alpha_st'] = np.zeros((len(PolCoor),3,3),dtype='f8')
        # NOTE: unlike prepare_molecule_1Def, the static polarizability is
        # zeroed here for the "all_atom" model and no 'FC' centers are assigned.
        if CoarseGrain=="all_atom":
            Alpha_static=ZeroM
        else:
            Alpha_static=kwargs["Alpha_static"]
        for ii in range(len(PolType)):
            if PolType[ii]=='CF':
                mol_polar.polar['Alpha_st'][ii]=Alpha_static

    return mol_polar,index1,index2,charge1,charge2,struc

def _prepare_polar_structure_1def(struc,index1,charge1,Type,verbose=False):
    """ Type = "plane","C","CF","all_atom" """
    if not Type in ["plane","C","CF","all_atom"]:
        raise Warning("Unsupported type of coarse graining.")
    if verbose:
        print(Type)

    # The molecule has to be centered and oriented before this calculation is done
    # Assign polarizability types and charges
    PolType=[]
    Polcharge=[]
    PolCoor=[]
    if Type == "plane" or Type == "C":
        for ii in range(struc.nat):
            if struc.at_type[ii]=='C' and (ii in index1):
                Polcharge.append(charge1[np.where(index1==ii)[0][0]])
                PolType.append('C')
                PolCoor.append(struc.coor._value[ii])
            elif struc.at_type[ii]=='C':
                PolType.append('CF')
                Polcharge.append(0.0)
                PolCoor.append(struc.coor._value[ii])
        PolType=np.array(PolType)
        Polcharge=np.array(Polcharge,dtype='f8')
        PolCoor=np.array(PolCoor,dtype='f8')

        if Type == "plane":
            # Project the whole system onto the plane defined by the defect
            nvec_test,origin_test = fit_plane(PolCoor)
            PolCoor=project_on_plane(PolCoor,nvec_test,origin_test)

    elif Type == "all_atom":
        PolCoor = struc.coor._value.copy()
        for ii in range(struc.nat):
            if struc.at_type[ii]=='C' and (ii in index1):
                Polcharge.append(charge1[np.where(index1==ii)[0][0]])
                PolType.append('C')
            elif struc.at_type[ii]=='C':
                PolType.append('CF')
                Polcharge.append(0.0)
            elif struc.at_type[ii]=='F':
                PolType.append('FC')
                Polcharge.append(0.0)
        PolType=np.array(PolType)
        Polcharge=np.array(Polcharge,dtype='f8')
        PolCoor=np.array(PolCoor,dtype='f8')

    elif Type == "CF":
        connectivity = []
        for ii in range(struc.nat):
            connectivity.append([])
        if struc.bonds is None:
            struc.guess_bonds()
        for ii in range(len(struc.bonds)):
            indx1=struc.bonds[ii][0]
            at1=struc.at_type[indx1]
            indx2=struc.bonds[ii][1]
            at2=struc.at_type[indx2]
            if at1=="C" and at2=="F":
                connectivity[indx1].append(indx2)
            elif at2=="C" and at1=="F":
                connectivity[indx2].append(indx1)
        for ii in range(struc.nat):
            if struc.at_type[ii]=='C' and (ii in index1):
                Polcharge.append(charge1[np.where(index1==ii)[0][0]])
                PolType.append('C')
                PolCoor.append(struc.coor._value[ii])
            elif struc.at_type[ii]=='C':
                PolType.append('CF')
                Polcharge.append(0.0)
                # The polarizability center is located at the center of the
                # C-F bond (or of the F-C-F unit for border carbons)
                count = 1
                position = struc.coor._value[ii]
                for jj in range(len(connectivity[ii])):
                    position += struc.coor._value[ connectivity[ii][jj] ]
                    count += 1
                position = position / count
                PolCoor.append(position)
        PolType=np.array(PolType)
        Polcharge=np.array(Polcharge,dtype='f8')
        PolCoor=np.array(PolCoor,dtype='f8')

    # TODO: add all atom representation

    return PolCoor,Polcharge,PolType

def _prepare_polar_structure_2def(struc,index1,charge1,index2,charge2,Type,verbose=False):
    """ Type = "plane","C","CF","all_atom" """
    if not Type in ["plane","C","CF","all_atom"]:
        raise Warning("Unsupported type of coarse graining.")
    if verbose:
        print(Type)

    # Assign polarizability types
    PolType=[]
    Polcharge=[]
    PolCoor=[]
    if Type == "plane" or Type == "C":
        for ii in range(struc.nat):
            if struc.at_type[ii]=='C' and (ii in index1):
                Polcharge.append(charge1[np.where(index1==ii)[0][0]])
                PolType.append('C')
                PolCoor.append(struc.coor._value[ii])
            elif struc.at_type[ii]=='C' and (ii in index2):
                Polcharge.append(charge2[np.where(index2==ii)[0][0]])
                PolType.append('C')
                PolCoor.append(struc.coor._value[ii])
            elif struc.at_type[ii]=='C':
                PolType.append('CF')
                Polcharge.append(0.0)
                PolCoor.append(struc.coor._value[ii])
        PolType=np.array(PolType)
        Polcharge=np.array(Polcharge,dtype='f8')
        PolCoor=np.array(PolCoor,dtype='f8')

        if Type == "plane":
            # Project the whole system onto the plane defined by the defect
            nvec_test,origin_test = fit_plane(PolCoor)
            PolCoor=project_on_plane(PolCoor,nvec_test,origin_test)

    elif Type == "all_atom":
        PolCoor = struc.coor._value.copy()
        for ii in range(struc.nat):
            if struc.at_type[ii]=='C' and (ii in index1):
                Polcharge.append(charge1[np.where(index1==ii)[0][0]])
                PolType.append('C')
            elif struc.at_type[ii]=='C' and (ii in index2):
                Polcharge.append(charge2[np.where(index2==ii)[0][0]])
                PolType.append('C')
            elif struc.at_type[ii]=='C':
                PolType.append('CF')
                Polcharge.append(0.0)
            elif struc.at_type[ii]=='F':
                PolType.append('FC')
                Polcharge.append(0.0)
        PolType=np.array(PolType)
        Polcharge=np.array(Polcharge,dtype='f8')
        # TODO: TEST this assignment of polarizability centers

    elif Type == "CF":
        connectivity = []
        for ii in range(struc.nat):
            connectivity.append([])
        if struc.bonds is None:
            struc.guess_bonds()
        for ii in range(len(struc.bonds)):
            indx1=struc.bonds[ii][0]
            at1=struc.at_type[indx1]
            indx2=struc.bonds[ii][1]
            at2=struc.at_type[indx2]
            if at1=="C" and at2=="F":
                connectivity[indx1].append(indx2)
            elif at2=="C" and at1=="F":
                connectivity[indx2].append(indx1)
        for ii in range(struc.nat):
            if struc.at_type[ii]=='C' and (ii in index1):
                Polcharge.append(charge1[np.where(index1==ii)[0][0]])
                PolType.append('C')
                PolCoor.append(struc.coor._value[ii])
            elif struc.at_type[ii]=='C' and (ii in index2):
                Polcharge.append(charge2[np.where(index2==ii)[0][0]])
                PolType.append('C')
                PolCoor.append(struc.coor._value[ii])
            elif struc.at_type[ii]=='C':
                PolType.append('CF')
                Polcharge.append(0.0)
                # The polarizability center is located at the center of the
                # C-F bond (or of the F-C-F unit for border carbons)
                count = 1
                position = struc.coor._value[ii]
                for jj in range(len(connectivity[ii])):
                    position += struc.coor._value[ connectivity[ii][jj] ]
                    count += 1
                position = position / count
                PolCoor.append(position)
        PolType=np.array(PolType)
        Polcharge=np.array(Polcharge,dtype='f8')
        PolCoor=np.array(PolCoor,dtype='f8')

    # TODO: add all atom representation

    return PolCoor,Polcharge,PolType
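# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original module): the "CF" branch above
# places each coarse-grained polarizability center at the average of a carbon
# position and its bonded fluorine position(s) - the C-F midpoint, or the
# F-C-F centroid for border carbons. Standalone illustration with made-up
# coordinates:
# ---------------------------------------------------------------------------
def _example_cf_center():
    import numpy as np
    r_C = np.array([0.0, 0.0, 0.0])            # carbon position (placeholder)
    r_F = [np.array([0.0, 0.0, 1.4])]          # bonded fluorine(s) (placeholder)
    position = r_C.copy()
    count = 1
    for rf in r_F:                             # same accumulation as in the CF branch
        position += rf
        count += 1
    position = position / count                # midpoint for one F, centroid for two
    return position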
#TODO: Get rid of ShortName
def Calc_SingleDef_FGprop(filenames,ShortName,index_all,AlphaE,Alpha_E,BetaE,VinterFG,FG_charges,ChargeType,order=80,verbose=False,approx=1.1,MathOut=False,CoarseGrain="plane",**kwargs):
    ''' Calculate energy shifts and transition dipole shifts for a single
    defect embedded in fluorographene.

    Parameters
    ----------
    filenames : dictionary
        Dictionary with all files which contain the information necessary for
        transforming the system into the Dielectric class and for the
        electrostatic calculations.
        Keys:
        * ``'1def_structure'``: xyz file with the FG system with a single
          defect (geometry and atom types)
        * ``'charge_structure'``: xyz file with the geometry of the defect-like
          molecule for which the transition charges were calculated
        * ``'charge'``: file with transition charges for the defect
          (from TrEsp charge fitting)
        * ``'charge_grnd'``: file with ground state charges for the defect
          (from TrEsp charge fitting)
        * ``'charge_exct'``: file with excited state charges for the defect
          (from TrEsp charge fitting)
    ShortName : string
        Short description of the system
    index_all : list of integers (dimension 6)
        Indexes needed for assignment of defect atoms. The first three indexes
        correspond to the center and the two main axes of the reference
        structure (the structure which was used for calculation of the charges)
        and the last three indexes are the corresponding atoms of the defect.
    AlphaE : numpy.array of real (dimension 2x2)
        Atomic polarizability Alpha(E) for C-F coarse-grained atoms of
        fluorographene in ATOMIC UNITS (Bohr^2 - because 2D)
    Alpha_E : numpy.array of real (dimension 2x2)
        Atomic polarizability Alpha(-E) for C-F coarse-grained atoms of
        fluorographene in ATOMIC UNITS (Bohr^2 - because 2D)
    BetaE : numpy.array of real (dimension 2x2)
        Atomic polarizability Beta(E,E) for C-F coarse-grained atoms of
        fluorographene in ATOMIC UNITS (Bohr^2 - because 2D)
    VinterFG : real
        Difference between the electrostatic interaction energy of an excited
        C-F coarse-grained fluorographene atom with all other fluorographene
        coarse-grained atoms in the ground state, and the same interaction for
        a ground state C-F coarse-grained atom. Units are ATOMIC UNITS (Hartree)
    FG_charges : list of real (dimension 2)
        [charge on inner fluorographene atom, charge on border fluorographene carbon]
    ChargeType : string
        Specifies which charges should be used for the electrostatic
        calculations (ground and excited state charges) for defect atoms.
        Allowed types are: ``'qchem'``, ``'qchem_all'``, ``'AMBER'`` and ``'gaussian'``.
        * ``'qchem'`` - charges calculated by fitting the Q-Chem ESP on carbon atoms.
        * ``'qchem_all'`` - charges calculated by fitting the Q-Chem ESP on all
          atoms; only carbon charges are used and the same charge is added to
          all carbon atoms in order to have a neutral molecule.
        * ``'AMBER'`` - not yet fully implemented.
        * ``'gaussian'`` - not yet fully implemented.
    order : integer (optional - init=80)
        Specifies how many SCF steps should be used in the calculation of the
        induced dipoles - according to the used model it should be 2
    verbose : logical (optional - init=False)
        If `True` additional information about the whole process will be printed
    approx : real (optional - init=1.1)
        Specifies which approximation should be used.
        * **Approximation 1.1**: Neglect of `Beta(-E,-E)` and `Beta(-E,E)` and `Alpha(-E)`.
        * **Approximation 1.2**: Neglect of `Beta(-E,-E)` and `tilde{Beta(E)}`.
        * **Approximation 1.3**: `Beta(E,E)=Beta(-E,E)=Beta(-E,-E)` and also
          `Alpha(E)=Alpha(-E)`; however, the second one is not a condition.

    Returns
    -------
    Eshift : Energy class
        Transition energy shift for the defect due to the fluorographene
        environment, calculated from the structure with a single defect.
        Units are energy managed
    TrDip : numpy array of real (dimension 3)
        Total transition dipole for the defect with environment effects
        included, calculated from the structure with a single defect
        (in ATOMIC UNITS)

    Notes
    -----
    By comparing QC calculations it was found that the energy shift from the
    structure with two defects and from the one with a single defect is
    almost the same.
    '''

    if verbose:
        print('Calculation of interaction energy for:',ShortName)

    # Read and prepare the molecule
    mol_polar,index1,charge,struc=prepare_molecule_1Def(filenames,index_all,AlphaE,Alpha_E,BetaE,VinterFG,verbose=False,CoarseGrain=CoarseGrain,**kwargs)

    # Calculate dAVA = <A|V|A>-<G|V|G>
    AditInfo={'Structure': struc,'index1': index1}
    mol_Elstat,index,charge_grnd,charge_exct=ElStat_PrepareMolecule_1Def(filenames,index_all,FG_charges,ChargeType=ChargeType,verbose=False,**AditInfo)
    dAVA=mol_Elstat.get_EnergyShift()

    # Calculate transition energy shift and transition dipole change
    Eshift,TrDip=mol_polar.get_SingleDefectProperties(index1,dAVA=dAVA,order=order,approx=approx)

    if verbose:
        with energy_units("1/cm"):
            print(ShortName,Eshift.value)
            print("    dipole:",np.linalg.norm(TrDip))
            print("    dAVA:",dAVA*conversion_facs_energy["1/cm"],'cm-1')

    return Eshift, TrDip
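# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module): how
# Calc_SingleDef_FGprop is typically called. Every file name, index and
# numerical value below is a hypothetical placeholder.
# ---------------------------------------------------------------------------
def _example_Calc_SingleDef_FGprop():
    filenames = {'1def_structure': 'FGrph_1def.xyz',      # hypothetical paths
                 'charge_structure': 'defect.xyz',
                 'charge': 'defect_TrEsp.out',
                 'charge_grnd': 'defect_grnd.out',
                 'charge_exct': 'defect_exct.out'}
    index_all = [0, 1, 2, 100, 101, 102]     # placeholder reference/defect indexes
    AlphaE = np.diag([5.0, 5.0, 0.0])        # placeholder 2D polarizabilities
    Alpha_E = np.diag([4.0, 4.0, 0.0])       # (zero z-components, plane model)
    BetaE = np.diag([1.0, 1.0, 0.0])
    VinterFG = 0.01                          # placeholder (Hartree)
    FG_charges = [0.08, 0.16]                # placeholder [inner F, border C] charges
    Eshift, TrDip = Calc_SingleDef_FGprop(filenames, 'test_system', index_all,
                                          AlphaE, Alpha_E, BetaE, VinterFG,
                                          FG_charges, 'qchem',
                                          order=80, approx=1.1)
    with energy_units("1/cm"):
        print(Eshift.value, np.linalg.norm(TrDip))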
#TODO: Get rid of ShortName
#TODO: Input vacuum transition energies
def Calc_Heterodimer_FGprop(filenames,ShortName,index_all,AlphaE,Alpha_E,BetaE,VinterFG,FG_charges,ChargeType,order=80,verbose=False,approx=1.1,MathOut=False,CoarseGrain="plane",**kwargs):
    ''' Calculate interaction energies between defects embedded in a
    polarizable atom environment for all systems given in filenames, with the
    possibility of calculating transition energy shifts and transition dipoles.

    Parameters
    ----------
    filenames : dictionary
        Dictionary with all files which contain the information necessary for
        transforming the system into the Dielectric class and for the
        electrostatic calculations.
        Keys:
        * ``'2def_structure'``: xyz file with the FG system with two defects
          (geometry and atom types)
        * ``'charge1_structure'``: xyz file with the geometry of the defect-like
          molecule for which transition charges were calculated, corresponding
          to the first defect
        * ``'charge1'``: file with transition charges for the first defect
          (from TrEsp charge fitting)
        * ``'charge1_grnd'``: file with ground state charges for the first
          defect (from TrEsp charge fitting)
        * ``'charge1_exct'``: file with excited state charges for the first
          defect (from TrEsp charge fitting)
        * ``'charge2_structure'``: xyz file with the geometry of the defect-like
          molecule for which transition charges were calculated, corresponding
          to the second defect
        * ``'charge2'``: file with transition charges for the second defect
          (from TrEsp charge fitting)
        * ``'charge2_grnd'``: file with ground state charges for the second
          defect (from TrEsp charge fitting)
        * ``'charge2_exct'``: file with excited state charges for the second
          defect (from TrEsp charge fitting)
    ShortName : string
        Short description of the system
    index_all : list of integers (dimension 9)
        Indexes needed for assignment of defect atoms. The first three indexes
        correspond to the center and the two main axes of the reference
        structure (the structure which was used for calculation of the
        charges), the next three indexes are the corresponding atoms of the
        first defect in the fluorographene system and the last three indexes
        are the corresponding atoms of the second defect.
    AlphaE : numpy.array of real (dimension 2x2)
        Atomic polarizability Alpha(E) for C-F coarse-grained atoms of
        fluorographene in ATOMIC UNITS (Bohr^2 - because 2D)
    Alpha_E : numpy.array of real (dimension 2x2)
        Atomic polarizability Alpha(-E) for C-F coarse-grained atoms of
        fluorographene in ATOMIC UNITS (Bohr^2 - because 2D)
    BetaE : numpy.array of real (dimension 2x2)
        Atomic polarizability Beta(E,E) for C-F coarse-grained atoms of
        fluorographene in ATOMIC UNITS (Bohr^2 - because 2D)
    VinterFG : real
        Difference between the electrostatic interaction energy of an excited
        C-F coarse-grained fluorographene atom with all other fluorographene
        coarse-grained atoms in the ground state, and the same interaction for
        a ground state C-F coarse-grained atom. Units are ATOMIC UNITS (Hartree)
    FG_charges : list of real (dimension 2)
        [charge on inner fluorographene atom, charge on border fluorographene carbon]
    ChargeType : string
        Specifies which charges should be used for the electrostatic
        calculations (ground and excited state charges) for defect atoms.
        Allowed types are: ``'qchem'``, ``'qchem_all'``, ``'AMBER'`` and ``'gaussian'``.
        * ``'qchem'`` - charges calculated by fitting the Q-Chem ESP on carbon atoms.
        * ``'qchem_all'`` - charges calculated by fitting the Q-Chem ESP on all
          atoms; only carbon charges are used and the same charge is added to
          all carbon atoms in order to have a neutral molecule.
        * ``'AMBER'`` - not yet fully implemented.
        * ``'gaussian'`` - not yet fully implemented.
    order : integer (optional - init=80)
        Specifies how many SCF steps should be used in the calculation of the
        induced dipoles - according to the used model it should be 2
    CoarseGrain : string (optional init = "plane")
        Possible values are: "plane","C","CF". Defines which level of the
        coarse-grained model should be used. If ``CoarseGrain="plane"`` all
        atoms are projected onto the plane defined by nvec and C-F atoms are
        treated as a single atom - in this case the polarizabilities are
        defined only in 2D by two numbers. If ``CoarseGrain="C"`` the carbon
        atoms are the centers of the atomic polarizability tensors and C-F is
        again treated as a single atom. If ``CoarseGrain="CF"`` the centers of
        the C-F bonds are used as centers of the atomic polarizability tensors
        and C-F is again treated as a single atom.
    verbose : logical (optional - init=False)
        If `True` additional information about the whole process will be printed
    approx : real (optional - init=1.1)
        Specifies which approximation should be used.
        * **Approximation 1.1**: Neglect of `Beta(-E,-E)` and `Beta(-E,E)` and `Alpha(-E)`.
        * **Approximation 1.2**: Neglect of `Beta(-E,-E)` and `tilde{Beta(E)}`.
        * **Approximation 1.3**: `Beta(E,E)=Beta(-E,E)=Beta(-E,-E)` and also
          `Alpha(E)=Alpha(-E)`; however, the second one is not a condition.

    Returns
    -------
    Einter : Energy class
        Interaction energy with the effects of the environment included.
        Units are energy managed
    Eshift1 : Energy class
        Transition energy shift for the first defect due to the fluorographene
        environment, calculated from the heterodimer structure. Units are
        energy managed
    Eshift2 : Energy class
        Transition energy shift for the second defect due to the fluorographene
        environment, calculated from the heterodimer structure.
        Units are energy managed
    TrDip1 : numpy array of real (dimension 3)
        Total transition dipole for the first defect with environment effects
        included, calculated from the heterodimer structure (in ATOMIC UNITS)
    TrDip2 : numpy array of real (dimension 3)
        Total transition dipole for the second defect with environment effects
        included, calculated from the heterodimer structure (in ATOMIC UNITS)

    Notes
    -----
    So far this works only with two symmetric defects - for a heterodimer the
    vacuum transition energy of every defect needs to be input.
    '''

    if verbose:
        print('Calculation of interaction energy for:',ShortName)

    # Read and prepare the molecule
    mol_polar,index1,index2,charge1,charge2,struc=prepare_molecule_2Def(filenames,index_all,AlphaE,Alpha_E,BetaE,VinterFG,verbose=False,def2_charge=True,CoarseGrain=CoarseGrain,**kwargs)

    # Calculate dAVA = <A|V|A>-<G|V|G> and dBVB = <B|V|B>-<G|V|G>
    AditInfo={'Structure': struc,'index1': index1,'index2': index2}
    mol_Elstat,indx1,indx2,charge1_grnd,charge2_grnd,charge1_exct,charge2_exct=ElStat_PrepareMolecule_2Def(filenames,index_all,FG_charges,ChargeType=ChargeType,verbose=False,**AditInfo)
    dAVA=mol_Elstat.get_EnergyShift(index=index2, charge=charge2_grnd)
    dBVB=mol_Elstat.get_EnergyShift(index=index1, charge=charge1_grnd)

    # Calculate the interaction energy and transition energy shifts
    Einter,Eshift1,Eshift2,TrDip1,TrDip2,dipAE,dipA_E,dipBE=mol_polar.get_HeterodimerProperties(index1,index2,0.0,0.0,dAVA=dAVA,dBVB=dBVB,order=order,approx=approx)

    if verbose:
        with energy_units("1/cm"):
            print('        Total interaction energy:',Einter.value)
            print(ShortName,abs(Einter.value),Eshift1.value,Eshift2.value)
            print("dipole:",np.linalg.norm(TrDip1),np.linalg.norm(TrDip2))
            print("dAVA:",dAVA*conversion_facs_energy["1/cm"],"dBVB:",dBVB*conversion_facs_energy["1/cm"])

    if MathOut:
        # Output the point charges and induced dipoles as Mathematica notebooks
        if not os.path.exists("Pictures"):
            os.makedirs("Pictures")
        Bonds = GuessBonds(mol_polar.coor)
        if CoarseGrain in ["plane","C","CF"]:
            at_type = ['C']*mol_polar.Nat
        elif CoarseGrain == "all_atom":
            at_type = struc.at_type.copy()
        mat_filename = "".join(['Pictures/Polar_',ShortName,'_AlphaE.nb'])
        params = {'TrPointCharge': mol_polar.charge,'AtDipole': dipAE,'rSphere_dip': 0.5,'rCylinder_dip': 0.1}
        OutputMathematica(mat_filename,mol_polar.coor,Bonds,at_type,scaleDipole=50.0,**params)
        mat_filename = "".join(['Pictures/Polar_',ShortName,'_Alpha_E.nb'])
        params = {'TrPointCharge': mol_polar.charge,'AtDipole': dipA_E,'rSphere_dip': 0.5,'rCylinder_dip': 0.1}
        OutputMathematica(mat_filename,mol_polar.coor,Bonds,at_type,scaleDipole=50.0,**params)
        mat_filename = "".join(['Pictures/Polar_',ShortName,'_BetaE.nb'])
        params = {'TrPointCharge': mol_polar.charge,'AtDipole': dipBE,'rSphere_dip': 0.5,'rCylinder_dip': 0.1}
        OutputMathematica(mat_filename,mol_polar.coor,Bonds,at_type,scaleDipole=50.0,**params)

    return Einter, Eshift1, Eshift2, TrDip1, TrDip2
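# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module): a
# Calc_Heterodimer_FGprop call for a homodimer, with hypothetical placeholder
# inputs throughout (file names, indexes, polarizabilities, charges).
# ---------------------------------------------------------------------------
def _example_Calc_Heterodimer_FGprop():
    filenames = {'2def_structure': 'FGrph_2def.xyz',      # hypothetical paths
                 'charge1_structure': 'defect.xyz', 'charge1': 'defect_TrEsp.out',
                 'charge1_grnd': 'defect_grnd.out', 'charge1_exct': 'defect_exct.out',
                 'charge2_structure': 'defect.xyz', 'charge2': 'defect_TrEsp.out',
                 'charge2_grnd': 'defect_grnd.out', 'charge2_exct': 'defect_exct.out'}
    # reference center/x/y + defect1 center/x/y + defect2 center/x/y (placeholders)
    index_all = [0, 1, 2, 100, 101, 102, 200, 201, 202]
    AlphaE = np.diag([5.0, 5.0, 0.0])        # placeholder 2D polarizabilities
    Alpha_E = np.diag([4.0, 4.0, 0.0])
    BetaE = np.diag([1.0, 1.0, 0.0])
    VinterFG = 0.01                          # placeholder (Hartree)
    FG_charges = [0.08, 0.16]                # placeholder [inner F, border C] charges
    Einter, Eshift1, Eshift2, TrDip1, TrDip2 = Calc_Heterodimer_FGprop(
            filenames, 'test_dimer', index_all, AlphaE, Alpha_E, BetaE,
            VinterFG, FG_charges, 'qchem', order=80, approx=1.1)
    with energy_units("1/cm"):
        print(Einter.value, Eshift1.value, Eshift2.value)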
def TEST_Calc_Heterodimer_FGprop(filenames,ShortName,index_all,AlphaE,Alpha_E,BetaE,VinterFG,FG_charges,ChargeType,order=80,verbose=False,approx=1.1,MathOut=False,CoarseGrain="plane",**kwargs):
    ''' Calculate interaction energies between defects embedded in a
    polarizable atom environment for all systems given in filenames, with the
    possibility of calculating transition energy shifts and transition dipoles.

    Parameters
    ----------
    filenames : dictionary
        Dictionary with all files which contain the information necessary for
        transforming the system into the Dielectric class and for the
        electrostatic calculations.
        Keys:
        * ``'2def_structure'``: xyz file with the FG system with two defects
          (geometry and atom types)
        * ``'charge1_structure'``: xyz file with the geometry of the defect-like
          molecule for which transition charges were calculated, corresponding
          to the first defect
        * ``'charge1'``: file with transition charges for the first defect
          (from TrEsp charge fitting)
        * ``'charge1_grnd'``: file with ground state charges for the first
          defect (from TrEsp charge fitting)
        * ``'charge1_exct'``: file with excited state charges for the first
          defect (from TrEsp charge fitting)
        * ``'charge2_structure'``: xyz file with the geometry of the defect-like
          molecule for which transition charges were calculated, corresponding
          to the second defect
        * ``'charge2'``: file with transition charges for the second defect
          (from TrEsp charge fitting)
        * ``'charge2_grnd'``: file with ground state charges for the second
          defect (from TrEsp charge fitting)
        * ``'charge2_exct'``: file with excited state charges for the second
          defect (from TrEsp charge fitting)
    ShortName : string
        Short description of the system
    index_all : list of integers (dimension 9)
        Indexes needed for assignment of defect atoms. The first three indexes
        correspond to the center and the two main axes of the reference
        structure (the structure which was used for calculation of the
        charges), the next three indexes are the corresponding atoms of the
        first defect in the fluorographene system and the last three indexes
        are the corresponding atoms of the second defect.
    AlphaE : numpy.array of real (dimension 2x2)
        Atomic polarizability Alpha(E) for C-F coarse-grained atoms of
        fluorographene in ATOMIC UNITS (Bohr^2 - because 2D)
    Alpha_E : numpy.array of real (dimension 2x2)
        Atomic polarizability Alpha(-E) for C-F coarse-grained atoms of
        fluorographene in ATOMIC UNITS (Bohr^2 - because 2D)
    BetaE : numpy.array of real (dimension 2x2)
        Atomic polarizability Beta(E,E) for C-F coarse-grained atoms of
        fluorographene in ATOMIC UNITS (Bohr^2 - because 2D)
    VinterFG : real
        Difference between the electrostatic interaction energy of an excited
        C-F coarse-grained fluorographene atom with all other fluorographene
        coarse-grained atoms in the ground state, and the same interaction for
        a ground state C-F coarse-grained atom. Units are ATOMIC UNITS (Hartree)
    FG_charges : list of real (dimension 2)
        [charge on inner fluorographene atom, charge on border fluorographene carbon]
    ChargeType : string
        Specifies which charges should be used for the electrostatic
        calculations (ground and excited state charges) for defect atoms.
        Allowed types are: ``'qchem'``, ``'qchem_all'``, ``'AMBER'`` and ``'gaussian'``.
        * ``'qchem'`` - charges calculated by fitting the Q-Chem ESP on carbon atoms.
        * ``'qchem_all'`` - charges calculated by fitting the Q-Chem ESP on all
          atoms; only carbon charges are used and the same charge is added to
          all carbon atoms in order to have a neutral molecule.
        * ``'AMBER'`` - not yet fully implemented.
        * ``'gaussian'`` - not yet fully implemented.
    order : integer (optional - init=80)
        Specifies how many SCF steps should be used in the calculation of the
        induced dipoles - according to the used model it should be 2
    CoarseGrain : string (optional init = "plane")
        Possible values are: "plane","C","CF". Defines which level of the
        coarse-grained model should be used. If ``CoarseGrain="plane"`` all
        atoms are projected onto the plane defined by nvec and C-F atoms are
        treated as a single atom - in this case the polarizabilities are
        defined only in 2D by two numbers.
        If ``CoarseGrain="C"`` the carbon atoms are the centers of the atomic
        polarizability tensors and C-F is again treated as a single atom. If
        ``CoarseGrain="CF"`` the centers of the C-F bonds are used as centers
        of the atomic polarizability tensors and C-F is again treated as a
        single atom.
    verbose : logical (optional - init=False)
        If `True` additional information about the whole process will be printed
    approx : real (optional - init=1.1)
        Specifies which approximation should be used.
        * **Approximation 1.1**: Neglect of `Beta(-E,-E)` and `Beta(-E,E)` and `Alpha(-E)`.
        * **Approximation 1.2**: Neglect of `Beta(-E,-E)` and `tilde{Beta(E)}`.
        * **Approximation 1.3**: `Beta(E,E)=Beta(-E,E)=Beta(-E,-E)` and also
          `Alpha(E)=Alpha(-E)`; however, the second one is not a condition.

    Returns
    -------
    Einter : Energy class
        Interaction energy with the effects of the environment included.
        Units are energy managed
    Eshift1 : Energy class
        Transition energy shift for the first defect due to the fluorographene
        environment, calculated from the heterodimer structure. Units are
        energy managed
    Eshift2 : Energy class
        Transition energy shift for the second defect due to the fluorographene
        environment, calculated from the heterodimer structure. Units are
        energy managed
    TrDip1 : numpy array of real (dimension 3)
        Total transition dipole for the first defect with environment effects
        included, calculated from the heterodimer structure (in ATOMIC UNITS)
    TrDip2 : numpy array of real (dimension 3)
        Total transition dipole for the second defect with environment effects
        included, calculated from the heterodimer structure (in ATOMIC UNITS)

    Notes
    -----
    So far this works only with two symmetric defects - for a heterodimer the
    vacuum transition energy of every defect needs to be input.
    '''

    if verbose:
        print('Calculation of interaction energy for:',ShortName)

    # Read and prepare the molecule
    mol_polar,index1,index2,charge1,charge2,struc=prepare_molecule_2Def(filenames,index_all,AlphaE,Alpha_E,BetaE,VinterFG,verbose=False,def2_charge=True,CoarseGrain=CoarseGrain,**kwargs)
    if (mol_polar.charge[index1] != mol_polar.charge[index2]).any():
        raise Warning("Transition charges are not the same - after creation.")

    # Calculate dAVA = <A|V|A>-<G|V|G> and dBVB = <B|V|B>-<G|V|G>
    AditInfo={'Structure': struc,'index1': index1,'index2': index2}
    mol_Elstat,indx1,indx2,charge1_grnd,charge2_grnd,charge1_exct,charge2_exct=ElStat_PrepareMolecule_2Def(filenames,index_all,FG_charges,ChargeType=ChargeType,verbose=False,**AditInfo)
    dAVA=mol_Elstat.get_EnergyShift(index=index2, charge=charge2_grnd)
    dBVB=mol_Elstat.get_EnergyShift(index=index1, charge=charge1_grnd)
    # dAVA=mol_Elstat.get_EnergyShift(index=index2)
    # dBVB=mol_Elstat.get_EnergyShift(index=index1)
    if (mol_polar.charge[index1] != mol_polar.charge[index2]).any():
        raise Warning("Transition charges are not the same - after elstat.")

    # Calculate the interaction energy and transition energy shifts - so far for a homodimer
    Einter,Eshift1,Eshift2,TrDip1,TrDip2,dipAE,dipA_E,dipBE,res=mol_polar._TEST_HeterodimerProperties(charge1_grnd,charge1_exct,charge2_grnd,charge2_exct,mol_Elstat,struc,index1,index2,0.0,0.0,dAVA=dAVA,dBVB=dBVB,order=order,approx=approx)
    # get_HeterodimerProperties_new(self, gr_charge1, ex_charge1, gr_charge2, ex_charge2, FG_elstat, struc, index1, index2, Eng1, Eng2, eps, dAVA=0.0, dBVB=0.0, order=2, approx=1.1)

    # Components collected in res:
    # res["E_pol2_A(E)"]
    # res["E_pol2_A(-E)"]
    # res["E_pol2_B(E,E)"]
    # res["E_pol1_B(E,E)_(A_exct,B_grnd)"]
    # res["E_pol1_B(E,E)_(A_grnd,B_exct)"]
    # res["E_pol1-env_B(E,E)_grnd"]
    # res["E_pol1-env_B(E,E)_exct"]
    # res["E_pol2_st_(A_exct,B_grnd)"]
    #
res["E_pol2_st_(A_grnd,B_exct)"] # res["E_pol2-env_st_grnd"] # res["E_pol2-env_st_exct"] # res["E_pol1_B(E,E)_(tr_gr,ex)"] import os if not os.path.isfile("Temp.dat"): text = " pol2_A(E) | pol2_A(-E) | pol2_st_(A_ex,B_gr) | pol2_st_(A_gr,B_ex) | E_pol2-env_st_grnd | E_pol2-env_st_exct | pol1_BEE | pol1_BEE_(A_ex,B_gr) | pol1_BEE_(A_gr,B_ex) | pol1-env_BEE_grnd | pol1-env_BEE_exct | pol1_BEE_(tr_gr,ex) |" os.system("".join(['echo "',text,'" >> Temp.dat'])) text = "--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|" os.system("".join(['echo "',text,'" >> Temp.dat'])) # pol2_A(E) | pol2_A(-E) | pol2_st_(A_ex,B_gr) | pol2_st_(A_gr,B_ex) | E_pol2-env_st_grnd | E_pol2-env_st_exct | pol1_BEE | pol1_BEE_(A_ex,B_gr) | pol1_BEE_(A_gr,B_ex) | pol1-env_BEE_grnd |" ii = 0 text="{:21} {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.6f} {:10.6f} | {:10.6f} {:10.6f} | {:10.6f} {:10.6f} | {:10.6f} {:10.6f} | {:10.6f} {:10.6f} | {:10.6f} {:10.6f} |".format( ShortName,res["E_pol2_A(E)"][ii,0],res["E_pol2_A(E)"][ii,1],res["E_pol2_A(-E)"][ii,0],res["E_pol2_A(-E)"][ii,1], res["E_pol2_st_(A_exct,B_grnd)"][ii,0],res["E_pol2_st_(A_exct,B_grnd)"][ii,1],res["E_pol2_st_(A_grnd,B_exct)"][ii,0], res["E_pol2_st_(A_grnd,B_exct)"][ii,1],res["E_pol2-env_st_grnd"][ii,0],res["E_pol2-env_st_grnd"][ii,1], res["E_pol2-env_st_exct"][ii,0],res["E_pol2-env_st_exct"][ii,1],res["E_pol2_B(E,E)"][ii,0],res["E_pol2_B(E,E)"][ii,1], res["E_pol1_B(E,E)_(A_exct,B_grnd)"][ii,0],res["E_pol1_B(E,E)_(A_exct,B_grnd)"][ii,1],res["E_pol1_B(E,E)_(A_grnd,B_exct)"][ii,0], res["E_pol1_B(E,E)_(A_grnd,B_exct)"][ii,1],res["E_pol1-env_B(E,E)_grnd"][ii,0],res["E_pol1-env_B(E,E)_grnd"][ii,1], res["E_pol1-env_B(E,E)_exct"][ii,0],res["E_pol1-env_B(E,E)_exct"][ii,1],res["E_pol1_B(E,E)_(tr_gr,ex)"][ii,0],res["E_pol1_B(E,E)_(tr_gr,ex)"][ii,1]) os.system("".join(['echo "',text,'" >> Temp.dat'])) ii = 1 text="{:21} {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.6f} {:10.6f} | {:10.6f} {:10.6f} | {:10.6f} {:10.6f} | {:10.6f} {:10.6f} | {:10.6f} {:10.6f} | {:10.6f} {:10.6f} |".format( " ",res["E_pol2_A(E)"][ii,0],res["E_pol2_A(E)"][ii,1],res["E_pol2_A(-E)"][ii,0],res["E_pol2_A(-E)"][ii,1], res["E_pol2_st_(A_exct,B_grnd)"][ii,0],res["E_pol2_st_(A_exct,B_grnd)"][ii,1],res["E_pol2_st_(A_grnd,B_exct)"][ii,0], res["E_pol2_st_(A_grnd,B_exct)"][ii,1],res["E_pol2-env_st_grnd"][ii,0],res["E_pol2-env_st_grnd"][ii,1], res["E_pol2-env_st_exct"][ii,0],res["E_pol2-env_st_exct"][ii,1],res["E_pol2_B(E,E)"][ii,0],res["E_pol2_B(E,E)"][ii,1], res["E_pol1_B(E,E)_(A_exct,B_grnd)"][ii,0],res["E_pol1_B(E,E)_(A_exct,B_grnd)"][ii,1],res["E_pol1_B(E,E)_(A_grnd,B_exct)"][ii,0], res["E_pol1_B(E,E)_(A_grnd,B_exct)"][ii,1],res["E_pol1-env_B(E,E)_grnd"][ii,0],res["E_pol1-env_B(E,E)_grnd"][ii,1], res["E_pol1-env_B(E,E)_exct"][ii,0],res["E_pol1-env_B(E,E)_exct"][ii,1],res["E_pol1_B(E,E)_(tr_gr,ex)"][ii,0],res["E_pol1_B(E,E)_(tr_gr,ex)"][ii,1]) os.system("".join(['echo "',text,'" >> Temp.dat'])) text = 
"--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|" os.system("".join(['echo "',text,' " >> Temp.dat'])) # if (mol_polar.charge[index1] != mol_polar.charge[index2]).any(): # raise Warning("Transition charges are not the same - after polar.") # TODO: For testing output structure and polarization structure - I'm getting different values for first and second defect # struc.output_to_xyz("".join([ShortName,"_structure.xyz"])) # from QChemTool.QuantumChem.output import OutputToXYZ # from QChemTool.General.units import conversion_facs_position # OutputToXYZ(mol_polar.coor*conversion_facs_position["Angstrom"],["C"]*len(mol_polar.coor),"".join([ShortName,"_pol.xyz"])) if verbose: with energy_units("1/cm"): print(' Total interaction energy:',Einter.value) print(ShortName,abs(Einter.value),Eshift1.value,Eshift2.value) print("dipole:",np.linalg.norm(TrDip1),np.linalg.norm(TrDip2)) print("dAVA:",dAVA*conversion_facs_energy["1/cm"],"dBVB:",dBVB*conversion_facs_energy["1/cm"]) if MathOut: if not os.path.exists("Pictures"): os.makedirs("Pictures") Bonds = GuessBonds(mol_polar.coor) if CoarseGrain in ["plane","C","CF"]: at_type = ['C']*mol_polar.Nat elif CoarseGrain == "all_atom": at_type = struc.at_type.copy() # if (mol_polar.charge[index1] != mol_polar.charge[index2]).any(): # raise Warning("Transition charges are not the same - before output.") mat_filename = "".join(['Pictures/Polar_',ShortName,'_AlphaE.nb']) params = {'TrPointCharge': mol_polar.charge,'AtDipole': dipAE,'rSphere_dip': 0.5,'rCylinder_dip':0.1} OutputMathematica(mat_filename,mol_polar.coor,Bonds,at_type,scaleDipole=50.0,**params) mat_filename = "".join(['Pictures/Polar_',ShortName,'_Alpha_E.nb']) params = {'TrPointCharge': mol_polar.charge,'AtDipole': dipA_E,'rSphere_dip': 0.5,'rCylinder_dip':0.1} OutputMathematica(mat_filename,mol_polar.coor,Bonds,at_type,scaleDipole=50.0,**params) mat_filename = "".join(['Pictures/Polar_',ShortName,'_BetaE.nb']) params = {'TrPointCharge': mol_polar.charge,'AtDipole': dipBE,'rSphere_dip': 0.5,'rCylinder_dip':0.1} OutputMathematica(mat_filename,mol_polar.coor,Bonds,at_type,scaleDipole=50.0,**params) return Einter, Eshift1, Eshift2, TrDip1, TrDip2 def Calc_Heterodimer_FGprop_new(filenames,ShortName,E1,E2,index_all,AlphaE,Alpha_E,BetaE,VinterFG,FG_charges,ChargeType,order=2,verbose=False,approx=1.1,MathOut=False,CoarseGrain="plane",**kwargs): ''' Calculate interaction energies between defects embeded in polarizable atom environment for all systems given in filenames. Possibility of calculate transition energy shifts and transition dipoles. Parameters ---------- filenames : dictionary Dictionary with information about all needed files which contains nessesary information for transformig the system into Dielectric class and electrostatic calculations. 
        Keys:
        * ``'2def_structure'``: xyz file with the FG system with two defects
          (geometry and atom types)
        * ``'charge1_structure'``: xyz file with the geometry of the defect-like
          molecule for which transition charges were calculated, corresponding
          to the first defect
        * ``'charge1'``: file with transition charges for the first defect
          (from TrEsp charge fitting)
        * ``'charge1_grnd'``: file with ground state charges for the first
          defect (from TrEsp charge fitting)
        * ``'charge1_exct'``: file with excited state charges for the first
          defect (from TrEsp charge fitting)
        * ``'charge2_structure'``: xyz file with the geometry of the defect-like
          molecule for which transition charges were calculated, corresponding
          to the second defect
        * ``'charge2'``: file with transition charges for the second defect
          (from TrEsp charge fitting)
        * ``'charge2_grnd'``: file with ground state charges for the second
          defect (from TrEsp charge fitting)
        * ``'charge2_exct'``: file with excited state charges for the second
          defect (from TrEsp charge fitting)
    ShortName : string
        Short description of the system
    E1 : Energy class
        Vacuum transition energy of the first defect (together with E2 it sets
        the averaged optical gap eps used below)
    E2 : Energy class
        Vacuum transition energy of the second defect
    index_all : list of integers (dimension 9)
        Indexes needed for assignment of defect atoms. The first three indexes
        correspond to the center and the two main axes of the reference
        structure (the structure which was used for calculation of the
        charges), the next three indexes are the corresponding atoms of the
        first defect in the fluorographene system and the last three indexes
        are the corresponding atoms of the second defect.
    AlphaE : numpy.array of real (dimension 2x2)
        Atomic polarizability Alpha(E) for C-F coarse-grained atoms of
        fluorographene in ATOMIC UNITS (Bohr^2 - because 2D)
    Alpha_E : numpy.array of real (dimension 2x2)
        Atomic polarizability Alpha(-E) for C-F coarse-grained atoms of
        fluorographene in ATOMIC UNITS (Bohr^2 - because 2D)
    BetaE : numpy.array of real (dimension 2x2)
        Atomic polarizability Beta(E,E) for C-F coarse-grained atoms of
        fluorographene in ATOMIC UNITS (Bohr^2 - because 2D)
    VinterFG : real
        Difference between the electrostatic interaction energy of an excited
        C-F coarse-grained fluorographene atom with all other fluorographene
        coarse-grained atoms in the ground state, and the same interaction for
        a ground state C-F coarse-grained atom. Units are ATOMIC UNITS (Hartree)
    FG_charges : list of real (dimension 2)
        [charge on inner fluorographene atom, charge on border fluorographene carbon]
    ChargeType : string
        Specifies which charges should be used for the electrostatic
        calculations (ground and excited state charges) for defect atoms.
        Allowed types are: ``'qchem'``, ``'qchem_all'``, ``'AMBER'`` and ``'gaussian'``.
        * ``'qchem'`` - charges calculated by fitting the Q-Chem ESP on carbon atoms.
        * ``'qchem_all'`` - charges calculated by fitting the Q-Chem ESP on all
          atoms; only carbon charges are used and the same charge is added to
          all carbon atoms in order to have a neutral molecule.
        * ``'AMBER'`` - not yet fully implemented.
        * ``'gaussian'`` - not yet fully implemented.
    order : integer (optional - init=2)
        Specifies how many SCF steps should be used in the calculation of the
        induced dipoles - according to the used model it should be 2
    CoarseGrain : string (optional init = "plane")
        Possible values are: "plane","C","CF". Defines which level of the
        coarse-grained model should be used. If ``CoarseGrain="plane"`` all
        atoms are projected onto the plane defined by nvec and C-F atoms are
        treated as a single atom - in this case the polarizabilities are
        defined only in 2D by two numbers.
        If ``CoarseGrain="C"`` the carbon atoms are the centers of the atomic
        polarizability tensors and C-F is again treated as a single atom. If
        ``CoarseGrain="CF"`` the centers of the C-F bonds are used as centers
        of the atomic polarizability tensors and C-F is again treated as a
        single atom.
    verbose : logical (optional - init=False)
        If `True` additional information about the whole process will be printed
    approx : real (optional - init=1.1)
        Specifies which approximation should be used.
        * **Approximation 1.1**: Neglect of `Beta(-E,-E)` and `Beta(-E,E)` and `Alpha(-E)`.
        * **Approximation 1.2**: Neglect of `Beta(-E,-E)` and `tilde{Beta(E)}`.
        * **Approximation 1.3**: `Beta(E,E)=Beta(-E,E)=Beta(-E,-E)` and also
          `Alpha(E)=Alpha(-E)`; however, the second one is not a condition.

    Returns
    -------
    Einter : Energy class
        Interaction energy with the effects of the environment included.
        Units are energy managed
    Eshift1 : Energy class
        Transition energy shift for the first defect due to the fluorographene
        environment, calculated from the heterodimer structure. Units are
        energy managed
    Eshift2 : Energy class
        Transition energy shift for the second defect due to the fluorographene
        environment, calculated from the heterodimer structure. Units are
        energy managed
    TrDip1 : numpy array of real (dimension 3)
        Total transition dipole for the first defect with environment effects
        included, calculated from the heterodimer structure (in ATOMIC UNITS)
    TrDip2 : numpy array of real (dimension 3)
        Total transition dipole for the second defect with environment effects
        included, calculated from the heterodimer structure (in ATOMIC UNITS)

    Notes
    -----
    So far this works only with two symmetric defects - for a heterodimer the
    vacuum transition energy of every defect needs to be input.
    '''

    if verbose:
        print('Calculation of interaction energy for:',ShortName)

    # Read and prepare the molecule
    mol_polar,index1,index2,charge1,charge2,struc=prepare_molecule_2Def(filenames,index_all,AlphaE,Alpha_E,BetaE,VinterFG,verbose=False,def2_charge=True,CoarseGrain=CoarseGrain,**kwargs)
    if (mol_polar.charge[index1] != mol_polar.charge[index2]).any():
        raise Warning("Transition charges are not the same - after creation.")

    # Calculate dAVA = <A|V|A>-<G|V|G> and dBVB = <B|V|B>-<G|V|G>
    AditInfo={'Structure': struc,'index1': index1,'index2': index2}
    mol_Elstat,indx1,indx2,charge1_grnd,charge2_grnd,charge1_exct,charge2_exct=ElStat_PrepareMolecule_2Def(filenames,index_all,FG_charges,ChargeType=ChargeType,verbose=False,**AditInfo)
    dAVA=mol_Elstat.get_EnergyShift(index=index2, charge=charge2_grnd)
    dBVB=mol_Elstat.get_EnergyShift(index=index1, charge=charge1_grnd)
    # dAVA=mol_Elstat.get_EnergyShift(index=index2)
    # dBVB=mol_Elstat.get_EnergyShift(index=index1)
    if (mol_polar.charge[index1] != mol_polar.charge[index2]).any():
        raise Warning("Transition charges are not the same - after elstat.")

    # Average optical gap of the two defects, used by the new heterodimer model
    eps = EnergyClass( (E1.value+E2.value)/2 )

    # Calculate the interaction energy and transition energy shifts - so far for a homodimer
    Einter,Eshift1,Eshift2,TrDip1,TrDip2,dipAE,dipA_E,dipBE,res=mol_polar.get_HeterodimerProperties_new(charge1_grnd,charge1_exct,charge2_grnd,charge2_exct,mol_Elstat,struc,index1,index2,0.0,0.0,eps,dAVA=dAVA,dBVB=dBVB,order=order,approx=approx)

    if verbose:
        with energy_units("1/cm"):
            print('        Total interaction energy:',Einter.value)
            print(ShortName,abs(Einter.value),Eshift1.value,Eshift2.value)
            print("dipole:",np.linalg.norm(TrDip1),np.linalg.norm(TrDip2))
            print("dAVA:",dAVA*conversion_facs_energy["1/cm"],"dBVB:",dBVB*conversion_facs_energy["1/cm"])

    if MathOut:
        # Output the point charges and induced dipoles as Mathematica notebooks
        if not os.path.exists("Pictures"):
            os.makedirs("Pictures")
        Bonds =
GuessBonds(mol_polar.coor) if CoarseGrain in ["plane","C","CF"]: at_type = ['C']*mol_polar.Nat elif CoarseGrain == "all_atom": at_type = struc.at_type.copy() mat_filename = "".join(['Pictures/Polar_',ShortName,'_AlphaE.nb']) params = {'TrPointCharge': mol_polar.charge,'AtDipole': dipAE,'rSphere_dip': 0.5,'rCylinder_dip':0.1} OutputMathematica(mat_filename,mol_polar.coor,Bonds,at_type,scaleDipole=50.0,**params) mat_filename = "".join(['Pictures/Polar_',ShortName,'_Alpha_E.nb']) params = {'TrPointCharge': mol_polar.charge,'AtDipole': dipA_E,'rSphere_dip': 0.5,'rCylinder_dip':0.1} OutputMathematica(mat_filename,mol_polar.coor,Bonds,at_type,scaleDipole=50.0,**params) mat_filename = "".join(['Pictures/Polar_',ShortName,'_BetaE.nb']) params = {'TrPointCharge': mol_polar.charge,'AtDipole': dipBE,'rSphere_dip': 0.5,'rCylinder_dip':0.1} OutputMathematica(mat_filename,mol_polar.coor,Bonds,at_type,scaleDipole=50.0,**params) # res["E_pol2_A(E)"] = PolarMat_AlphaE # res["E_pol2_A(-E)"] = PolarMat_Alpha_E # res["E_pol2_A_static"] = PolarMat_Alpha_st # res["E_pol2_B(E,E)"] = PolarMat_Beta # res["E_pol2_B(E,E)_scaled"] = PolarMat_Beta_scaled # res["E_pol2_A(E)_(trans,grnd)"] = PolarMat_Alpha_tr_gr # res["E_pol1_A_static"] = PolarMat_static_tr_gr_ex # res["E_elstat_1"] = ElstatMat_1 if verbose: if not os.path.isfile("Temp.dat"): text = " pol2_A(E) | pol2_A(-E) | pol2_st | pol2_BEE_scaled | E_pol1-A(E)_tr_gr | E_pol1_st | pol1_BEE | sum_elstat |" os.system("".join(['echo "',text,'" >> Temp.dat'])) text = "----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|" os.system("".join(['echo "',text,'" >> Temp.dat'])) with energy_units("1/cm"): ii = 0 text="{:21} {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.6f} {:10.6f} | {:10.6f} {:10.6f} |".format( ShortName,res["E_pol2_A(E)"].value[ii,0],res["E_pol2_A(E)"].value[ii,1],res["E_pol2_A(-E)"].value[ii,0],res["E_pol2_A(-E)"].value[ii,1], res["E_pol2_A_static"].value[ii,0],res["E_pol2_A_static"].value[ii,1],res["E_pol2_B(E,E)_scaled"].value[ii,0], res["E_pol2_B(E,E)_scaled"].value[ii,1],res["E_pol2_A(E)_(trans,grnd)"].value[ii,0],res["E_pol2_A(E)_(trans,grnd)"].value[ii,1], res["E_pol1_A_static"].value[ii,0],res["E_pol1_A_static"].value[ii,1],res["E_pol2_B(E,E)"].value[ii,0],res["E_pol2_B(E,E)"].value[ii,1], res["E_elstat_1"].value[ii,0],res["E_elstat_1"].value[ii,1]) os.system("".join(['echo "',text,'" >> Temp.dat'])) ii = 1 text="{:21} {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.3f} {:10.3f} | {:10.6f} {:10.6f} | {:10.6f} {:10.6f} |".format( " ",res["E_pol2_A(E)"].value[ii,0],res["E_pol2_A(E)"].value[ii,1],res["E_pol2_A(-E)"].value[ii,0],res["E_pol2_A(-E)"].value[ii,1], res["E_pol2_A_static"].value[ii,0],res["E_pol2_A_static"].value[ii,1],res["E_pol2_B(E,E)_scaled"].value[ii,0], res["E_pol2_B(E,E)_scaled"].value[ii,1],res["E_pol2_A(E)_(trans,grnd)"].value[ii,0],res["E_pol2_A(E)_(trans,grnd)"].value[ii,1], res["E_pol1_A_static"].value[ii,0],res["E_pol1_A_static"].value[ii,1],res["E_pol2_B(E,E)"].value[ii,0],res["E_pol2_B(E,E)"].value[ii,1], res["E_elstat_1"].value[ii,0],res["E_elstat_1"].value[ii,1]) os.system("".join(['echo "',text,'" >> Temp.dat'])) text = 
"----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|" os.system("".join(['echo "',text,' " >> Temp.dat'])) return Einter, Eshift1, Eshift2, TrDip1, TrDip2 def TEST_Compare_SingleDef_FGprop(filenames,ShortName,index_all,AlphaE,Alpha_E,BetaE,VinterFG,FG_charges,ChargeType,order=1,verbose=False,approx=1.1,MathOut=False,CoarseGrain="plane",**kwargs): ''' Compare magnitude of individual terms in energy shift calculation for defect in Fluorographene environment (so far only for first order of perturbation expansion -> order = 1) Parameters ---------- filenames : dictionary Dictionary with information about all needed files which contains nessesary information for transformig the system into Dielectric class and electrostatic calculations. Keys: * ``'1def_structure'``: xyz file with FG system with single defect geometry and atom types * ``'charge_structure'``: xyz file with defect-like molecule geometry for which transition charges were calculated corresponding to first defect * ``'charge'``: file with transition charges for the defect (from TrEsp charges fitting) * ``'charge_grnd'``: file with ground state charges for the defect (from TrEsp charges fitting) * ``'charge_exct'``: file with excited state charges for the defect (from TrEsp charges fitting) ShortName : string Short description of the system index_all : list of integers (dimension 6) There are specified indexes neded for asignment of defect atoms. First three indexes correspond to center and two main axes of reference structure (structure which was used for charges calculation) and the last three indexes are corresponding atoms of the defect. AlphaE : numpy.array of real (dimension 2x2) Atomic polarizability Alpha(E) for C-F corse grained atoms of fluorographene in ATOMIC UNITS (Bohr^2 - because 2D) Alpha_E : numpy.array of real (dimension 2x2) Atomic polarizability Alpha(-E) for C-F corse grained atoms of fluorographene in ATOMIC UNITS (Bohr^2 - because 2D) BetaE : numpy.array of real (dimension 2x2) Atomic polarizability Beta(E,E) for C-F corse grained atoms of fluorographene in ATOMIC UNITS (Bohr^2 - because 2D) VinterFG : real Difference in electrostatic interaction energy between interaction of excited C-F corse grained atom of fluorographene with all others fluorographene corse grained atoms in ground state and interaction of ground state C-F corse grained atom of fluorographene with all others fluorographene corse grained atoms in ground state. Units are ATOMIC UNITS (Hartree) FG_charges : list of real (dimension 2) [charge on inner fluorographene atom, charge on borded fluorographe carbon] ChargeType : string Specifies which charges should be used for electrostatic calculations (ground and excited state charges) for defect atoms. Allowed types are: ``'qchem'``, ``'qchem_all'``, ``'AMBER'`` and ``'gaussian'``. * ``'qchem'`` - charges calculated by fiting Q-Chem ESP on carbon atoms. * ``'qchem_all'`` - charges calculated by fiting Q-Chem ESP on all atoms, only carbon charges are used and same charge is added to all carbon atoms in order to have neutral molecule. * ``'AMBER'`` - not yet fully implemented. * ``'gaussian'`` - not yet fully implemented. 
order : integer (optional - init=1) Specifies how many SCF steps should be used in the calculation of induced dipoles - according to the used model it should be 2 verbose : logical (optional - init=False) If `True`, additional information about the whole process will be printed approx : real (optional - init=1.1) Specifies which approximation should be used. * **Approximation 1.1**: Neglect of `Beta(-E,-E)` and `Beta(-E,E)` and `Alpha(-E)`. * **Approximation 1.2**: Neglect of `Beta(-E,-E)` and `tilde{Beta(E)}`. * **Approximation 1.3**: `Beta(E,E)=Beta(-E,E)=Beta(-E,-E)` and also `Alpha(E)=Alpha(-E)`, however the second one is not a condition Returns -------- Eshift : Energy class Transition energy shift for the defect due to the fluorographene environment calculated from structure with single defect. Units are energy managed TrDip : numpy array of real (dimension 3) Total transition dipole for the defect with environment effects included calculated from structure with single defect (in ATOMIC UNITS) Notes -------- A comparison with QC calculations showed that the energy shift from the structure with two defects and from the structure with a single defect is almost the same. ''' # read and prepare molecule mol_polar,index1,charge,struc=prepare_molecule_1Def(filenames,index_all,AlphaE,Alpha_E,BetaE,VinterFG,verbose=False,CoarseGrain=CoarseGrain,**kwargs) # calculate dAVA = <A|V|A>-<G|V|G> AditInfo={'Structure': struc,'index1': index1,'Output_exct': True} mol_Elstat,index,charge_grnd,charge_exct=ElStat_PrepareMolecule_1Def(filenames,index_all,FG_charges,ChargeType=ChargeType,verbose=False,**AditInfo) dAVA=mol_Elstat.get_EnergyShift() # Calculate interaction with ground state charges mol_Elstat.charge[index] = charge_grnd E_elst_grnd = mol_Elstat.get_EnergyShift() mol_Elstat.charge[index] = charge_exct - charge_grnd # Calculate interaction with excited state charges mol_Elstat.charge[index] = charge_exct E_elst_exct = mol_Elstat.get_EnergyShift() mol_Elstat.charge[index] = charge_exct - charge_grnd # Calculate interaction with transition density mol_Elstat.charge[index] = charge E_elst_trans = mol_Elstat.get_EnergyShift() mol_Elstat.charge[index] = charge_exct - charge_grnd # calculate transition energy shifts and transition dipole change res_Energy, res_Pot, TrDip = mol_polar._TEST_Compare_SingleDefectProperties(charge,charge_grnd,charge_exct,struc,index1,dAVA=dAVA,order=order,approx=approx) charge_FG_grnd = mol_Elstat.charge.copy() charge_FG_grnd[index] = 0.0 E_Pol1_env_static_ex_gr_FG = np.dot(charge_FG_grnd,res_Pot['Pol1-env_static_(exct-grnd)']) E_Pol2_env_static_ex_gr_FG = np.dot(charge_FG_grnd,res_Pot['Pol2-env_static_(exct-grnd)']) E_Pol1_env_BetaEE_ex_gr_FG = np.dot(charge_FG_grnd,res_Pot['Pol1-env_Beta(E,E)_(exct-grnd)']) E_Pol1_env_BetaEE_trans_FG = np.dot(charge_FG_grnd,res_Pot['Pol1-env_Beta(E,E)_(trans)']) E_Pol1_env_AlphaE_trans_FG = np.dot(charge_FG_grnd,res_Pot['Pol1-env_Alpha(E)_(trans)']) E_Pol1_env_Alpha_E_trans_FG = np.dot(charge_FG_grnd,res_Pot['Pol1-env_Alpha(-E)_(trans)']) E_Pol1_env_static_trans_FG = np.dot(charge_FG_grnd,res_Pot['Pol1-env_static_(trans)']) #E_Polar_AlphaE_gr_ex_FG = 0.0 # pot_dipole_gr_ex = potential of induced dipoles induced by difference charges between ground and excited state (gr_charges - ex_charges) with energy_units("AU"): E_elst_trans = EnergyClass(E_elst_trans) E_elst_grnd = EnergyClass(E_elst_grnd) E_elst_exct = EnergyClass(E_elst_exct) E_Pol1_env_static_ex_gr_FG = EnergyClass(E_Pol1_env_static_ex_gr_FG) E_Pol2_env_static_ex_gr_FG = EnergyClass(E_Pol2_env_static_ex_gr_FG) 
E_Pol1_env_BetaEE_ex_gr_FG = EnergyClass(E_Pol1_env_BetaEE_ex_gr_FG) E_Pol1_env_BetaEE_trans_FG = EnergyClass(E_Pol1_env_BetaEE_trans_FG) E_Pol1_env_AlphaE_trans_FG = EnergyClass(E_Pol1_env_AlphaE_trans_FG) E_Pol1_env_Alpha_E_trans_FG = EnergyClass(E_Pol1_env_Alpha_E_trans_FG) E_Pol1_env_static_trans_FG = EnergyClass(E_Pol1_env_static_trans_FG) if MathOut: if not os.path.exists("Pictures"): os.makedirs("Pictures") Bonds = GuessBonds(mol_polar.coor) struc.guess_bonds() if CoarseGrain in ["plane","C","CF"]: at_type = ['C']*mol_polar.Nat elif CoarseGrain == "all_atom": at_type = struc.at_type.copy() mat_filename = "".join(['Pictures/Charge_',ShortName,'_Exct-Grnd.nb']) params = {'TrPointCharge': mol_Elstat.charge,'rSphere_dip': 0.5,'rCylinder_dip':0.1} OutputMathematica(mat_filename,mol_Elstat.coor,struc.bonds,struc.at_type,**params) mol_Elstat.charge[index] = charge mat_filename = "".join(['Pictures/Charge_',ShortName,'_Trans.nb']) params = {'TrPointCharge': mol_Elstat.charge,'rSphere_dip': 0.5,'rCylinder_dip':0.1} OutputMathematica(mat_filename,mol_Elstat.coor,struc.bonds,struc.at_type,**params) # res_Pot = {'Pol2-env_static_(exct-grnd)': pot2_dipole_ex_gr} # res_Pot['Pol1-env_static_(exct-grnd)'] = pot1_dipole_ex_gr # res_Pot['Pol1-env_Beta(E,E)_(exct-grnd)'] = pot1_dipole_betaEE_ex_gr # res_Pot['Pol1-env_Beta(E,E)_(trans)'] = pot1_dipole_betaEE_tr # res_Pot['Pol1-env_Alpha(E)_(trans)'] = pot1_dipole_AlphaE_tr # res_Pot['Pol1-env_Alpha(-E)_(trans)'] = pot1_dipole_Alpha_E_tr # res_Pot['Pol1-env_static_(trans)'] = pot1_dipole_static_tr # # res_Energy = {'dE_0-1': Eshift, 'dE_elstat(exct-grnd)': dAVA} # res_Energy['E_pol1_Alpha(E)'] = Polar1_AlphaE # res_Energy['E_pol2_Alpha(E)'] = Polar2_AlphaE # res_Energy['E_pol1_Alpha(-E)'] = Polar1_Alpha_E # res_Energy['E_pol2_Alpha(-E)'] = Polar2_Alpha_E # res_Energy['E_pol1_Beta(E,E)'] = Polar1_Beta_EE # res_Energy['E_pol1_static_(exct-grnd)'] = Polar1_static_ex_gr # res_Energy['E_pol2_static_(exct-grnd)'] = Polar2_static_ex_gr # res_Energy['E_pol1_Beta(E,E)_(exct-grnd)'] = Polar1_Beta_EE_ex_gr # res_Energy['E_pol1_static_(trans)_(exct)'] = Polar1_static_tr_ex # res_Energy['E_pol1_static_(trans)_(grnd)'] = Polar1_static_tr_gr # res_Energy['E_pol1_Alpha(E)_(trans)_(grnd)'] = Polar1_AlphaE_tr_gr # res_Energy['E_pol1_Alpha(-E)_(trans)_(exct)'] = Polar1_Alpha_E_tr_ex # res_Energy['E_pol1_Beta(E,E)_(trans)_(exct-grnd)'] = Polar1_Beta_EE_tr_ex_gr # res_Energy['E_elstat_trans'] = E_elst_trans res_Energy['E_pol1-env_static_(exct-grnd)'] = E_Pol1_env_static_ex_gr_FG res_Energy['E_pol2-env_static_(exct-grnd)'] = E_Pol2_env_static_ex_gr_FG res_Energy['E_pol1-env_Beta(E,E)_(exct-grnd)'] = E_Pol1_env_BetaEE_ex_gr_FG res_Energy['E_pol1-env_Beta(E,E)_(trans)'] = E_Pol1_env_BetaEE_trans_FG res_Energy['E_pol1-env_Alpha(E)_(trans)'] = E_Pol1_env_AlphaE_trans_FG res_Energy['E_pol1-env_Alpha(-E)_(trans)'] = E_Pol1_env_Alpha_E_trans_FG res_Energy['E_pol1-env_static_(trans)'] = E_Pol1_env_static_trans_FG # E_elst_grnd, E_elst_exct return res_Energy, TrDip def Calc_SingleDef_FGprop_new(filenames,ShortName,index_all,E01,AlphaE,Alpha_E,BetaE,VinterFG,FG_charges,ChargeType,order=2,verbose=False,approx=1.1,MathOut=False,CoarseGrain="plane",**kwargs): ''' Calculate the transition energy shift and transition dipole for a single defect in a fluorographene environment. Parameters ---------- filenames : dictionary Dictionary with information about all needed files which contain the necessary information for transforming the 
system into the Dielectric class and for electrostatic calculations. Keys: * ``'1def_structure'``: xyz file with FG system with single defect geometry and atom types * ``'charge_structure'``: xyz file with defect-like molecule geometry for which transition charges were calculated corresponding to first defect * ``'charge'``: file with transition charges for the defect (from TrEsp charges fitting) * ``'charge_grnd'``: file with ground state charges for the defect (from TrEsp charges fitting) * ``'charge_exct'``: file with excited state charges for the defect (from TrEsp charges fitting) ShortName : string Short description of the system index_all : list of integers (dimension 6) Specifies the indexes needed for assignment of defect atoms. First three indexes correspond to center and two main axes of reference structure (structure which was used for charges calculation) and the last three indexes are corresponding atoms of the defect. AlphaE : numpy.array of real (dimension 2x2) Atomic polarizability Alpha(E) for C-F coarse grained atoms of fluorographene in ATOMIC UNITS (Bohr^2 - because 2D) Alpha_E : numpy.array of real (dimension 2x2) Atomic polarizability Alpha(-E) for C-F coarse grained atoms of fluorographene in ATOMIC UNITS (Bohr^2 - because 2D) BetaE : numpy.array of real (dimension 2x2) Atomic polarizability Beta(E,E) for C-F coarse grained atoms of fluorographene in ATOMIC UNITS (Bohr^2 - because 2D) VinterFG : real Difference in electrostatic interaction energy between interaction of excited C-F coarse grained atom of fluorographene with all other fluorographene coarse grained atoms in ground state and interaction of ground state C-F coarse grained atom of fluorographene with all other fluorographene coarse grained atoms in ground state. Units are ATOMIC UNITS (Hartree) FG_charges : list of real (dimension 2) [charge on inner fluorographene atom, charge on border fluorographene carbon] ChargeType : string Specifies which charges should be used for electrostatic calculations (ground and excited state charges) for defect atoms. Allowed types are: ``'qchem'``, ``'qchem_all'``, ``'AMBER'`` and ``'gaussian'``. * ``'qchem'`` - charges calculated by fitting Q-Chem ESP on carbon atoms. * ``'qchem_all'`` - charges calculated by fitting Q-Chem ESP on all atoms, only carbon charges are used and the same charge is added to all carbon atoms in order to have a neutral molecule. * ``'AMBER'`` - not yet fully implemented. * ``'gaussian'`` - not yet fully implemented. order : integer (optional - init=2) Specifies how many SCF steps should be used in the calculation of induced dipoles - according to the used model it should be 2 verbose : logical (optional - init=False) If `True`, additional information about the whole process will be printed approx : real (optional - init=1.1) Specifies which approximation should be used. * **Approximation 1.1**: Neglect of `Beta(-E,-E)` and `Beta(-E,E)` and `Alpha(-E)`. * **Approximation 1.2**: Neglect of `Beta(-E,-E)` and `tilde{Beta(E)}`. * **Approximation 1.3**: `Beta(E,E)=Beta(-E,E)=Beta(-E,-E)` and also `Alpha(E)=Alpha(-E)`, however the second one is not a condition Returns -------- Eshift : Energy class Transition energy shift for the defect due to the fluorographene environment calculated from structure with single defect. 
Units are energy managed TrDip : numpy array of real (dimension 3) Total transition dipole for the defect with environment effects included calculated from structure with single defect (in ATOMIC UNITS) Notes -------- By comparing QC calculations it was found that energy shift from structure with two defects and with single defect is almost the same. ''' # read and prepare molecule mol_polar,index1,charge,struc=prepare_molecule_1Def(filenames,index_all,AlphaE,Alpha_E,BetaE,VinterFG,verbose=False,CoarseGrain=CoarseGrain,**kwargs) # calculate dAVA = <A|V|A>-<G|V|G> AditInfo={'Structure': struc,'index1': index1,'Output_exct': True} mol_Elstat,index,charge_grnd,charge_exct=ElStat_PrepareMolecule_1Def(filenames,index_all,FG_charges,ChargeType=ChargeType,verbose=False,**AditInfo) dAVA=mol_Elstat.get_EnergyShift() # dAVA2, dAVA_R = mol_Elstat.get_EnergyShift_and_Derivative() # print(dAVA,dAVA2,dAVA-dAVA2) # calculate transition energy shifts and transition dipole change # res_Energy, res_Pot, TrDip = mol_polar._TEST_Compare_SingleDefectProperties(charge,charge_grnd,charge_exct,struc,index1,dAVA=dAVA,order=order,approx=approx) Eshift,res_Energy,TrDip = mol_polar.get_SingleDefectProperties_new(charge_grnd, charge_exct, mol_Elstat, struc, index1, E01, dAVA=dAVA, order=order, approx=approx) if MathOut: if not os.path.exists("Pictures"): os.makedirs("Pictures") Bonds = GuessBonds(mol_polar.coor) struc.guess_bonds() if CoarseGrain in ["plane","C","CF"]: at_type = ['C']*mol_polar.Nat elif CoarseGrain == "all_atom": at_type = struc.at_type.copy() mat_filename = "".join(['Pictures/Charge_',ShortName,'_Exct-Grnd.nb']) params = {'TrPointCharge': mol_Elstat.charge,'rSphere_dip': 0.5,'rCylinder_dip':0.1} OutputMathematica(mat_filename,mol_Elstat.coor,struc.bonds,struc.at_type,**params) mol_Elstat.charge[index] = charge mat_filename = "".join(['Pictures/Charge_',ShortName,'_Trans.nb']) params = {'TrPointCharge': mol_Elstat.charge,'rSphere_dip': 0.5,'rCylinder_dip':0.1} OutputMathematica(mat_filename,mol_Elstat.coor,struc.bonds,struc.at_type,**params) return Eshift, TrDip '''----------------------- TEST PART --------------------------------''' if __name__=="__main__": print(' TESTS') print('-----------------------------------------') ''' Test derivation of energy d/dR ApB ''' # SETUP VERY SIMPLE SYSTEM OF TWO DEFECT ATOMS AND ONE ENVIRONMENT ATOM: coor=np.array([[-1.0,0.0,0.0],[0.0,0.0,0.0],[1.0,0.0,0.0]],dtype='f8') charge_pol=np.array([1.0,0.0,0.0],dtype='f8') dipole=np.zeros((len(coor),3),dtype='f8') AlphaE=np.array([np.zeros((3,3)),[[2.0,0.0,0.0],[0.0,2.0,0.0],[0.0,0.0,0.0]],np.zeros((3,3))],dtype='f8') pol_mol=Dielectric(coor,charge_pol,dipole,AlphaE,AlphaE,AlphaE,0.0) # definition of defect atoms and corresponding charges charge=np.array([1.0],dtype='f8') index1=[0] index2=[2] res_general=pol_mol._dR_BpA(index1,index2,charge,'AlphaE') result=np.zeros((3,3),dtype='f8') result2=np.array([[-4.0,0.0,0.0],[0.0,0.0,0.0],[4.0,0.0,0.0]],dtype='f8').reshape(3*len(coor)) R01=coor[1,:]-coor[0,:] RR01=np.sqrt(np.dot(R01,R01)) R21=coor[1,:]-coor[2,:] RR21=np.sqrt(np.dot(R21,R21)) dn=np.dot(AlphaE[1],R21/(RR21**3)) result[0,:]=charge[0]*charge[0]*(3*np.dot(R01/(RR01**5),dn)*R01-1/(RR01**3)*dn) dn=np.dot(AlphaE[1],R01/(RR01**3)) result[2,:]=charge[0]*charge[0]*(3*np.dot(R21/(RR21**5),dn)*R21-1/(RR21**3)*dn) if np.allclose(res_general,result2): print('Symm _dR_BpA simple system ... OK') else: print('Symm _dR_BpA simple system ... 
Error') print(' General result: ',res_general) print(' Analytical result:',result2) result3=np.array([[8.0,0.0,0.0],[-8.0,0.0,0.0]],dtype='f8').reshape(6) pol_mol._swap_atoms(index1,index2) res_general=pol_mol._dR_BpA(index2,index2,charge,'AlphaE') if np.allclose(res_general[3:9],result3): print('Symm _dR_ApA simple system ... OK') else: print('Symm _dR_ApA simple system ... Error') print(' General result: ',res_general) print(' Analytical result:',result3) # SETUP NON-SYMETRIC SIMPLE SYSTEM OF TWO DEFECT ATOMS AND ONE ENVIRONMENT ATOM: coor=np.array([[-1.0,0.0,0.0],[0.0,0.0,0.0],[1.0,2.0,0.0]],dtype='f8') charge_pol=np.array([1.0,0.0,0.0],dtype='f8') dipole=np.zeros((len(coor),3),dtype='f8') AlphaE=np.array([np.zeros((3,3)),[[2.0,0.0,0.0],[0.0,2.0,0.0],[0.0,0.0,0.0]],np.zeros((3,3))],dtype='f8') pol_mol=Dielectric(coor,charge_pol,dipole,AlphaE,AlphaE,AlphaE,0.0) # definition of defect atoms and corresponding charges charge=np.array([1.0],dtype='f8') index1=[0] index2=[2] res_general=pol_mol._dR_BpA(index1,index2,charge,'AlphaE') # # result=np.zeros((3,3),dtype='f8') result2=np.array([[-4.0/np.sqrt(5)**3,4.0/np.sqrt(5)**3,0.0], [6*(1/np.sqrt(5)**3-1/np.sqrt(5)**5),-4/np.sqrt(5)**3-12/np.sqrt(5)**5,0.0], [6/np.sqrt(5)**5-2/np.sqrt(5)**3,12/np.sqrt(5)**5,0.0]],dtype='f8').reshape(3*len(coor)) result=np.zeros((3,3),dtype='f8') R01=coor[1,:]-coor[0,:] RR01=np.sqrt(np.dot(R01,R01)) R21=coor[1,:]-coor[2,:] RR21=np.sqrt(np.dot(R21,R21)) dn=np.dot(AlphaE[1],R21/(RR21**3)) result[0,:]=charge[0]*charge[0]*(3*np.dot(R01/(RR01**5),dn)*R01-1/(RR01**3)*dn) dn=np.dot(AlphaE[1],R01/(RR01**3)) result[2,:]=charge[0]*charge[0]*(3*np.dot(R21/(RR21**5),dn)*R21-1/(RR21**3)*dn) #print(result2) #print(result) if np.allclose(res_general,result2): print('non-Symm _dR_BpA simple system ... OK') else: print('non-Symm _dR_BpA simple system ... Error') print(' General result: ',res_general) print(' Analytical result:',result2) result3=np.array([[0.064,0.128,0.0],[-0.064,-0.128,0.0]],dtype='f8').reshape(6) pol_mol._swap_atoms(index1,index2) res_general=pol_mol._dR_BpA(index2,index2,charge,'AlphaE') if np.allclose(res_general[3:9],result3): print('non-Symm _dR_ApA simple system ... OK') else: print('non-Symm _dR_ApA simple system ... 
Error') print(' General result: ',res_general) print(' Analytical result:',result3) # SETUP LITTLE BIT MORE COMPLICATED SYSTEM OF 2 DEFECT ATOMS AND 2ENVIRONMENT ATOMS for kk in range(2): if kk==0: coor=np.array([[-2.0,0.0,0.0],[-2.0,-1.0,0.0],[0.0,0.0,0.0],[1.0,0.0,0.0],[2.0,0.0,0.0],[2.0,1.0,0.0]],dtype='f8') else: coor=np.array([[-2.0,0.0,0.0],[-2.0,1.0,0.0],[0.0,0.0,0.0],[1.0,0.0,0.0],[2.0,0.0,0.0],[2.0,1.0,0.0]],dtype='f8') charge_pol=np.array([1.0,-1.0,0.0,0.0,0.0,0.0],dtype='f8') dipole=np.zeros((len(coor),3),dtype='f8') AlphaE=np.array([np.zeros((3,3)),np.zeros((3,3)), [[2.0,0.0,0.0],[0.0,2.0,0.0],[0.0,0.0,0.0]], [[2.0,0.0,0.0],[0.0,2.0,0.0],[0.0,0.0,0.0]], np.zeros((3,3)),np.zeros((3,3))],dtype='f8') pol_mol=Dielectric(coor,charge_pol,dipole,AlphaE,AlphaE,AlphaE,0.0) # definition of defect atoms and corresponding charges charge=np.array([1.0,-1.0],dtype='f8') index1=[0,1] index2=[4,5] res_general=pol_mol._dR_BpA(index1,index2,charge,'AlphaE') if kk==0: # for coor[1]=[-2.0,-1.0,0.0] result2=np.array([[-0.1313271490,-0.04854981982,0.0],[0.04798957640,0.07411449339,0.0], [0.0,0.0,0.0],[-0.04637925945,-0.08345754376,0.0], [0.1005284061,0.08560623298,0.0], [0.02918842589,-0.02771336278,0.0]],dtype='f8').reshape(3*len(coor)) else: # for coor[1]=[-2.0,1.0,0.0] result2=np.array([[-0.131327,-0.0485498,0.0],[0.126639,-0.0300095,0.0], [0.0,0.0624526,0.0],[-0.0195464,0.138987,0.0], [0.100528,-0.0856062,0.0],[-0.0762936,-0.037274,0.0]],dtype='f8').reshape(3*len(coor)) if np.allclose(res_general,result2): print('non-Symm _dR_BpA system',kk+1,' ... OK') else: print('non-Symm _dR_BpA system',kk+1,' ... Error') print(' General result: ',res_general) print(' Analytical result:',result2) if kk==1: res_general=pol_mol._dR_BpA(index1,index1,charge,'AlphaE') result3=np.array([[0.0759272,-0.0494062,0.0],[0.00288743,0.0479804,0.0], [-0.0738948,0.0013901,0.0],[-0.00491991,0.00003574515217,0.0]],dtype='f8').reshape(12) if np.allclose(res_general[0:12],result3): print('non-Symm _dR_ApA system',kk+1,' ... OK') else: print('non-Symm _dR_ApA system',kk+1,' ... Error') print(' General result: ',res_general) print(' Analytical result:',result3) ''' Test derivation of energy d/dR BppA ''' # SETUP NON-SYMETRIC SIMPLE SYSTEM OF TWO DEFECT ATOMS AND TWO ENVIRONMENT ATOM: coor=np.array([[-1.0,0.0,0.0],[0.0,0.0,0.0],[0.0,1.0,0.0],[1.0,0.0,0.0]],dtype='f8') charge_pol=np.array([1.0,0.0,0.0,0.0],dtype='f8') dipole=np.zeros((len(coor),3),dtype='f8') AlphaE=np.array([np.zeros((3,3)),[[2.0,0.0,0.0],[0.0,2.0,0.0],[0.0,0.0,0.0]], [[2.0,0.0,0.0],[0.0,2.0,0.0],[0.0,0.0,0.0]],np.zeros((3,3))],dtype='f8') pol_mol=Dielectric(coor,charge_pol,dipole,AlphaE,AlphaE,AlphaE,0.0) # definition of defect atoms and corresponding charges charge=np.array([1.0],dtype='f8') index1=[0] index2=[3] res_general=pol_mol._dR_BppA(index1,index2,charge,'AlphaE') result2=np.array([[3.535533906,-0.7071067812,0.0],[0.0,14.14213562,0.0], [0.0,-12.72792206,0.0],[-3.535533906,-0.7071067812,0.0], ],dtype='f8').reshape(3*len(coor)) if np.allclose(res_general,result2): print('non-Symm _dR_BppA simple system ... OK') else: print('non-Symm _dR_BppA simple system ... Error') print(' General result: ',res_general) print(' Analytical result:',result2) res_general=pol_mol._dR_BppA(index1,index1,charge,'AlphaE') result3=np.array([[-7.071067812,-9.899494937,0.0],[-2.8284271247,-2.8284271247,0.0], [9.899494937,12.72792206,0.0], ],dtype='f8').reshape(9) if np.allclose(res_general[0:9],result3): print('non-Symm _dR_AppA simple system ... 
OK') else: print('non-Symm _dR_AppA simple system ... Error') print(' General result: ',res_general[0:9]) print(' Analytical result:',result3)
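A minimal usage sketch for Calc_SingleDef_FGprop_new defined above, assuming the module is importable and the input files exist; every path, index and numeric value below is a placeholder for illustration, not data from the source.

import numpy as np
# hypothetical file layout - the dictionary keys are the ones the docstring requires
filenames = {'1def_structure': '1def.xyz',        # FG system with a single defect
             'charge_structure': 'defect.xyz',    # reference defect-like molecule
             'charge': 'defect_TrEsp.chrg',       # transition charges
             'charge_grnd': 'defect_grnd.chrg',   # ground state charges
             'charge_exct': 'defect_exct.chrg'}   # excited state charges
index_all = [0, 1, 2, 100, 101, 102]  # reference center/axes + matching defect atoms (placeholder)
AlphaE = np.diag([5.0, 5.0])          # placeholder 2x2 polarizability matrices in AU
Alpha_E = np.diag([5.0, 5.0])
BetaE = np.zeros((2, 2))
E01 = None  # transition energy of the isolated defect; must be an energy-managed value in real use
Eshift, TrDip = Calc_SingleDef_FGprop_new(filenames, 'test-system', index_all, E01,
                                          AlphaE, Alpha_E, BetaE, VinterFG=0.0,
                                          FG_charges=[0.08, 0.16], ChargeType='qchem',
                                          order=2, approx=1.1)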
51.801312
326
0.613741
22,357
165,816
4.394552
0.039585
0.007898
0.007023
0.00684
0.833708
0.806624
0.777148
0.758328
0.737738
0.725361
0
0.034527
0.263768
165,816
3,201
327
51.801312
0.770272
0.409333
0
0.592022
0
0.004199
0.129302
0.03317
0
0
0
0.002812
0
1
0.016795
false
0
0.013996
0
0.048985
0.069979
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
f98d81ae747cfbf08b763b71e666b4f950867a0e
6,640
py
Python
test/test_types.py
Josef-Friedrich/lively-lights
6b601bd7523fc5cd8bb8cacbe9dfd691229333f2
[ "MIT" ]
null
null
null
test/test_types.py
Josef-Friedrich/lively-lights
6b601bd7523fc5cd8bb8cacbe9dfd691229333f2
[ "MIT" ]
null
null
null
test/test_types.py
Josef-Friedrich/lively-lights
6b601bd7523fc5cd8bb8cacbe9dfd691229333f2
[ "MIT" ]
null
null
null
import unittest from lively_lights.types import _range, \ _list, \ _comma, \ brightness, \ hue, \ light_id, \ time, \ transition_time, \ saturation class TestPrivateList(unittest.TestCase): def test_list(self): self.assertEqual(_list([1, 2], hue), (1, 2)) def test_tuple(self): self.assertEqual(_list((1, 2), hue), (1, 2)) def test_one(self): self.assertEqual(_list((1, ), hue), (1,)) def test_three(self): self.assertEqual(_list((1, 2, 3), hue), (1, 2, 3)) def test_wrong_inner_type(self): with self.assertRaises(ValueError): _list(('lol', 2), hue) class TestPrivateRange(unittest.TestCase): def test_list(self): self.assertEqual(_range([1, 2], hue), (1, 2)) def test_tuple(self): self.assertEqual(_range((1, 2), hue), (1, 2)) def test_less(self): with self.assertRaises(ValueError): _range((1, ), hue) def test_more(self): with self.assertRaises(ValueError): _range((1, 2, 3), hue) def test_max_less_than_min(self): with self.assertRaises(ValueError): _range((2, 1), hue) def test_wrong_inner_type(self): with self.assertRaises(ValueError): _range(('lol', 2), hue) class TestPrivateListComma(unittest.TestCase): def test_list(self): self.assertEqual(_comma('1,2', light_id), (1, 2)) class TestBrightness(unittest.TestCase): def test_valid_min(self): self.assertEqual(brightness(1), 1) def test_valid_normal(self): self.assertEqual(brightness(100), 100) def test_valid_max(self): self.assertEqual(brightness(254), 254) def test_valid_min_string(self): self.assertEqual(brightness('1'), 1) def test_valid_max_string(self): self.assertEqual(brightness('254'), 254) def test_valid_float(self): self.assertEqual(brightness(2.3), 2) def test_valid_float_cut(self): self.assertEqual(brightness(2.9), 2) def test_invalid_min(self): with self.assertRaises(ValueError): brightness(0) def test_invalid_max(self): with self.assertRaises(ValueError): brightness(255) def test_invalid_negativ(self): with self.assertRaises(ValueError): brightness(-1) def test_invalid_string(self): with self.assertRaises(ValueError): brightness('lol') class TestHue(unittest.TestCase): def test_valid_min(self): self.assertEqual(hue(0), 0) def test_valid_normal(self): self.assertEqual(hue(100), 100) def test_valid_max(self): self.assertEqual(hue(65535), 65535) def test_valid_min_string(self): self.assertEqual(hue('0'), 0) def test_valid_max_string(self): self.assertEqual(hue('65535'), 65535) def test_valid_float(self): self.assertEqual(hue(2.3), 2) def test_valid_float_cut(self): self.assertEqual(hue(2.9), 2) def test_invalid_min(self): with self.assertRaises(ValueError): hue(-1) def test_invalid_max(self): with self.assertRaises(ValueError): hue(65536) def test_invalid_string(self): with self.assertRaises(ValueError): hue('lol') class TestLightId(unittest.TestCase): def test_valid_min(self): self.assertEqual(light_id(1), 1) def test_valid_normal(self): self.assertEqual(light_id(100), 100) def test_valid_min_string(self): self.assertEqual(light_id('1'), 1) def test_valid_float(self): self.assertEqual(light_id(2.3), 2) def test_valid_float_cut(self): self.assertEqual(light_id(2.9), 2) def test_invalid_min(self): with self.assertRaises(ValueError): light_id(0) def test_invalid_string(self): with self.assertRaises(ValueError): light_id('lol') class TestTime(unittest.TestCase): def test_valid_min(self): self.assertEqual(time(0), 0) def test_valid_normal(self): self.assertEqual(time(10), 10) def test_valid_min_string(self): self.assertEqual(time('0'), 0) def test_valid_float(self): self.assertEqual(time(2.3), 2.3) def test_invalid_min(self): with 
self.assertRaises(ValueError): time(-1) def test_invalid_string(self): with self.assertRaises(ValueError): time('lol') class TestTransitionTime(unittest.TestCase): def test_valid_min(self): self.assertEqual(transition_time(0), 0) def test_valid_normal(self): self.assertEqual(transition_time(10), 100) def test_valid_max(self): self.assertEqual(transition_time(6553.5), 65535) def test_valid_min_string(self): self.assertEqual(transition_time('0'), 0) def test_valid_max_string(self): self.assertEqual(transition_time('6553.5'), 65535) def test_valid_float(self): self.assertEqual(transition_time(2.3), 23) def test_valid_float_cut(self): self.assertEqual(transition_time(2.9), 29) def test_invalid_min(self): with self.assertRaises(ValueError): transition_time(-1) def test_invalid_max(self): with self.assertRaises(ValueError): transition_time(6553.6) def test_invalid_string(self): with self.assertRaises(ValueError): transition_time('lol') class TestSaturation(unittest.TestCase): def test_valid_min(self): self.assertEqual(saturation(0), 0) def test_valid_normal(self): self.assertEqual(saturation(100), 100) def test_valid_max(self): self.assertEqual(saturation(254), 254) def test_valid_min_string(self): self.assertEqual(saturation('0'), 0) def test_valid_max_string(self): self.assertEqual(saturation('254'), 254) def test_valid_float(self): self.assertEqual(saturation(2.3), 2) def test_valid_float_cut(self): self.assertEqual(saturation(2.9), 2) def test_invalid_min(self): with self.assertRaises(ValueError): saturation(-1) def test_invalid_max(self): with self.assertRaises(ValueError): saturation(255) def test_invalid_string(self): with self.assertRaises(ValueError): saturation('lol')
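The tests above pin down the contract of each validator: accept ints, numeric strings and floats (floats are truncated), and raise ValueError outside the allowed range. A sketch of a validator consistent with TestBrightness, assuming nothing about the real lively_lights.types implementation:

def brightness(value):
    # coerce '254' -> 254 and 2.9 -> 2, as the tests expect
    try:
        value = int(float(value))
    except (TypeError, ValueError):
        raise ValueError('brightness must be a number, got {!r}'.format(value))
    # Hue-style brightness runs from 1 to 254
    if not 1 <= value <= 254:
        raise ValueError('brightness out of range [1, 254]: {}'.format(value))
    return value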
25.736434
58
0.619127
810
6,640
4.855556
0.085185
0.117468
0.21256
0.134249
0.877447
0.852784
0.792525
0.745741
0.680651
0.370455
0
0.044208
0.264157
6,640
257
59
25.836576
0.760745
0
0
0.473988
0
0
0.00753
0
0
0
0
0
0.381503
1
0.381503
false
0
0.011561
0
0.445087
0
0
0
0
null
0
1
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
6
f9ad3c933237af5c80fd426b9d9d5f887748fe43
34
py
Python
language-model-train/lm/trainer/__init__.py
azagsam/cross-lingual-summarization
402871dcf7a385cda90914574de24aad7133acf9
[ "Unlicense" ]
null
null
null
language-model-train/lm/trainer/__init__.py
azagsam/cross-lingual-summarization
402871dcf7a385cda90914574de24aad7133acf9
[ "Unlicense" ]
null
null
null
language-model-train/lm/trainer/__init__.py
azagsam/cross-lingual-summarization
402871dcf7a385cda90914574de24aad7133acf9
[ "Unlicense" ]
null
null
null
from . import language_model_char
17
33
0.852941
5
34
5.4
1
0
0
0
0
0
0
0
0
0
0
0
0.117647
34
1
34
34
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f9ae0be0b5f703b4fe3a5fc3c5ec87262bf60e1c
37
py
Python
steamreedem/__init__.py
Sinf0r0s0/Steam-Reedem
672238081b2c43407d61f5b3fc9c149b1ceeb640
[ "MIT" ]
null
null
null
steamreedem/__init__.py
Sinf0r0s0/Steam-Reedem
672238081b2c43407d61f5b3fc9c149b1ceeb640
[ "MIT" ]
null
null
null
steamreedem/__init__.py
Sinf0r0s0/Steam-Reedem
672238081b2c43407d61f5b3fc9c149b1ceeb640
[ "MIT" ]
null
null
null
from .steamreedem import Steamreedem
18.5
36
0.864865
4
37
8
0.75
0
0
0
0
0
0
0
0
0
0
0
0.108108
37
1
37
37
0.969697
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f9d338d0de128f1ea414f1a2e8a396bdee59ab29
103
py
Python
clusterval/datasets/__init__.py
Nuno09/clusterval
4844fd75a658b7ced6c78e4f79f0b308870f9adf
[ "BSD-2-Clause" ]
3
2020-11-27T10:49:40.000Z
2021-12-13T02:52:29.000Z
clusterval/datasets/__init__.py
Nuno09/clusterval
4844fd75a658b7ced6c78e4f79f0b308870f9adf
[ "BSD-2-Clause" ]
null
null
null
clusterval/datasets/__init__.py
Nuno09/clusterval
4844fd75a658b7ced6c78e4f79f0b308870f9adf
[ "BSD-2-Clause" ]
null
null
null
from clusterval.datasets.datasets import load_vote_repub, load_animals, load_khan_train, load_khan_test
103
103
0.893204
16
103
5.3125
0.6875
0.188235
0
0
0
0
0
0
0
0
0
0
0.058252
103
1
103
103
0.876289
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f9e6c3fef4569f8da9733e02b16213d45d681af1
102
py
Python
src/jbdl/rbdl/utils/__init__.py
yz-mao/jbdl
a5380233b3795c8aaa9acd9e5c07fa44f8a5dadb
[ "MIT" ]
21
2021-08-29T06:59:18.000Z
2022-01-13T22:53:02.000Z
src/jbdl/rbdl/utils/__init__.py
yz-mao/jbdl
a5380233b3795c8aaa9acd9e5c07fa44f8a5dadb
[ "MIT" ]
2
2021-08-31T08:34:09.000Z
2021-09-06T07:40:51.000Z
src/jbdl/rbdl/utils/__init__.py
yz-mao/jbdl
a5380233b3795c8aaa9acd9e5c07fa44f8a5dadb
[ "MIT" ]
4
2021-08-29T06:59:22.000Z
2021-10-04T05:59:41.000Z
from .wrapper import ModelWrapper
from .xyz2int import xyz2int
from .calc_rank_jc import calc_rank_jc
25.5
38
0.852941
16
102
5.1875
0.5
0.192771
0.240964
0
0
0
0
0
0
0
0
0.022222
0.117647
102
3
39
34
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
fb1147b2e427c6a5a79a4494ed1510b05386b40b
131
py
Python
SecQureSdk/__init__.py
engineering-secuuth/secuuth-jwt-python-sdk
77b985d4aa9d51f1b37ebd558916d09d2d611e0d
[ "MIT" ]
null
null
null
SecQureSdk/__init__.py
engineering-secuuth/secuuth-jwt-python-sdk
77b985d4aa9d51f1b37ebd558916d09d2d611e0d
[ "MIT" ]
null
null
null
SecQureSdk/__init__.py
engineering-secuuth/secuuth-jwt-python-sdk
77b985d4aa9d51f1b37ebd558916d09d2d611e0d
[ "MIT" ]
null
null
null
from SecQureSdk.accessToken import accessToken
from SecQureSdk.idToken import idToken
from SecQureSdk.renewToken import renewToken
32.75
46
0.885496
15
131
7.733333
0.4
0.362069
0
0
0
0
0
0
0
0
0
0
0.091603
131
3
47
43.666667
0.97479
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
348a9a5f2d0f489f9a46f0b1186bb2ce70e239e9
249
py
Python
octue/utils/gen_uuid.py
octue/octue-sdk-python
31c6e9358d3401ca708f5b3da702bfe3be3e52ce
[ "MIT" ]
5
2020-10-01T12:43:10.000Z
2022-03-14T17:26:25.000Z
octue/utils/gen_uuid.py
octue/octue-sdk-python
31c6e9358d3401ca708f5b3da702bfe3be3e52ce
[ "MIT" ]
322
2020-06-24T15:55:22.000Z
2022-03-30T11:49:28.000Z
octue/utils/gen_uuid.py
octue/octue-sdk-python
31c6e9358d3401ca708f5b3da702bfe3be3e52ce
[ "MIT" ]
null
null
null
import uuid


def gen_uuid():
    """Generates a unique identifier for an object

    TODO - generate using an Octue api call, so we can register and find objects later using their UUID

    :return: uuid string
    """
    return str(uuid.uuid4())
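A quick usage check for gen_uuid, assuming only the standard library:

uid = gen_uuid()
print(uid)                                       # e.g. '1b9d6bcd-bbfd-4b2d-9b5d-ab8dfbbd4bed'
assert isinstance(uid, str) and len(uid) == 36   # canonical uuid4 string form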
20.75
103
0.686747
37
249
4.594595
0.810811
0
0
0
0
0
0
0
0
0
0
0.005291
0.240964
249
11
104
22.636364
0.89418
0.662651
0
0
1
0
0
0
0
0
0
0.090909
0
1
0.333333
true
0
0.333333
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
1
0
0
1
1
0
1
0
1
0
0
6
349695eb270473410280f27d57ab6267c34e42c2
17,354
py
Python
src/data_analysis/decoy_analysis.py
sidhikabalachandar/lig_clash_score
449bac16a7c2b9779e7cd51ff17eb5e41be6ff99
[ "FTL" ]
null
null
null
src/data_analysis/decoy_analysis.py
sidhikabalachandar/lig_clash_score
449bac16a7c2b9779e7cd51ff17eb5e41be6ff99
[ "FTL" ]
null
null
null
src/data_analysis/decoy_analysis.py
sidhikabalachandar/lig_clash_score
449bac16a7c2b9779e7cd51ff17eb5e41be6ff99
[ "FTL" ]
null
null
null
""" The purpose of this code is to create the split files It can be run on sherlock using $ /home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python decoy_analysis.py gnn_dict_all /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random.txt /home/users/sidhikab/lig_clash_score/src/data_analysis/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/pdbbind_refined_set_labels.csv /home/users/sidhikab/lig_clash_score/reports/figures /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/raw /home/users/sidhikab/lig_clash_score/src/data_analysis/gnn_code_dict /home/users/sidhikab/lig_clash_score/src/data_analysis/glide_code_dict $ /home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python decoy_analysis.py gnn_dict_group /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random.txt /home/users/sidhikab/lig_clash_score/src/data_analysis/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/pdbbind_refined_set_labels.csv /home/users/sidhikab/lig_clash_score/reports/figures /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/raw /home/users/sidhikab/lig_clash_score/src/data_analysis/gnn_code_dict/205.pkl /home/users/sidhikab/lig_clash_score/src/data_analysis/glide_code_dict --index 205 $ /home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python decoy_analysis.py gnn_dict_check /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random.txt /home/users/sidhikab/lig_clash_score/src/data_analysis/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/pdbbind_refined_set_labels.csv /home/users/sidhikab/lig_clash_score/reports/figures /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/raw /home/users/sidhikab/lig_clash_score/src/data_analysis/gnn_code_dict /home/users/sidhikab/lig_clash_score/src/data_analysis/glide_code_dict $ /home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python decoy_analysis.py glide_dict_all /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random.txt /home/users/sidhikab/lig_clash_score/src/data_analysis/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/pdbbind_refined_set_labels.csv /home/users/sidhikab/lig_clash_score/reports/figures /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/raw /home/users/sidhikab/lig_clash_score/src/data_analysis/gnn_code_dict /home/users/sidhikab/lig_clash_score/src/data_analysis/glide_code_dict $ /home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python decoy_analysis.py glide_dict_group /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random.txt /home/users/sidhikab/lig_clash_score/src/data_analysis/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/pdbbind_refined_set_labels.csv /home/users/sidhikab/lig_clash_score/reports/figures /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/raw /home/users/sidhikab/lig_clash_score/src/data_analysis/gnn_code_dict /home/users/sidhikab/lig_clash_score/src/data_analysis/glide_code_dict/0.pkl --index 0 $ /home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python decoy_analysis.py glide_dict_check /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random.txt /home/users/sidhikab/lig_clash_score/src/data_analysis/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/pdbbind_refined_set_labels.csv 
/home/users/sidhikab/lig_clash_score/reports/figures /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/raw /home/users/sidhikab/lig_clash_score/src/data_analysis/gnn_code_dict /home/users/sidhikab/lig_clash_score/src/data_analysis/glide_code_dict $ /home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python decoy_analysis.py graph /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random.txt /home/users/sidhikab/lig_clash_score/src/data_analysis/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/pdbbind_refined_set_labels.csv /home/users/sidhikab/lig_clash_score/reports/figures /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/raw /home/users/sidhikab/lig_clash_score/src/data_analysis/gnn_code_dict /home/users/sidhikab/lig_clash_score/src/data_analysis/glide_code_dict """ import argparse import pickle import pandas as pd import matplotlib.pyplot as plt import os from tqdm import tqdm import seaborn as sns import random import sys sys.path[-2] = '/home/users/sidhikab/lig_clash_score/src' from atom3d.protein_ligand.get_labels import get_label MAX_POSES = 100 CUTOFF = 2 LABELS =['gnn with correct pose', 'gnn without correct pose', 'glide'] N = 3 def graph(title, ls, save_root): n_bins = 1000 fig, ax = plt.subplots() # plot the cumulative histogram for i in range(len(ls)): ax.hist(ls[i], n_bins, density=True, histtype='step', cumulative=True, label=LABELS[i]) ax.grid(True) ax.legend(loc='lower right') ax.set_title(title + ' Pose Cumulative step histograms') ax.set_xlabel('Docking performance (RMSD)') ax.set_ylabel('Cumulative Frequency') plt.savefig(os.path.join(save_root, title + '.png')) def bar_graph(all_freq_ls, save_root): sns.set_context("talk", font_scale=0.8) unnormalized_all_graph_rmsds = [] label = 'Best over \nall sampled \nposes' unnormalized_all_graph_rmsds.append([label, all_freq_ls[0], 'GNN with correct pose']) unnormalized_all_graph_rmsds.append([label, all_freq_ls[1], 'GNN without correct pose']) unnormalized_all_graph_rmsds.append([label, all_freq_ls[2], 'Glide']) df = pd.DataFrame(unnormalized_all_graph_rmsds) df.columns = ['Type', 'Percent', 'Legend'] g = sns.catplot(x='Type', y='Percent', hue='Legend', data=df, kind="bar") # plt.title('Unormalized') ax = plt.gca() ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') plt.savefig(os.path.join(save_root, 'glide_vs_gnn.png')) def get_prots(docked_prot_file): """ gets list of all protein, target ligands, and starting ligands in the index file :param docked_prot_file: (string) file listing proteins to process :return: process (list) list of all protein, target ligands, and starting ligands to process """ process = [] with open(docked_prot_file) as fp: for line in fp: if line[0] == '#': continue protein, target, start = line.strip().split() process.append((protein, target, start)) return process def group_files(n, process): """ groups pairs into sublists of size n :param n: (int) sublist size :param process: (list) list of pairs to process :return: grouped_files (list) list of sublists of pairs """ grouped_files = [] for i in range(0, len(process), n): grouped_files += [process[i: i + n]] return grouped_files def get_gnn_code_dict(process, pkl_file, label_file, raw_root): """ gets list of all protein, target ligands, starting ligands, and starting indices information in the index file (up to CUTOFF) :param process: (list) shuffled list of all protein, target ligands, and starting ligands to process :param pkl_file: (string) 
file containing list of all protein, target ligands, starting ligands, and starting indices information (or file path where this information will be saved) :param label_file: (string) file containing rmsd label information :param raw_root: (string) path to directory with data :return: grouped_files (list) list of all protein, target ligands, starting ligands, and starting indices to process """ label_df = pd.read_csv(label_file) gnn_code_dict = {} for protein, target, start in tqdm(process, desc='going through protein, target, start groups'): if (protein, target, start) not in gnn_code_dict: gnn_code_dict[(protein, target, start)] = [] protein_path = os.path.join(raw_root, protein) pair_path = os.path.join(protein_path, '{}-to-{}'.format(target, start)) graph_dir = '{}/{}-to-{}_graph.pkl'.format(pair_path, target, start) infile = open(graph_dir, 'rb') graph_data = pickle.load(infile) infile.close() for pdb_code in graph_data: if len(label_df[label_df['target'] == pdb_code]) != 0: gnn_code_dict[(protein, target, start)].append((pdb_code, get_label(pdb_code, label_df))) outfile = open(pkl_file, 'wb') pickle.dump(gnn_code_dict, outfile) return gnn_code_dict def combine_code_dict(gnn_code_dict_root): """ gets list of all protein, target ligands, starting ligands, and starting indices information in the index file (up to CUTOFF) :param process: (list) shuffled list of all protein, target ligands, and starting ligands to process :param pkl_file: (string) file containing list of all protein, target ligands, starting ligands, and starting indices information (or file path where this information will be saved) :param label_file: (string) file containing rmsd label information :param raw_root: (string) path to directory with data :return: grouped_files (list) list of all protein, target ligands, starting ligands, and starting indices to process """ gnn_code_dict = {} for file in os.listdir(gnn_code_dict_root): infile = open(os.path.join(gnn_code_dict_root, file), 'rb') in_dict = pickle.load(infile) infile.close() gnn_code_dict.update(in_dict) return gnn_code_dict def get_glide_code_dict(process, pkl_file, label_file, raw_root): """ gets list of all protein, target ligands, starting ligands, and starting indices information in the index file (up to CUTOFF) :param process: (list) shuffled list of all protein, target ligands, and starting ligands to process :param pkl_file: (string) file containing list of all protein, target ligands, starting ligands, and starting indices information (or file path where this information will be saved) :param label_file: (string) file containing rmsd label information :param raw_root: (string) path to directory with data :return: grouped_files (list) list of all protein, target ligands, starting ligands, and starting indices to process """ label_df = pd.read_csv(label_file) glide_code_dict = {} for protein, target, start in tqdm(process, desc='going through protein, target, start groups'): if (protein, target, start) not in glide_code_dict: glide_code_dict[(protein, target, start)] = [] protein_path = os.path.join(raw_root, protein) pair_path = os.path.join(protein_path, '{}-to-{}'.format(target, start)) pose_path = os.path.join(pair_path, 'ligand_poses') for i in range(1, MAX_POSES): pdb_code = '{}_lig{}'.format(target, i) if os.path.exists(os.path.join(pose_path, '{}.sdf'.format(pdb_code))) and \ len(label_df[label_df['target'] == pdb_code]) != 0: glide_code_dict[(protein, target, start)].append((pdb_code, get_label(pdb_code, label_df))) outfile = open(pkl_file, 
'wb') pickle.dump(glide_code_dict, outfile) return glide_code_dict def main(): parser = argparse.ArgumentParser() parser.add_argument('task', type=str, help='file listing proteins to process') parser.add_argument('docked_prot_file', type=str, help='file listing proteins to process') parser.add_argument('run_path', type=str, help='file listing proteins to process') parser.add_argument('label_file', type=str, help='file listing proteins to process') parser.add_argument('graph_save_root', type=str, help='file listing proteins to process') parser.add_argument('raw_root', type=str, help='file listing proteins to process') parser.add_argument('gnn_code_dict', type=str) parser.add_argument('glide_code_dict', type=str) parser.add_argument('--index', type=int, default=-1) args = parser.parse_args() random.seed(0) if not os.path.exists(args.run_path): print(args.run_path) os.mkdir(args.run_path) if args.task == 'gnn_dict_all': process = get_prots(args.docked_prot_file) random.shuffle(process) grouped_files = group_files(N, process) if not os.path.exists(args.run_path): os.mkdir(args.run_path) for i, group in enumerate(grouped_files): cmd = 'sbatch -p owners -t 1:00:00 -o {} --wrap="' \ '/home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python decoy_analysis.py ' \ 'gnn_dict_group {} {} {} {} {} {}/{}.pkl {} --index {}"' os.system(cmd.format(os.path.join(args.run_path, 'gnn_dict{}.out'.format(i)), args.docked_prot_file, args.run_path, args.label_file, args.graph_save_root, args.raw_root, args.gnn_code_dict, i, args.glide_code_dict, i)) # print(cmd.format(os.path.join(args.run_path, 'gnn_dict{}.out'.format(i)), args.docked_prot_file, # args.run_path, args.label_file, args.graph_save_root, args.raw_root, # args.gnn_code_dict, i, args.glide_code_dict, i)) if args.task == 'gnn_dict_group': process = get_prots(args.docked_prot_file) random.shuffle(process) grouped_files = group_files(N, process) get_gnn_code_dict(grouped_files[args.index], args.gnn_code_dict, args.label_file, args.raw_root) if args.task == 'gnn_dict_check': process = get_prots(args.docked_prot_file) random.shuffle(process) grouped_files = group_files(N, process) unfinished = [] for i in range(len(grouped_files)): if not os.path.exists('{}/{}.pkl'.format(args.gnn_code_dict, i)): unfinished.append(i) print('Missing', len(unfinished), '/', len(grouped_files)) print(unfinished) if args.task == 'glide_dict_all': process = get_prots(args.docked_prot_file) random.shuffle(process) grouped_files = group_files(N, process) if not os.path.exists(args.run_path): os.mkdir(args.run_path) for i, group in enumerate(grouped_files): cmd = 'sbatch -p owners -t 1:00:00 -o {} --wrap="' \ '/home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python decoy_analysis.py ' \ 'glide_dict_group {} {} {} {} {} {} {}/{}.pkl --index {}"' os.system(cmd.format(os.path.join(args.run_path, 'gnn_dict{}.out'.format(i)), args.docked_prot_file, args.run_path, args.label_file, args.graph_save_root, args.raw_root, args.gnn_code_dict, args.glide_code_dict, i, i)) if args.task == 'glide_dict_group': process = get_prots(args.docked_prot_file) random.shuffle(process) grouped_files = group_files(N, process) get_glide_code_dict(grouped_files[args.index], args.glide_code_dict, args.label_file, args.raw_root) if args.task == 'glide_dict_check': process = get_prots(args.docked_prot_file) random.shuffle(process) grouped_files = group_files(N, process) unfinished = [] for i in range(len(grouped_files)): if not 
os.path.exists('{}/{}.pkl'.format(args.glide_code_dict, i)): unfinished.append(i) print('Missing', len(unfinished), '/', len(grouped_files)) print(unfinished) if args.task == 'graph': process = get_prots(args.docked_prot_file) random.shuffle(process) gnn_code_dict = combine_code_dict(args.gnn_code_dict) glide_code_dict = combine_code_dict(args.glide_code_dict) # index 0 is gnn pred index, 1 is gnn without ground truth, 2 is glide all_ls = [[], [], []] error_count = 0 for protein, target, start in gnn_code_dict: if len(glide_code_dict[(protein, target, start)]) != 0: all_ls[0].append(min(gnn_code_dict[(protein, target, start)], key=lambda x: x[1])[1]) if min(gnn_code_dict[(protein, target, start)], key=lambda x: x[1])[1] == 0: all_ls[1].append(sorted(gnn_code_dict[(protein, target, start)], key=lambda x: x[1])[1][1]) else: all_ls[1].append(min(gnn_code_dict[(protein, target, start)], key=lambda x: x[1])[1]) all_ls[2].append(min(glide_code_dict[(protein, target, start)], key=lambda x: x[1])[1]) else: error_count += 1 print('Error count =', error_count) all_freq_ls = [] for i in range(len(all_ls)): all_freq_ls.append(len([j for j in all_ls[i] if j < CUTOFF]) * 100 / len(all_ls[i])) print("Labels:", LABELS) print("All frequencies:", all_freq_ls) graph('all', all_ls, args.graph_save_root) bar_graph(all_freq_ls, args.graph_save_root) if __name__=="__main__": main()
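The batching logic above hinges on group_files, which chunks the (protein, target, start) triples into sublists of size N so that each sbatch job handles one chunk. A self-contained illustration (the triples below are made up):

def group_files(n, process):   # same definition as in the module above
    grouped_files = []
    for i in range(0, len(process), n):
        grouped_files += [process[i: i + n]]
    return grouped_files

pairs = [('P1', 'lig2', 'lig1'), ('P1', 'lig3', 'lig1'), ('P2', 'lig5', 'lig4')]
print(group_files(2, pairs))
# [[('P1', 'lig2', 'lig1'), ('P1', 'lig3', 'lig1')], [('P2', 'lig5', 'lig4')]]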
56.898361
629
0.705601
2,484
17,354
4.714171
0.109098
0.042357
0.031939
0.04953
0.802989
0.772502
0.758497
0.739112
0.734244
0.721093
0
0.006095
0.177538
17,354
305
630
56.898361
0.814335
0.415005
0
0.328205
0
0.010256
0.137651
0.020783
0
0
0
0
0
1
0.041026
false
0
0.051282
0
0.117949
0.041026
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
34d8fd69682f92c8bc856e2fa6dae8be41380aa0
53,515
py
Python
board.py
bidetaggle/skaak
b51ab7303409fe2b6a288568dc3e904ac610a6a3
[ "MIT" ]
null
null
null
board.py
bidetaggle/skaak
b51ab7303409fe2b6a288568dc3e904ac610a6a3
[ "MIT" ]
null
null
null
board.py
bidetaggle/skaak
b51ab7303409fe2b6a288568dc3e904ac610a6a3
[ "MIT" ]
null
null
null
import typing as t import re import math class Chessboard(object): STARTING_FEN = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR" #def __init__(self, fen: str = Chessboard.STARTING_FEN) -> None: def __init__(self, fen: str = STARTING_FEN) -> None: self.board_index: t.Dict[str, t.Dict[str, t.Union[None, str, int]]] = {} self.files = "abcdefgh" self.fen = fen # - Initializing functions self.init() def init(self) -> None: # - Keeps track of which square is being worked on : square_index = 1 file_index = 1 while square_index <= 64: # - Data used to define the square square_rank = 8 - int(square_index / 8) square_file = self.files[file_index - 1] if square_index % 8 == 0: square_rank += 1 if square_rank < 1: break square_ref = "{file}{rank}".format(file=square_file, rank=square_rank) self.board_index[square_ref] = {"index": square_index, "color": None, "type": None} if file_index % 8 == 0: file_index = 1 else: file_index += 1 square_index += 1 def get_rank_squares(self, rank: int) -> t.List[str]: squares: t.List[str] = [] for square in self.board_index: if str(square)[1] == str(rank): squares.append(str(square)) return squares def draw_rank(self, rank: int) -> str: result: t.List[str] = [] squares = self.get_rank_squares(rank) for square in squares: if self.board_index[square]["type"] != None: result.append(self.board_index[square]["type"]) # type: ignore else: result.append("-") drawing = "" for square in result: drawing += square drawing += " " return drawing def draw_ascii(self) -> None: rank_index = 8 while rank_index >= 1: print("") print( "{rank} | {rank_drawing}".format(rank=rank_index, rank_drawing=self.draw_rank(rank_index)) ) print("") rank_index -= 1 print("-" * 44) print("") print(" a b c d e f g h".upper()) def reset_board_position(self) -> None: self.position(Chessboard.STARTING_FEN) def get_ref_from_index(self, index: int) -> str: for square in self.board_index: if int(self.board_index[str(square)]["index"]) == index: # type: ignore return str(square) else: raise IndexError("index not found") # mypy complaining about it not returning in all cases. 
def def_piece_colors(self) -> None: for square in self.board_index: self.board_index[str(square)]["color"] = self.def_square_color(str(square)) def position(self, fen: str) -> None: square_index = 1 self.fen = fen fen = self.parse_fen(fen) for char in fen: if char == "1": self.board_index[self.get_ref_from_index(square_index)]["type"] = None elif char == "/": square_index = square_index square_index -= 1 elif re.match("[a-zA-Z]+", char): self.board_index[self.get_ref_from_index(square_index)]["type"] = char square_index += 1 def parse_fen(self, fen: str) -> str: resulting_fen = "" for char in fen: if re.match("[0-9]+", char): resulting_fen += "1" * int(char) else: resulting_fen += char return resulting_fen def highlight_moves(self, squares: t.List[t.Optional[str]]) -> None: for square in squares: if square == None: pass else: if self.board_index[str(square)]["type"] == None: self.board_index[str(square)]["type"] = "*" else: self.board_index[str(square)]["type"] += "*" # type: ignore def def_square_color(self, square: str) -> t.Optional[str]: piece = self.board_index[str(square)]["type"] if piece == None: return None if re.match("[a-z]+", str(piece)): self.board_index[str(square)]["color"] = "b" elif re.match("[A-Z]+", str(piece)): self.board_index[str(square)]["color"] = "w" return self.board_index[str(square)]["color"] # type: ignore def clean_moves(self, origin: str, moves: t.List[t.Optional[str]]) -> t.List[str]: clean_moves: t.List[str] = [] for move in moves: if move == None: pass else: if self.board_index[str(origin)]["color"] == self.board_index[str(move)]["color"]: pass else: clean_moves.append(str(move)) return clean_moves def calc_board_position_pos_moves(self, fen: str) -> t.List[t.Dict[str, str]]: moves: t.List[t.Dict[str, str]] = [] self.position(fen) for square in self.board_index: piece = self.board_index[str(square)]["type"] color = self.board_index[str(square)]["color"] possible_moves = self.calc_piece_pos_moves(piece, str(square), color) # type: ignore for move in possible_moves: move_object = { "origin": "{origin}".format(origin=str(square)), "dest": "{dest}".format(dest=str(move)), } moves.append(move_object) return moves def calc_piece_pos_moves(self, piece: str, pos: str, color: str) -> t.List[t.Optional[str]]: possible_moves: t.List[t.Optional[str]] = [] # - Calculates moves for a knight (N / n) if piece == "n" or piece == "N": position_index = int(self.board_index[pos]["index"]) # type: ignore possible_moves.append(self.get_ref_from_index(position_index - 17)) possible_moves.append(self.get_ref_from_index(position_index - 15)) possible_moves.append(self.get_ref_from_index(position_index - 10)) possible_moves.append(self.get_ref_from_index(position_index - 6)) possible_moves.append(self.get_ref_from_index(position_index + 6)) possible_moves.append(self.get_ref_from_index(position_index + 10)) possible_moves.append(self.get_ref_from_index(position_index + 15)) possible_moves.append(self.get_ref_from_index(position_index + 17)) if pos[0] == "a" or pos[0] == "b": possible_moves[4] = None possible_moves[2] = None if pos[0] == "a": possible_moves[0] = None possible_moves[6] = None if pos[0] == "g" or pos[0] == "h": possible_moves[3] = None possible_moves[5] = None if pos[0] == "h": possible_moves[1] = None possible_moves[7] = None if pos[1] == "7" or pos[1] == "8": possible_moves[0] = None possible_moves[1] = None if pos[1] == "8": possible_moves[2] = None possible_moves[3] = None if pos[1] == "1" or pos[1] == "2": possible_moves[6] = None possible_moves[7] = None if pos[1] == "1" or pos[1] == 
"h": possible_moves[4] = None possible_moves[5] = None # - Calculates moves for a bishop (B / b) if piece == "b" or piece == "B": og_pos_index: int = self.board_index[pos]["index"] # type: ignore valid = True index = og_pos_index curr_square = index - 9 diag_index = 9 og_color = self.board_index[pos]["color"] op_color = None if og_color == "w": op_color = "b" else: op_color = "w" directions = 0 while valid: if directions == 4: break if curr_square < 1 or curr_square > 64: break square_ref = str(self.get_ref_from_index(curr_square)) if self.board_index[square_ref]["color"] == og_color: directions += 1 index = og_pos_index if directions == 1: curr_square = index + 9 elif directions == 2: curr_square = index - 7 elif directions == 3: curr_square = index + 7 elif self.board_index[square_ref]["color"] == op_color: possible_moves.append(square_ref) directions += 1 index = og_pos_index if directions == 1: curr_square = index + 9 elif directions == 2: curr_square = index - 7 elif directions == 3: curr_square = index + 7 elif self.is_edge_square(square_ref): possible_moves.append(square_ref) directions += 1 index = og_pos_index if directions == 1: curr_square = index + 9 elif directions == 2: curr_square = index - 7 elif directions == 3: curr_square = index + 7 else: possible_moves.append(square_ref) index = self.board_index[square_ref]["index"] # type: ignore if directions == 0: curr_square = index - 9 elif directions == 1: curr_square = index + 9 elif directions == 2: curr_square = index - 7 elif directions == 3: curr_square = index + 7 # - Calculates moves for a rook (R / r) if piece == "r" or piece == "R": og_pos_index = self.board_index[pos]["index"] # type: ignore valid = True index = og_pos_index curr_square = index - 8 og_color = self.board_index[pos]["color"] op_color = None if og_color == "w": op_color = "b" else: op_color = "w" directions = 0 while valid: if directions == 4: break if curr_square < 1 or curr_square > 64: break square_ref = str(self.get_ref_from_index(curr_square)) if self.board_index[square_ref]["color"] == og_color: directions += 1 index = og_pos_index if directions == 1: curr_square = index + 8 elif directions == 2: curr_square = index - 1 elif directions == 3: curr_square = index + 1 elif self.board_index[square_ref]["color"] == op_color: possible_moves.append(square_ref) directions += 1 index = og_pos_index if directions == 1: curr_square = index + 8 elif directions == 2: curr_square = index - 1 elif directions == 3: curr_square = index + 1 elif self.is_edge_square(square_ref): possible_moves.append(square_ref) directions += 1 index = og_pos_index if directions == 1: curr_square = index + 8 elif directions == 2: curr_square = index - 1 elif directions == 3: curr_square = index + 1 else: possible_moves.append(square_ref) index = self.board_index[square_ref]["index"] # type: ignore if directions == 0: curr_square = index - 8 elif directions == 1: curr_square = index + 8 elif directions == 2: curr_square = index - 1 elif directions == 3: curr_square = index + 1 # - Calculates moves for a queen (Q / q) if piece == "q" or piece == "Q": og_pos_index = self.board_index[pos]["index"] # type: ignore valid = True index = og_pos_index curr_square = index - 8 og_color = self.board_index[pos]["color"] op_color = None if og_color == "w": op_color = "b" else: op_color = "w" directions = 0 while valid: if directions == 4: break if curr_square < 1 or curr_square > 64: break square_ref = str(self.get_ref_from_index(curr_square)) if self.board_index[square_ref]["color"] == og_color: 
directions += 1 index = og_pos_index if directions == 1: curr_square = index + 8 elif directions == 2: curr_square = index - 1 elif directions == 3: curr_square = index + 1 elif self.board_index[square_ref]["color"] == op_color: possible_moves.append(square_ref) directions += 1 index = og_pos_index if directions == 1: curr_square = index + 8 elif directions == 2: curr_square = index - 1 elif directions == 3: curr_square = index + 1 elif self.is_edge_square(square_ref): possible_moves.append(square_ref) directions += 1 index = og_pos_index if directions == 1: curr_square = index + 8 elif directions == 2: curr_square = index - 1 elif directions == 3: curr_square = index + 1 else: possible_moves.append(square_ref) index = self.board_index[square_ref]["index"] # type: ignore if directions == 0: curr_square = index - 8 elif directions == 1: curr_square = index + 8 elif directions == 2: curr_square = index - 1 elif directions == 3: curr_square = index + 1 valid = True index = og_pos_index curr_square = index - 9 directions = 0 while valid: if directions == 4: break if curr_square < 1 or curr_square > 64: break square_ref = str(self.get_ref_from_index(curr_square)) if self.board_index[square_ref]["color"] == og_color: directions += 1 index = og_pos_index if directions == 1: curr_square = index + 9 elif directions == 2: curr_square = index - 7 elif directions == 3: curr_square = index + 7 elif self.board_index[square_ref]["color"] == op_color: possible_moves.append(square_ref) directions += 1 index = og_pos_index if directions == 1: curr_square = index + 9 elif directions == 2: curr_square = index - 7 elif directions == 3: curr_square = index + 7 elif self.is_edge_square(square_ref): possible_moves.append(square_ref) directions += 1 index = og_pos_index if directions == 1: curr_square = index + 9 elif directions == 2: curr_square = index - 7 elif directions == 3: curr_square = index + 7 else: possible_moves.append(square_ref) index = self.board_index[square_ref]["index"] # type: ignore if directions == 0: curr_square = index - 9 elif directions == 1: curr_square = index + 9 elif directions == 2: curr_square = index - 7 elif directions == 3: curr_square = index + 7 # - Calculates moves for a king (K / k) if piece == "k" or piece == "K": square_index = int(self.board_index[pos]["index"]) # type: ignore possible_moves.append(self.get_ref_from_index(square_index - 9)) possible_moves.append(self.get_ref_from_index(square_index - 8)) possible_moves.append(self.get_ref_from_index(square_index - 7)) possible_moves.append(self.get_ref_from_index(square_index - 1)) possible_moves.append(self.get_ref_from_index(square_index + 1)) possible_moves.append(self.get_ref_from_index(square_index + 7)) possible_moves.append(self.get_ref_from_index(square_index + 8)) possible_moves.append(self.get_ref_from_index(square_index + 9)) if pos[0] == "a": possible_moves[0] = None possible_moves[3] = None possible_moves[5] = None if pos[0] == "h": possible_moves[2] = None possible_moves[4] = None possible_moves[7] = None if pos[1] == "1": possible_moves[5] = None possible_moves[6] = None possible_moves[7] = None if pos[1] == "8": possible_moves[0] = None possible_moves[1] = None possible_moves[2] = None # safe_possible_moves = [] possible_moves = self.clean_moves(pos, possible_moves) # type: ignore # for move in possible_moves: # if(self.safe(move)): safe_possible_moves.append(move) # possible_moves = safe_possible_moves # - Calculates moves for a pawn (P / p) if piece == "p" or piece == "P": piece_index: int = 
self.board_index[pos]["index"] # type: ignore if color == "w": one_ahead_index = int(piece_index) - 8 two_ahead_index = int(piece_index) - 16 l_diag = None r_diag = None if pos[1] != "8": if pos[0 != "a"]: l_diag = int(self.board_index[pos]["index"]) - 9 # type: ignore if pos[0 != "h"]: r_diag = int(self.board_index[pos]["index"]) - 7 # type: ignore l_diag = self.get_ref_from_index(l_diag) r_diag = self.get_ref_from_index(r_diag) if pos[1] == "2": if ( self.board_index[self.get_ref_from_index(two_ahead_index)]["type"] == None and self.board_index[self.get_ref_from_index(one_ahead_index)]["type"] == None ): possible_moves.append(self.get_ref_from_index(one_ahead_index)) possible_moves.append(self.get_ref_from_index(two_ahead_index)) return possible_moves elif ( self.board_index[self.get_ref_from_index(two_ahead_index)]["type"] != None and self.board_index[self.get_ref_from_index(one_ahead_index)]["type"] == None ): possible_moves.append(self.get_ref_from_index(one_ahead_index)) return possible_moves elif ( self.board_index[self.get_ref_from_index(two_ahead_index)]["type"] == None and self.board_index[self.get_ref_from_index(one_ahead_index)]["type"] != None ): return [] if r_diag != None: if ( self.board_index[r_diag]["color"] != self.board_index[pos]["color"] and self.board_index[r_diag]["type"] != None ): possible_moves.append(r_diag) if l_diag != None: if ( self.board_index[l_diag]["color"] != self.board_index[pos]["color"] and self.board_index[l_diag]["type"] != None ): possible_moves.append(l_diag) if self.board_index[self.get_ref_from_index(one_ahead_index)]["type"] == None: possible_moves.append(self.get_ref_from_index(one_ahead_index)) elif self.board_index[self.get_ref_from_index(two_ahead_index)]["type"] != None: return [] elif color == "b": one_ahead_index = int(piece_index) + 8 two_ahead_index = int(piece_index) + 16 l_diag = None r_diag = None if pos[1] != "1": if pos[0 != "a"]: l_diag = int(self.board_index[pos]["index"]) + 7 # type: ignore if pos[0 != "h"]: r_diag = int(self.board_index[pos]["index"]) + 9 # type: ignore l_diag = self.get_ref_from_index(l_diag) r_diag = self.get_ref_from_index(r_diag) if pos[1] == "7": if ( self.board_index[self.get_ref_from_index(two_ahead_index)]["type"] == None and self.board_index[self.get_ref_from_index(one_ahead_index)]["type"] == None ): possible_moves.append(self.get_ref_from_index(one_ahead_index)) possible_moves.append(self.get_ref_from_index(two_ahead_index)) return possible_moves elif ( self.board_index[self.get_ref_from_index(two_ahead_index)]["type"] != None and self.board_index[self.get_ref_from_index(one_ahead_index)]["type"] == None ): possible_moves.append(self.get_ref_from_index(one_ahead_index)) return possible_moves elif ( self.board_index[self.get_ref_from_index(two_ahead_index)]["type"] == None and self.board_index[self.get_ref_from_index(one_ahead_index)]["type"] != None ): return [] if r_diag != None: if ( self.board_index[r_diag]["color"] != self.board_index[pos]["color"] and self.board_index[r_diag]["type"] != None ): possible_moves.append(r_diag) if l_diag != None: if ( self.board_index[l_diag]["color"] != self.board_index[pos]["color"] and self.board_index[l_diag]["type"] != None ): possible_moves.append(l_diag) if self.board_index[self.get_ref_from_index(one_ahead_index)]["type"] == None: possible_moves.append(self.get_ref_from_index(one_ahead_index)) elif self.board_index[self.get_ref_from_index(one_ahead_index)]["type"] != None: return [] possible_moves = self.clean_moves(pos, possible_moves) # type: ignore 
return possible_moves def safe(self, square): attacked_squares = self.legal_moves() for i in attacked_squares: if square == i: return False return True def legal_moves(self) -> t.List[str]: legal_moves = [] moves = self.calc_board_position_pos_moves(self.fen) for move in moves: for origin in move: legal_moves.append(move[origin]) return legal_moves def is_edge_square(self, square: str) -> bool: if str(square)[0] == "a" or str(square)[0] == "h" or str(square)[1] == "8" or str(square)[1] == "1": return True else: return False def legal(self, move: t.Dict[str, str]) -> bool: if move in self.calc_board_position_pos_moves(self.fen): return True return False def move(self, origin: str, dest: str) -> None: move = {"{origin}".format(origin=origin): "{dest}".format(dest=dest)} if self.legal(move): self.board_index[dest]["type"] = self.board_index[origin]["type"] self.board_index[dest]["color"] = self.board_index[origin]["color"] self.board_index[origin]["color"] = None self.board_index[origin]["type"] = None self.fen = self.create_fen() else: return def create_fen(self) -> str: dirty_fen = "" clean_fen = "" index = 1 s_index = 0 for square in self.board_index: if self.board_index[square]["type"] == None: dirty_fen += "x" else: dirty_fen += self.board_index[square]["type"] # type: ignore if index == 8: dirty_fen += "/" index = 1 else: index += 1 for char in dirty_fen: if char == "x": s_index += 1 else: if char != "x" and s_index > 0: clean_fen += str(s_index) clean_fen += char s_index = 0 else: clean_fen += char return clean_fen """ ======= import re import math class Chessboard(object): STARTING_FEN = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR" def __init__(self, fen=STARTING_FEN): self.board_index = {} self.files = "abcdefgh" self.fen = fen # - Initializing functions self.init() def init(self): # - Keeps track of which square is being worked on : square_index = 1 file_index = 1 while square_index <= 64: # - Data used to define the square square_rank = 8 - int(square_index / 8) square_file = self.files[file_index - 1] if square_index % 8 == 0: square_rank += 1 if square_rank < 1: break square_ref = "{file}{rank}".format(file=square_file, rank=square_rank) self.board_index[square_ref] = {"index": square_index, "color": None, "type": None} if file_index % 8 == 0: file_index = 1 else: file_index += 1 square_index += 1 def get_rank_squares(self, rank): squares = [] for square in self.board_index: if str(square)[1] == str(rank): squares.append(str(square)) return squares def draw_rank(self, rank): result = [] squares = self.get_rank_squares(rank) for square in squares: if self.board_index[square]["type"] != None: result.append(self.board_index[square]["type"]) else: result.append("-") drawing = "" for square in result: drawing += square drawing += " " return drawing def draw_ascii(self): rank_index = 8 while rank_index >= 1: print("") print( "{rank} | {rank_drawing}".format(rank=rank_index, rank_drawing=self.draw_rank(rank_index)) ) print("") rank_index -= 1 print("-" * 44) print("") print(" a b c d e f g h".upper()) def reset_board_position(self): self.position(Chessboard.STARTING_FEN) def get_ref_from_index(self, index): for square in self.board_index: if int(self.board_index[str(square)]["index"]) == index: return str(square) def def_piece_colors(self): for square in self.board_index: self.board_index[str(square)]["color"] = self.def_square_color(str(square)) def position(self, fen): square_index = 1 self.fen = fen fen = self.parse_fen(fen) for char in fen: if char == "1": 
39.090577
161
0.474446
5,773
53,515
4.14741
0.028062
0.063902
0.099403
0.063902
0.979409
0.972727
0.962494
0.960197
0.956856
0.941737
0
0.018499
0.429263
53,515
1,369
162
39.090577
0.765413
0.017733
0
0.670711
0
0
0.024761
0.001633
0
0
0
0
0
1
0.036395
false
0.005199
0.005199
0
0.090121
0.010399
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
34f1ff526cf8aeb1b83c83fa64a5aaabd0e28a09
19,314
py
Python
Jwalk/build/lib/Jwalk/SASDTools.py
Topf-Lab/Jwalk
72fac517b57b3724bb24101679afa8407c98666f
[ "Naumen", "Condor-1.1", "MS-PL" ]
null
null
null
Jwalk/build/lib/Jwalk/SASDTools.py
Topf-Lab/Jwalk
72fac517b57b3724bb24101679afa8407c98666f
[ "Naumen", "Condor-1.1", "MS-PL" ]
1
2021-10-04T20:21:39.000Z
2021-10-04T20:21:39.000Z
Jwalk/src/Jwalk/SASDTools.py
Topf-Lab/Jwalk
72fac517b57b3724bb24101679afa8407c98666f
[ "Naumen", "Condor-1.1", "MS-PL" ]
1
2021-01-04T11:19:07.000Z
2021-01-04T11:19:07.000Z
#=============================================================================== # This file is part of Jwalk. # # Jwalk - A tool to calculate the solvent accessible surface distance (SASD) # between crosslinked residues. # # Copyright 2016 Jwalk Inventor and Birkbeck College University of London. # The Jwalk Inventor is: Josh Bullock # # # Jwalk is available under Public Licence. # This software is made available under GPL V3 # # Please cite your use of Jwalk in published work: # # J.Bullock, J. Schwab, K. Thalassinos, M. Topf (2016) # The importance of non-accessible crosslinks and solvent accessible surface distance # in modelling proteins with restraints from crosslinking mass spectrometry. # Molecular and Cellular Proteomics (15) pp.2491-2500 # #=============================================================================== from multiprocessing import Pool, Process, freeze_support import itertools import sys import math import os def calculate_specific_SASD(single_crosslink, aa1_voxels, aa2_voxels, dens_map, aa1_CA, aa2_CA, max_dist, vox): ''' Breadth First Search of grid. For general info on algorithm see: https://en.wikipedia.org/wiki/Breadth-first_search Returns dictionary containing solvent accessible surface distances between specific starting res and ending res. {start res, end res, length in angstroms : voxel path of sasd} Arguments: *single_crosslink* start and end residue. start is key of aa1_voxels. aa1_voxels[start_residue] = all the starting voxels for that residue *aa1_voxels* dictionary containing starting voxels {start_residue : starting voxels} *aa2_voxels* dictionary containing ending voxels {end_residue : ending voxels} *dens_map* grid with solvent accessible surface (masked array) *aa1_CA* dictionary containing voxel of C-alpha *aa2_CA* dictionary containing voxel of C-alpha *max_dist* maximum distance BFS will search until *vox* number of angstoms per voxel ''' start_residue = single_crosslink[0] end_residue = single_crosslink[1] specific_xl = {} comb = [[1, 0, 0], [-1, 0, 0], [0, -1, 0], [0, 1, 0], [0, 0, -1], [0, 0, 1], [1, 0, 1], [-1, 0, 1], [0, 1, 1], [0, -1, 1], [1, -1, 0], [-1, -1, 0], [1, 1, 0], [-1, 1, 0], [1, 0, -1], [-1, 0, -1], [0, 1, -1], [0, -1, -1], [1, 1, 1], [1, -1, 1], [-1, 1, 1], [-1, -1, 1], [1, 1, -1], [1, -1, -1], [-1, 1, -1], [-1, -1, -1]] # distance of diagonal steps diag1 = (math.sqrt((vox ** 2) * 2)) # 2d diagonal diag2 = (math.sqrt((vox ** 2) * 3)) # 3d diagonal queue = [] # voxels in queue for searching end_voxels = [] # list of voxels to find path to visited = {} # list works as all the coordinates that have been visited - dictionary gives the path to said coordinate from startpoint distance = {} # keeps distance from starting voxel for each other voxel # place starting voxels into queue and initialise visited and distance for j in aa1_voxels[start_residue]: queue.append([j[0], j[1], j[2]]) visited[j[0], j[1], j[2]] = [[j[0], j[1], j[2]]] distance[j[0], j[1], j[2]] = 0 while queue: x_n, y_n, z_n = queue.pop(0) if distance[x_n, y_n, z_n] <= max_dist: for c in comb: x_temp = x_n + c[0] y_temp = y_n + c[1] z_temp = z_n + c[2] if (x_temp, y_temp, z_temp) not in visited: if ((0 <= x_temp < dens_map.x_size()) and (0 <= y_temp < dens_map.y_size()) and ( 0 <= z_temp < dens_map.z_size())): temp_list = visited[x_n, y_n, z_n][:] temp_list.append([x_temp, y_temp, z_temp]) visited[x_temp, y_temp, z_temp] = temp_list # updated visited list if dens_map.fullMap[z_temp][y_temp][x_temp] <= 0: # if the voxel is in empty space queue.append(([x_temp, y_temp, 
z_temp])) # calculate the distance diff_x = x_temp - x_n diff_y = y_temp - y_n diff_z = z_temp - z_n if diff_x != 0 and diff_y != 0 and diff_z != 0: distance[x_temp, y_temp, z_temp] = distance[x_n, y_n, z_n] + diag2 elif diff_x != 0 and diff_y != 0: distance[x_temp, y_temp, z_temp] = distance[x_n, y_n, z_n] + diag1 elif diff_x != 0 and diff_z != 0: distance[x_temp, y_temp, z_temp] = distance[x_n, y_n, z_n] + diag1 elif diff_y != 0 and diff_z != 0: distance[x_temp, y_temp, z_temp] = distance[x_n, y_n, z_n] + diag1 else: distance[x_temp, y_temp, z_temp] = distance[x_n, y_n, z_n] + vox # now we have a full set of paths into empty space starting from start_residue # all stored in visited. Now need to extract paths to specific residue shortest_distance = 9999 all_distances = {} for j in aa2_voxels[end_residue]: (x, y, z) = j if (x, y, z) in visited: visited[(x, y, z)].insert(0, aa1_CA[start_residue]) # add aa1 CA voxel to path visited[(x, y, z)].append(aa2_CA[end_residue]) # add aa2 CA voxel to end of path # add the distance between starting/ending residue CA voxel and start/end voxel in path for i in [1, len(visited[(x, y, z)]) - 1]: (x_1, y_1, z_1) = visited[(x, y, z)][i - 1] (x_2, y_2, z_2) = visited[(x, y, z)][i] distance[(x, y, z)] += math.sqrt((x_1 - x_2) ** 2 + (y_1 - y_2) ** 2 + (z_1 - z_2) ** 2) all_distances[distance[(x, y, z)]] = visited[(x, y, z)] # linking distance:path # keep record of shortest distance if shortest_distance > distance[(x, y, z)]: shortest_distance = distance[(x, y, z)] # now adding shortest xl to the final list if shortest_distance != 9999: # this is just to order the dict so that chain goes alphabetically specific_xl[start_residue, end_residue, shortest_distance] = all_distances[ shortest_distance] # start lys, end lys, length of xl = path of xl return specific_xl def calculate_SASDs(start_residue, aa1_voxels, aa2_voxels, dens_map, aa1_CA, aa2_CA, max_dist, vox): """ Breadth First Search of grid. For general info on algorithm see: https://en.wikipedia.org/wiki/Breadth-first_search Returns dictionary containing solvent accessible surface distances between starting res and all possible ending res. {start res, end res, length in angstroms : voxel path of sasd} Arguments: *start_residue* key of aa1_voxels. 
aa1_voxels[start_residue] = all the starting voxels for that residue *aa1_voxels* dictionary containing starting voxels {start_residue : starting voxels} *aa2_voxels* dictionary containing ending voxels {end_residue : ending voxels} *dens_map* grid with solvent accessible surface (masked array) *aa1_CA* dictionary containing voxel of C-alpha *aa2_CA* dictionary containing voxel of C-alpha *max_dist* maximum distance BFS will search until *vox* number of angstoms per voxel """ sasds = {} # order of voxels to search - by having diagonals last ensures shortest path is returned comb = [[1, 0, 0], [-1, 0, 0], [0, -1, 0], [0, 1, 0], [0, 0, -1], [0, 0, 1], [1, 0, 1], [-1, 0, 1], [0, 1, 1], [0, -1, 1], [1, -1, 0], [-1, -1, 0], [1, 1, 0], [-1, 1, 0], [1, 0, -1], [-1, 0, -1], [0, 1, -1], [0, -1, -1], [1, 1, 1], [1, -1, 1], [-1, 1, 1], [-1, -1, 1], [1, 1, -1], [1, -1, -1], [-1, 1, -1], [-1, -1, -1]] # distance of diagonal steps diag1 = (math.sqrt((vox ** 2) * 2)) # 2d diagonal diag2 = (math.sqrt((vox ** 2) * 3)) # 3d diagonal queue = [] # voxels in queue for searching visited = {} # list works as all the coordinates that have been visited - dictionary gives the path to said coordinate from startpoint distance = {} # keeps distance from starting voxel for each other voxel # place starting voxels into queue and initialise visited and distance for j in aa1_voxels[start_residue]: queue.append([j[0], j[1], j[2]]) visited[j[0], j[1], j[2]] = [[j[0], j[1], j[2]]] distance[j[0], j[1], j[2]] = 0 # grid is searched until queue is empty while queue: x_n, y_n, z_n = queue.pop(0) # take first voxel in queue if distance[x_n, y_n, z_n] <= max_dist: for c in comb: # expand in all directions from voxel - in order of comb. x_temp = x_n + c[0] y_temp = y_n + c[1] z_temp = z_n + c[2] # check voxel hasn't already been searched if (x_temp, y_temp, z_temp) not in visited: # check that voxel is within bounds of the grid if ((0 <= x_temp < dens_map.x_size()) and (0 <= y_temp < dens_map.y_size()) and ( 0 <= z_temp < dens_map.z_size())): # add path to this voxel to visited temp_list = visited[x_n, y_n, z_n][:] temp_list.append([x_temp, y_temp, z_temp]) visited[x_temp, y_temp, z_temp] = temp_list if dens_map.fullMap[z_temp][y_temp][x_temp] <= 0: # if the voxel is in empty space queue.append(([x_temp, y_temp, z_temp])) # add to queue for later searching # calculate the distance to voxel from start voxel diff_x = x_temp - x_n diff_y = y_temp - y_n diff_z = z_temp - z_n if diff_x != 0 and diff_y != 0 and diff_z != 0: distance[x_temp, y_temp, z_temp] = distance[x_n, y_n, z_n] + diag2 elif diff_x != 0 and diff_y != 0: distance[x_temp, y_temp, z_temp] = distance[x_n, y_n, z_n] + diag1 elif diff_x != 0 and diff_z != 0: distance[x_temp, y_temp, z_temp] = distance[x_n, y_n, z_n] + diag1 elif diff_y != 0 and diff_z != 0: distance[x_temp, y_temp, z_temp] = distance[x_n, y_n, z_n] + diag1 else: distance[x_temp, y_temp, z_temp] = distance[x_n, y_n, z_n] + vox # now we have a full set of paths into empty space starting from start_residue # all stored in visited. 
Now need to extract paths to specific residues for end_residue in aa2_voxels: if start_residue != end_residue: shortest_distance = 9999 all_distances = {} # cycling through possible end coords of end_residue to get shortest sasd for j in aa2_voxels[end_residue]: (x, y, z) = j if (x, y, z) in visited: visited[(x, y, z)].insert(0, aa1_CA[start_residue]) # add aa1 CA voxel to path visited[(x, y, z)].append(aa2_CA[end_residue]) # add aa2 CA voxel to end of path # add the distance between starting/ending residue CA voxel and start/end voxel in path for i in [1, len(visited[(x, y, z)]) - 1]: (x_1, y_1, z_1) = visited[(x, y, z)][i - 1] (x_2, y_2, z_2) = visited[(x, y, z)][i] distance[(x, y, z)] += math.sqrt((x_1 - x_2) ** 2 + (y_1 - y_2) ** 2 + (z_1 - z_2) ** 2) all_distances[distance[(x, y, z)]] = visited[(x, y, z)] # linking distance:path # keep record of shortest distance if shortest_distance > distance[(x, y, z)]: shortest_distance = distance[(x, y, z)] # add shortest distance sasd to output dictionary if shortest_distance != 9999: if start_residue[1] < end_residue[1]: # this to order the dict so that chain goes alphabetically sasds[start_residue, end_residue, shortest_distance] = all_distances[shortest_distance] elif end_residue[1] < start_residue[1]: sasds[end_residue, start_residue, shortest_distance] = all_distances[shortest_distance] # if both on the same chain, then ordered to go numerically elif start_residue[0] < end_residue[0]: sasds[start_residue, end_residue, shortest_distance] = all_distances[shortest_distance] else: sasds[end_residue, start_residue, shortest_distance] = all_distances[shortest_distance] return sasds def calculate_SASDs_star(a_b): """Convert `f([1,2])` to `f(1,2)` call.""" return calculate_SASDs(*a_b) def calculate_specific_SASD_star(a_b): """Convert `f([1,2])` to `f(1,2)` call.""" return calculate_specific_SASD(*a_b) def parallel_BFS(aa1_voxels, aa2_voxels, dens_map, aa1_CA, aa2_CA, crosslink_pairs, max_dist, vox, ncpus, xl_list): """ Parallelised Breadth First Search of grid. Returns dictionary containing all solvent accessible surface distances {start res, end res, length in angstroms : voxel path of sasd} Arguments: *start_residue* key of aa1_voxels. 
aa1_voxels[start_residue] = all the starting voxels for that residue *aa1_voxels* dictionary containing starting voxels {start_residue : starting voxels} *aa2_voxels* dictionary containing ending voxels {end_residue : ending voxels} *dens_map* grid with solvent accessible surface (masked array) *aa1_CA* dictionary containing voxel of C-alpha *aa2_CA* dictionary containing voxel of C-alpha *crosslink_pairs* list of pairs of crosslinks (empty if not calculating specific crosslinks) *max_dist* maximum distance BFS will search until *vox* number of angstoms per voxel *ncpus* number of allocated cpus """ freeze_support() final_XL = {} if xl_list: if ncpus > 1: pool = Pool(ncpus) xl_dictionaries = pool.map(calculate_specific_SASD_star, itertools.izip(crosslink_pairs, itertools.repeat(aa1_voxels), itertools.repeat(aa2_voxels), itertools.repeat(dens_map), itertools.repeat(aa1_CA), itertools.repeat(aa2_CA), itertools.repeat(max_dist), itertools.repeat(vox))) for c in xl_dictionaries: final_XL.update(c) else: # alternative call to allow single cpu running on Windows machines for single_crosslink in crosslink_pairs: xl_dictionaries = calculate_specific_SASD(single_crosslink, aa1_voxels, aa2_voxels, dens_map, aa1_CA, aa2_CA, max_dist, vox) final_XL.update(xl_dictionaries) else: if ncpus > 1: pool = Pool(ncpus) xl_dictionaries = pool.map(calculate_SASDs_star, itertools.izip(aa1_voxels, itertools.repeat(aa1_voxels), itertools.repeat(aa2_voxels), itertools.repeat(dens_map), itertools.repeat(aa1_CA), itertools.repeat(aa2_CA), itertools.repeat(max_dist), itertools.repeat(vox))) for c in xl_dictionaries: final_XL.update(c) else: # alternative call to allow single cpu running on Windows machines for start_residue in aa1_voxels: xl_dictionaries = calculate_SASDs(start_residue, aa1_voxels, aa2_voxels, dens_map, aa1_CA, aa2_CA, max_dist, vox) final_XL.update(xl_dictionaries) return final_XL def calculate_distance(cords): ''' Calculates the distance of points in 3d, input e.g. [[x1,y1,z1],[x2,y2,z3]] ''' return math.sqrt(((cords[0][0]-cords[1][0])**2)+((cords[0][1]-cords[1][1])**2)+((cords[0][2]-cords[1][2])**2)) def get_euclidean_distances(sasds, pdb, aa1, aa2): residues = {} euc_dists = {} with open (pdb) as inf: for line in inf: if line.startswith('ATOM') and (line[12:16].strip() == 'CA'): if line[21:22].strip() == "": chain = " " else: chain = line[21:22].strip() residues[line[22:26].strip(),chain] = [float(line[30:38].strip()), float(line[38:46].strip()), float(line[46:54].strip())] for k,v in residues.iteritems(): for k1,v1 in residues.iteritems(): if k1 != k: euc_dists[int(k[0]),k[1], int(k1[0]),k1[1]] = calculate_distance([v,v1]) sasds_and_eucs = {} for s in sasds: if (s[0][0],s[0][1],s[1][0],s[1][1]) in euc_dists: sasds_and_eucs[s[0],s[1],s[2],euc_dists[(s[0][0],s[0][1],s[1][0],s[1][1])]] = sasds[s] return sasds_and_eucs
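# The distance update in both BFS routines above charges a straight step vox,
# a 2-D diagonal vox*sqrt(2) (diag1) and a 3-D diagonal vox*sqrt(3) (diag2),
# depending on how many axes of the neighbour offset are non-zero. A small
# stand-alone sketch of that rule (step_cost is a hypothetical helper name,
# not part of Jwalk's API):
import math

def step_cost(c, vox):
    """Cost of one BFS step along neighbour offset c = [dx, dy, dz]."""
    axes = sum(1 for d in c if d != 0)
    if axes == 3:
        return math.sqrt((vox ** 2) * 3)  # diag2
    if axes == 2:
        return math.sqrt((vox ** 2) * 2)  # diag1
    return vox

# e.g. with 1-angstrom voxels: 1.0, ~1.414, ~1.732
print([round(step_cost(c, 1.0), 3) for c in ([1, 0, 0], [1, 1, 0], [1, 1, 1])])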
41.181237
139
0.511287
2,490
19,314
3.793173
0.12249
0.015881
0.016517
0.020328
0.719428
0.716887
0.707041
0.707041
0.707041
0.697935
0
0.039519
0.380294
19,314
468
140
41.269231
0.749603
0.324739
0
0.746094
0
0
0.000557
0
0
0
0
0
0
1
0.027344
false
0
0.019531
0
0.074219
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
34f6c5c8606dd11e9074f0909b6ec076917a6003
121
py
Python
genie/metrics/__init__.py
epfl-dlab/GenIE
62ae6af936c9375c36d3d5ad60401bf579875bd9
[ "MIT" ]
8
2022-02-08T11:12:37.000Z
2022-03-16T08:27:50.000Z
genie/metrics/__init__.py
epfl-dlab/GenIE
62ae6af936c9375c36d3d5ad60401bf579875bd9
[ "MIT" ]
1
2022-03-07T07:36:24.000Z
2022-03-07T20:58:12.000Z
genie/metrics/__init__.py
epfl-dlab/GenIE
62ae6af936c9375c36d3d5ad60401bf579875bd9
[ "MIT" ]
7
2022-02-22T22:48:35.000Z
2022-03-18T05:18:30.000Z
from .triplet_set_f1 import TSF1
from .triplet_set_precision import TSPrecision
from .triplet_set_recall import TSRecall
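# A brief sketch of how these re-exports are consumed, assuming the package is
# importable as genie.metrics; the metric classes' signatures live in the
# sibling modules and are not shown in this file:
from genie.metrics import TSF1, TSPrecision, TSRecall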
30.25
46
0.876033
18
121
5.555556
0.555556
0.33
0.42
0
0
0
0
0
0
0
0
0.018349
0.099174
121
3
47
40.333333
0.899083
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
5501ccb1210e7f2924aac469361e07c19b288cae
72
py
Python
graphgallery/functional/edge_level/__init__.py
kisekizzz/GraphGallery
fd4a1f474c244f774397460ae95935638ef48f5b
[ "MIT" ]
null
null
null
graphgallery/functional/edge_level/__init__.py
kisekizzz/GraphGallery
fd4a1f474c244f774397460ae95935638ef48f5b
[ "MIT" ]
null
null
null
graphgallery/functional/edge_level/__init__.py
kisekizzz/GraphGallery
fd4a1f474c244f774397460ae95935638ef48f5b
[ "MIT" ]
null
null
null
from .edge_transform import *
from .shape import *
from .to_adj import *
24
29
0.763889
11
72
4.818182
0.636364
0.377358
0
0
0
0
0
0
0
0
0
0
0.152778
72
3
30
24
0.868852
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
550ebef428227256067d5240798aa8ea759e5f68
96
py
Python
venv/lib/python3.8/site-packages/pip/_vendor/urllib3/contrib/appengine.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
2
2022-03-13T01:58:52.000Z
2022-03-31T06:07:54.000Z
venv/lib/python3.8/site-packages/pip/_vendor/urllib3/contrib/appengine.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
19
2021-11-20T04:09:18.000Z
2022-03-23T15:05:55.000Z
venv/lib/python3.8/site-packages/pip/_vendor/urllib3/contrib/appengine.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
null
null
null
/home/runner/.cache/pip/pool/96/6f/3a/5e368e23b6a36e89ec37424b0347249204855a183158a2d6fd89ade2c2
96
96
0.895833
9
96
9.555556
1
0
0
0
0
0
0
0
0
0
0
0.46875
0
96
1
96
96
0.427083
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
6
9b337ab25bcd76bbceacde554050848f7d24a4db
23,445
py
Python
Website/apps/Components/header.py
ForensX/genomevisualizer
fac4cd70d991c8d5ad0712890cb67718afbf7a9c
[ "MIT" ]
null
null
null
Website/apps/Components/header.py
ForensX/genomevisualizer
fac4cd70d991c8d5ad0712890cb67718afbf7a9c
[ "MIT" ]
null
null
null
Website/apps/Components/header.py
ForensX/genomevisualizer
fac4cd70d991c8d5ad0712890cb67718afbf7a9c
[ "MIT" ]
null
null
null
import dash_html_components as html import dash_core_components as dcc import dash_html_components as html import dash_core_components as dcc from apps.Components import colorscale ##LOGO FOR THE APP HALogoLink = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADgCAMAAADCMfHtAAAAbFBMVEX///8AgsgAfMYAgMcAesYAf8cAd8QAesWSveCdxOOaweIAdcPP4vLc6/bt9fvx+PzC2u48k8+tzej4/P5Il9GlyOa20+shictho9ZrqNhYntSOuuAYhcmAs93h7vcxj81xq9nI3vAAcMGEtd05A4VxAAAG/UlEQVR4nO2d25KqOhBAJwlJVO6KiPECbv//Hw/BmX1GJMhNkrB7Vc0bZbGmIQmdpvn6AgAAAAAAAAAAAICJucWJr/scPomfMEJZqPs0PkZ4ZBQhhFe6T+RTnCq/5RqmFCO0YEMvZgQt2HBXuH/9Fmm4dyhCCzb0cwehJRtefl+gCzT0EEZ1FmX4GsBlGYbn1wAuyzBrFFySIW64RBdm2CgIhjYBhvYDhvYDhvYDhvYDhvYDhvYDhvYDhvYDhvYDhvYDhvYDhvYDhvYDhvYDhvajMtzoPrHJAEP7AUP7AUP7AUP7+XcNV96mEEmSXbZX3ac4EoUhwhxTSgil2D2frJZUGf6CYPcY6D7P4XQwLKFsrftEB6M0pJizEkwf5Zk8eRwf+F4UpduSNIo834LYqkYaLvZeEAZ+ekFO5UjzU5ZT5jLGOa7gDmOug+LjKfUMfhGs0ZCy+6/gRI9XFUgZzoZy23I0wpixQ7bxdvo0Wmgy5Ent4tuz9ttUilLMeHzy9Fi00WDITi9HXVFzsXRdswym2Bp2xb4asn3DYeG5kyKSI5SbGCX5YojvjccFvKPhI5QimtlDTd2Q5IoD0/f34u9IOvRkyExSN2TKd5wFbXRRB9IVRow7NUOaKY/0ewWx+jEWG3Cx1gxZy7897zrY/AokO6TzuTTzbEhIy6H7bmvYmiPPb7PJNPJ81vTYcmj/y/Q7jrHW/gXPhq2P9rshMawc3ULjBFkzbL1rui1smsC8aRkxD8+GvHXsOww2RMSJdSUK5omhdHQ1JbfmuA+/4bGWVU5tLC1aDh04lv76da5jcnx+qCWo5dBB8+ETxN3OJvaX2mO7eln69RWPuQ2/Fel8Zj/UDFum/NEXqcSZUe2beuqFKQf1ns8WxhrSRHFgNEUITTBEvLnOJOTyQEoxdmQalctUInNk6t86Q9Q43u2q9QwWl0168/0gLAmu/i3d3MWB/00bW2KIGjL4wUGGir8m4Sr89B4zpuhdYKIhYqKW202rtDeN237IW8VlLC0xLBcfq1+PO15SDTL0/C6nHaYZ5+2SphiWd5wjtn55s11vp8OjPxY9d3nG26WCtS19zDGUcSyHyvLv+3x53HVXIlwfmPKWNMnwWZf1qlX0EtW1aqYhoUz0fXxVbQIYZIjlNv5jS4kW/TNJJ8XNaI4hvacXkR8O8XEzKHG9Nt5wbD0NGM4IGA4EDGcEDAdiuGE5zf8Zuc9grmEpx51DsR27H2amIaHcRWJzm2InzDhDufxk+T2dbH/IKEOCOSPH9bRVaar8uAZDxpNTNPWeUKjoGarFcPKSF39bHJgyWaPBcEqC6JTg9oSbxYb+/ogYf5sztdMwSO+x0zHvbZ3hzltnZ/d96Ow0DNJLjHtsWVhluPM2GWW8n5w1htf0krO+obPFMPQ2ArlDQmeDoQyd4wwNneGGu9tK9NkFtctQrsFc5/2+oJWGQbRKaIdNTzsN/X25fHZwt40p2wzl8nn4bGC44a5cPp87LJ/tNPx58vmgnUZD7+Oh02qYxp+764wwjJq+o7MowxHV6ZYYOu/PynLD0ZXN/6ihcjG0CEP5yrMQ5uS8pzQkFDvssXGlquu32LAqLRKrn40rg3ZmJjCUck5+edq4Wo4hwdwl2evG1SIMq9DF97Rx48p6QyKHlONevRdutWEZOvx2z9FSQ1nG4HYrY7DQ8DEbbG4d98LtMqwas9Rmg3fYYyhng3PDbPAOKwyrCpT4PqyMwQZDTIqW2eAdFhiq3nHqiPmGRPUaYkfMN1xQ9SUYDgQMZwQMBwKGMwKGA7HAcOT3Lcw3HLtqUzXRMMcQ8VGNg1eqPpIGGSJ83gx88SJYEeWvmmRYtXYc0PYwFW5LRYBRhkj26MRFH8lddMTtTRVMM5SpDAdn3RrPX7cZdt7ViJlnWEUSs/ObJKm/Lc6dSuDMNESPl76cuFhHfm30Cf1oXcSO27VexVjDb03Zph2f80RkJSLJz5gx3qsYx2zDH1H5xQQJGVC6aIXhKJZvqKEjXY823ROgo+deMWsQR+aZhyEm6TTXDdbWpvhz7NU9j6aFuLo+dHI9zHIzUqqxRftqhiLTlyZw83KN+Wcd9bSffWJLPzioEnY04IsXu4s77esx//s5ByM+kvD1FWQfccRYX7fyF3wxtSPBin6o2vCFulvAED98Mu8TSdeiLZPUy88ha/P8JOGKvunR2UWPurH2CaKFSLBREyR16EXrRy06EOzjoVcr5Twz4PsyHbiu484Zph8IZjxLzbz7GgnTY/cXZWUrn/yk+asyQ7jKhjO8+cN5P24yCyfLag1Ymw0k9PZFgmTqUPY1lVm2R8YNYy4TjHm2mq7FlE52/m27PhWZSOKSRGTFfbONbPiOJQAAAAAAAAAAALAY/gONemWU+x5jzQAAAABJRU5ErkJggg==' JCIBLogoLink = 
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAANwAAADlCAMAAAAP8WnWAAABKVBMVEX8/v/v3xD///8yNGb9///x4Q0iJV0pLWitq8J5c0b46AXz9Pd9e5gpLGE2NWstMGf05Ano2BY+PFwaIGteWFXVxiNYVVklKWjezx06OmV0a0lsZlj3+PvMvifs7fImKV8AAE4hJmi4uMfW1uAZHFo9QHDh4el7dVHEtzIfJGr87AConDmDek3t7vKPj6rOz9uWiz+Dgp9waVAAAEtEQltoaY2hobcAAFLDxNEbHlq3rDZTU30VHWuoqL1jXVdNTXeLgkxxcJJTUFsMEFOflT5OS11GRnUAEmpDQVyzpzlraY3AsTQAAGuVlq6MgUA3N1sJD1oADGwiIlFMSU2onEJPTl5gWEoRE1AzMUxlYo0PGGsAEG2bkUF/d0M/QGyEgaVKTIGop8YsLFF8fcKQAAAgAElEQVR4nO29e1/iyrI/jJ0bEwgJIgY0BMlNQEkMCiZGxIgyo66oa5+ZPYdZjrN++/2/iKe6g8oddGbtvc7z2f2H48QQ+ttVXbeuqiQS/x3/Hf8dMwZN0xP//w9NZN6gER6vsxqdII1GZxvfSOOLNCI/EopCLiEUPwj/99828eWDRoquaZ7eQM/DUl4A0QnnFR6NnE5b01VF9XSkeO0GjZJBvxt29JRiJR34YCcMgrT190FHJyLR8Fnf4Ew36Vi6Z/f7MTj4gVKDXjeZiulKp8IeW2YNNtMLE1omcBIo9AVKFMqGKBoa7QyOe4JIlX3974KOpu0eK1BkCGX4hWXNRsxiSgKpN2W42nWTngVIw4cKXgZTMBMWW1YR8nZkgxUE+JSgo6hMaV4Gfi+HfxdwSOt3Oq7pY1xlts+2bliy8Ej94SDdJ7hFgd3xgCfTimI5nhX1PNQqBwgoqSI1GQ5cs6yjtCECXLUP95vK30Sq0DBBEAOOHch922vZGpsm2LyMhxwqHQ4ovwzkTKIE7SlYlCCH7SdUVugDuBSRJLTLurAOZRfhz7F/H3C0konSHRTLS0eIVN/A8gBphx3UPuyjNEC0TSAV3EsEC50wWRt5PmUOtQBNp1nWQW6Z1RB+3hMltH49WyL0nvWiW0AZLDsAUG+A0mW87MjxW4rLUl2U1rD01F5nixyDbaMO0GdIHkwr1kJ9CsBhERQIvvYOcItnj/TBe7gB6V2BCACkGWVL5YQAGFChDF31KZGAw7Jl5H4bkwiEh9gYggsEikskRACnWDSt9Ms371hlpIepBZ9CyUObLN0bH40aOn4srWaEAbAbRoo6fpdW2RdwI4NO3AgxOD9WZ7TKgcRJpDgA54QI9l6/8aYJxFpG6ZatReDa5V4bNo8TNd7GFcTcoFGbZTtAFgBHJ7pshNTyDHA0SpnUEJwXKwzHGAWnDPpv0+FIt1XYvDessEg7Ijuwd2CH3PS6bzURwGCi0Y3gOygtCHj1DZj/LHBggqSB/8ieg6XAF5I/hFdwadDqWuMt9hfs2EOQxHYvCd++4LZ0gNydtiKGQOE3oaO9QT9wM7DPUIuAc8tlZxY41DGw3isTaQk/EygyYkU4BBd0012DvVn961HnMOz/SNwcesqDtwhcOACNs5M2vFT3SX0DOlrppA2swwk4KkBoIMwEB+LUMEBMCmmgLf6JvEN2DJzhoVRXKJvqirsO6T0XpbutnoYUYzG4NOw4TQA7QXkKEm/Y1NiWD1/A/YkUEOseBgcKawxcEqxJB0yQgFZFSggSicjTndYYW8LeLVOsu9raIv3BhRXC+4hWasm5HwKJEKbJJgDWRU4vetu2Q43MM7g+Uii8rQBcOZpgS2yQaCzVV0CswE+i+mEpYnC+57RUOuWCQdNf6dvpRICVKu01sPYBcPN8QUuJwSXIbkadnUVEnvwSPEdY8BfKmZRwg8GBlTmpCsA1EClOpUG5UYT5aOSCY0BjyunOU/pHmhiXK305smN1EuuCmoYaM9mZVo8HTnqAiJsC3+d53e7KjEmnOul0u/MKLhEA6VSVBStzBji6T4FPA4iI7EaK1sVgVIMSVQ+zJfxlNXCw4eyOOrTp6JShJc2bWZ+jE5xg+AHYB4CdtmzvRvdXZky4P/BZ9pktuwjBBiy7ztcBLNMUOGJfeVjRgScAq9r3QcIMtb+XTqQSP1YER9MD02LVG8yYKkyi58em94xVCPte5CDakmEpUqbahQmuKrKwdeX1y9SrKtBgwtRgACL3FdyL3YMCClQSKDpMOeT2fKIbQuyiemIQDjIrsiXyHjS9q/cxo/VhLyfbSWO2UQp36tiOQTboAxR6QcLq2W/ZdUp6yJYwURo2HUX52A5+AYec4S90AvzRFHELwMyiW5oaCKylfzXh+z1YGLQi5WglAHqHEZgdVs7BVgSKHmabl0AtIivBbk3Cyre6sKbsW+xo+C4BwA2w4Qi6GZgUmyDPew4Wt4fphIf3Faw8cBuoAD8/BbewIeqmcQQGVEGC7LnMcnCgIz2k9/sq2EbYHICHZdJzPga2gmqJFixDz0KNQ1h1tdx+izoAOwjA9QUsA4F0WDX7LSd2eZCaxk+ERQsCk3IxfPBNCWeA9DYFJwxc+FMrBMopYFiv4tChoAuftcFk6IDsQ5qYSPbmWZd0oxyCBQHo2nB/Fyu7NPUm0qmUYdFyvKeRTgwPwe9m0kmv3RLAkKBR9FWAYTYQcfiM2MgCD8INQN/Ho+tSvSCpLpfUICrxGnVAn+IN5VBqgwrn+TPIMk2Ekhn4xkEHRZjQ+uFbnEZgaKNhsWbsryDHLA+DRqzPCn6EORNMLcAsmB7QMvRjyYYctmz4bHkYYmIp20JzlfHofMOugp0/Cw2AyTWwhvWdMDVzvjQWcDpSMnJGR5ac0vs4BBCs5O7TcZiVTvT7KHpZD6S6cXAIaMJmiGwJu7ql3bAAphUFvZCwBbIM09U8rX1D+WVB8NPqau4krX5t4/UXkQfOcbLHAWd0euYsixt4dyfENoylpc0/7SCisVMFW3aR//f8WUX3HEvBJptmgXJ8DcDiCGsmYwbpJFlROkqR7aelg4wYtGNzwcp4sZhBih4FA31VRxn8C5gbart01w0zXVtrgDJw+uVpXqPp9I6diG0vlEpyfsbFMpNWqOUiBTVCoAU1SLuUq4v26OSwPZ1qKK+uPf18taEO1R5Kv6wGvp5Y3dXphoAmMQhb4lcc3I2NRjU41iaWh1ZaBrlG07CsZr/tpCn4x1ZQlFnm8YN778e8J/6w/ckn4/jWjP3zcpFu7Iwu3+pnICDTYfuEmSBk256b6UceYQ6UAId0TBjRiTQYQkSDR36rYyFLsFPAJe0UiJRoiVeMNCN8gs0lCiLbfXMUnLYOg3fEpGiktkzM9hb8CG4U4Ogu5cVqtHPYGXP9wWN8Clo2CB8P74pESydURnGgZ/GM6bRFji+6geu943SGtjtvB4el3xMxCYgwi9rk7MUBoGGrS33NjJspltexOQOUL+Eq4H3PDUwzAFMTWRljitdGh0IisGS7vCuQ+o5PAe8dphUQ0J2+2R9EenyIBBsQ
aYfdtuZMmMSYmj0cNabxgZlqP/hBOooGfsqKUop77CaIqH8diZHfX/6USNAzxpyrP3ErMKLfQZZNa8fpyA6fdkwQxiT8RidCyprWJGCagLuutgMMzTdsC+O1WE+5kTuJtjFQU6NDoZXU9KATMy7OvgqW7Kq3Tn2X4lCinko/tFHaxOyY0m8AHkiYvqYgpctOBX+Q/hBaWsj5LTUyWDAR8DIi3XDArAkyVsiWMyNDDOBPmckBVvJAnLqaSViVqWugcNvs9FW9YU5fBJNQnviisu3lbND1bkYhwWzk3Oz0kzoFCszR+8GEyYhjqF32a58yXerBtYYaCMwxWGEFeWo4NI2eB6snzIlLoAuC2IsbH+CVdqdupWTF6k1dBKN08otgGGpj8qLgWhYCRekcx0YO8KMe7ATYU+qZfWNSNdPpIOw4A6HMPkMj2IB/NdFDU98J04jKU9NgdRz0mZxIixx5TN6q4fOBqZHSp1cHGGLy6wUXKdEDeDra1+HhBqZeqycIthO1+tNhDXCHBr6Bxfozp/ZNFUtaRw7pqQXlFGt6GuDBuNOQyzoOi01CDnEIbCXIYl/xJm4VbKvbIp6F9vASCsEbyPdtrB2mxCu4I70XquF7lWN8bOANVNruB5PfCMw2mF75TMKZnjGGPH2rqDbEWZDbMyB7dHfiUpfSkN5VQT6m0yOmLFDv0JmptujUK9Vw0CDa0XDSRBtwOJmyMD7YFkoawuToeXR/8k6hbCa83tStsDHS7NStrGqxUw8ALz3yx+/rN4AUOgKhmxZHLQeUsGYgG7Lmy69KJFNgwnuyBchAtKWnhtKYvpbuIG36YmgpM261kR5O3+qgaPpWV7EmriTV4EZFGqeCScJ2tdnpL/MGtscGDlhiSUCnRgr6uw2rjV0eMBLVZN8/XsEtGyViwnO7D/6NpiY1/Cwv+fcaDp5UpFudoOcP2vobjVPsWults2d0gzDy1Buf/TsNEOpJ+yYwe72Bpr7LpMXmtqVFYYvqGVMS7D89Hox+y21r6krBljngEN2wLEXVnkCz/YJB/arHBHoqpVskZvGu/AvUCIOnr4c7D8cPgQt67sPPj92iuP795x+zJVJB+mln53jnsNxtRW8lHSYa+AaR5jmWannttCne1/mfHmufuLNf8JhsjXpywWlTdcfT3K8zXJyF0BJO1D32B4lnwZtoCevS2k+P7Dq3wf/0U5hCDWzL56GWqbS2Mj7w56L+g+l6ZoRoRbVgeZTEoHz187P6peCsyNNJAK3bHRh+0FFXkJng5dqGH2op1ChrSP2Ty3TBFvg7gksP+pz5g0ZhC1mdQeyDLsYGFiVbjjCVkfNgoYSqKiRV9+8HDucMNywdJ/bgGVp22XDVRfAAUdAb3gJOWIrkClqa97cElyKKABuKKkmWViNf1OafnsAqkIyHmIZ2l/BoUA4AXOtvBi4NxheFNw9C+mEceQRZEe7czD4HwX+1RwKwqHWDlCCIHAUH1bvil/rfBdxaFp9Lx6HxIFJU4dnxBudTmHkOguFEO8kRX5UCF5dkXYNTp5ji5lvAMQzzV4NLYV2csDRF6b6mJSC1W56Z+IScnZEzDFo5ThJ/HYcZNAB38QZwTOGoMAvdrwTntnAMBMdM0yO53siiZiU+AeuNhsSQtUMi6khxMx79RnDS48cSM0Pp/0pwSoTj4Jh8ndFcI+R8nZFfgrSHkSMBfCZvR5GHrH6Ik6pM7k1secEV1g6m0f3SPYccM1QSnSgKqdGTAWQfT+eXoNE8AKR3vEw58EOcXQHUb5hcc/VZSdXKunQg/2Xg1ookGwClbFURy122lUiN7CeVmsovAYn6mumFNP/hKQg1nzi8ibeC47crzaw4Q3f8KnD3caoDnpzNWlTkmunXQ0RkTyUpoMiMiYsPFFJ+2GjteEFX9zzPwhfeAo4pVSq3B7nvfzU42vE8R2Oj9lcxEsI4/Qv/sHYm82VRK4zhOjhBgIuQ4jmHxvFO7yvW5Q7F7a48K36Lu89TuSo/JTCnwTErDHKjxI9w+b3Yb4CJ8mD0jo8NU21Z6McTTvaP0aH+RDidTjwRTkXWYQusyUELNqyqgTOn9RzgbYOSz5eCk7JZPBGpVJEft+X1tc3SJLpJcIxUyC8dpSw8U6o2H1/QMZ9EgeqAzcHqquWQbIEWljCdOLsWueG4NgDPIU7mUSkckHZNNMy/CFp0xyjb5nJwUl4+YuKv/nQny5eP35aBk/Kb33KLx+nv99cMv3Z99ceX7MvTmCPRTB92HZVtD2WCUgQHz+n1SIgPRZPHPChNrtCJQPQ9pBnxBqRVdmD2XEtdTjkme58D6c/wWzJ3vi4Wb+XNqXvGwTFZgVsSBpLXr9d45vJTpbL9ig0vX592guPAvBkmDahGEtS3EEsS1JrMSgTjmmxDZPcDs6F/HVYzKC0zDSJTp+S7JeD43coF3kP7FXHzQuQ+fDo9mfrEODj+sbIYGlc+L9TX9i4qIjdCN6D4hZhRkKK1Mu3ndECQITdPVBgz31SGL40yN3HGVi/JphvcS/IgrrDE4Gbo5NEhndS4fRAhWzDhe5G62uSu1pawpXQ2fRIyMsRKM89L1wCNEtfzo98OH8wo2PJKDWNDSOs12sawqgCli1P5akiLASPTTfa84NmGIR9fDo7JF0XxaC27LZOJFb+Iler25uRHJsA1F3ElJ+zxfKlZwQtQuR17krSBwY2cCyA7o7Nu+4nUWVr+dFoQ7LYnrAlx0lHLHIwVHq5AuTOYKLd1MZxuEe7f//h5Cbi92nxstbNCvbBPIp2UvD/O4JhyY7kYICxvMhbJdaLRzawcUaTvYHLRKdG2fGHMtl4KDpiSrPYLn3Hb+7mL7ORtk6rgoiaKM1lT5C75enU9ZgOxmC2MPYZvcuNpTcjs+452SGijzc6pR+0HL84hUjX2SXkLuLXN8TmKGxuynJ36xAQ4JntwRJ0Vp7HJV/l6drcyfGbtNtsc273SBDhaEVkXthMpiWBnZ1zSKDQiMEFTRjhUDKuCY0oTx6FH61ylms8vU+JrPC8x65O0E2u7TL16Lz//96i+f7YYXKNnJiLDwglB/cycEjqkCSGl0SB70qlxnl4CTrqWx6ZXBFbau/vjdvGeG1JvEpwoXvPMd/nlauW6UNleCC5Ba6r2FWyPhpuZWzEJqFS923dQ8jAYCwIuBXc5Dg60wYf1j3cr2JbT4Lj1fD3/5VUDivfS1jJwCLUPWziTNO0Y84rMUJJtoET6a2g5fd9OoZe0J+SIi8F9ngInynfT3u0K4OSzbH2PGtER8kGW48bB8fvcSIkjWIn64NBWvKCvI6s3F1ybwmpxkOHaOI0o0i1LVRWFRJUqewssFGlvEpz46fPe3TvAVT5I0l1t9IqcfaxMgjvncPX88zmG5e48edaN76sI7LB59UfIxT0HkMV5aVOz3LLv+yxrdoPWzUCQ7wqzI1oE3MmkIbV+dv/x9u1sWbnkC2djz5L34YZxcAyAEz0v2Wm3XTe8CQyxbdmZtunhOiyuMw/czYDYI+0bpJdbqYSla1qn07bTrUFXeDBGbdeJGeYnU2u406v
[… several KB of base64-encoded PNG logo data omitted …]' def Header(): return html.Div([ get_logo(), get_header(), ]) def get_logo(): logo = html.Div( className = 'jcib-logo', children=[ html.Img(src=JCIBLogoLink, height='80', width='80'), html.Img(src=HALogoLink, height='80', width='80'), ] ) return logo def get_header(): header=html.Div( className='row', style={ 'margin-left':'50px', 'margin-right':'0px', 'background-color':colorscale.navBG, 'height':100, 'margin-bottom':'25px', }, children=[ #html.H5('Tanzanian Water Wells: Analytics and Machine Learning-based Forecasting', style={'color':colorscale.navText, 'padding': '20px 0px', 'font-size':'35px'}), html.H5('Visualization Tool for Mapping Genomic Intersections', className='website-title', style={'color':colorscale.navText}), ] ) return header
478.469388
19,599
0.946726
860
23,445
25.795349
0.904651
0.001803
0.001262
0.002164
0.005319
0.005319
0.005319
0.005319
0.005319
0.005319
0
0.153447
0.013777
23,445
48
19,600
488.4375
0.805986
0.007592
0
0.166667
0
0.055556
0.962426
0.955591
0
1
0
0
0
1
0.083333
false
0
0.138889
0.027778
0.305556
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
6
9b5a1d80092971ab8ce6ecfd9fedd811417dfb3f
229
py
Python
tests/context.py
DerekRies/arkpy
b8305c8bbbe7c1772b262d7fdaa9d05e0a1728d0
[ "MIT" ]
19
2016-07-14T00:47:21.000Z
2022-03-30T15:22:59.000Z
tests/context.py
DerekRies/arkpy
b8305c8bbbe7c1772b262d7fdaa9d05e0a1728d0
[ "MIT" ]
23
2016-07-19T06:53:16.000Z
2021-03-25T21:44:57.000Z
tests/context.py
DerekRies/arkpy
b8305c8bbbe7c1772b262d7fdaa9d05e0a1728d0
[ "MIT" ]
5
2017-02-06T13:11:43.000Z
2022-03-28T21:04:25.000Z
import os import sys sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) from arkpy import arktypes from arkpy import ark from arkpy import binary from arkpy import utils from arkpy import entities
25.444444
82
0.786026
38
229
4.631579
0.447368
0.255682
0.426136
0
0
0
0
0
0
0
0
0.004951
0.117904
229
9
83
25.444444
0.866337
0
0
0
0
0
0.008696
0
0
0
0
0
0
1
0
true
0
0.875
0
0.875
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
9b673e5a745751b869760660bc0533091fcfd720
2,131
py
Python
Baseline/model/model.py
ndkhanh360/CAER
93d25828ce2ea050fb379d85258ba3fdbf59d2a6
[ "MIT" ]
18
2020-06-01T18:09:47.000Z
2022-02-01T13:35:20.000Z
Baseline/model/model.py
ndkhanh360/CAER
93d25828ce2ea050fb379d85258ba3fdbf59d2a6
[ "MIT" ]
12
2020-06-25T09:01:06.000Z
2022-03-12T00:48:22.000Z
Baseline/model/model.py
ndkhanh360/CAER
93d25828ce2ea050fb379d85258ba3fdbf59d2a6
[ "MIT" ]
6
2020-10-30T07:35:30.000Z
2022-03-28T09:33:33.000Z
import torch.nn as nn import torch.nn.functional as F from base import BaseModel from torchvision import models class ResNet(BaseModel): def __init__(self, drop_out=False, num_classes=7, fine_tune=True): super().__init__() self.model = models.resnet152(pretrained=True) if not fine_tune: for param in self.model.parameters(): param.requires_grad = False num_features = self.model.fc.in_features self.fc = nn.Sequential( nn.Dropout(0.5), nn.Linear(num_features, num_classes) ) if drop_out else nn.Linear(num_features, num_classes) def forward(self, x): return self.model(x) class AlexNet(BaseModel): def __init__(self, drop_out=False, num_classes=7, fine_tune=True): super().__init__() self.model = models.alexnet(pretrained=True) if not fine_tune: for param in self.model.parameters(): param.requires_grad = False num_features = self.model.classifier[6].in_features self.model.classifier[6] = nn.Sequential( nn.Dropout(0.5), nn.Linear(num_features, num_classes) ) if drop_out else nn.Linear(num_features, num_classes) def forward(self, x): return self.model(x) class VGGNet(BaseModel): def __init__(self, drop_out=False, num_classes=7, fine_tune=True): super().__init__() self.model = models.vgg19(pretrained=True) if not fine_tune: for param in self.model.parameters(): param.requires_grad = False num_features = self.model.classifier[6].in_features self.model.classifier[6] = nn.Sequential( nn.Dropout(0.5), nn.Linear(num_features, num_classes) ) if drop_out else nn.Linear(num_features, num_classes) def forward(self, x): return self.model(x) class DumbNet(BaseModel): def __init__(self): super().__init__() self.fc = nn.Linear(224*224*3, 7) def forward(self, x): return self.fc(x.reshape(-1, 224*224*3))
32.287879
70
0.623182
282
2,131
4.468085
0.205674
0.1
0.052381
0.090476
0.815873
0.815873
0.796032
0.796032
0.796032
0.796032
0
0.021879
0.270765
2,131
66
71
32.287879
0.788932
0
0
0.692308
0
0
0
0
0
0
0
0
0
1
0.153846
false
0
0.076923
0.076923
0.384615
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
9b6ff43d5fa41122620e34299e6a1073924a4683
173
py
Python
unit/admin.py
alexhong121/ai_cupboard
50baa791c969b951de5b47d980e19c0df3c04e7f
[ "MIT" ]
null
null
null
unit/admin.py
alexhong121/ai_cupboard
50baa791c969b951de5b47d980e19c0df3c04e7f
[ "MIT" ]
null
null
null
unit/admin.py
alexhong121/ai_cupboard
50baa791c969b951de5b47d980e19c0df3c04e7f
[ "MIT" ]
null
null
null
from django.contrib import admin from unit.models import Unit,Product_category # Register your models here. admin.site.register(Unit) admin.site.register(Product_category)
24.714286
45
0.83237
25
173
5.68
0.52
0.211268
0.239437
0
0
0
0
0
0
0
0
0
0.092486
173
6
46
28.833333
0.904459
0.150289
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
9b773a6ba4817bb21699bc385cfbb3f527d23cf9
181
py
Python
popular/exceptions.py
ryannjohnson/popular-python
6acba4c9e93dbbe8e1f14ff2dc391aebb46705ab
[ "MIT" ]
null
null
null
popular/exceptions.py
ryannjohnson/popular-python
6acba4c9e93dbbe8e1f14ff2dc391aebb46705ab
[ "MIT" ]
null
null
null
popular/exceptions.py
ryannjohnson/popular-python
6acba4c9e93dbbe8e1f14ff2dc391aebb46705ab
[ "MIT" ]
null
null
null
class SocialError(Exception): """Raised when this package errs.""" pass class SocialProviderError(Exception): """Raised when a vendor is sending an error.""" pass
20.111111
51
0.685083
21
181
5.904762
0.761905
0.241935
0.306452
0
0
0
0
0
0
0
0
0
0.20442
181
8
52
22.625
0.861111
0.39779
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
6
9b988fa26638c6abd9d9b68669042c398c42ab01
28
py
Python
sorolla/__init__.py
bq/sorolla
f9fc2f35a673f2f11d370975be4e06c520341d88
[ "Apache-2.0" ]
16
2015-04-22T09:17:17.000Z
2015-12-05T17:17:22.000Z
sorolla/__init__.py
bq/sorolla
f9fc2f35a673f2f11d370975be4e06c520341d88
[ "Apache-2.0" ]
null
null
null
sorolla/__init__.py
bq/sorolla
f9fc2f35a673f2f11d370975be4e06c520341d88
[ "Apache-2.0" ]
null
null
null
from sorolla import Sorolla
14
27
0.857143
4
28
6
0.75
0
0
0
0
0
0
0
0
0
0
0
0.142857
28
1
28
28
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
9bbb86eaa780e8d455df77ee9e076b31f348099c
25
py
Python
olpy/pipelines/__init__.py
openlattice/olpy
4e89b6f3561bd8de09f98cabeac31f7f4ee10977
[ "Apache-2.0" ]
null
null
null
olpy/pipelines/__init__.py
openlattice/olpy
4e89b6f3561bd8de09f98cabeac31f7f4ee10977
[ "Apache-2.0" ]
null
null
null
olpy/pipelines/__init__.py
openlattice/olpy
4e89b6f3561bd8de09f98cabeac31f7f4ee10977
[ "Apache-2.0" ]
null
null
null
from . import integration
25
25
0.84
3
25
7
1
0
0
0
0
0
0
0
0
0
0
0
0.12
25
1
25
25
0.954545
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
fd1153aa603a9421ad759389110b8cf71e710414
195
py
Python
torchsat_imc/models/classification/__init__.py
Exdenta/torchsat
70ea3db758757104fb3ba618ddf7997f0f3a75b4
[ "MIT" ]
316
2019-08-14T11:56:13.000Z
2022-03-31T06:15:50.000Z
torchsat_imc/models/classification/__init__.py
Exdenta/torchsat
70ea3db758757104fb3ba618ddf7997f0f3a75b4
[ "MIT" ]
8
2019-10-07T20:16:08.000Z
2021-09-03T18:09:20.000Z
torchsat_imc/models/classification/__init__.py
Exdenta/torchsat
70ea3db758757104fb3ba618ddf7997f0f3a75b4
[ "MIT" ]
49
2019-08-14T11:55:22.000Z
2022-01-31T16:43:41.000Z
from .densenet import * from .inception import * from .mobilenet import * from .resnet import * from .vgg import * from .efficientnet import * from .senet import * from .resnest.resnest import *
21.666667
30
0.748718
25
195
5.84
0.4
0.479452
0
0
0
0
0
0
0
0
0
0
0.164103
195
8
31
24.375
0.895706
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
fd2f99d953adb97d7b4aad0e4e03be478060e57e
100
py
Python
wordgen/__init__.py
snsinfu/web-wordgen
118b7e8ae59b9a314e52c88a0807dbb67cd69894
[ "MIT" ]
1
2020-09-08T21:50:14.000Z
2020-09-08T21:50:14.000Z
wordgen/__init__.py
snsinfu/web-wordgen
118b7e8ae59b9a314e52c88a0807dbb67cd69894
[ "MIT" ]
null
null
null
wordgen/__init__.py
snsinfu/web-wordgen
118b7e8ae59b9a314e52c88a0807dbb67cd69894
[ "MIT" ]
1
2020-09-08T21:50:15.000Z
2020-09-08T21:50:15.000Z
from .model import LoadedModel, StoredModel from .train import train from .generate import generate
25
43
0.83
13
100
6.384615
0.538462
0
0
0
0
0
0
0
0
0
0
0
0.13
100
3
44
33.333333
0.954023
0
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
bd382c712f5db73bec61c5669a9dd418f96264f2
122
py
Python
clients/python/inspr/__init__.py
inspr/inspr
870bb13b4eb60653c8567dd8a70ccdfa69e1391b
[ "CC-BY-4.0", "MIT" ]
50
2021-04-13T11:34:18.000Z
2021-12-28T10:34:22.000Z
clients/python/inspr/__init__.py
inspr/inspr
870bb13b4eb60653c8567dd8a70ccdfa69e1391b
[ "CC-BY-4.0", "MIT" ]
147
2021-04-13T21:11:02.000Z
2022-02-04T15:45:38.000Z
clients/python/inspr/__init__.py
inspr/inspr
870bb13b4eb60653c8567dd8a70ccdfa69e1391b
[ "CC-BY-4.0", "MIT" ]
5
2021-04-14T03:45:23.000Z
2021-11-19T23:16:43.000Z
from .client import * from .rest import * from .models import * from .controller.controller_client import ControllerClient
30.5
58
0.811475
15
122
6.533333
0.466667
0.306122
0
0
0
0
0
0
0
0
0
0
0.122951
122
4
58
30.5
0.915888
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
95259d3aa0d475ff5b0e59479b6908248726b9e9
20
py
Python
fython/fml/__init__.py
nicolasessisbreton/fython
988f5a94cee8b16b0000501a22239195c73424a1
[ "Apache-2.0" ]
41
2016-01-21T05:14:45.000Z
2021-11-24T20:37:21.000Z
fython/fml/__init__.py
nicolasessisbreton/fython
988f5a94cee8b16b0000501a22239195c73424a1
[ "Apache-2.0" ]
5
2016-01-21T05:36:37.000Z
2016-08-22T19:26:51.000Z
fython/fml/__init__.py
nicolasessisbreton/fython
988f5a94cee8b16b0000501a22239195c73424a1
[ "Apache-2.0" ]
3
2016-01-23T04:03:44.000Z
2016-08-21T15:58:38.000Z
from .fml import fml
20
20
0.8
4
20
4
0.75
0
0
0
0
0
0
0
0
0
0
0
0.15
20
1
20
20
0.941176
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
95290db920c0098a54654b60ab8bd7ac109e94bb
34
py
Python
2019/others/test2.py
rishidevc/stkovrflw
c33dffbce887f32f609a10dd717d594390ceac8b
[ "MIT" ]
null
null
null
2019/others/test2.py
rishidevc/stkovrflw
c33dffbce887f32f609a10dd717d594390ceac8b
[ "MIT" ]
5
2020-05-04T03:11:14.000Z
2021-06-10T20:20:38.000Z
2019/others/test2.py
rishidevc/stkovrflw
c33dffbce887f32f609a10dd717d594390ceac8b
[ "MIT" ]
1
2019-07-31T18:28:34.000Z
2019-07-31T18:28:34.000Z
from . import test print(test.a)
8.5
18
0.705882
6
34
4
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.176471
34
4
19
8.5
0.857143
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
6
1f0c0c89840710be63d33fbdaefade7fd454b0b4
27
py
Python
src/dashboard/pages/home/__init__.py
ddlatumalea/disease_and_life
aa8c84fdd4a0b41bc0ee275538ac70a362eb26ba
[ "Apache-2.0" ]
null
null
null
src/dashboard/pages/home/__init__.py
ddlatumalea/disease_and_life
aa8c84fdd4a0b41bc0ee275538ac70a362eb26ba
[ "Apache-2.0" ]
null
null
null
src/dashboard/pages/home/__init__.py
ddlatumalea/disease_and_life
aa8c84fdd4a0b41bc0ee275538ac70a362eb26ba
[ "Apache-2.0" ]
null
null
null
from .home import HomePage
13.5
26
0.814815
4
27
5.5
1
0
0
0
0
0
0
0
0
0
0
0
0.148148
27
1
27
27
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
1f5ec7815a44018c5f069ad2302fab73fdbe7be4
292
py
Python
pytracer/sampler/__init__.py
zjiayao/pyTracer
c2b4ef299ecbdca1c519059488f7cd2438943ee4
[ "MIT" ]
9
2017-11-20T18:17:27.000Z
2022-01-27T23:00:31.000Z
pytracer/sampler/__init__.py
zjiayao/pyTracer
c2b4ef299ecbdca1c519059488f7cd2438943ee4
[ "MIT" ]
4
2021-06-08T19:03:51.000Z
2022-03-11T23:18:44.000Z
pytracer/sampler/__init__.py
zjiayao/pyTracer
c2b4ef299ecbdca1c519059488f7cd2438943ee4
[ "MIT" ]
1
2017-11-20T22:48:01.000Z
2017-11-20T22:48:01.000Z
""" __init__.py pytracer.sampler package Modelling samplers and samples. Created by Jiayao on Aug 9, 2017 Modified on Aug 13, 2017 """ from __future__ import absolute_import from pytracer.sampler.utility import * from pytracer.sampler.sample import * from pytracer.sampler.sampler import *
20.857143
38
0.797945
41
292
5.463415
0.585366
0.267857
0.241071
0.334821
0
0
0
0
0
0
0
0.043478
0.133562
292
14
39
20.857143
0.841897
0.441781
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
1f69b490fd147303e90b0df859b7b6072be0d699
432
py
Python
great_expectations/datasource/generator/__init__.py
cicdw/great_expectations
0aecddf7da591df19389c8abadbb1700a51b8739
[ "Apache-2.0" ]
1
2020-04-07T22:15:13.000Z
2020-04-07T22:15:13.000Z
great_expectations/datasource/generator/__init__.py
cicdw/great_expectations
0aecddf7da591df19389c8abadbb1700a51b8739
[ "Apache-2.0" ]
1
2020-03-26T12:34:24.000Z
2020-03-26T12:34:24.000Z
great_expectations/datasource/generator/__init__.py
cicdw/great_expectations
0aecddf7da591df19389c8abadbb1700a51b8739
[ "Apache-2.0" ]
null
null
null
from .databricks_generator import DatabricksTableBatchKwargsGenerator from .glob_reader_generator import GlobReaderBatchKwargsGenerator from .subdir_reader_generator import SubdirReaderBatchKwargsGenerator from .query_generator import QueryBatchKwargsGenerator from .table_generator import TableBatchKwargsGenerator from .s3_generator import S3GlobReaderBatchKwargsGenerator from .manual_generator import ManualBatchKwargsGenerator
54
69
0.918981
37
432
10.486486
0.486486
0.270619
0.108247
0
0
0
0
0
0
0
0
0.004951
0.064815
432
7
70
61.714286
0.955446
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
2f473914bb410c1820ff62a6a57934f643c3e6e9
5,615
py
Python
metaci/build/tests/test_views.py
sfdc-qbranch/MetaCI
78ac0d2bccd2db381998321ebd71029dd5d9ab39
[ "BSD-3-Clause" ]
48
2018-10-24T14:52:06.000Z
2022-03-25T21:14:50.000Z
metaci/build/tests/test_views.py
sfdc-qbranch/MetaCI
78ac0d2bccd2db381998321ebd71029dd5d9ab39
[ "BSD-3-Clause" ]
2,034
2018-10-31T20:59:16.000Z
2022-03-22T21:38:03.000Z
metaci/build/tests/test_views.py
sfdc-qbranch/MetaCI
78ac0d2bccd2db381998321ebd71029dd5d9ab39
[ "BSD-3-Clause" ]
27
2018-12-24T18:16:23.000Z
2021-12-15T17:57:27.000Z
import pytest from django.urls import reverse from guardian.shortcuts import assign_perm from metaci.fixtures.factories import RebuildFactory @pytest.mark.django_db class TestBuildViews: def test_build_list(self, client, superuser, data): client.force_login(superuser) url = reverse("home") response = client.get(url, {"repo": data["repo"].name}) assert response.status_code == 200 def test_build_detail__permission_denied(self, client, user, data): client.force_login(user) url = reverse("build_detail", kwargs={"build_id": data["build"].id}) response = client.get(url) assert response.status_code == 403 def test_build_detail(self, client, superuser, data): client.force_login(superuser) url = reverse("build_detail", kwargs={"build_id": data["build"].id}) response = client.get(url) assert response.status_code == 200 def test_build_detail__stacktrace_present(self, client, superuser, data): client.force_login(superuser) data["build"].status = "error" data["build"].traceback = "This is the stacktrace." data["build"].save() url = reverse("build_detail", kwargs={"build_id": data["build"].id}) response = client.get(url) assert response.status_code == 200 assert response.templates[0].name == "build/detail.html" assert "Stacktrace" in str(response.content) def test_build_detail__build_error_no_stacktrace(self, client, user, data): assign_perm("plan.view_builds", user, data["planrepo"]) client.force_login(user) data["build"].status = "error" data["build"].traceback = "This is the stacktrace." data["build"].save() url = reverse("build_detail", kwargs={"build_id": data["build"].id}) response = client.get(url) assert response.status_code == 200 assert response.templates[0].name == "build/detail.html" # non-superusers shouldn't see a stacktrace assert "Stacktrace" not in str(response.content) def test_build_detail_flows(self, client, superuser, data): client.force_login(superuser) url = reverse("build_detail_flows", kwargs={"build_id": data["build"].id}) response = client.get(url) assert response.status_code == 200 def test_build_detail_tests(self, client, superuser, data): client.force_login(superuser) url = reverse("build_detail_tests", kwargs={"build_id": data["build"].id}) response = client.get(url) assert response.status_code == 200 def test_build_detail_rebuilds(self, client, superuser, data): client.force_login(superuser) url = reverse("build_detail_rebuilds", kwargs={"build_id": data["build"].id}) response = client.get(url) assert response.status_code == 200 def test_build_detail_org(self, client, superuser, data): client.force_login(superuser) url = reverse("build_detail_org", kwargs={"build_id": data["build"].id}) response = client.get(url) assert response.status_code == 200 def test_build_detail_org__rebuild(self, client, superuser, data): rebuild = RebuildFactory(build=data["build"]) client.force_login(superuser) url = reverse( "build_detail_org", kwargs={"build_id": data["build"].id, "rebuild_id": rebuild.id}, ) response = client.get(url) assert response.status_code == 200 def test_build_detail_org__permission_denied(self, client, user, data): # This permission is checked for in build_detail_base() assign_perm("plan.view_builds", user, data["planrepo"]) client.force_login(user) url = reverse("build_detail_org", kwargs={"build_id": data["build"].id}) response = client.get(url) assert response.status_code == 403 def test_build_detail_qa(self, client, superuser, data): client.force_login(superuser) url = reverse("build_detail_qa", kwargs={"build_id": data["build"].id}) response = client.get(url) 
assert response.status_code == 200 def test_build_detail_qa__post(self, client, superuser, data): client.force_login(superuser) url = reverse("build_detail_qa", kwargs={"build_id": data["build"].id}) response = client.post(url) assert response.status_code == 200 def test_build_detail_qa__permission_denied(self, client, user, data): # This permission is checked for in build_detail_base() assign_perm("plan.view_builds", user, data["planrepo"]) client.force_login(user) url = reverse("build_detail_qa", kwargs={"build_id": data["build"].id}) response = client.get(url) assert response.status_code == 403 def test_build_rebuild(self, client, superuser, data): client.force_login(superuser) url = reverse("build_rebuild", kwargs={"build_id": data["build"].id}) response = client.get(url) assert response.status_code == 302 def test_build_rebuild__permission_denied(self, client, user, data): client.force_login(user) url = reverse("build_rebuild", kwargs={"build_id": data["build"].id}) response = client.get(url) assert response.status_code == 403 def test_build_search(self, client, superuser, data): client.force_login(superuser) url = reverse("build_search") response = client.get(url, {"q": data["build"]}) assert response.status_code == 200
37.433333
85
0.658771
695
5,615
5.100719
0.120863
0.093089
0.057546
0.115092
0.858392
0.839774
0.839774
0.839774
0.804795
0.792102
0
0.012048
0.216563
5,615
149
86
37.684564
0.793817
0.026536
0
0.638889
0
0
0.123764
0.003845
0
0
0
0
0.194444
1
0.157407
false
0
0.037037
0
0.203704
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2f5ba6e928db0a01ebc9f40bcc0fff48eabb41ea
14,094
py
Python
permafrost/tests.py
jared-hardy/django-permafrost
588c0783791ec10f683da0235162a90f6936110a
[ "MIT" ]
null
null
null
permafrost/tests.py
jared-hardy/django-permafrost
588c0783791ec10f683da0235162a90f6936110a
[ "MIT" ]
null
null
null
permafrost/tests.py
jared-hardy/django-permafrost
588c0783791ec10f683da0235162a90f6936110a
[ "MIT" ]
null
null
null
import json from unittest import skipIf from django.test import TestCase from django.contrib.sites.models import Site from django.contrib.auth import get_user_model from django.db.utils import IntegrityError from django.db import transaction from django.contrib.auth.models import Group, Permission try: from rest_framework.test import APIClient SKIP_DRF_TESTS = False except ImportError: SKIP_DRF_TESTS = True from permafrost.models import PermafrostRole class PermafrostRoleModelTest(TestCase): fixtures = ['unit_test'] def setUp(self): User = get_user_model() self.user = User.objects.create_user(username='john', email='jlennon@beatles.com', password='Passw0rd!') self.staffuser = User.objects.create_user(username='staffy', email='staffy@beatles.com', password='Passw0rd!') self.administrationuser = User.objects.create_user(username='adminy', email='adminy@beatles.com', password='Passw0rd!') self.site_1 = Site.objects.get(pk=1) self.site_2 = Site.objects.get(pk=2) self.perm_view_permafrostrole = Permission.objects.get_by_natural_key(*('view_permafrostrole', 'permafrost', 'permafrostrole')) self.perm_change_permafrostrole = Permission.objects.get_by_natural_key(*('change_permafrostrole', 'permafrost', 'permafrostrole')) self.perm_delete_permafrostrole = Permission.objects.get_by_natural_key(*('delete_permafrostrole', 'permafrost', 'permafrostrole')) self.perm_add_logentry = Permission.objects.get_by_natural_key(*('add_logentry', 'admin', 'logentry')) def test_role_rename_updates_group(self): ''' Make sure renaming the PermafrostRole properly renames the Django Group model. ''' role = PermafrostRole(name="Awesome Students", category="user") role.save() pk_check = role.group.pk self.assertEqual(role.group.name, "1_user_awesome-students") role.name = "OK Students" role.save() new_role_group = Group.objects.get(name=role.get_group_name()) self.assertEqual(role.group.name, "1_user_ok-students") self.assertEqual(role.group.pk, pk_check) # Make sure a new group was not generated # User Roles def test_create_user_role(self): ''' Test that creating a PermafrostRole creates a matching Group ''' role = PermafrostRole(name="Bobs Super Group", category="user") role.save() role.users_add(self.user) perms = list(self.user.get_all_permissions()) self.assertEqual(list(role.group.permissions.all()), []) # Check the permissions on the group self.assertEqual(role.group.name, "1_user_bobs-super-group") # Checks that the user is created self.assertEqual(perms, []) def test_add_optional_to_user_role(self): ''' Test that the optional role can be added ''' role = PermafrostRole(name="Bobs Super Group", category="user") role.save() role.permissions_add(self.perm_view_permafrostrole) role.users_add(self.user) perms = list(self.user.get_all_permissions()) self.assertListEqual(list(role.group.permissions.all()), [self.perm_view_permafrostrole]) # Check the permissions on the group self.assertEqual(role.group.name, "1_user_bobs-super-group") # Checks that the user is created self.assertListEqual(perms, ["permafrost.view_permafrostrole"]) def test_add_not_allowed_to_user_role(self): ''' Test that a permission that is not optional or required can be added ''' role = PermafrostRole(name="Bobs Super Group", category="user") role.save() role.permissions_add(self.perm_delete_permafrostrole) role.users_add(self.user) perms = list(self.user.get_all_permissions()) self.assertEqual(list(role.group.permissions.all()), []) # Check the permissions on the group self.assertEqual(role.group.name, "1_user_bobs-super-group") # Checks that the 
user is created self.assertListEqual(perms, []) def test_clear_permissions_on_user_role(self): ''' Test that clearning permissions restores them to just the required. ''' role = PermafrostRole(name="Bobs Super Group", category="user") role.save() role.permissions_add(self.perm_view_permafrostrole) role.permissions_clear() role.users_add(self.user) perms = list(self.user.get_all_permissions()) self.assertEqual(list(role.group.permissions.all()), []) # Check the permissions on the group self.assertEqual(role.group.name, "1_user_bobs-super-group") # Checks that the user is created self.assertListEqual(perms, []) # Staff Roles def test_create_staff_role(self): role = PermafrostRole(name="Bobs Staff Group", category="staff") role.save() role.users_add(self.staffuser) # Add user to the Group perms = list(self.staffuser.get_all_permissions()) self.assertEqual([perm.name for perm in role.group.permissions.all()], ['Can view Role']) # Make sure the required permission is present in the group self.assertEqual(role.group.name, "1_staff_bobs-staff-group") # Checks that the user is created self.assertListEqual(perms, ['permafrost.view_permafrostrole']) def test_add_optional_to_staff_role(self): ''' Test that the optional role can be added ''' role = PermafrostRole(name="Bobs Staff Group", category="staff") role.save() role.permissions_add(self.perm_change_permafrostrole) role.users_add(self.staffuser) # Add user to the Group perms = list(self.staffuser.get_all_permissions()) perms.sort() self.assertListEqual(list(role.group.permissions.all()), [self.perm_change_permafrostrole, self.perm_view_permafrostrole]) # Check the permissions on the group self.assertEqual(role.group.name, "1_staff_bobs-staff-group") # Checks that the user is created self.assertListEqual(perms, ['permafrost.change_permafrostrole', 'permafrost.view_permafrostrole']) def test_add_not_allowed_to_staff_role(self): ''' Test that a permission that is not optional or required can be added ''' role = PermafrostRole(name="Bobs Staff Group", category="staff") role.save() role.permissions_add(self.perm_delete_permafrostrole) role.users_add(self.staffuser) perms = list(self.staffuser.get_all_permissions()) self.assertEqual([perm.name for perm in role.group.permissions.all()], ['Can view Role']) # Make sure the required permission is present in the group self.assertEqual(role.group.name, "1_staff_bobs-staff-group") # Checks that the user is created self.assertListEqual(perms, ['permafrost.view_permafrostrole']) def test_clear_permissions_on_staff_role(self): role = PermafrostRole(name="Bobs Staff Group", category="staff") role.save() role.permissions_add(self.perm_view_permafrostrole) role.permissions_clear() role.users_add(self.staffuser) # Add user to the Group perms = list(self.staffuser.get_all_permissions()) self.assertEqual([perm.name for perm in role.group.permissions.all()], ['Can view Role']) # Make sure the required permission is present in the group self.assertEqual(role.group.name, "1_staff_bobs-staff-group") # Checks that the user is created self.assertListEqual(perms, ['permafrost.view_permafrostrole']) # Administration Roles def test_create_administration_role(self): role = PermafrostRole(name="Bobs Administration Group", category="administration") role.save() role.users_add(self.administrationuser) # Add user to the Group perms = list(self.administrationuser.get_all_permissions()) perms.sort() self.assertListEqual([perm.name for perm in role.group.permissions.all()], ['Can add Role', 'Can change Role', 'Can view Role']) # Make 
sure the required permission is present in the group self.assertEqual(role.group.name, "1_administration_bobs-administration-group") # Checks that the user is created self.assertListEqual(perms, ['permafrost.add_permafrostrole', 'permafrost.change_permafrostrole', 'permafrost.view_permafrostrole']) def test_add_optional_to_administration_role(self): role = PermafrostRole(name="Bobs Administration Group", category="administration") role.save() role.permissions_add(self.perm_delete_permafrostrole) role.users_add(self.administrationuser) # Add user to the Group perms = list(self.administrationuser.get_all_permissions()) perms.sort() self.assertListEqual([perm.name for perm in role.group.permissions.all()], ['Can add Role', 'Can change Role', 'Can delete Role', 'Can view Role']) # Make sure the required permission is present in the group self.assertEqual(role.group.name, "1_administration_bobs-administration-group") # Checks that the user is created self.assertListEqual(perms, ['permafrost.add_permafrostrole', 'permafrost.change_permafrostrole', 'permafrost.delete_permafrostrole', 'permafrost.view_permafrostrole']) def test_add_not_allowed_to_administration_role(self): role = PermafrostRole(name="Bobs Administration Group", category="administration") role.save() role.permissions_add(self.perm_add_logentry) role.permissions_add(self.perm_delete_permafrostrole) role.users_add(self.administrationuser) # Add user to the Group perms = list(self.administrationuser.get_all_permissions()) perms.sort() self.assertListEqual([perm.name for perm in role.group.permissions.all()], ['Can add Role', 'Can change Role', 'Can delete Role', 'Can view Role']) # Make sure the required permission is present in the group self.assertEqual(role.group.name, "1_administration_bobs-administration-group") # Checks that the user is created self.assertListEqual(perms, ['permafrost.add_permafrostrole', 'permafrost.change_permafrostrole', 'permafrost.delete_permafrostrole', 'permafrost.view_permafrostrole']) def test_clear_permissions_on_administration_role(self): role = PermafrostRole(name="Bobs Administration Group", category="administration") role.save() role.permissions_add(self.perm_view_permafrostrole) role.permissions_clear() role.users_add(self.administrationuser) # Add user to the Group perms = list(self.administrationuser.get_all_permissions()) perms.sort() self.assertListEqual([perm.name for perm in role.group.permissions.all()], ['Can add Role', 'Can change Role', 'Can view Role']) # Make sure the required permission is present in the group self.assertEqual(role.group.name, "1_administration_bobs-administration-group") # Checks that the user is created self.assertListEqual(perms, ['permafrost.add_permafrostrole', 'permafrost.change_permafrostrole', 'permafrost.view_permafrostrole']) # Test Role Creation Rules def test_create_duplicate_role(self): ''' Test that creating a PermafrostRole of the same name producers and error ''' role_a = PermafrostRole(name="Bobs Super Group", site=self.site_1, category="user") role_a.save() role_c = PermafrostRole(name="Bobs Super Group", site=self.site_2, category="user") role_c.save() with self.assertRaises(IntegrityError): with transaction.atomic(): role_b = PermafrostRole(name="Bobs Super Group", site=self.site_2, category="user") role_b.save() with transaction.atomic(): role_d = PermafrostRole(name="Bobs Super Group", site=self.site_2, category="staff") role_d.save() # Test that deleting a PermafrostRole deletes the matching group def test_delete_role_deletes_group(self): role = 
PermafrostRole(name="Awesome Students", category="user") role.save() group = role.group group_name = group.name self.assertEqual(role.group.name, "1_user_awesome-students") role.delete() with self.assertRaises(Group.DoesNotExist): group = Group.objects.get(name=group_name) # Don't run the following tests if DRF is not loaded @skipIf(SKIP_DRF_TESTS, "Django Rest Framework not installed, skipping tests") class PermafrostAPITest(TestCase): fixtures = ['unit_test'] def setUp(self): User = get_user_model() self.user = User.objects.create_user(username='john', email='jlennon@beatles.com', password='Passw0rd!') self.staffuser = User.objects.create_user(username='staffy', email='staffy@beatles.com', password='Passw0rd!', is_active=True, is_staff=True, ) self.adminuser = User.objects.create_user(username='adminy', email='adminy@beatles.com', password='Passw0rd!', is_active=True, is_staff=True, is_superuser=True) self.site_1 = Site.objects.get(pk=1) self.site_2 = Site.objects.get(pk=2) self.client = APIClient() def test_superuser_can_access_permissions_endpoint(self): ''' Uses a user that has all the permissions. ''' self.client.force_authenticate(user=self.adminuser) response = self.client.get('/permissions/', format='json') assert response.status_code == 200 def test_can_not_access_permissions_endpoint(self): ''' Uses a user that does not have the required permission ''' self.client.force_authenticate(user=self.user) response = self.client.get('/permissions/', format='json') assert response.status_code == 403
48.6
217
0.689655
1,721
14,094
5.484602
0.099942
0.029558
0.032207
0.040682
0.833139
0.805912
0.79055
0.766713
0.747431
0.716813
0
0.003482
0.205336
14,094
289
218
48.768166
0.839286
0.137647
0
0.617801
0
0
0.182399
0.088676
0
0
0
0
0.230366
1
0.099476
false
0.031414
0.057592
0
0.17801
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2f7179eb68e9801f50aaa52ff3584eb177556d0c
15,831
py
Python
tests/draining/mesos_test.py
akshaysharma096/clusterman
27f4bd217fe201a4c0b9bf460c5a9e155ee88041
[ "Apache-2.0" ]
281
2019-11-15T03:12:43.000Z
2022-01-07T06:36:58.000Z
tests/draining/mesos_test.py
akshaysharma096/clusterman
27f4bd217fe201a4c0b9bf460c5a9e155ee88041
[ "Apache-2.0" ]
38
2019-11-18T20:15:47.000Z
2022-03-28T11:28:45.000Z
tests/draining/mesos_test.py
akshaysharma096/clusterman
27f4bd217fe201a4c0b9bf460c5a9e155ee88041
[ "Apache-2.0" ]
21
2019-11-16T07:49:40.000Z
2022-02-09T18:13:48.000Z
# Copyright 2019 Yelp Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import mock import pytest from clusterman.draining.mesos import build_maintenance_payload from clusterman.draining.mesos import build_maintenance_schedule_payload from clusterman.draining.mesos import down from clusterman.draining.mesos import drain from clusterman.draining.mesos import get_machine_ids from clusterman.draining.mesos import get_maintenance_schedule from clusterman.draining.mesos import Hostname from clusterman.draining.mesos import hostnames_to_components from clusterman.draining.mesos import load_credentials from clusterman.draining.mesos import up @mock.patch("clusterman.draining.mesos.gethostbyname", autospec=True) def test_build_maintenance_payload(mock_gethostbyname,): ip = "169.254.121.212" mock_gethostbyname.return_value = ip hostname = "fqdn1.example.org" hostnames = [hostname] assert build_maintenance_payload(hostnames, "start_maintenance",)["start_maintenance"][ "machines" ] == get_machine_ids(hostnames) @mock.patch("clusterman.draining.mesos.gethostbyname", autospec=True) def test_get_machine_ids_one_host(mock_gethostbyname,): ip = "169.254.121.212" mock_gethostbyname.return_value = ip hostname = "fqdn1.example.org" hostnames = [hostname] expected = [ {"hostname": hostname, "ip": ip,}, ] assert get_machine_ids(hostnames) == expected @mock.patch("clusterman.draining.mesos.gethostbyname", autospec=True) def test_get_machine_ids_multiple_hosts(mock_gethostbyname,): ip1 = "169.254.121.212" ip2 = "169.254.121.213" ip3 = "169.254.121.214" mock_gethostbyname.side_effect = [ip1, ip2, ip3] hostname1 = "fqdn1.example.org" hostname2 = "fqdn2.example.org" hostname3 = "fqdn3.example.org" hostnames = [hostname1, hostname2, hostname3] expected = [ {"hostname": hostname1, "ip": ip1,}, {"hostname": hostname2, "ip": ip2,}, {"hostname": hostname3, "ip": ip3,}, ] assert get_machine_ids(hostnames) == expected def test_get_machine_ids_multiple_hosts_ips(): ip1 = "169.254.121.212" ip2 = "169.254.121.213" ip3 = "169.254.121.214" hostname1 = "fqdn1.example.org" hostname2 = "fqdn2.example.org" hostname3 = "fqdn3.example.org" hostnames = [hostname1 + "|" + ip1, hostname2 + "|" + ip2, hostname3 + "|" + ip3] expected = [ {"hostname": hostname1, "ip": ip1,}, {"hostname": hostname2, "ip": ip2,}, {"hostname": hostname3, "ip": ip3,}, ] assert get_machine_ids(hostnames) == expected @mock.patch("clusterman.draining.mesos.get_maintenance_schedule", autospec=True) @mock.patch("clusterman.draining.mesos.get_machine_ids", autospec=True) def test_build_maintenance_schedule_payload_no_schedule( mock_get_machine_ids, mock_get_maintenance_schedule, ): mock_get_maintenance_schedule.return_value.json.return_value = { "get_maintenance_schedule": {"schedule": {}}, } machine_ids = [{"hostname": "machine2", "ip": "10.0.0.2"}] mock_get_machine_ids.return_value = machine_ids hostnames = ["fake-hostname"] start = "1443830400000000000" duration = "3600000000000" actual = build_maintenance_schedule_payload(mock.Mock(), hostnames, start, duration, 
drain=True) assert mock_get_maintenance_schedule.call_count == 1 assert mock_get_machine_ids.call_count == 1 assert mock_get_machine_ids.call_args == mock.call(hostnames) expected = { "type": "UPDATE_MAINTENANCE_SCHEDULE", "update_maintenance_schedule": { "schedule": { "windows": [ { "machine_ids": machine_ids, "unavailability": { "start": {"nanoseconds": int(start),}, "duration": {"nanoseconds": int(duration),}, }, }, ] } }, } assert actual == expected @mock.patch("clusterman.draining.mesos.get_maintenance_schedule", autospec=True) @mock.patch("clusterman.draining.mesos.get_machine_ids", autospec=True) def test_build_maintenance_schedule_payload_no_schedule_undrain( mock_get_machine_ids, mock_get_maintenance_schedule, ): mock_get_maintenance_schedule.return_value.json.return_value = { "get_maintenance_schedule": {"schedule": {}}, } machine_ids = [{"hostname": "machine2", "ip": "10.0.0.2"}] mock_get_machine_ids.return_value = machine_ids hostnames = ["fake-hostname"] start = "1443830400000000000" duration = "3600000000000" actual = build_maintenance_schedule_payload(mock.Mock(), hostnames, start, duration, drain=False) assert mock_get_maintenance_schedule.call_count == 1 assert mock_get_machine_ids.call_count == 1 assert mock_get_machine_ids.call_args == mock.call(hostnames) expected = { "type": "UPDATE_MAINTENANCE_SCHEDULE", "update_maintenance_schedule": {"schedule": {"windows": [],}}, } assert actual == expected @mock.patch("clusterman.draining.mesos.get_maintenance_schedule", autospec=True) @mock.patch("clusterman.draining.mesos.get_machine_ids", autospec=True) def test_build_maintenance_schedule_payload_schedule( mock_get_machine_ids, mock_get_maintenance_schedule, ): mock_get_maintenance_schedule.return_value.json.return_value = { "type": "GET_MAINTENANCE_SCHEDULE", "get_maintenance_schedule": { "schedule": { "windows": [ { "machine_ids": [ {"hostname": "machine1", "ip": "10.0.0.1"}, {"hostname": "machine2", "ip": "10.0.0.2"}, ], "unavailability": { "start": {"nanoseconds": 1443830400000000000}, "duration": {"nanoseconds": 3600000000000}, }, }, { "machine_ids": [{"hostname": "machine3", "ip": "10.0.0.3"},], "unavailability": { "start": {"nanoseconds": 1443834000000000000}, "duration": {"nanoseconds": 3600000000000}, }, }, ] } }, } machine_ids = [{"hostname": "machine2", "ip": "10.0.0.2"}] mock_get_machine_ids.return_value = machine_ids hostnames = ["machine2"] start = "1443830400000000000" duration = "3600000000000" actual = build_maintenance_schedule_payload(mock.Mock(), hostnames, start, duration, drain=True) assert mock_get_maintenance_schedule.call_count == 1 assert mock_get_machine_ids.call_count == 1 assert mock_get_machine_ids.call_args == mock.call(hostnames) expected = { "type": "UPDATE_MAINTENANCE_SCHEDULE", "update_maintenance_schedule": { "schedule": { "windows": [ { "machine_ids": [{"hostname": "machine1", "ip": "10.0.0.1"},], "unavailability": { "start": {"nanoseconds": 1443830400000000000}, "duration": {"nanoseconds": 3600000000000}, }, }, { "machine_ids": [{"hostname": "machine3", "ip": "10.0.0.3"},], "unavailability": { "start": {"nanoseconds": 1443834000000000000}, "duration": {"nanoseconds": 3600000000000}, }, }, { "machine_ids": machine_ids, "unavailability": { "start": {"nanoseconds": int(start)}, "duration": {"nanoseconds": int(duration)}, }, }, ] } }, } assert actual == expected @mock.patch("clusterman.draining.mesos.get_maintenance_schedule", autospec=True) @mock.patch("clusterman.draining.mesos.get_machine_ids", autospec=True) def 
test_build_maintenance_schedule_payload_schedule_undrain( mock_get_machine_ids, mock_get_maintenance_schedule, ): mock_get_maintenance_schedule.return_value.json.return_value = { "type": "GET_MAINTENANCE_SCHEDULE", "get_maintenance_schedule": { "schedule": { "windows": [ { "machine_ids": [ {"hostname": "machine1", "ip": "10.0.0.1"}, {"hostname": "machine2", "ip": "10.0.0.2"}, ], "unavailability": { "start": {"nanoseconds": 1443830400000000000}, "duration": {"nanoseconds": 3600000000000}, }, }, { "machine_ids": [{"hostname": "machine3", "ip": "10.0.0.3"},], "unavailability": { "start": {"nanoseconds": 1443834000000000000}, "duration": {"nanoseconds": 3600000000000}, }, }, ] } }, } machine_ids = [{"hostname": "machine2", "ip": "10.0.0.2"}] mock_get_machine_ids.return_value = machine_ids hostnames = ["machine2"] start = "1443830400000000000" duration = "3600000000000" actual = build_maintenance_schedule_payload(mock.Mock(), hostnames, start, duration, drain=False) assert mock_get_maintenance_schedule.call_count == 1 assert mock_get_machine_ids.call_count == 1 assert mock_get_machine_ids.call_args == mock.call(hostnames) expected = { "type": "UPDATE_MAINTENANCE_SCHEDULE", "update_maintenance_schedule": { "schedule": { "windows": [ { "machine_ids": [{"hostname": "machine1", "ip": "10.0.0.1"},], "unavailability": { "start": {"nanoseconds": 1443830400000000000}, "duration": {"nanoseconds": 3600000000000}, }, }, { "machine_ids": [{"hostname": "machine3", "ip": "10.0.0.3"},], "unavailability": { "start": {"nanoseconds": 1443834000000000000}, "duration": {"nanoseconds": 3600000000000}, }, }, ] } }, } assert actual == expected @mock.patch("clusterman.draining.mesos.open", create=True, autospec=None) def test_load_credentials(mock_open,): principal = "username" secret = "password" credentials = { "principal": principal, "secret": secret, } mock_open.side_effect = mock.mock_open(read_data=json.dumps(credentials)) credentials = load_credentials("/nail/blah") assert credentials.principal == principal assert credentials.secret == secret @mock.patch("clusterman.draining.mesos.open", create=True, side_effect=IOError, autospec=None) def test_load_credentials_missing_file(mock_open,): with pytest.raises(IOError): assert load_credentials("/nail/blah") @mock.patch("clusterman.draining.mesos.open", create=True, autospec=None) def test_load_credentials_keyerror(mock_open,): credentials = {} mock_open.side_effect = mock.mock_open(read_data=json.dumps(credentials)) with pytest.raises(KeyError): assert load_credentials("/nail/blah") def test_get_maintenance_schedule(): mock_operator_api = mock.Mock() get_maintenance_schedule(mock_operator_api) assert mock_operator_api.call_count == 1 assert mock_operator_api.call_args == mock.call(data={"type": "GET_MAINTENANCE_SCHEDULE"}) @mock.patch("clusterman.draining.mesos.build_maintenance_schedule_payload", autospec=True) def test_drain(mock_build_maintenance_schedule_payload,): mock_operator_api = mock.Mock() fake_schedule = {"fake_schedule": "fake_value"} mock_build_maintenance_schedule_payload.return_value = fake_schedule drain( mock_operator_api, hostnames=["some-host"], start="some-start", duration="some-duration", ) assert mock_build_maintenance_schedule_payload.call_count == 1 expected_args = mock.call(mock_operator_api, ["some-host"], "some-start", "some-duration", drain=True) assert mock_build_maintenance_schedule_payload.call_args == expected_args expected_args = mock.call(["some-host"]) assert mock_operator_api.call_count == 1 expected_args = 
mock.call(data=fake_schedule) assert mock_operator_api.call_args == expected_args @mock.patch("clusterman.draining.mesos.build_maintenance_payload", autospec=True) def test_down(mock_build_maintenance_payload,): mock_operator_api = mock.Mock() fake_payload = [{"fake_schedule": "fake_value"}] mock_build_maintenance_payload.return_value = fake_payload down(mock_operator_api, hostnames=["some-host"]) assert mock_build_maintenance_payload.call_count == 1 assert mock_build_maintenance_payload.call_args == mock.call(["some-host"], "start_maintenance") assert mock_operator_api.call_count == 1 expected_args = mock.call(data=fake_payload) assert mock_operator_api.call_args == expected_args @mock.patch("clusterman.draining.mesos.build_maintenance_payload", autospec=True) def test_up(mock_build_maintenance_payload,): mock_operator_api = mock.Mock() fake_payload = [{"fake_schedule": "fake_value"}] mock_build_maintenance_payload.return_value = fake_payload up(mock_operator_api, hostnames=["some-host"]) assert mock_build_maintenance_payload.call_count == 1 assert mock_build_maintenance_payload.call_args == mock.call(["some-host"], "stop_maintenance") assert mock_operator_api.call_count == 1 expected_args = mock.call(data=fake_payload) assert mock_operator_api.call_args == expected_args def sideeffect_mock_get_count_running_tasks_on_slave(hostname): if hostname == "host1": return 3 else: return 0 def test_hostnames_to_components_simple(): hostname = "fake-host" ip = None expected = [Hostname(host=hostname, ip=ip)] actual = hostnames_to_components([hostname]) assert actual == expected def test_hostnames_to_components_pipe(): hostname = "fake-host" ip = "127.0.0.1" expected = [Hostname(host=hostname, ip=ip)] actual = hostnames_to_components([f"{hostname}|{ip}"]) assert actual == expected @mock.patch("clusterman.draining.mesos.gethostbyname", autospec=True) def test_hostnames_to_components_resolve(mock_gethostbyname,): hostname = "fake-host" ip = "127.0.0.1" mock_gethostbyname.return_value = ip expected = [Hostname(host=hostname, ip=ip)] actual = hostnames_to_components([hostname], resolve=True) assert actual == expected
39.088889
106
0.621692
1,623
15,831
5.789279
0.109673
0.097063
0.06854
0.051724
0.839187
0.791933
0.759579
0.715517
0.702107
0.702107
0
0.05529
0.25968
15,831
404
107
39.185644
0.746416
0.034489
0
0.596491
0
0
0.205095
0.075699
0
0
0
0
0.119883
1
0.055556
false
0.002924
0.038012
0
0.099415
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2f818680599014adfab1550717056ab2186b5d3a
15,601
py
Python
api_tests/wikis/views/test_wiki_detail.py
chennan47/osf.io
270608592b39a94941a3e329c0dc16d295a82472
[ "Apache-2.0" ]
null
null
null
api_tests/wikis/views/test_wiki_detail.py
chennan47/osf.io
270608592b39a94941a3e329c0dc16d295a82472
[ "Apache-2.0" ]
18
2020-03-24T16:16:14.000Z
2022-03-03T22:37:48.000Z
api_tests/wikis/views/test_wiki_detail.py
kounoAkihiro/SV-RDM-OSF
76fb0c739f4cdabf03b5bfd2bc63d83b1c2d4796
[ "Apache-2.0" ]
1
2021-10-04T21:16:56.000Z
2021-10-04T21:16:56.000Z
import mock
import pytest
import furl
from urlparse import urlparse
from nose.tools import *  # flake8: noqa

from api.base.settings.defaults import API_BASE
from osf.models import Guid
from addons.wiki.models import NodeWikiPage
from tests.base import ApiWikiTestCase
from osf_tests.factories import (ProjectFactory, RegistrationFactory,
                                 PrivateLinkFactory, CommentFactory)
from addons.wiki.tests.factories import NodeWikiFactory


class TestWikiDetailView(ApiWikiTestCase):

    def _set_up_public_project_with_wiki_page(self, project_options=None):
        project_options = project_options or {}
        self.public_project = ProjectFactory(
            is_public=True, creator=self.user, **project_options)
        self.public_wiki = self._add_project_wiki_page(
            self.public_project, self.user)
        self.public_url = '/{}wikis/{}/'.format(API_BASE, self.public_wiki._id)

    def _set_up_private_project_with_wiki_page(self):
        self.private_project = ProjectFactory(creator=self.user)
        self.private_wiki = self._add_project_wiki_page(
            self.private_project, self.user)
        self.private_url = '/{}wikis/{}/'.format(
            API_BASE, self.private_wiki._id)

    def _set_up_public_registration_with_wiki_page(self):
        self._set_up_public_project_with_wiki_page()
        self.public_registration = RegistrationFactory(
            project=self.public_project, user=self.user, is_public=True)
        self.public_registration_wiki_id = self.public_registration.wiki_pages_versions[
            'home'][0]
        self.public_registration.wiki_pages_current = {
            'home': self.public_registration_wiki_id}
        self.public_registration.save()
        self.public_registration_url = '/{}wikis/{}/'.format(
            API_BASE, self.public_registration_wiki_id)

    def _set_up_private_registration_with_wiki_page(self):
        self._set_up_private_project_with_wiki_page()
        self.private_registration = RegistrationFactory(
            project=self.private_project, user=self.user)
        self.private_registration_wiki_id = self.private_registration.wiki_pages_versions[
            'home'][0]
        self.private_registration.wiki_pages_current = {
            'home': self.private_registration_wiki_id}
        self.private_registration.save()
        self.private_registration_url = '/{}wikis/{}/'.format(
            API_BASE, self.private_registration_wiki_id)

    def test_public_node_logged_out_user_can_view_wiki(self):
        self._set_up_public_project_with_wiki_page()
        res = self.app.get(self.public_url)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.public_wiki._id)

    def test_public_node_logged_in_non_contributor_can_view_wiki(self):
        self._set_up_public_project_with_wiki_page()
        res = self.app.get(self.public_url, auth=self.non_contributor.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.public_wiki._id)

    def test_public_node_logged_in_contributor_can_view_wiki(self):
        self._set_up_public_project_with_wiki_page()
        res = self.app.get(self.public_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.public_wiki._id)

    def test_private_node_logged_out_user_cannot_view_wiki(self):
        self._set_up_private_project_with_wiki_page()
        res = self.app.get(self.private_url, expect_errors=True)
        assert_equal(res.status_code, 401)
        assert_equal(res.json['errors'][0]['detail'],
                     'Authentication credentials were not provided.')

    def test_private_node_logged_in_non_contributor_cannot_view_wiki(self):
        self._set_up_private_project_with_wiki_page()
        res = self.app.get(
            self.private_url,
            auth=self.non_contributor.auth,
            expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_equal(
            res.json['errors'][0]['detail'],
            'You do not have permission to perform this action.')

    def test_private_node_logged_in_contributor_can_view_wiki(self):
        self._set_up_private_project_with_wiki_page()
        res = self.app.get(self.private_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.private_wiki._id)

    def test_private_node_user_with_anonymous_link_can_view_wiki(self):
        self._set_up_private_project_with_wiki_page()
        private_link = PrivateLinkFactory(anonymous=True)
        private_link.nodes.add(self.private_project)
        private_link.save()
        url = furl.furl(
            self.private_url).add(
            query_params={
                'view_only': private_link.key}).url
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.private_wiki._id)

    def test_private_node_user_with_view_only_link_can_view_wiki(self):
        self._set_up_private_project_with_wiki_page()
        private_link = PrivateLinkFactory(anonymous=False)
        private_link.nodes.add(self.private_project)
        private_link.save()
        url = furl.furl(
            self.private_url).add(
            query_params={
                'view_only': private_link.key}).url
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.private_wiki._id)

    def test_public_registration_logged_out_user_cannot_view_wiki(self):
        self._set_up_public_registration_with_wiki_page()
        res = self.app.get(self.public_registration_url, expect_errors=True)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.public_registration_wiki_id)

    def test_public_registration_logged_in_non_contributor_cannot_view_wiki(
            self):
        self._set_up_public_registration_with_wiki_page()
        res = self.app.get(
            self.public_registration_url,
            auth=self.non_contributor.auth,
            expect_errors=True)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.public_registration_wiki_id)

    def test_public_registration_contributor_can_view_wiki(self):
        self._set_up_public_registration_with_wiki_page()
        res = self.app.get(self.public_registration_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.public_registration_wiki_id)

    def test_user_cannot_view_withdrawn_registration_wikis(self):
        self._set_up_public_registration_with_wiki_page()
        # TODO: Remove mocking when StoredFileNode is implemented
        with mock.patch('osf.models.AbstractNode.update_search'):
            withdrawal = self.public_registration.retract_registration(
                user=self.user, save=True)
            token = withdrawal.approval_state.values()[0]['approval_token']
            withdrawal.approve_retraction(self.user, token)
            withdrawal.save()
        res = self.app.get(
            self.public_registration_url,
            auth=self.user.auth,
            expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_equal(
            res.json['errors'][0]['detail'],
            'You do not have permission to perform this action.')

    def test_private_registration_logged_out_user_cannot_view_wiki(self):
        self._set_up_private_registration_with_wiki_page()
        res = self.app.get(self.private_registration_url, expect_errors=True)
        assert_equal(res.status_code, 401)
        assert_equal(res.json['errors'][0]['detail'],
                     'Authentication credentials were not provided.')

    def test_private_registration_logged_in_non_contributor_cannot_view_wiki(
            self):
        self._set_up_private_registration_with_wiki_page()
        res = self.app.get(
            self.private_registration_url,
            auth=self.non_contributor.auth,
            expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_equal(
            res.json['errors'][0]['detail'],
            'You do not have permission to perform this action.')

    def test_private_registration_contributor_can_view_wiki(self):
        self._set_up_private_registration_with_wiki_page()
        res = self.app.get(self.private_registration_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], self.private_registration_wiki_id)

    def test_wiki_has_user_link(self):
        self._set_up_public_project_with_wiki_page()
        res = self.app.get(self.public_url)
        url = res.json['data']['relationships']['user']['links']['related']['href']
        expected_url = '/{}users/{}/'.format(API_BASE, self.user._id)
        assert_equal(res.status_code, 200)
        assert_equal(urlparse(url).path, expected_url)

    def test_wiki_has_node_link(self):
        self._set_up_public_project_with_wiki_page()
        res = self.app.get(self.public_url)
        url = res.json['data']['relationships']['node']['links']['related']['href']
        expected_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id)
        assert_equal(res.status_code, 200)
        assert_equal(urlparse(url).path, expected_url)

    def test_wiki_has_comments_link(self):
        self._set_up_public_project_with_wiki_page()
        res = self.app.get(self.public_url)
        assert_equal(res.status_code, 200)
        url = res.json['data']['relationships']['comments']['links']['related']['href']
        comment = CommentFactory(
            node=self.public_project,
            target=Guid.load(self.public_wiki._id),
            user=self.user)
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data'][0]['type'], 'comments')

    def test_only_project_contrib_can_comment_on_closed_project(self):
        self._set_up_public_project_with_wiki_page(
            project_options={'comment_level': 'private'})
        res = self.app.get(self.public_url, auth=self.user.auth)
        can_comment = res.json['data']['attributes']['current_user_can_comment']
        assert_equal(res.status_code, 200)
        assert_equal(can_comment, True)
        res = self.app.get(self.public_url, auth=self.non_contributor.auth)
        can_comment = res.json['data']['attributes']['current_user_can_comment']
        assert_equal(res.status_code, 200)
        assert_equal(can_comment, False)

    def test_any_loggedin_user_can_comment_on_open_project(self):
        self._set_up_public_project_with_wiki_page(
            project_options={'comment_level': 'public'})
        res = self.app.get(self.public_url, auth=self.non_contributor.auth)
        can_comment = res.json['data']['attributes']['current_user_can_comment']
        assert_equal(res.status_code, 200)
        assert_equal(can_comment, True)

    def test_non_logged_in_user_cant_comment(self):
        self._set_up_public_project_with_wiki_page(
            project_options={'comment_level': 'public'})
        res = self.app.get(self.public_url)
        can_comment = res.json['data']['attributes']['current_user_can_comment']
        assert_equal(res.status_code, 200)
        assert_equal(can_comment, False)

    def test_wiki_has_download_link(self):
        self._set_up_public_project_with_wiki_page()
        res = self.app.get(self.public_url)
        url = res.json['data']['links']['download']
        expected_url = '/{}wikis/{}/content/'.format(
            API_BASE, self.public_wiki._id)
        assert_equal(res.status_code, 200)
        assert_in(expected_url, url)

    def test_wiki_invalid_id_not_found(self):
        url = '/{}wikis/{}/'.format(API_BASE, 'abcde')
        res = self.app.get(url, expect_errors=True)
        assert_equal(res.status_code, 404)

    def test_old_wiki_versions_not_returned(self):
        self._set_up_public_project_with_wiki_page()
        # TODO: Remove mocking when StoredFileNode is implemented
        with mock.patch('osf.models.AbstractNode.update_search'):
            current_wiki = NodeWikiFactory(
                node=self.public_project, user=self.user)
        old_version_id = self.public_project.wiki_pages_versions[current_wiki.page_name][-2]
        old_version = NodeWikiPage.load(old_version_id)
        url = '/{}wikis/{}/'.format(API_BASE, old_version._id)
        res = self.app.get(url, expect_errors=True)
        assert_equal(res.status_code, 404)

    def test_public_node_wiki_relationship_links(self):
        self._set_up_public_project_with_wiki_page()
        res = self.app.get(self.public_url)
        expected_nodes_relationship_url = '{}nodes/{}/'.format(
            API_BASE, self.public_project._id)
        expected_comments_relationship_url = '{}nodes/{}/comments/'.format(
            API_BASE, self.public_project._id)
        assert_in(
            expected_nodes_relationship_url,
            res.json['data']['relationships']['node']['links']['related']['href'])
        assert_in(
            expected_comments_relationship_url,
            res.json['data']['relationships']['comments']['links']['related']['href'])

    def test_private_node_wiki_relationship_links(self):
        self._set_up_private_project_with_wiki_page()
        res = self.app.get(self.private_url, auth=self.user.auth)
        expected_nodes_relationship_url = '{}nodes/{}/'.format(
            API_BASE, self.private_project._id)
        expected_comments_relationship_url = '{}nodes/{}/comments/'.format(
            API_BASE, self.private_project._id)
        assert_in(
            expected_nodes_relationship_url,
            res.json['data']['relationships']['node']['links']['related']['href'])
        assert_in(
            expected_comments_relationship_url,
            res.json['data']['relationships']['comments']['links']['related']['href'])

    def test_public_registration_wiki_relationship_links(self):
        self._set_up_public_registration_with_wiki_page()
        res = self.app.get(self.public_registration_url)
        expected_nodes_relationship_url = '{}registrations/{}/'.format(
            API_BASE, self.public_registration._id)
        expected_comments_relationship_url = '{}registrations/{}/comments/'.format(
            API_BASE, self.public_registration._id)
        assert_in(
            expected_nodes_relationship_url,
            res.json['data']['relationships']['node']['links']['related']['href'])
        assert_in(
            expected_comments_relationship_url,
            res.json['data']['relationships']['comments']['links']['related']['href'])

    def test_private_registration_wiki_relationship_links(self):
        self._set_up_private_registration_with_wiki_page()
        res = self.app.get(self.private_registration_url, auth=self.user.auth)
        expected_nodes_relationship_url = '{}registrations/{}/'.format(
            API_BASE, self.private_registration._id)
        expected_comments_relationship_url = '{}registrations/{}/comments/'.format(
            API_BASE, self.private_registration._id)
        assert_in(
            expected_nodes_relationship_url,
            res.json['data']['relationships']['node']['links']['related']['href'])
        assert_in(
            expected_comments_relationship_url,
            res.json['data']['relationships']['comments']['links']['related']['href'])
46.84985
92
0.684123
1,969
15,601
5.013205
0.080752
0.048627
0.059568
0.03951
0.836896
0.826157
0.809847
0.766285
0.71705
0.667106
0
0.007173
0.204666
15,601
332
93
46.990964
0.788362
0.007948
0
0.621993
0
0
0.092354
0.014606
0
0
0
0.003012
0.195876
1
0.109966
false
0
0.037801
0
0.151203
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2f831a78f4ccf4a5fc8f721bbbda4df246e6fa02
122
py
Python
author/admin.py
CMPUT404-stev-sand-pant-ashw-mehr/CMPUT404-stev-sand-pant-ashw-mehr-repo
0f96d938e9e3ec51103f2b20cb9673bd0b145343
[ "MIT" ]
null
null
null
author/admin.py
CMPUT404-stev-sand-pant-ashw-mehr/CMPUT404-stev-sand-pant-ashw-mehr-repo
0f96d938e9e3ec51103f2b20cb9673bd0b145343
[ "MIT" ]
50
2021-10-08T00:01:43.000Z
2021-12-06T06:34:29.000Z
author/admin.py
CMPUT404-stev-sand-pant-ashw-mehr/CMPUT404-stev-sand-pant-ashw-mehr-repo
0f96d938e9e3ec51103f2b20cb9673bd0b145343
[ "MIT" ]
null
null
null
from django.contrib import admin

from .models import Author

admin.site.register(Author)
20.333333
32
0.827869
18
122
5.611111
0.5
0.237624
0.356436
0
0
0
0
0
0
0
0
0
0.114754
122
6
33
20.333333
0.935185
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.75
0
0.75
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
2f8f42c8fc1deae2ea109bf97894ed404fbc5d2f
36
py
Python
pySrc/keywords.py
marchers/amadeus-learning
3d593fec3f3aebba3e069297f74f9dba1410b7d7
[ "MIT" ]
null
null
null
pySrc/keywords.py
marchers/amadeus-learning
3d593fec3f3aebba3e069297f74f9dba1410b7d7
[ "MIT" ]
null
null
null
pySrc/keywords.py
marchers/amadeus-learning
3d593fec3f3aebba3e069297f74f9dba1410b7d7
[ "MIT" ]
null
null
null
import keyword

print(keyword.kwlist)
18
21
0.861111
5
36
6.2
0.8
0
0
0
0
0
0
0
0
0
0
0
0.055556
36
2
21
18
0.911765
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
6
85cfec14d2ac9d14343e0c196b80850281b6bc9f
112
py
Python
ml/metrics/__init__.py
rmaestre/ml-code-lectures
9cdc5e8552da10c30dca531ba6eaff41f0689713
[ "MIT" ]
2
2022-03-28T13:42:07.000Z
2022-03-28T13:42:12.000Z
ml/metrics/__init__.py
rmaestre/ml-code-lectures
9cdc5e8552da10c30dca531ba6eaff41f0689713
[ "MIT" ]
null
null
null
ml/metrics/__init__.py
rmaestre/ml-code-lectures
9cdc5e8552da10c30dca531ba6eaff41f0689713
[ "MIT" ]
1
2022-03-03T08:36:52.000Z
2022-03-03T08:36:52.000Z
from ml.metrics.classification import ClassificationMetrics
from ml.metrics.regression import RegressionMetrics
37.333333
59
0.892857
12
112
8.333333
0.666667
0.12
0.26
0
0
0
0
0
0
0
0
0
0.071429
112
2
60
56
0.961538
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
c8030532ba7e66034e00fe184465d1e468c196c8
10,740
py
Python
code/residuals.py
dlaredo/stochastic_dynamics
a851d8fe1e88f5bafa187c4d43c21c7904716670
[ "MIT" ]
null
null
null
code/residuals.py
dlaredo/stochastic_dynamics
a851d8fe1e88f5bafa187c4d43c21c7904716670
[ "MIT" ]
null
null
null
code/residuals.py
dlaredo/stochastic_dynamics
a851d8fe1e88f5bafa187c4d43c21c7904716670
[ "MIT" ]
null
null
null
import tensorflow as tf


# Derivatives
def first_order_central_finite_difference(tf_fx_delta_plus, tf_fx_delta_minus, delta):
    derivative = tf.subtract(tf_fx_delta_plus, tf_fx_delta_minus) / (2 * delta)
    return derivative


def second_order_central_finite_difference(tf_fx, tf_fx_delta_plus, tf_fx_delta_minus, delta):
    tf_fx = tf.Print(tf_fx, [tf_fx, tf_fx_delta_plus, tf_fx_delta_minus], "\nf(y) f(y+h) f(y-h): ")
    second_derivative = tf.add(tf.subtract(tf_fx_delta_plus, 2 * tf_fx), tf_fx_delta_minus) / (delta ** 2)
    return second_derivative


# Integrals
def R1_integral(x2, tf_fx_x_plus, tf_fx_x_minus, delta_y):
    R1 = 2 * delta_y * tf.multiply(x2, tf.subtract(tf_fx_x_plus, tf_fx_x_minus))
    # R1 = tf.Print(R1, [tf.shape(x2), tf.shape(tf_fx_x_plus), tf.shape(tf_fx_x_minus)], message="shapes in R1")
    return R1


def R2_integral(x2, x2_delta_plus, x2_delta_minus, tf_fx, tf_fx_y_plus, tf_fx_y_minus, delta_x, delta_y):
    s1 = (2 * delta_x) * tf.subtract(tf.multiply(tf_fx_y_plus, x2_delta_plus),
                                     tf.multiply(tf_fx_y_minus, x2_delta_minus))
    s2 = 4 * tf_fx * delta_x * delta_y
    R2 = tf.subtract(s1, s2)
    return R2


def R3_integral(x1, tf_fx_y_plus, tf_fx_y_minus, delta_x):
    R3 = 2 * delta_x * tf.multiply(x1, tf.subtract(tf_fx_y_plus, tf_fx_y_minus))
    return R3


def R4_integral(tf_fx_y_plus, tf_fx_y_minus, delta_x, delta_y):
    R4 = (delta_x / delta_y) * tf.pow(tf.subtract(tf_fx_y_plus, tf_fx_y_minus), 2)
    return R4


def R5_integral(tf_fx, tf_fx_y_plus, tf_fx_y_minus, delta_x, delta_y):
    R5 = (2 * delta_x / delta_y) * tf.subtract(tf.add(tf_fx_y_plus, tf_fx_y_minus), 2 * tf_fx)
    return R5


def residual_ode1(X_batches, y_pred_batches, y_real_batches, deltas, batch_size, num_conditions, alpha=1, **kwargs):
    # y pred batches
    y_pred_original = y_pred_batches[0]
    y_pred_delta1_plus = y_pred_batches[1]
    y_pred_delta1_minus = y_pred_batches[2]

    # y boundaries
    y_pred_initial = y_pred_batches[-1]
    y_real_initial = y_real_batches[-1]

    delta_x = deltas[0]

    r_total = first_order_central_finite_difference(y_pred_delta1_plus, y_pred_delta1_minus, delta_x) \
        - y_pred_original - 2 * tf.ones(tf.shape(y_pred_original), dtype=tf.float32, name=None)
    e1 = tf.div(tf.reduce_sum(tf.pow(r_total, 2)), 2 * tf.cast(batch_size, tf.float32), name="residual")
    e2 = tf.div(tf.reduce_sum(tf.pow(tf.subtract(y_pred_initial, y_real_initial), 2)),
                2 * tf.constant(num_conditions, tf.float32), name="initial_conditions")
    r = tf.add(e1, alpha * e2, name="residual_total")
    return r


def residual_ode2(X_batches, y_pred_batches, y_real_batches, deltas, batch_size, num_conditions, alpha=1, **kwargs):
    # y pred batches
    y_pred_original = y_pred_batches[0]
    y_pred_delta1_plus = y_pred_batches[1]
    y_pred_delta1_minus = y_pred_batches[2]

    # y boundaries
    y_pred_initial = y_pred_batches[-1]
    y_real_initial = y_real_batches[-1]

    delta_x = deltas[0]

    print("num conditions")
    print(num_conditions)
    print("batch size")
    print(batch_size)

    r_total = first_order_central_finite_difference(y_pred_delta1_plus, y_pred_delta1_minus, delta_x) - y_pred_original
    e1 = tf.div(tf.reduce_sum(tf.pow(r_total, 2)), 2 * tf.cast(batch_size, tf.float32), name="residual")
    e2 = tf.div(tf.reduce_sum(tf.pow(tf.subtract(y_pred_initial, y_real_initial), 2)),
                2 * tf.constant(num_conditions, tf.float32), name="initial_conditions")
    r = tf.add(e1, alpha * e2, name="residual_total")
    return r


def residual_integral2(X_batches, y_pred_batches, y_real_batches, deltas, batch_size, num_conditions, alpha=1, **kwargs):
    # y pred batches
    y_pred_original = y_pred_batches[0]
    y_pred_delta1_plus = y_pred_batches[1]
    y_pred_delta1_minus = y_pred_batches[2]

    # y boundaries
    y_pred_initial = y_pred_batches[-1]
    y_real_initial = y_real_batches[-1]

    delta_x = deltas[0]

    print("num conditions")
    print(num_conditions)
    print("batch size")
    print(batch_size)

    r_total = tf.multiply(y_pred_delta1_plus, (1 - delta_x / 2)) - tf.multiply(y_pred_original, (1 + delta_x / 2))
    e1 = tf.div(tf.reduce_sum(tf.pow(r_total, 2)), 2 * tf.cast(batch_size, tf.float32), name="residual")
    e2 = tf.div(tf.reduce_sum(tf.pow(tf.subtract(y_pred_initial, y_real_initial), 2)),
                2 * tf.constant(num_conditions, tf.float32), name="initial_conditions")
    r = tf.add(e1, alpha * e2, name="residual_total")
    return r


def residual_eg1(X_batches, y_pred_batches, y_real_batches, deltas, batch_size, alpha=1, **kwargs):
    # x batches
    X_original = X_batches[0]

    # y pred batches
    y_pred_original = y_pred_batches[0]
    y_pred_delta1_plus = y_pred_batches[1]
    y_pred_delta1_minus = y_pred_batches[2]

    # y boundaries
    y_pred_initial = y_pred_batches[-1]
    y_real_initial = y_real_batches[-1]
    y_pred_initial = tf.Print(y_pred_initial, [y_real_initial, y_pred_initial], message="Initial conditions")

    delta_x = deltas[0]

    d1 = first_order_central_finite_difference(y_pred_delta1_plus, y_pred_delta1_minus, delta_x)
    r_total = d1 + (X_original + (1 + 3 * tf.pow(X_original, 2)) / (1 + X_original + tf.pow(X_original, 3))) * y_pred_original \
        - tf.pow(X_original, 3) - 2 * X_original \
        - tf.pow(X_original, 2) * (1 + 3 * tf.pow(X_original, 2)) / (1 + X_original + tf.pow(X_original, 3))
    e1 = tf.div(tf.reduce_sum(tf.pow(r_total, 2)), 2 * tf.cast(batch_size, tf.float32), name="residual")
    e2 = tf.reduce_sum(tf.pow(tf.subtract(y_pred_initial, y_real_initial), 2), name="initial_conditions")
    r = tf.add(e1, alpha * e2, name="residual_total")

    ic = tf.multiply(y_real_initial, tf.ones(tf.shape(y_pred_original)))
    r_paper_total = y_pred_original + X_original * d1 - (
        - (ic + tf.multiply(X_original, y_pred_original)) * (X_original + (1 + 3 * tf.pow(X_original, 2)) / (1 + X_original + tf.pow(X_original, 3)))
        + tf.pow(X_original, 3) + 2 * X_original
        + tf.pow(X_original, 2) * (1 + 3 * tf.pow(X_original, 2)) / (1 + X_original + tf.pow(X_original, 3)))
    r_paper = tf.div(tf.reduce_sum(tf.pow(r_paper_total, 2)), 2 * tf.cast(batch_size, tf.float32), name="residual")

    return r


def residual_eg2(X_batches, y_pred_batches, y_real_batches, deltas, batch_size, alpha=1, **kwargs):
    # x batches
    X_original = X_batches[0]

    # y pred batches
    y_pred_original = y_pred_batches[0]
    y_pred_delta1_plus = y_pred_batches[1]
    y_pred_delta1_minus = y_pred_batches[2]

    # y boundaries
    y_pred_initial = y_pred_batches[-1]
    y_real_initial = y_real_batches[-1]
    y_pred_initial = tf.Print(y_pred_initial, [y_real_initial, y_pred_initial], message="Initial conditions")

    delta_x = deltas[0]

    d1 = first_order_central_finite_difference(y_pred_delta1_plus, y_pred_delta1_minus, delta_x)
    r_total = d1 + y_pred_original / 5 - tf.exp(- X_original / 5) * tf.cos(X_original)
    e1 = tf.div(tf.reduce_sum(tf.pow(r_total, 2)), 2 * tf.cast(batch_size, tf.float32), name="residual")
    e2 = tf.reduce_sum(tf.pow(tf.subtract(y_pred_initial, y_real_initial), 2), name="initial_conditions")
    r = tf.add(e1, alpha * e2, name="residual_total")

    ic = tf.multiply(y_real_initial, tf.ones(tf.shape(y_pred_original)))
    r_paper_total = y_pred_original + X_original * d1 - (
        - (ic + tf.multiply(X_original, y_pred_original)) / 5 + tf.exp(-X_original / 5) * tf.cos(X_original))
    r_paper = tf.div(tf.reduce_sum(tf.pow(r_paper_total, 2)), 2 * tf.cast(batch_size, tf.float32), name="residual")

    return r


def residual_phi_derivatives(X_batches, y_pred_batches, y_real_batches, deltas,
                             batch_size, num_conditions, alpha=1, **kwargs):
    # Retrieve function specific parameters
    c = kwargs["c"]
    k = kwargs["k"]
    D = kwargs["D"]

    # x batches
    X_original = X_batches[0]
    X_delta1_plus = X_batches[1]
    X_delta1_minus = X_batches[2]
    X_delta2_plus = X_batches[3]
    X_delta2_minus = X_batches[4]
    x1 = tf.slice(X_original, [0, 0], tf.stack([batch_size, 1]))
    x2 = tf.slice(X_original, [0, 1], tf.stack([batch_size, 1]))

    # y pred batches
    y_pred_original = y_pred_batches[0]
    y_pred_delta1_plus = y_pred_batches[1]
    y_pred_delta1_minus = y_pred_batches[2]
    y_pred_delta2_plus = y_pred_batches[3]
    y_pred_delta2_minus = y_pred_batches[4]

    # y boundaries
    y_pred_initial = y_pred_batches[-1]
    y_real_initial = y_real_batches[-1]

    # compute the approximate derivatives (tensors) given y_pred
    nn_partial1_x = first_order_central_finite_difference(y_pred_delta1_plus, y_pred_delta1_minus, deltas[0])
    nn_partial1_y = first_order_central_finite_difference(y_pred_delta2_plus, y_pred_delta2_minus, deltas[1])
    nn_partial2_y = second_order_central_finite_difference(y_pred_original, y_pred_delta2_plus, y_pred_delta2_minus, deltas[1])

    r1 = tf.multiply(x2, nn_partial1_x)
    r2 = tf.multiply(c * x2, nn_partial1_y)
    r3 = tf.multiply(k * x1, nn_partial1_y)
    r4 = D * tf.subtract(tf.pow(nn_partial1_y, 2), nn_partial2_y)

    r_total = r1 + c - r2 - r3 + r4
    e1 = tf.div(tf.reduce_sum(tf.pow(r_total, 2)), 2 * tf.cast(batch_size, tf.float32), name="residual")
    e2 = tf.reduce_sum(tf.pow(tf.subtract(y_pred_initial, y_real_initial), 2), name="initial_conditions")
    r = tf.add(e1, alpha * e2, name="residual_total")
    return r


def residual_phi_integral(X_batches, y_pred_batches, y_real_batches, deltas, batch_size, num_conditions, alpha=1, **kwargs):
    # Retrieve function specific parameters
    c = kwargs["c"]
    k = kwargs["k"]
    D = kwargs["D"]

    # x batches
    X_original = X_batches[0]
    X_delta1_plus = X_batches[1]
    X_delta1_minus = X_batches[2]
    X_delta2_plus = X_batches[3]
    X_delta2_minus = X_batches[4]
    x1 = tf.slice(X_original, [0, 0], tf.stack([batch_size, 1]))
    x2 = tf.slice(X_original, [0, 1], tf.stack([batch_size, 1]))
    x2_plus = tf.slice(X_delta2_plus, [0, 1], tf.stack([batch_size, 1]))
    x2_minus = tf.slice(X_delta2_minus, [0, 1], tf.stack([batch_size, 1]))

    # y pred batches
    y_pred_original = y_pred_batches[0]
    y_pred_delta1_plus = y_pred_batches[1]
    y_pred_delta1_minus = y_pred_batches[2]
    y_pred_delta2_plus = y_pred_batches[3]
    y_pred_delta2_minus = y_pred_batches[4]

    # y boundaries
    y_pred_initial = y_pred_batches[-1]
    y_real_initial = y_real_batches[-1]

    r1 = R1_integral(x2, y_pred_delta1_plus, y_pred_delta1_minus, deltas[1])
    r2 = R2_integral(x2, x2_plus, x2_minus, y_pred_original, y_pred_delta2_plus, y_pred_delta2_minus, deltas[0], deltas[1])
    r3 = R3_integral(x1, y_pred_delta2_plus, y_pred_delta2_minus, deltas[0])
    r4 = R4_integral(y_pred_delta2_plus, y_pred_delta2_minus, deltas[0], deltas[1])
    r5 = R5_integral(y_pred_original, y_pred_delta2_plus, y_pred_delta2_minus, deltas[0], deltas[1])

    r_total = r1 + c * deltas[0] * deltas[1] - c * r2 - k * r3 + D * tf.subtract(r4, r5)
    e1 = tf.div(tf.reduce_sum(tf.pow(r_total, 2)), 2 * tf.cast(batch_size, tf.float32), name="residual")
    e2 = tf.reduce_sum(tf.pow(tf.subtract(y_pred_initial, y_real_initial), 2), name="initial_conditions")
    r = tf.add(e1, alpha * e2, name="residual_total")
    return r
35.562914
334
0.741341
1,950
10,740
3.705128
0.055385
0.091349
0.076401
0.028789
0.856747
0.838478
0.821176
0.81301
0.80083
0.786851
0
0.038695
0.126536
10,740
301
335
35.681063
0.731479
0.045438
0
0.633136
0
0
0.039894
0
0
0
0
0
0
1
0.08284
false
0
0.005917
0
0.171598
0.047337
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
c8422983d6e06c5e9a8bbaa72956ee4f0dd993e8
73
py
Python
gym-kinova-gripper/gym_kinova_gripper/envs/__init__.py
OSUrobotics/KinovaGrasping
f22af60d3683fdc4ffecf49ccff179fbc6750748
[ "Linux-OpenIB" ]
16
2020-05-16T00:40:31.000Z
2022-02-22T11:59:03.000Z
gym-kinova-gripper/gym_kinova_gripper/envs/__init__.py
OSUrobotics/KinovaGrasping
f22af60d3683fdc4ffecf49ccff179fbc6750748
[ "Linux-OpenIB" ]
9
2020-08-10T08:33:55.000Z
2021-08-17T02:10:50.000Z
gym-kinova-gripper/gym_kinova_gripper/envs/__init__.py
OSUrobotics/KinovaGrasping
f22af60d3683fdc4ffecf49ccff179fbc6750748
[ "Linux-OpenIB" ]
7
2020-07-27T09:45:05.000Z
2021-06-21T21:42:50.000Z
from gym_kinova_gripper.envs.kinova_gripper_env import KinovaGripper_Env
36.5
72
0.917808
11
73
5.636364
0.727273
0.419355
0
0
0
0
0
0
0
0
0
0
0.054795
73
1
73
73
0.898551
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
c09ecd5ddf88636414aea8b1081f6c6813322a1e
50
py
Python
code/answer_1-2-4.py
KoyanagiHitoshi/AtCoder-Python-Introduction
6d014e333a873f545b4d32d438e57cf428b10b96
[ "MIT" ]
1
2022-03-29T13:50:12.000Z
2022-03-29T13:50:12.000Z
code/answer_1-2-4.py
KoyanagiHitoshi/AtCoder-Python-Introduction
6d014e333a873f545b4d32d438e57cf428b10b96
[ "MIT" ]
null
null
null
code/answer_1-2-4.py
KoyanagiHitoshi/AtCoder-Python-Introduction
6d014e333a873f545b4d32d438e57cf428b10b96
[ "MIT" ]
null
null
null
s1, s2, s3 = input().split(",")
print(s1, s2, s3)
16.666667
31
0.54
9
50
3
0.666667
0.296296
0.444444
0
0
0
0
0
0
0
0
0.142857
0.16
50
2
32
25
0.5
0
0
0
0
0
0.02
0
0
0
0
0
0
1
0
true
0
0
0
0
0.5
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
c0a54b783fee2b764ec1b4dc9a7f6888a632f0bd
31
py
Python
__init__.py
1740415303/tturtle-
8e87b23fd46b33f75784edd93954699e27686ef3
[ "Apache-2.0" ]
null
null
null
__init__.py
1740415303/tturtle-
8e87b23fd46b33f75784edd93954699e27686ef3
[ "Apache-2.0" ]
null
null
null
__init__.py
1740415303/tturtle-
8e87b23fd46b33f75784edd93954699e27686ef3
[ "Apache-2.0" ]
null
null
null
from .ali_sms import send_sms
15.5
29
0.806452
6
31
3.833333
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.16129
31
2
29
15.5
0.884615
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
c0cc9acfdd5fc35e47d735c3cb7bd9d7a1e61fa5
5,717
py
Python
tests/test_split.py
Nivratti/split-folders
3fbb73fa33778f64bbda0ec96db659a4f3bb1109
[ "OLDAP-2.8" ]
292
2018-10-05T11:01:36.000Z
2022-03-24T14:21:01.000Z
tests/test_split.py
Nivratti/split-folders
3fbb73fa33778f64bbda0ec96db659a4f3bb1109
[ "OLDAP-2.8" ]
33
2018-11-09T10:49:05.000Z
2022-03-25T03:59:23.000Z
tests/test_split.py
Nivratti/split-folders
3fbb73fa33778f64bbda0ec96db659a4f3bb1109
[ "OLDAP-2.8" ]
59
2018-10-05T19:30:59.000Z
2022-03-25T04:05:40.000Z
import os
import pathlib
import shutil

import pytest

from splitfolders import ratio, fixed


def test_second_package():
    from split_folders import ratio, fixed


def test_split_ratio():
    input_dir = os.path.join(os.path.dirname(__file__), "imgs")
    output_dir = os.path.join(os.path.dirname(__file__), "output")
    shutil.rmtree(output_dir, ignore_errors=True)

    ratio(input_dir, output_dir)

    # ensure the number of pics is the same
    a = len(list(pathlib.Path(input_dir).glob("**/*.jpg")))
    b = len(list(pathlib.Path(output_dir).glob("**/*.jpg")))
    assert a == b


def test_split_ratio_2():
    input_dir = os.path.join(os.path.dirname(__file__), "imgs")
    output_dir = os.path.join(os.path.dirname(__file__), "output")
    shutil.rmtree(output_dir, ignore_errors=True)

    ratio(input_dir, output_dir, ratio=(0.7, 0.2, 0.1))

    # ensure the number of pics is the same
    a = len(list(pathlib.Path(input_dir).glob("**/*.jpg")))
    b = len(list(pathlib.Path(output_dir).glob("**/*.jpg")))
    assert a == b


def test_split_ratio_no_test():
    input_dir = os.path.join(os.path.dirname(__file__), "imgs")
    output_dir = os.path.join(os.path.dirname(__file__), "output")
    shutil.rmtree(output_dir, ignore_errors=True)

    ratio(input_dir, output_dir, ratio=(0.8, 0.2))

    # ensure the number of pics is the same
    a = len(list(pathlib.Path(input_dir).glob("**/*.jpg")))
    b = len(list(pathlib.Path(output_dir).glob("**/*.jpg")))
    assert a == b


def test_split_fixed():
    input_dir = os.path.join(os.path.dirname(__file__), "imgs")
    output_dir = os.path.join(os.path.dirname(__file__), "output")
    shutil.rmtree(output_dir, ignore_errors=True)

    fixed(input_dir, output_dir, fixed=(2, 2))

    # ensure the number of pics is the same
    a = len(list(pathlib.Path(input_dir).glob("**/*.jpg")))
    b = len(list(pathlib.Path(output_dir).glob("**/*.jpg")))
    assert a == b


def test_split_fixed_simple():
    input_dir = os.path.join(os.path.dirname(__file__), "imgs")
    output_dir = os.path.join(os.path.dirname(__file__), "output")
    shutil.rmtree(output_dir, ignore_errors=True)

    fixed(input_dir, output_dir, fixed=(2,))

    # ensure the number of pics is the same
    a = len(list(pathlib.Path(input_dir).glob("**/*.jpg")))
    b = len(list(pathlib.Path(output_dir).glob("**/*.jpg")))
    assert a == b


def test_split_fixed_simple_2():
    input_dir = os.path.join(os.path.dirname(__file__), "imgs")
    output_dir = os.path.join(os.path.dirname(__file__), "output")
    shutil.rmtree(output_dir, ignore_errors=True)

    fixed(input_dir, output_dir, fixed=2)

    # ensure the number of pics is the same
    a = len(list(pathlib.Path(input_dir).glob("**/*.jpg")))
    b = len(list(pathlib.Path(output_dir).glob("**/*.jpg")))
    assert a == b


def test_split_fixed_oversample():
    input_dir = os.path.join(os.path.dirname(__file__), "imgs")
    output_dir = os.path.join(os.path.dirname(__file__), "output")
    shutil.rmtree(output_dir, ignore_errors=True)

    fixed(input_dir, output_dir, fixed=(2, 2), oversample=True)

    # ensure the number of pics is the same
    a = len(list(pathlib.Path(input_dir).glob("**/*.jpg")))
    b = len(list(pathlib.Path(output_dir).glob("**/*.jpg")))
    assert a != b


def test_split_fixed_oversample_unbalanced():
    input_dir = os.path.join(os.path.dirname(__file__), "imgs")
    output_dir = os.path.join(os.path.dirname(__file__), "output")
    shutil.rmtree(output_dir, ignore_errors=True)

    with pytest.raises(ValueError):
        fixed(input_dir, output_dir, fixed=(9, 1), oversample=True)


def test_split_ratio_prefix():
    input_dir = os.path.join(os.path.dirname(__file__), "imgs_texts")
    output_dir = os.path.join(os.path.dirname(__file__), "output")
    shutil.rmtree(output_dir, ignore_errors=True)

    ratio(input_dir, output_dir, group_prefix=2)

    # ensure the number of pics is the same
    a = len(list(pathlib.Path(input_dir).glob("**/*.jpg")))
    b = len(list(pathlib.Path(output_dir).glob("**/*.jpg")))
    assert a == b


def test_split_fixed_prefix():
    input_dir = os.path.join(os.path.dirname(__file__), "imgs_texts")
    output_dir = os.path.join(os.path.dirname(__file__), "output")
    shutil.rmtree(output_dir, ignore_errors=True)

    fixed(input_dir, output_dir, fixed=(1, 1), oversample=False, group_prefix=2)

    # ensure the number of pics is the same
    a = len(list(pathlib.Path(input_dir).glob("**/*.jpg")))
    b = len(list(pathlib.Path(output_dir).glob("**/*.jpg")))
    assert a == b


def test_split_fixed_oversample_prefix():
    input_dir = os.path.join(os.path.dirname(__file__), "imgs_texts")
    output_dir = os.path.join(os.path.dirname(__file__), "output")
    shutil.rmtree(output_dir, ignore_errors=True)

    fixed(input_dir, output_dir, fixed=(1, 1), oversample=True, group_prefix=2)

    # ensure the number of pics is the same
    a = len(list(pathlib.Path(input_dir).glob("**/*.jpg")))
    b = len(list(pathlib.Path(output_dir).glob("**/*.jpg")))
    assert a != b


def test_split_ratio_prefix_error_1():
    input_dir = os.path.join(os.path.dirname(__file__), "imgs_texts_error_1")
    output_dir = os.path.join(os.path.dirname(__file__), "output")
    shutil.rmtree(output_dir, ignore_errors=True)

    with pytest.raises(ValueError):
        ratio(input_dir, output_dir, group_prefix=2)


def test_split_ratio_prefix_error_2():
    input_dir = os.path.join(os.path.dirname(__file__), "imgs_texts_error_2")
    output_dir = os.path.join(os.path.dirname(__file__), "output")
    shutil.rmtree(output_dir, ignore_errors=True)

    with pytest.raises(ValueError):
        ratio(input_dir, output_dir, group_prefix=2)
31.761111
80
0.683051
875
5,717
4.161143
0.067429
0.085691
0.064268
0.092832
0.954683
0.93738
0.919253
0.919253
0.919253
0.919253
0
0.006828
0.154627
5,717
179
81
31.938547
0.746534
0.066294
0
0.695238
0
0
0.063075
0
0
0
0
0
0.095238
1
0.133333
false
0
0.057143
0
0.190476
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
c0e1595e180129d736b486ae0d8079f3901f0534
6,030
py
Python
sizakat/mustahik/migrations/0001_initial.py
artmxra7/sizkt-backend
49263b7d937ac62307b8ced47fa1497226f1d4cc
[ "MIT" ]
null
null
null
sizakat/mustahik/migrations/0001_initial.py
artmxra7/sizkt-backend
49263b7d937ac62307b8ced47fa1497226f1d4cc
[ "MIT" ]
5
2021-03-30T14:16:46.000Z
2021-09-22T19:38:45.000Z
sizakat/mustahik/migrations/0001_initial.py
artmxra7/sizkt-backend
49263b7d937ac62307b8ced47fa1497226f1d4cc
[ "MIT" ]
1
2020-11-14T02:58:29.000Z
2020-11-14T02:58:29.000Z
# Generated by Django 3.0.7 on 2020-07-29 08:26

import django.core.validators
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='DataSource',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category', models.CharField(choices=[('WARGA', 'Warga'), ('INSTITUSI', 'Institusi'), ('PEKERJA', 'Pekerja')], max_length=32)),
            ],
        ),
        migrations.CreateModel(
            name='Mustahik',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150)),
                ('no_ktp', models.CharField(max_length=32, unique=True, validators=[django.core.validators.RegexValidator('^[0-9]*$', 'Numeric character only.')])),
                ('phone', models.CharField(blank=True, max_length=32, null=True, validators=[django.core.validators.RegexValidator('^[0-9]*$', 'Numeric character only.')])),
                ('address', models.CharField(max_length=255)),
                ('birthdate', models.DateField()),
                ('status', models.CharField(choices=[('FAKIR', 'Fakir'), ('MISKIN', 'Miskin'), ('AMIL', 'Amil'), ('MUALAF', 'Mualaf'), ('GHARIM', 'Gharim'), ('FISABILILLAH', 'Fisabilillah'), ('MUSAFIR', 'Musafir')], max_length=32)),
                ('gender', models.CharField(choices=[('L', 'Laki-Laki'), ('P', 'Perempuan')], max_length=1)),
                ('photo', models.FileField(default='images/default_photo.jpg', upload_to='images/mustahik')),
                ('data_source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mustahik.DataSource')),
            ],
        ),
        migrations.CreateModel(
            name='DataSourceWarga',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pic_name', models.CharField(max_length=150)),
                ('pic_ktp', models.CharField(max_length=32, validators=[django.core.validators.RegexValidator('^[0-9]*$', 'Numeric character only.')])),
                ('pic_phone', models.CharField(max_length=32, validators=[django.core.validators.RegexValidator('^[0-9]*$', 'Numeric character only.')])),
                ('pic_position', models.CharField(max_length=50)),
                ('province', models.CharField(max_length=50)),
                ('regency', models.CharField(max_length=50)),
                ('sub_district', models.CharField(max_length=50)),
                ('village', models.CharField(max_length=50)),
                ('rt', models.CharField(max_length=3, validators=[django.core.validators.RegexValidator('^[0-9]*$', 'Numeric character only.')])),
                ('rw', models.CharField(max_length=3, validators=[django.core.validators.RegexValidator('^[0-9]*$', 'Numeric character only.')])),
                ('data_source', models.OneToOneField(limit_choices_to={'category': 'WARGA'}, on_delete=django.db.models.deletion.CASCADE, to='mustahik.DataSource')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='DataSourcePekerja',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pic_name', models.CharField(max_length=150)),
                ('pic_ktp', models.CharField(max_length=32, validators=[django.core.validators.RegexValidator('^[0-9]*$', 'Numeric character only.')])),
                ('pic_phone', models.CharField(max_length=32, validators=[django.core.validators.RegexValidator('^[0-9]*$', 'Numeric character only.')])),
                ('pic_position', models.CharField(max_length=50)),
                ('profession', models.CharField(max_length=50)),
                ('location', models.CharField(max_length=50)),
                ('data_source', models.OneToOneField(limit_choices_to={'category': 'PEKERJA'}, on_delete=django.db.models.deletion.CASCADE, to='mustahik.DataSource')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='DataSourceInstitusi',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pic_name', models.CharField(max_length=150)),
                ('pic_ktp', models.CharField(max_length=32, validators=[django.core.validators.RegexValidator('^[0-9]*$', 'Numeric character only.')])),
                ('pic_phone', models.CharField(max_length=32, validators=[django.core.validators.RegexValidator('^[0-9]*$', 'Numeric character only.')])),
                ('pic_position', models.CharField(max_length=50)),
                ('name', models.CharField(max_length=150)),
                ('province', models.CharField(max_length=50)),
                ('regency', models.CharField(max_length=50)),
                ('sub_district', models.CharField(max_length=50)),
                ('village', models.CharField(max_length=50)),
                ('rt', models.CharField(max_length=3, validators=[django.core.validators.RegexValidator('^[0-9]*$', 'Numeric character only.')])),
                ('rw', models.CharField(max_length=3, validators=[django.core.validators.RegexValidator('^[0-9]*$', 'Numeric character only.')])),
                ('address', models.CharField(max_length=255)),
                ('data_source', models.OneToOneField(limit_choices_to={'category': 'INSTITUSI'}, on_delete=django.db.models.deletion.CASCADE, to='mustahik.DataSource')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
62.164948
232
0.594527
604
6,030
5.801325
0.182119
0.149829
0.159247
0.212329
0.767694
0.752854
0.726884
0.726884
0.683219
0.683219
0
0.023984
0.232504
6,030
96
233
62.8125
0.733146
0.007463
0
0.662921
1
0
0.18586
0.004011
0
0
0
0
0
1
0
false
0
0.033708
0
0.078652
0
0
0
0
null
0
0
1
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
239f20367d13490d6b0986f21ea6590da99fe967
76,386
py
Python
pair_fast_forecast_multiGPU_val/pairwise_fusion_kd/utils/model.py
Chezacar/CollaborationWithLatency
da06abea16f1ffcafc35d27cb69ae3116a345965
[ "MIT" ]
null
null
null
pair_fast_forecast_multiGPU_val/pairwise_fusion_kd/utils/model.py
Chezacar/CollaborationWithLatency
da06abea16f1ffcafc35d27cb69ae3116a345965
[ "MIT" ]
null
null
null
pair_fast_forecast_multiGPU_val/pairwise_fusion_kd/utils/model.py
Chezacar/CollaborationWithLatency
da06abea16f1ffcafc35d27cb69ae3116a345965
[ "MIT" ]
null
null
null
# Copyright (c) 2020 Mitsubishi Electric Research Laboratories (MERL). All rights reserved. The software, documentation and/or data in this file is provided on an "as is" basis, and MERL has no obligations to provide maintenance, support, updates, enhancements or modifications. MERL specifically disclaims any warranties, including, but not limited to, the implied warranties of merchantability and fitness for any particular purpose. In no event shall MERL be liable to any party for direct, indirect, special, incidental, or consequential damages, including lost profits, arising out of the use of this software and its documentation, even if MERL has been advised of the possibility of such damages. As more fully described in the license agreement that was required in order to download this software, documentation and/or data, permission to use, copy and modify this software without fee is granted, but only for educational, research and non-commercial purposes. from multiprocessing import RawArray import os from os import XATTR_SIZE_MAX from warnings import formatwarning from numpy.core import shape_base import torch.nn.functional as F import torch.nn as nn import torch import ipdb import math import time import numpy as np from PIL import Image import matplotlib.pyplot as plt import seaborn as sns from torch.nn.modules import padding from torch.nn.modules.rnn import LSTMCell class Predict_Conv(nn.Module): def __init__(self, input_size=256, height_feat_size=64, forecast_num = 3): super(Predict_Conv, self).__init__() self.conv1 = nn.Conv2d(3*height_feat_size, 4*height_feat_size, kernel_size=3, stride=1, padding=1) self.conv2 = nn.Conv2d(4*height_feat_size, 4*height_feat_size, kernel_size=3, stride=1, padding=1) self.conv3 = nn.Conv2d(4*height_feat_size, 2*height_feat_size, kernel_size=3, stride=1, padding=1) self.linear1 = nn.Linear(2*height_feat_size, 1*height_feat_size) self.bn1 = nn.BatchNorm2d(4*height_feat_size) self.bn2 = nn.BatchNorm2d(4*height_feat_size) self.bn3 = nn.BatchNorm2d(2*height_feat_size) self.bn4 = nn.BatchNorm2d(1*height_feat_size) def forward(self, x, m): x_m = torch.cat([x,m.squeeze(0)], 0) x_m = x_m.unsqueeze(0) x_m = F.relu(self.bn1(self.conv1(x_m))) x_m = F.relu(self.bn2(self.conv2(x_m))) x_m = F.relu(self.bn3(self.conv3(x_m))) x_m = F.relu(self.bn4(self.linear1(x_m.permute(0,2,3,1)).permute(0,3,1,2))) return x_m class MotionRNN(nn.Module): def __init__(self, channel_size = 256, motion_category_num=2, height_feat_size=64, forecast_num = 3): super(MotionRNN, self).__init__() self.height_feat_size = height_feat_size self.forecast_num = forecast_num self.ratio = int(math.sqrt(channel_size / height_feat_size)) self.channel_size = 256 self.motionconv = STPN_MotionNet(height_feat_size = height_feat_size,forecast_num = forecast_num) self.feature_prediction = Predict_Conv(height_feat_size = height_feat_size) self.conv_pre_1 = nn.Conv2d(self.channel_size, self.ratio * self.height_feat_size, kernel_size=3, stride=1, padding=1) self.conv_pre_2 = nn.Conv2d(self.ratio * self.height_feat_size, self.height_feat_size, kernel_size=3, stride=1, padding=1) self.bn_pre_1 = nn.BatchNorm2d(self.ratio * self.height_feat_size) self.bn_pre_2 = nn.BatchNorm2d(self.height_feat_size) self.conv_after_1 = nn.Conv2d(self.height_feat_size, self.ratio * self.height_feat_size, kernel_size=3, stride=1, padding=1) self.conv_after_2 = nn.Conv2d(self.ratio * self.height_feat_size, self.channel_size, kernel_size=3, stride=1, padding=1) self.bn_after_1 = nn.BatchNorm2d(self.ratio * self.height_feat_size) 
self.bn_after_2 = nn.BatchNorm2d(self.channel_size) # self.feature_prediction = Predict_Conv(height_feat_size = height_feat_size, output_size = height_feat_size) def forward(self, x ,delta_t): device = x.device x = F.relu(self.bn_pre_1(self.conv_pre_1(x))) x = F.relu(self.bn_pre_2(self.conv_pre_2(x))) a,b,c = x[0].shape # input_d = torch.zeros((int(delta_t), 1, self.forecast_num, a, b, c)).to(device) h_d = torch.zeros((int(delta_t) + self.forecast_num,a,b,c)).to(device) h_d[0:self.forecast_num] = x.clone() for i in range(int(delta_t)): # a = h_d[i:i+self.forecast_num] m = self.motionconv(h_d[i:i+self.forecast_num].unsqueeze(0).clone()) h_d[i + self.forecast_num] = self.feature_prediction(h_d[i + self.forecast_num - 1].clone(), m) h = F.relu(self.bn_after_1(self.conv_after_1(h_d[-1].unsqueeze(0)))) h = F.relu(self.bn_after_2(self.conv_after_2(h))) del h_d return h class Motion_Prediction_LSTM(nn.Module): def __init__(self, channel_size = 256, spatial_size = 32, compressed_size = 256, motion_category_num=2, delta_t = 5, forecast_num = 3): super(Motion_Prediction_LSTM, self).__init__() self.ratio = int(math.sqrt(channel_size / compressed_size)) self.delta_t = delta_t self.forecast_num = forecast_num self.spatial_size = spatial_size self.compressed_size = compressed_size self.channel_size = channel_size self.conv_pre_1 = nn.Conv2d(self.channel_size, self.ratio * self.compressed_size, kernel_size=3, stride=1, padding=1) self.conv_pre_2 = nn.Conv2d(self.ratio * self.compressed_size, self.compressed_size, kernel_size=3, stride=1, padding=1) self.bn_pre_1 = nn.BatchNorm2d(self.ratio * self.compressed_size) self.bn_pre_2 = nn.BatchNorm2d(self.compressed_size) # self.conv_pre_3 = nn.Conv2d(16, self.compressed_size, kernel_size=3, stride=1, padding=1) self.conv_after_1 = nn.Conv2d(self.compressed_size, self.ratio * self.compressed_size, kernel_size=3, stride=1, padding=1) self.conv_after_2 = nn.Conv2d(self.ratio * self.compressed_size, self.channel_size, kernel_size=3, stride=1, padding=1) self.bn_after_1 = nn.BatchNorm2d(self.ratio * self.compressed_size) self.bn_after_2 = nn.BatchNorm2d(self.channel_size) self.lstmcell = MotionLSTM(32, self.compressed_size) self.time_weight = ModulatedTime(input_channel = 512) def forward(self, x, delta_t): self.delta_t = delta_t # Cell Classification head # x_shape = [self.forecast_num _32 _32_256] # x = F.relu(self.bn_pre_1(self.conv_pre_1(x))) # x = F.relu(self.bn_pre_2(self.conv_pre_2(x))) h = x[-1] c = torch.zeros((x[0].shape)).to(x.device) for i in range(self.forecast_num): h,c = self.lstmcell(x[i], (h,c)) # cell_class_pred = self.cell_classify(stpn_out) for t in range(int(self.delta_t)): h,c = self.lstmcell(h, (h,c)) # Motion State Classification head # state_class_pred = self.state_classify(stpn_out) w = self.time_weight(torch.cat([x[-1].unsqueeze(0), h],1), delta_t) w = 0.1 * int(delta_t - 1) * w w = torch.tanh(w) x = w * h + (1-w) * x[-1] # x = h # x = F.relu(self.bn_after_1(self.conv_after_1(x))) # x = F.relu(self.bn_after_2(self.conv_after_2(x))) # Motion Displacement prediction # disp = self.motion_pred(stpn_out) # disp = disp.view(-1, 2, stpn_out.size(-2), stpn_out.size(-1)) # return disp, cell_class_pred, state_class_pred return x class MotionLSTM(nn.Module): def __init__(self, spatial_size, input_channel_size, hidden_size = 0): super().__init__() self.input_channel_size = input_channel_size # channel size self.hidden_size = hidden_size self.spatial_size = spatial_size #i_t # self.U_i = nn.Parameter(torch.Tensor(input_channel_size, hidden_size)) # 
self.V_i = nn.Parameter(torch.Tensor(hidden_size, hidden_size)) self.U_i = STPN_MotionLSTM(height_feat_size = self.input_channel_size) self.V_i = STPN_MotionLSTM(height_feat_size = self.input_channel_size) self.b_i = nn.Parameter(torch.Tensor(1, self.input_channel_size, self.spatial_size, self.spatial_size)) # #f_t # self.U_f = nn.Parameter(torch.Tensor(input_channel_size, hidden_size)) # self.V_f = nn.Parameter(torch.Tensor(hidden_size, hidden_size)) # self.b_f = nn.Parameter(torch.Tensor(hidden_size)) self.U_f = STPN_MotionLSTM(height_feat_size = self.input_channel_size) self.V_f = STPN_MotionLSTM(height_feat_size = self.input_channel_size) self.b_f = nn.Parameter(torch.Tensor(1, self.input_channel_size, self.spatial_size, self.spatial_size)) # #c_t # self.U_c = nn.Parameter(torch.Tensor(input_channel_size, hidden_size)) # self.V_c = nn.Parameter(torch.Tensor(hidden_size, hidden_size)) # self.b_c = nn.Parameter(torch.Tensor(hidden_size)) self.U_c = STPN_MotionLSTM(height_feat_size = self.input_channel_size) self.V_c = STPN_MotionLSTM(height_feat_size = self.input_channel_size) self.b_c = nn.Parameter(torch.Tensor(1, self.input_channel_size, self.spatial_size, self.spatial_size)) # #o_t # self.U_o = nn.Parameter(torch.Tensor(input_channel_size, hidden_size)) # self.V_o = nn.Parameter(torch.Tensor(hidden_size, hidden_size)) # self.b_o = nn.Parameter(torch.Tensor(hidden_size)) self.U_o = STPN_MotionLSTM(height_feat_size = self.input_channel_size) self.V_o = STPN_MotionLSTM(height_feat_size = self.input_channel_size) self.b_o = nn.Parameter(torch.Tensor(1, self.input_channel_size, self.spatial_size, self.spatial_size)) # self.init_weights() # def init_weights(self): # stdv = 1.0 / math.sqrt(self.hidden_size) # for weight in self.parameters(): # weight.data.uniform_(-stdv, stdv) def forward(self,x,init_states=None): """ assumes x.shape represents (batch_size, sequence_size, input_channel_size) """ h, c = init_states i = torch.sigmoid(self.U_i(x) + self.V_i(h) + self.b_i) f = torch.sigmoid(self.U_f(x) + self.V_f(h) + self.b_f) g = torch.tanh(self.U_c(x) + self.V_c(h) + self.b_c) o = torch.sigmoid(self.U_o(x) + self.V_o(x) + self.b_o) c_out = f * c + i * g h_out = o * torch.tanh(c_out) # hidden_seq.append(h_t.unsqueeze(0)) # #reshape hidden_seq p/ retornar # hidden_seq = torch.cat(hidden_seq, dim=0) # hidden_seq = hidden_seq.transpose(0, 1).contiguous() return (h_out, c_out) class STPN_MotionLSTM(nn.Module): def __init__(self, height_feat_size = 16): super(STPN_MotionLSTM, self).__init__() # self.conv3d_1 = Conv3D(4, 8, kernel_size=(3, 1, 1), stride=1, padding=(0, 0, 0)) # self.conv3d_2 = Conv3D(8, 8, kernel_size=(1, 1, 1), stride=1, padding=(0, 0, 0)) # self.conv3d_1 = Conv3D(64, 64, kernel_size=(1, 1, 1), stride=1, padding=(0, 0, 0)) # self.conv3d_2 = Conv3D(128, 128, kernel_size=(1, 1, 1), stride=1, padding=(0, 0, 0)) self.conv1_1 = nn.Conv2d(height_feat_size, 2*height_feat_size, kernel_size=3, stride=2, padding=1) self.conv1_2 = nn.Conv2d(2*height_feat_size, 2*height_feat_size, kernel_size=3, stride=1, padding=1) self.conv2_1 = nn.Conv2d(2*height_feat_size, 4*height_feat_size, kernel_size=3, stride=2, padding=1) self.conv2_2 = nn.Conv2d(4*height_feat_size, 4*height_feat_size, kernel_size=3, stride=1, padding=1) self.conv7_1 = nn.Conv2d(6*height_feat_size, 2*height_feat_size, kernel_size=3, stride=1, padding=1) self.conv7_2 = nn.Conv2d(2*height_feat_size, 2*height_feat_size, kernel_size=3, stride=1, padding=1) self.conv8_1 = nn.Conv2d(3*height_feat_size , height_feat_size, kernel_size=3, 
stride=1, padding=1) self.conv8_2 = nn.Conv2d(height_feat_size, height_feat_size, kernel_size=3, stride=1, padding=1) self.bn1_1 = nn.BatchNorm2d(2*height_feat_size) self.bn1_2 = nn.BatchNorm2d(2*height_feat_size) self.bn2_1 = nn.BatchNorm2d(4*height_feat_size) self.bn2_2 = nn.BatchNorm2d(4*height_feat_size) self.bn7_1 = nn.BatchNorm2d(2*height_feat_size) self.bn7_2 = nn.BatchNorm2d(2*height_feat_size) self.bn8_1 = nn.BatchNorm2d(1*height_feat_size) self.bn8_2 = nn.BatchNorm2d(1*height_feat_size) def forward(self, x): # z, h, w = x.size() batch = 1 # bathc 4 32 32 x = x.view(-1, x.size(-3), x.size(-2), x.size(-1)) # -------------------------------- Encoder Path -------------------------------- # -- STC block 1 x_1 = F.relu(self.bn1_1(self.conv1_1(x))) x_1 = F.relu(self.bn1_2(self.conv1_2(x_1))) x_1 = x_1.view(batch, -1, x_1.size(1), x_1.size(2), x_1.size(3)).contiguous() # (batch, seq, c, h, w) # x_1 = self.conv3d_1(x_1) x_1 = x_1.view(-1, x_1.size(2), x_1.size(3), x_1.size(4)).contiguous() # (batch * seq, c, h, w) # -- STC block 2 x_2 = F.relu(self.bn2_1(self.conv2_1(x_1))) x_2 = F.relu(self.bn2_2(self.conv2_2(x_2))) x_2 = x_2.view(batch, -1, x_2.size(1), x_2.size(2), x_2.size(3)).contiguous() # (batch, seq, c, h, w) # x_2 = self.conv3d_2(x_2) x_2 = x_2.view(-1, x_2.size(2), x_2.size(3), x_2.size(4)).contiguous() # (batch * seq, c, h, w), seq = 1 # -- STC block 3 # x_3 = F.relu(self.bn3_1(self.conv3_1(x_2))) # x_3 = F.relu(self.bn3_2(self.conv3_2(x_3))) # -- STC block 4 # x_4 = F.relu(self.bn4_1(self.conv4_1(x_3))) # x_4 = F.relu(self.bn4_2(self.conv4_2(x_4))) # x_4 = x_4.view(batch, -1, x_4.size(1), x_4.size(2), x_4.size(3)) # x_4 = x_4.permute(0, 2, 1, 3, 4).contiguous() # x_4 = F.adaptive_max_pool3d(x_4, (1, None, None)) # x_4 = x_4.permute(0, 2, 1, 3, 4).contiguous() # x_4 = x_4.view(-1, x_4.size(2), x_4.size(3), x_4.size(4)).contiguous() # -------------------------------- Decoder Path -------------------------------- # x_3 = x_3.view(batch, -1, x_3.size(1), x_3.size(2), x_3.size(3)) # x_3 = x_3.permute(0, 2, 1, 3, 4).contiguous() # x_3 = F.adaptive_max_pool3d(x_3, (1, None, None)) # x_3 = x_3.permute(0, 2, 1, 3, 4).contiguous() # x_3 = x_3.view(-1, x_3.size(2), x_3.size(3), x_3.size(4)).contiguous() # x_5 = F.relu(self.bn5_1(self.conv5_1(torch.cat((F.interpolate(x_4, scale_factor=(2, 2)), x_3), dim=1)))) # x_5 = F.relu(self.bn5_2(self.conv5_2(x_5))) x_2 = x_2.view(batch, -1, x_2.size(1), x_2.size(2), x_2.size(3)) x_2 = x_2.permute(0, 2, 1, 3, 4).contiguous() x_2 = F.adaptive_max_pool3d(x_2, (1, None, None)) x_2 = x_2.permute(0, 2, 1, 3, 4).contiguous() x_2 = x_2.view(-1, x_2.size(2), x_2.size(3), x_2.size(4)).contiguous() # x_6 = F.relu(self.bn6_1(self.conv6_1(torch.cat((F.interpolate(x_5, scale_factor=(2, 2)), x_2), dim=1)))) # x_6 = F.relu(self.bn6_2(self.conv6_2(x_6))) x_1 = x_1.view(batch, -1, x_1.size(1), x_1.size(2), x_1.size(3)) x_1 = x_1.permute(0, 2, 1, 3, 4).contiguous() x_1 = F.adaptive_max_pool3d(x_1, (1, None, None)) x_1 = x_1.permute(0, 2, 1, 3, 4).contiguous() x_1 = x_1.view(-1, x_1.size(2), x_1.size(3), x_1.size(4)).contiguous() x_7 = F.relu(self.bn7_1(self.conv7_1(torch.cat((F.interpolate(x_2, scale_factor=(2, 2)), x_1), dim=1)))) x_7 = F.relu(self.bn7_2(self.conv7_2(x_7))) x = x.view(batch, -1, x.size(1), x.size(2), x.size(3)) x = x.permute(0, 2, 1, 3, 4).contiguous() x = F.adaptive_max_pool3d(x, (1, None, None)) x = x.permute(0, 2, 1, 3, 4).contiguous() x = x.view(-1, x.size(2), x.size(3), x.size(4)).contiguous() x_8 = 
F.relu(self.bn8_1(self.conv8_1(torch.cat((F.interpolate(x_7, scale_factor=(2, 2)), x), dim=1)))) res_x = F.relu(self.bn8_2(self.conv8_2(x_8))) return res_x class forecast_lstm(nn.Module): def __init__(self): super(forecast_lstm, self).__init__() self.embedding_dim = 8192 self.hidden_size = 8192 # self.proj_size = 32768 self.lstm_layer = nn.LSTMCell(input_channel_size=self.embedding_dim,hidden_size=self.hidden_size) # self.conv3d_1 = nn.Conv3D(64, 64, kernel_size=(3, 3, 3), stride=1, padding=(0, 0, 0)) # self.conv3d_2 = nn.Conv3D(128, 128, kernel_size=(3, 3, 3), stride=1, padding=(0, 0, 0)) self.linear_1 = nn.Linear(256, 32) self.linear_2 = nn.Linear(32,8) self.linear_3 = nn.Linear(8, 32) self.linear_4 = nn.Linear(32,256) # self.linear_5 = nn.Linear(4,16) # self.linear_6 = nn.Linear(16,4) def forward(self, x_raw, delta_t): x = x_raw.permute(0, 2, 3, 1) x = self.linear_1(x) x = F.relu(x) x = self.linear_2(x) x = F.relu(x) # x = self.linear_6(x) # x = F.relu(x) shape_a, shape_b, shape_c, shape_d = x.shape x = x.reshape(shape_a, 1, shape_b * shape_c * shape_d) for i in range(shape_a): x_temp = x[i] if i != 0: (h_temp, c_temp) = self.lstm_layer(x_temp, (h_temp, c_temp)) else: (h_temp, c_temp) = self.lstm_layer(x_temp) # if delta_t < self.num_layers: # x = x[1][int(delta_t)] # else: # x = x[1][self.num_layers - 1] for j in range(int(delta_t)): x_temp = c_temp (h_temp, c_temp) = self.lstm_layer(x_temp, (h_temp, c_temp)) x = h_temp x = x.reshape(32,32,8) # x = self.linear_5(x) # x = F.relu(x) x = self.linear_3(x) x = F.relu(x) x = self.linear_4(x) x = F.relu(x) x = x.permute(2, 0, 1) x = torch.unsqueeze(x, 0) return x class CellClassification(nn.Module): def __init__(self, category_num=5): super(CellClassification, self).__init__() self.conv1 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1) self.conv2 = nn.Conv2d(32, category_num, kernel_size=1, stride=1, padding=0) self.bn1 = nn.BatchNorm2d(32) def forward(self, x): x = F.relu(self.bn1(self.conv1(x))) x = self.conv2(x) return x class StateEstimation(nn.Module): def __init__(self, motion_category_num=2): super(StateEstimation, self).__init__() self.conv1 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1) self.conv2 = nn.Conv2d(32, motion_category_num, kernel_size=1, stride=1, padding=0) self.bn1 = nn.BatchNorm2d(32) def forward(self, x): x = F.relu(self.bn1(self.conv1(x))) x = self.conv2(x) return x class CatTime(nn.Module): def __init__(self): super(CatTime, self).__init__() self.linear1 = nn.Linear(1,32) self.linear2 = nn.Linear(32,1024) self.conv1 = nn.Conv2d(1, 8, kernel_size=3, stride=1, padding=1) self.conv2 = nn.Conv2d(8, 32, kernel_size=3, stride=1, padding=1) self.conv3 = nn.Conv2d(32, 128, kernel_size=1, stride=1, padding=0) self.conv4 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1) # self.bn1 = nn.BatchNorm2d(512) def forward(self, x, delta_t): count = 0 a, b, c, d = x.size() x_te = torch.ones(a,1,c,d).to(x.device) x_t = torch.ones(x.size()) for i in range(len(delta_t)): for j in range(len(delta_t[0])): if delta_t[i][j] != 0: t = delta_t[i][j] * torch.ones((1,1)).to(x.device) # print(t[0]) t = F.relu(self.linear1(t)) t = F.relu(self.linear2(t)) t = t.reshape(32,32) x_te[count][0] = t # t = F.relu(self.conv1(t)) # t = F.relu(self.conv2(t)) # t = F.relu(self.conv3(t)) x_te = F.relu(self.conv1(x_te)) x_te = F.relu(self.conv2(x_te)) x_te = F.relu(self.conv3(x_te)) x = F.relu(self.conv4(x)) x_t = torch.cat((x,x_te),dim = 1) return x_t # class CatTime(nn.Module): # def __init__(self): # super(CatTime, self).__init__() # 
# self.conv1 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1) # # self.conv2 = nn.Conv2d(32, 2 * seq_len, kernel_size=1, stride=1, padding=0) # # self.bn1 = nn.BatchNorm2d(512) # def forward(self, x, delta_t): # a,b,c,d = x.size() # y = torch.ones((a,1,c,d)).to(x.device) # count = 0 # for i in range(len(delta_t)): # for j in range(len(delta_t[0])): # if delta_t[i][j] != 0: # y[count] = delta_t[i][j] * y[count] # count += 1 # x = torch.cat((x,y),dim = 1) # return x class ModulatedTime(nn.Module): def __init__(self, input_channel = 128): super(ModulatedTime, self).__init__() self.input_channel = input_channel self.conv1_channel = int(self.input_channel / 2) self.conv2_channel = int(self.conv1_channel / 2) # self.ratio = math.sqrt() self.conv1 = nn.Conv2d(self.input_channel, self.conv1_channel, kernel_size=3, stride=1, padding=1) self.conv2 = nn.Conv2d(self.conv1_channel, self.conv2_channel, kernel_size=3, stride=1, padding=1) self.convl1 = nn.Conv2d(1, 8, kernel_size=3, stride=1, padding=1) self.convl2 = nn.Conv2d(8,self.conv2_channel, kernel_size=3, stride=1, padding=1) self.conv3 = nn.Conv2d(int(2 * self.conv2_channel), 8, kernel_size = 3, stride=1, padding = 1) # self.linear3 = nn.Linear(16,8) self.convl4 = nn.Conv2d(8, 1, kernel_size=3, stride=1, padding=1) # self.bnl1 = nn.BatchNorm2d(8) # self.bnl2 = nn.BatchNorm2d(self.conv2_channel) # self.bnc1 = nn.BatchNorm2d(self.conv1_channel) # self.bnc2 = nn.BatchNorm2d(self.conv2_channel) # self.bnc3 = nn.BatchNorm2d(8) # self.bn4 = nn.BatchNorm2d(1) def forward(self, x, delta_t): a,b,c,d = x.size() y = torch.ones((1,1,c,d)).to(x.device) t_y = F.relu(self.convl1(y)) t_y = F.relu(self.convl2(t_y)) t_x = F.relu(self.conv1(x)) t_x = F.relu(self.conv2(t_x)) t_xy = torch.cat([t_x, t_y], 1) t_xy = F.relu(self.conv3(t_xy)) # t_y = F.relu(self.bn3(self.linear3(y))) t_xy = torch.sigmoid(self.convl4(t_xy)) return t_xy class MotionPrediction(nn.Module): def __init__(self, seq_len = 256, forecast_num = 1): super(MotionPrediction, self).__init__() self.conv1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1) self.conv2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) # self.conv3 = nn.Conv2d(512, seq_len, kernel_size=3, stride=1, padding=1) self.linear1 = nn.Linear(512,seq_len) self.bn1 = nn.BatchNorm2d(512) self.bn2 = nn.BatchNorm2d(512) self.bn3 = nn.BatchNorm2d(seq_len) def forward(self, x): x = F.relu(self.bn1(self.conv1(x))) x = F.relu(self.bn2(self.conv2(x))) x = x.permute(0,2,3,1) x = self.linear1(x) x = x.permute(0,3,1,2) x = F.relu(self.bn3(x)) # x = self.conv2(x) return x class Conv3D(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride, padding): super(Conv3D, self).__init__() self.conv3d = nn.Conv3d(in_channel, out_channel, kernel_size=kernel_size, stride=stride, padding=padding) self.bn3d = nn.BatchNorm3d(out_channel) def forward(self, x): # input x: (batch, seq, c, h, w) x = x.permute(0, 2, 1, 3, 4).contiguous() # (batch, c, seq_len, h, w) x = F.relu(self.bn3d(self.conv3d(x))) x = x.permute(0, 2, 1, 3, 4).contiguous() # (batch, seq_len, c, h, w) return x class MapExtractor(nn.Module): def __init__(self, map_channel=8): super(MapExtractor, self).__init__() self.conv_pre_1 = nn.Conv2d(map_channel, 16, kernel_size=3, stride=1, padding=1) self.conv_pre_2 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1) self.bn_pre_1 = nn.BatchNorm2d(16) self.bn_pre_2 = nn.BatchNorm2d(16) self.conv1_1 = nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1) self.conv1_2 = nn.Conv2d(32, 32, kernel_size=3, 
stride=1, padding=1) self.conv2_1 = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1) self.conv2_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) # self.conv3_1 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1) # self.conv3_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) # self.conv4_1 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) # self.conv4_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) # self.conv5_1 = nn.Conv2d(256 + 128, 128, kernel_size=3, stride=1, padding=1) # self.conv5_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) # self.conv6_1 = nn.Conv2d(128 + 64, 64, kernel_size=3, stride=1, padding=1) # self.conv6_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv7_1 = nn.Conv2d(64 + 32, 32, kernel_size=3, stride=1, padding=1) self.conv7_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1) self.conv8_1 = nn.Conv2d(32 + 16, 16, kernel_size=3, stride=1, padding=1) self.conv8_2 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1) self.bn1_1 = nn.BatchNorm2d(32) self.bn1_2 = nn.BatchNorm2d(32) self.bn2_1 = nn.BatchNorm2d(64) self.bn2_2 = nn.BatchNorm2d(64) self.bn3_1 = nn.BatchNorm2d(128) self.bn3_2 = nn.BatchNorm2d(128) self.bn4_1 = nn.BatchNorm2d(256) self.bn4_2 = nn.BatchNorm2d(256) self.bn5_1 = nn.BatchNorm2d(128) self.bn5_2 = nn.BatchNorm2d(128) self.bn6_1 = nn.BatchNorm2d(64) self.bn6_2 = nn.BatchNorm2d(64) self.bn7_1 = nn.BatchNorm2d(32) self.bn7_2 = nn.BatchNorm2d(32) self.bn8_1 = nn.BatchNorm2d(16) self.bn8_2 = nn.BatchNorm2d(16) def forward(self, x): x = x.view(-1, x.size(-3), x.size(-2), x.size(-1)) x = F.relu(self.bn_pre_1(self.conv_pre_1(x))) x = F.relu(self.bn_pre_2(self.conv_pre_2(x))) # -------------------------------- Encoder Path -------------------------------- # -- STC block 1 x_1 = F.relu(self.bn1_1(self.conv1_1(x))) x_1 = F.relu(self.bn1_2(self.conv1_2(x_1))) # -- STC block 2 x_2 = F.relu(self.bn2_1(self.conv2_1(x_1))) x_2 = F.relu(self.bn2_2(self.conv2_2(x_2))) # # -- STC block 3 # x_3 = F.relu(self.bn3_1(self.conv3_1(x_2))) # x_3 = F.relu(self.bn3_2(self.conv3_2(x_3))) # # -- STC block 4 # x_4 = F.relu(self.bn4_1(self.conv4_1(x_3))) # x_4 = F.relu(self.bn4_2(self.conv4_2(x_4))) # -------------------------------- Decoder Path -------------------------------- # x_5 = F.relu(self.bn5_1(self.conv5_1(torch.cat((F.interpolate(x_4, scale_factor=(2, 2)), x_3), dim=1)))) # x_5 = F.relu(self.bn5_2(self.conv5_2(x_5))) # x_6 = F.relu(self.bn6_1(self.conv6_1(torch.cat((F.interpolate(x_5, scale_factor=(2, 2)), x_2), dim=1)))) # x_6 = F.relu(self.bn6_2(self.conv6_2(x_6))) x_7 = F.relu(self.bn7_1(self.conv7_1(torch.cat((F.interpolate(x_2, scale_factor=(2, 2)), x_1), dim=1)))) x_7 = F.relu(self.bn7_2(self.conv7_2(x_7))) x_8 = F.relu(self.bn8_1(self.conv8_1(torch.cat((F.interpolate(x_1, scale_factor=(2, 2)), x), dim=1)))) res_x = F.relu(self.bn8_2(self.conv8_2(x_8))) return res_x class STPN(nn.Module): def __init__(self, height_feat_size=13): super(STPN, self).__init__() self.conv_pre_1 = nn.Conv2d(height_feat_size, 32, kernel_size=3, stride=1, padding=1) self.conv_pre_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1) self.bn_pre_1 = nn.BatchNorm2d(32) self.bn_pre_2 = nn.BatchNorm2d(32) # self.conv3d_1 = Conv3D(64, 64, kernel_size=(3, 1, 1), stride=1, padding=(0, 0, 0)) # self.conv3d_2 = Conv3D(128, 128, kernel_size=(3, 1, 1), stride=1, padding=(0, 0, 0)) self.conv3d_1 = Conv3D(64, 64, kernel_size=(1, 1, 1), stride=1, padding=(0, 0, 0)) self.conv3d_2 = Conv3D(128, 128, kernel_size=(1, 
1, 1), stride=1, padding=(0, 0, 0)) self.conv1_1 = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_1 = nn.Conv2d(512 + 256, 256, kernel_size=3, stride=1, padding=1) self.conv5_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv6_1 = nn.Conv2d(256 + 128, 128, kernel_size=3, stride=1, padding=1) self.conv6_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv7_1 = nn.Conv2d(128 + 64, 64, kernel_size=3, stride=1, padding=1) self.conv7_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv8_1 = nn.Conv2d(64 + 32, 32, kernel_size=3, stride=1, padding=1) self.conv8_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1) self.bn1_1 = nn.BatchNorm2d(64) self.bn1_2 = nn.BatchNorm2d(64) self.bn2_1 = nn.BatchNorm2d(128) self.bn2_2 = nn.BatchNorm2d(128) self.bn3_1 = nn.BatchNorm2d(256) self.bn3_2 = nn.BatchNorm2d(256) self.bn4_1 = nn.BatchNorm2d(512) self.bn4_2 = nn.BatchNorm2d(512) self.bn5_1 = nn.BatchNorm2d(256) self.bn5_2 = nn.BatchNorm2d(256) self.bn6_1 = nn.BatchNorm2d(128) self.bn6_2 = nn.BatchNorm2d(128) self.bn7_1 = nn.BatchNorm2d(64) self.bn7_2 = nn.BatchNorm2d(64) self.bn8_1 = nn.BatchNorm2d(32) self.bn8_2 = nn.BatchNorm2d(32) def forward(self, x): batch, seq, z, h, w = x.size() x = x.view(-1, x.size(-3), x.size(-2), x.size(-1)) x = F.relu(self.bn_pre_1(self.conv_pre_1(x))) x = F.relu(self.bn_pre_2(self.conv_pre_2(x))) # -------------------------------- Encoder Path -------------------------------- # -- STC block 1 x_1 = F.relu(self.bn1_1(self.conv1_1(x))) x_1 = F.relu(self.bn1_2(self.conv1_2(x_1))) x_1 = x_1.view(batch, -1, x_1.size(1), x_1.size(2), x_1.size(3)).contiguous() # (batch, seq, c, h, w) x_1 = self.conv3d_1(x_1) x_1 = x_1.view(-1, x_1.size(2), x_1.size(3), x_1.size(4)).contiguous() # (batch * seq, c, h, w) # -- STC block 2 x_2 = F.relu(self.bn2_1(self.conv2_1(x_1))) x_2 = F.relu(self.bn2_2(self.conv2_2(x_2))) x_2 = x_2.view(batch, -1, x_2.size(1), x_2.size(2), x_2.size(3)).contiguous() # (batch, seq, c, h, w) x_2 = self.conv3d_2(x_2) x_2 = x_2.view(-1, x_2.size(2), x_2.size(3), x_2.size(4)).contiguous() # (batch * seq, c, h, w), seq = 1 # -- STC block 3 x_3 = F.relu(self.bn3_1(self.conv3_1(x_2))) x_3 = F.relu(self.bn3_2(self.conv3_2(x_3))) # -- STC block 4 x_4 = F.relu(self.bn4_1(self.conv4_1(x_3))) x_4 = F.relu(self.bn4_2(self.conv4_2(x_4))) # -------------------------------- Decoder Path -------------------------------- x_5 = F.relu(self.bn5_1(self.conv5_1(torch.cat((F.interpolate(x_4, scale_factor=(2, 2)), x_3), dim=1)))) x_5 = F.relu(self.bn5_2(self.conv5_2(x_5))) x_2 = x_2.view(batch, -1, x_2.size(1), x_2.size(2), x_2.size(3)) x_2 = x_2.permute(0, 2, 1, 3, 4).contiguous() x_2 = F.adaptive_max_pool3d(x_2, (1, None, None)) x_2 = x_2.permute(0, 2, 1, 3, 4).contiguous() x_2 = x_2.view(-1, x_2.size(2), x_2.size(3), x_2.size(4)).contiguous() x_6 = F.relu(self.bn6_1(self.conv6_1(torch.cat((F.interpolate(x_5, scale_factor=(2, 2)), x_2), dim=1)))) x_6 = F.relu(self.bn6_2(self.conv6_2(x_6))) x_1 = x_1.view(batch, -1, x_1.size(1), 
x_1.size(2), x_1.size(3)) x_1 = x_1.permute(0, 2, 1, 3, 4).contiguous() x_1 = F.adaptive_max_pool3d(x_1, (1, None, None)) x_1 = x_1.permute(0, 2, 1, 3, 4).contiguous() x_1 = x_1.view(-1, x_1.size(2), x_1.size(3), x_1.size(4)).contiguous() x_7 = F.relu(self.bn7_1(self.conv7_1(torch.cat((F.interpolate(x_6, scale_factor=(2, 2)), x_1), dim=1)))) x_7 = F.relu(self.bn7_2(self.conv7_2(x_7))) x = x.view(batch, -1, x.size(1), x.size(2), x.size(3)) x = x.permute(0, 2, 1, 3, 4).contiguous() x = F.adaptive_max_pool3d(x, (1, None, None)) x = x.permute(0, 2, 1, 3, 4).contiguous() x = x.view(-1, x.size(2), x.size(3), x.size(4)).contiguous() x_8 = F.relu(self.bn8_1(self.conv8_1(torch.cat((F.interpolate(x_7, scale_factor=(2, 2)), x), dim=1)))) res_x = F.relu(self.bn8_2(self.conv8_2(x_8))) return res_x class STPN_MotionNet(nn.Module): def __init__(self, height_feat_size=256, forecast_num=3): super(STPN_MotionNet, self).__init__() self.conv_pre_1 = nn.Conv2d(height_feat_size, height_feat_size * 2, kernel_size=3, stride=1, padding=1) self.conv_pre_2 = nn.Conv2d(height_feat_size * 2, height_feat_size * 2, kernel_size=3, stride=1, padding=1) self.bn_pre_1 = nn.BatchNorm2d(height_feat_size * 2) self.bn_pre_2 = nn.BatchNorm2d(height_feat_size * 2) self.conv3d_1 = Conv3D(height_feat_size * 4, height_feat_size * 4, kernel_size=(forecast_num, 1, 1), stride=1, padding=(0, 0, 0)) self.conv3d_2 = Conv3D(height_feat_size * 8, height_feat_size * 8, kernel_size=(1, 1, 1), stride=1, padding=(0, 0, 0)) # self.conv3d_1 = Conv3D(64, 64, kernel_size=(1, 1, 1), stride=1, padding=(0, 0, 0)) # self.conv3d_2 = Conv3D(128, 128, kernel_size=(1, 1, 1), stride=1, padding=(0, 0, 0)) self.conv1_1 = nn.Conv2d(height_feat_size * 2, height_feat_size * 4, kernel_size=3, stride=2, padding=1) self.conv1_2 = nn.Conv2d(height_feat_size * 4, height_feat_size * 4, kernel_size=3, stride=1, padding=1) self.conv2_1 = nn.Conv2d(height_feat_size * 4, height_feat_size * 8, kernel_size=3, stride=2, padding=1) self.conv2_2 = nn.Conv2d(height_feat_size * 8, height_feat_size * 8, kernel_size=3, stride=1, padding=1) self.conv3_1 = nn.Conv2d(int(height_feat_size / 2), height_feat_size * 1, kernel_size=3, stride=2, padding=1) self.conv3_2 = nn.Conv2d(height_feat_size * 1, height_feat_size * 1, kernel_size=3, stride=1, padding=1) self.conv4_1 = nn.Conv2d(height_feat_size * 1, height_feat_size * 2, kernel_size=3, stride=2, padding=1) self.conv4_2 = nn.Conv2d(height_feat_size * 2, height_feat_size * 2, kernel_size=3, stride=1, padding=1) self.conv5_1 = nn.Conv2d(height_feat_size * 3, height_feat_size * 1, kernel_size=3, stride=1, padding=1) self.conv5_2 = nn.Conv2d(height_feat_size * 1, height_feat_size * 1, kernel_size=3, stride=1, padding=1) self.conv6_1 = nn.Conv2d(int(height_feat_size * 3 / 2), int(height_feat_size / 2), kernel_size=3, stride=1, padding=1) self.conv6_2 = nn.Conv2d(int(height_feat_size / 2), int(height_feat_size / 2), kernel_size=3, stride=1, padding=1) self.conv7_1 = nn.Conv2d(height_feat_size * 12, height_feat_size * 4, kernel_size=3, stride=1, padding=1) self.conv7_2 = nn.Conv2d(height_feat_size * 4, height_feat_size * 4, kernel_size=3, stride=1, padding=1) self.conv8_1 = nn.Conv2d(height_feat_size * 6, height_feat_size * 2, kernel_size=3, stride=1, padding=1) self.conv8_2 = nn.Conv2d(height_feat_size * 2, height_feat_size , kernel_size=3, stride=1, padding=1) self.bn1_1 = nn.BatchNorm2d(height_feat_size * 4) self.bn1_2 = nn.BatchNorm2d(height_feat_size * 4) self.bn2_1 = nn.BatchNorm2d(height_feat_size * 8) self.bn2_2 = 
nn.BatchNorm2d(height_feat_size * 8) self.bn3_1 = nn.BatchNorm2d(height_feat_size * 1) self.bn3_2 = nn.BatchNorm2d(height_feat_size * 1) self.bn4_1 = nn.BatchNorm2d(height_feat_size * 2) self.bn4_2 = nn.BatchNorm2d(height_feat_size * 2) self.bn5_1 = nn.BatchNorm2d(height_feat_size * 1) self.bn5_2 = nn.BatchNorm2d(height_feat_size * 1) self.bn6_1 = nn.BatchNorm2d(int(height_feat_size / 2)) self.bn6_2 = nn.BatchNorm2d(int(height_feat_size / 2)) self.bn7_1 = nn.BatchNorm2d(height_feat_size * 4) self.bn7_2 = nn.BatchNorm2d(height_feat_size * 4) self.bn8_1 = nn.BatchNorm2d(height_feat_size * 2) self.bn8_2 = nn.BatchNorm2d(height_feat_size) def forward(self, x): batch, seq, z, h, w = x.size() x = x.view(-1, x.size(-3), x.size(-2), x.size(-1)) x = F.relu(self.bn_pre_1(self.conv_pre_1(x))) x = F.relu(self.bn_pre_2(self.conv_pre_2(x))) # -------------------------------- Encoder Path -------------------------------- # -- STC block 1 x_1 = F.relu(self.bn1_1(self.conv1_1(x))) x_1 = F.relu(self.bn1_2(self.conv1_2(x_1))) x_1 = x_1.view(batch, -1, x_1.size(1), x_1.size(2), x_1.size(3)).contiguous() # (batch, seq, c, h, w) x_1 = self.conv3d_1(x_1) x_1 = x_1.view(-1, x_1.size(2), x_1.size(3), x_1.size(4)).contiguous() # (batch * seq, c, h, w) # -- STC block 2 x_2 = F.relu(self.bn2_1(self.conv2_1(x_1))) x_2 = F.relu(self.bn2_2(self.conv2_2(x_2))) x_2 = x_2.view(batch, -1, x_2.size(1), x_2.size(2), x_2.size(3)).contiguous() # (batch, seq, c, h, w) x_2 = self.conv3d_2(x_2) x_2 = x_2.view(-1, x_2.size(2), x_2.size(3), x_2.size(4)).contiguous() # (batch * seq, c, h, w), seq = 1 # -- STC block 3 # x_3 = F.relu(self.bn3_1(self.conv3_1(x_2))) # x_3 = F.relu(self.bn3_2(self.conv3_2(x_3))) # -- STC block 4 # x_4 = F.relu(self.bn4_1(self.conv4_1(x_3))) # x_4 = F.relu(self.bn4_2(self.conv4_2(x_4))) # x_4 = x_4.view(batch, -1, x_4.size(1), x_4.size(2), x_4.size(3)) # x_4 = x_4.permute(0, 2, 1, 3, 4).contiguous() # x_4 = F.adaptive_max_pool3d(x_4, (1, None, None)) # x_4 = x_4.permute(0, 2, 1, 3, 4).contiguous() # x_4 = x_4.view(-1, x_4.size(2), x_4.size(3), x_4.size(4)).contiguous() # -------------------------------- Decoder Path -------------------------------- # x_3 = x_3.view(batch, -1, x_3.size(1), x_3.size(2), x_3.size(3)) # x_3 = x_3.permute(0, 2, 1, 3, 4).contiguous() # x_3 = F.adaptive_max_pool3d(x_3, (1, None, None)) # x_3 = x_3.permute(0, 2, 1, 3, 4).contiguous() # x_3 = x_3.view(-1, x_3.size(2), x_3.size(3), x_3.size(4)).contiguous() # x_5 = F.relu(self.bn5_1(self.conv5_1(torch.cat((F.interpolate(x_4, scale_factor=(2, 2)), x_3), dim=1)))) # x_5 = F.relu(self.bn5_2(self.conv5_2(x_5))) x_2 = x_2.view(batch, -1, x_2.size(1), x_2.size(2), x_2.size(3)) x_2 = x_2.permute(0, 2, 1, 3, 4).contiguous() x_2 = F.adaptive_max_pool3d(x_2, (1, None, None)) x_2 = x_2.permute(0, 2, 1, 3, 4).contiguous() x_2 = x_2.view(-1, x_2.size(2), x_2.size(3), x_2.size(4)).contiguous() # x_6 = F.relu(self.bn6_1(self.conv6_1(torch.cat((F.interpolate(x_5, scale_factor=(2, 2)), x_2), dim=1)))) # x_6 = F.relu(self.bn6_2(self.conv6_2(x_6))) x_1 = x_1.view(batch, -1, x_1.size(1), x_1.size(2), x_1.size(3)) x_1 = x_1.permute(0, 2, 1, 3, 4).contiguous() x_1 = F.adaptive_max_pool3d(x_1, (1, None, None)) x_1 = x_1.permute(0, 2, 1, 3, 4).contiguous() x_1 = x_1.view(-1, x_1.size(2), x_1.size(3), x_1.size(4)).contiguous() x_7 = F.relu(self.bn7_1(self.conv7_1(torch.cat((F.interpolate(x_2, scale_factor=(2, 2)), x_1), dim=1)))) x_7 = F.relu(self.bn7_2(self.conv7_2(x_7))) x = x.view(batch, -1, x.size(1), x.size(2), x.size(3)) x = x.permute(0, 2, 1, 3, 
4).contiguous() x = F.adaptive_max_pool3d(x, (1, None, None)) x = x.permute(0, 2, 1, 3, 4).contiguous() x = x.view(-1, x.size(2), x.size(3), x.size(4)).contiguous() x_8 = F.relu(self.bn8_1(self.conv8_1(torch.cat((F.interpolate(x_7, scale_factor=(2, 2)), x), dim=1)))) res_x = F.relu(self.bn8_2(self.conv8_2(x_8))) return res_x class STPN_KD(nn.Module): def __init__(self, height_feat_size=13): super(STPN_KD, self).__init__() self.conv_pre_1 = nn.Conv2d(height_feat_size, 32, kernel_size=3, stride=1, padding=1) self.conv_pre_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1) self.bn_pre_1 = nn.BatchNorm2d(32) self.bn_pre_2 = nn.BatchNorm2d(32) # self.conv3d_1 = Conv3D(64, 64, kernel_size=(3, 1, 1), stride=1, padding=(0, 0, 0)) # self.conv3d_2 = Conv3D(128, 128, kernel_size=(3, 1, 1), stride=1, padding=(0, 0, 0)) self.conv3d_1 = Conv3D(64, 64, kernel_size=(1, 1, 1), stride=1, padding=(0, 0, 0)) self.conv3d_2 = Conv3D(128, 128, kernel_size=(1, 1, 1), stride=1, padding=(0, 0, 0)) self.conv1_1 = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_1 = nn.Conv2d(512 + 256, 256, kernel_size=3, stride=1, padding=1) self.conv5_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv6_1 = nn.Conv2d(256 + 128, 128, kernel_size=3, stride=1, padding=1) self.conv6_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv7_1 = nn.Conv2d(128 + 64, 64, kernel_size=3, stride=1, padding=1) self.conv7_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv8_1 = nn.Conv2d(64 + 32, 32, kernel_size=3, stride=1, padding=1) self.conv8_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1) self.bn1_1 = nn.BatchNorm2d(64) self.bn1_2 = nn.BatchNorm2d(64) self.bn2_1 = nn.BatchNorm2d(128) self.bn2_2 = nn.BatchNorm2d(128) self.bn3_1 = nn.BatchNorm2d(256) self.bn3_2 = nn.BatchNorm2d(256) self.bn4_1 = nn.BatchNorm2d(512) self.bn4_2 = nn.BatchNorm2d(512) self.bn5_1 = nn.BatchNorm2d(256) self.bn5_2 = nn.BatchNorm2d(256) self.bn6_1 = nn.BatchNorm2d(128) self.bn6_2 = nn.BatchNorm2d(128) self.bn7_1 = nn.BatchNorm2d(64) self.bn7_2 = nn.BatchNorm2d(64) self.bn8_1 = nn.BatchNorm2d(32) self.bn8_2 = nn.BatchNorm2d(32) def forward(self, x): batch, seq, z, h, w = x.size() x = x.view(-1, x.size(-3), x.size(-2), x.size(-1)) x = F.relu(self.bn_pre_1(self.conv_pre_1(x))) x = F.relu(self.bn_pre_2(self.conv_pre_2(x))) # -------------------------------- Encoder Path -------------------------------- # -- STC block 1 x_1 = F.relu(self.bn1_1(self.conv1_1(x))) x_1 = F.relu(self.bn1_2(self.conv1_2(x_1))) x_1 = x_1.view(batch, -1, x_1.size(1), x_1.size(2), x_1.size(3)).contiguous() # (batch, seq, c, h, w) x_1 = self.conv3d_1(x_1) x_1 = x_1.view(-1, x_1.size(2), x_1.size(3), x_1.size(4)).contiguous() # (batch * seq, c, h, w) # -- STC block 2 x_2 = F.relu(self.bn2_1(self.conv2_1(x_1))) x_2 = F.relu(self.bn2_2(self.conv2_2(x_2))) x_2 = x_2.view(batch, -1, x_2.size(1), x_2.size(2), x_2.size(3)).contiguous() # (batch, seq, c, h, w) x_2 = self.conv3d_2(x_2) x_2 = x_2.view(-1, x_2.size(2), x_2.size(3), 
x_2.size(4)).contiguous() # (batch * seq, c, h, w), seq = 1 # -- STC block 3 x_3 = F.relu(self.bn3_1(self.conv3_1(x_2))) x_3 = F.relu(self.bn3_2(self.conv3_2(x_3))) # -- STC block 4 x_4 = F.relu(self.bn4_1(self.conv4_1(x_3))) x_4 = F.relu(self.bn4_2(self.conv4_2(x_4))) # -------------------------------- Decoder Path -------------------------------- x_5 = F.relu(self.bn5_1(self.conv5_1(torch.cat((F.interpolate(x_4, scale_factor=(2, 2)), x_3), dim=1)))) x_5 = F.relu(self.bn5_2(self.conv5_2(x_5))) x_2 = x_2.view(batch, -1, x_2.size(1), x_2.size(2), x_2.size(3)) x_2 = x_2.permute(0, 2, 1, 3, 4).contiguous() x_2 = F.adaptive_max_pool3d(x_2, (1, None, None)) x_2 = x_2.permute(0, 2, 1, 3, 4).contiguous() x_2 = x_2.view(-1, x_2.size(2), x_2.size(3), x_2.size(4)).contiguous() x_6 = F.relu(self.bn6_1(self.conv6_1(torch.cat((F.interpolate(x_5, scale_factor=(2, 2)), x_2), dim=1)))) x_6 = F.relu(self.bn6_2(self.conv6_2(x_6))) x_1 = x_1.view(batch, -1, x_1.size(1), x_1.size(2), x_1.size(3)) x_1 = x_1.permute(0, 2, 1, 3, 4).contiguous() x_1 = F.adaptive_max_pool3d(x_1, (1, None, None)) x_1 = x_1.permute(0, 2, 1, 3, 4).contiguous() x_1 = x_1.view(-1, x_1.size(2), x_1.size(3), x_1.size(4)).contiguous() x_7 = F.relu(self.bn7_1(self.conv7_1(torch.cat((F.interpolate(x_6, scale_factor=(2, 2)), x_1), dim=1)))) x_7 = F.relu(self.bn7_2(self.conv7_2(x_7))) x = x.view(batch, -1, x.size(1), x.size(2), x.size(3)) x = x.permute(0, 2, 1, 3, 4).contiguous() x = F.adaptive_max_pool3d(x, (1, None, None)) x = x.permute(0, 2, 1, 3, 4).contiguous() x = x.view(-1, x.size(2), x.size(3), x.size(4)).contiguous() x_8 = F.relu(self.bn8_1(self.conv8_1(torch.cat((F.interpolate(x_7, scale_factor=(2, 2)), x), dim=1)))) res_x = F.relu(self.bn8_2(self.conv8_2(x_8))) return res_x, x_7, x_6, x_5, x_3 class MotionNet(nn.Module): def __init__(self, out_seq_len=256, motion_category_num=2, height_feat_size=256, forecast_num = 3): super(MotionNet, self).__init__() self.out_seq_len = out_seq_len self.forecast_num = forecast_num self.cell_classify = CellClassification() self.motion_pred = MotionPrediction(seq_len=self.out_seq_len) self.state_classify = StateEstimation(motion_category_num=motion_category_num) self.stpn = STPN_MotionNet(height_feat_size=height_feat_size,forecast_num = forecast_num) self.cattime = CatTime() # self.cattime = ModulatedTime( ) def forward(self, bevs, delta_t): # bevs = bevs.permute(0, 1, 2, 3, 4) # (Batch, seq, z, h, w) # Backbone network x = self.stpn(bevs) # Cell Classification head # cell_class_pred = self.cell_classify(x) # Motion State Classification head # state_class_pred = self.state_classify(x) # Motion Displacement prediction x = self.cattime(x,delta_t) disp = self.motion_pred(x) # disp = disp.view(-1, 2, x.size(-2), x.size(-1)) # return disp, cell_class_pred, state_class_pred return disp # For MGDA loss computation class FeatEncoder(nn.Module): def __init__(self, height_feat_size=13): super(FeatEncoder, self).__init__() self.stpn = STPN(height_feat_size=height_feat_size) def forward(self, bevs): bevs = bevs.permute(0, 1, 4, 2, 3) # (Batch, seq, z, h, w) x = self.stpn(bevs) return x class MotionNetMGDA(nn.Module): def __init__(self, out_seq_len=20, motion_category_num=2): super(MotionNetMGDA, self).__init__() self.out_seq_len = out_seq_len self.cell_classify = CellClassification() self.motion_pred = MotionPrediction(seq_len=self.out_seq_len) self.state_classify = StateEstimation(motion_category_num=motion_category_num) def forward(self, stpn_out): # Cell Classification head cell_class_pred = 
self.cell_classify(stpn_out) # Motion State Classification head state_class_pred = self.state_classify(stpn_out) # Motion Displacement prediction disp = self.motion_pred(stpn_out) disp = disp.view(-1, 2, stpn_out.size(-2), stpn_out.size(-1)) return disp, cell_class_pred, state_class_pred ''''''''''''''''''''' Added by Yiming ''''''''''''''''''''' class conv2DBatchNormRelu(nn.Module): def __init__( self, in_channels, n_filters, k_size, stride, padding, bias=True, dilation=1, is_batchnorm=True, ): super(conv2DBatchNormRelu, self).__init__() conv_mod = nn.Conv2d( int(in_channels), int(n_filters), kernel_size=k_size, padding=padding, stride=stride, bias=bias, dilation=dilation, ) if is_batchnorm: self.cbr_unit = nn.Sequential( conv_mod, nn.BatchNorm2d(int(n_filters)), nn.ReLU(inplace=True) ) else: self.cbr_unit = nn.Sequential(conv_mod, nn.ReLU(inplace=True)) def forward(self, inputs): outputs = self.cbr_unit(inputs) return outputs class Sparsemax(nn.Module): """Sparsemax function.""" def __init__(self, dim=None): """Initialize sparsemax activation Args: dim (int, optional): The dimension over which to apply the sparsemax function. """ super(Sparsemax, self).__init__() self.dim = -1 if dim is None else dim def forward(self, input): """Forward function. Args: input (torch.Tensor): Input tensor. First dimension should be the batch size Returns: torch.Tensor: [batch_size x number_of_logits] Output tensor """ # Sparsemax currently only handles 2-dim tensors, # so we reshape and reshape back after sparsemax original_size = input.size() input = input.view(-1, input.size(self.dim)) dim = 1 number_of_logits = input.size(dim) # Translate input by max for numerical stability input = input - torch.max(input, dim=dim, keepdim=True)[0].expand_as(input) # Sort input in descending order. 
# (NOTE: Can be replaced with linear time selection method described here: # http://stanford.edu/~jduchi/projects/DuchiShSiCh08.html) zs = torch.sort(input=input, dim=dim, descending=True)[0] range = torch.range(start=1, end=number_of_logits, device=input.device).view(1, -1) range = range.expand_as(zs) # Determine sparsity of projection bound = 1 + range * zs cumulative_sum_zs = torch.cumsum(zs, dim) is_gt = torch.gt(bound, cumulative_sum_zs).type(input.type()) k = torch.max(is_gt * range, dim, keepdim=True)[0] # Compute threshold function zs_sparse = is_gt * zs # Compute taus taus = (torch.sum(zs_sparse, dim, keepdim=True) - 1) / k taus = taus.expand_as(input) # Sparsemax self.output = torch.max(torch.zeros_like(input), input - taus) output = self.output.view(original_size) return output def backward(self, grad_output): """Backward function.""" dim = 1 nonzeros = torch.ne(self.output, 0) sum = torch.sum(grad_output * nonzeros, dim=dim) / torch.sum(nonzeros, dim=dim) self.grad_input = nonzeros * (grad_output - sum.expand_as(grad_output)) return self.grad_input class lidar_encoder(nn.Module): def __init__(self, height_feat_size=13): super(lidar_encoder, self).__init__() self.conv_pre_1 = nn.Conv2d(height_feat_size, 32, kernel_size=3, stride=1, padding=1) self.conv_pre_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1) self.bn_pre_1 = nn.BatchNorm2d(32) self.bn_pre_2 = nn.BatchNorm2d(32) # self.conv3d_1 = Conv3D(64, 64, kernel_size=(3, 1, 1), stride=1, padding=(0, 0, 0)) # self.conv3d_2 = Conv3D(128, 128, kernel_size=(3, 1, 1), stride=1, padding=(0, 0, 0)) self.conv3d_1 = Conv3D(64, 64, kernel_size=(1, 1, 1), stride=1, padding=(0, 0, 0)) self.conv3d_2 = Conv3D(128, 128, kernel_size=(1, 1, 1), stride=1, padding=(0, 0, 0)) self.conv1_1 = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_1 = nn.Conv2d(512 + 256, 256, kernel_size=3, stride=1, padding=1) self.conv5_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv6_1 = nn.Conv2d(256 + 128, 128, kernel_size=3, stride=1, padding=1) self.conv6_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv7_1 = nn.Conv2d(128 + 64, 64, kernel_size=3, stride=1, padding=1) self.conv7_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv8_1 = nn.Conv2d(64 + 32, 32, kernel_size=3, stride=1, padding=1) self.conv8_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1) self.bn1_1 = nn.BatchNorm2d(64) self.bn1_2 = nn.BatchNorm2d(64) self.bn2_1 = nn.BatchNorm2d(128) self.bn2_2 = nn.BatchNorm2d(128) self.bn3_1 = nn.BatchNorm2d(256) self.bn3_2 = nn.BatchNorm2d(256) self.bn4_1 = nn.BatchNorm2d(512) self.bn4_2 = nn.BatchNorm2d(512) self.bn5_1 = nn.BatchNorm2d(256) self.bn5_2 = nn.BatchNorm2d(256) self.bn6_1 = nn.BatchNorm2d(128) self.bn6_2 = nn.BatchNorm2d(128) self.bn7_1 = nn.BatchNorm2d(64) self.bn7_2 = nn.BatchNorm2d(64) self.bn8_1 = nn.BatchNorm2d(32) self.bn8_2 = nn.BatchNorm2d(32) def forward(self, x): batch, seq, z, h, w = x.size() x = x.view(-1, x.size(-3), x.size(-2), x.size(-1)) x = 
F.relu(self.bn_pre_1(self.conv_pre_1(x))) x = F.relu(self.bn_pre_2(self.conv_pre_2(x))) # -------------------------------- Encoder Path -------------------------------- # -- STC block 1 x_1 = F.relu(self.bn1_1(self.conv1_1(x))) x_1 = F.relu(self.bn1_2(self.conv1_2(x_1))) x_1 = x_1.view(batch, -1, x_1.size(1), x_1.size(2), x_1.size(3)).contiguous() # (batch, seq, c, h, w) x_1 = self.conv3d_1(x_1) x_1 = x_1.view(-1, x_1.size(2), x_1.size(3), x_1.size(4)).contiguous() # (batch * seq, c, h, w) # -- STC block 2 x_2 = F.relu(self.bn2_1(self.conv2_1(x_1))) x_2 = F.relu(self.bn2_2(self.conv2_2(x_2))) x_2 = x_2.view(batch, -1, x_2.size(1), x_2.size(2), x_2.size(3)).contiguous() # (batch, seq, c, h, w) x_2 = self.conv3d_2(x_2) x_2 = x_2.view(-1, x_2.size(2), x_2.size(3), x_2.size(4)).contiguous() # (batch * seq, c, h, w), seq = 1 # -- STC block 3 x_3 = F.relu(self.bn3_1(self.conv3_1(x_2))) x_3 = F.relu(self.bn3_2(self.conv3_2(x_3))) # -- STC block 4 x_4 = F.relu(self.bn4_1(self.conv4_1(x_3))) x_4 = F.relu(self.bn4_2(self.conv4_2(x_4))) return x, x_1, x_2, x_3, x_4 class lidar_decoder(nn.Module): def __init__(self, height_feat_size=13): super(lidar_decoder, self).__init__() self.conv_pre_1 = nn.Conv2d(height_feat_size, 32, kernel_size=3, stride=1, padding=1) self.conv_pre_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1) self.bn_pre_1 = nn.BatchNorm2d(32) self.bn_pre_2 = nn.BatchNorm2d(32) # self.conv3d_1 = Conv3D(64, 64, kernel_size=(3, 1, 1), stride=1, padding=(0, 0, 0)) # self.conv3d_2 = Conv3D(128, 128, kernel_size=(3, 1, 1), stride=1, padding=(0, 0, 0)) self.conv3d_1 = Conv3D(64, 64, kernel_size=(1, 1, 1), stride=1, padding=(0, 0, 0)) self.conv3d_2 = Conv3D(128, 128, kernel_size=(1, 1, 1), stride=1, padding=(0, 0, 0)) self.conv1_1 = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_1 = nn.Conv2d(512 + 256, 256, kernel_size=3, stride=1, padding=1) self.conv5_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv6_1 = nn.Conv2d(256 + 128, 128, kernel_size=3, stride=1, padding=1) self.conv6_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv7_1 = nn.Conv2d(128 + 64, 64, kernel_size=3, stride=1, padding=1) self.conv7_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv8_1 = nn.Conv2d(64 + 32, 32, kernel_size=3, stride=1, padding=1) self.conv8_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1) self.bn1_1 = nn.BatchNorm2d(64) self.bn1_2 = nn.BatchNorm2d(64) self.bn2_1 = nn.BatchNorm2d(128) self.bn2_2 = nn.BatchNorm2d(128) self.bn3_1 = nn.BatchNorm2d(256) self.bn3_2 = nn.BatchNorm2d(256) self.bn4_1 = nn.BatchNorm2d(512) self.bn4_2 = nn.BatchNorm2d(512) self.bn5_1 = nn.BatchNorm2d(256) self.bn5_2 = nn.BatchNorm2d(256) self.bn6_1 = nn.BatchNorm2d(128) self.bn6_2 = nn.BatchNorm2d(128) self.bn7_1 = nn.BatchNorm2d(64) self.bn7_2 = nn.BatchNorm2d(64) self.bn8_1 = nn.BatchNorm2d(32) self.bn8_2 = nn.BatchNorm2d(32) def forward(self, x, x_1, x_2, x_3, x_4, batch): # -------------------------------- Decoder Path -------------------------------- x_5 = 
F.relu(self.bn5_1(self.conv5_1(torch.cat((F.interpolate(x_4, scale_factor=(2, 2)), x_3), dim=1)))) x_5 = F.relu(self.bn5_2(self.conv5_2(x_5))) x_2 = x_2.view(batch, -1, x_2.size(1), x_2.size(2), x_2.size(3)) x_2 = x_2.permute(0, 2, 1, 3, 4).contiguous() #x_2 = F.adaptive_max_pool3d(x_2, (1, None, None)) x_2 = x_2.permute(0, 2, 1, 3, 4).contiguous() x_2 = x_2.view(-1, x_2.size(2), x_2.size(3), x_2.size(4)).contiguous() x_6 = F.relu(self.bn6_1(self.conv6_1(torch.cat((F.interpolate(x_5, scale_factor=(2, 2)), x_2), dim=1)))) x_6 = F.relu(self.bn6_2(self.conv6_2(x_6))) x_1 = x_1.view(batch, -1, x_1.size(1), x_1.size(2), x_1.size(3)) x_1 = x_1.permute(0, 2, 1, 3, 4).contiguous() #x_1 = F.adaptive_max_pool3d(x_1, (1, None, None)) x_1 = x_1.permute(0, 2, 1, 3, 4).contiguous() x_1 = x_1.view(-1, x_1.size(2), x_1.size(3), x_1.size(4)).contiguous() x_7 = F.relu(self.bn7_1(self.conv7_1(torch.cat((F.interpolate(x_6, scale_factor=(2, 2)), x_1), dim=1)))) x_7 = F.relu(self.bn7_2(self.conv7_2(x_7))) x = x.view(batch, -1, x.size(1), x.size(2), x.size(3)) x = x.permute(0, 2, 1, 3, 4).contiguous() #x = F.adaptive_max_pool3d(x, (1, None, None)) x = x.permute(0, 2, 1, 3, 4).contiguous() x = x.view(-1, x.size(2), x.size(3), x.size(4)).contiguous() x_8 = F.relu(self.bn8_1(self.conv8_1(torch.cat((F.interpolate(x_7, scale_factor=(2, 2)), x), dim=1)))) res_x = F.relu(self.bn8_2(self.conv8_2(x_8))) return res_x, x_5, x_6, x_7, x_8 class lidar_decoder_kd(nn.Module): def __init__(self, height_feat_size=13): super(lidar_decoder_kd, self).__init__() self.conv_pre_1 = nn.Conv2d(height_feat_size, 32, kernel_size=3, stride=1, padding=1) self.conv_pre_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1) self.bn_pre_1 = nn.BatchNorm2d(32) self.bn_pre_2 = nn.BatchNorm2d(32) # self.conv3d_1 = Conv3D(64, 64, kernel_size=(3, 1, 1), stride=1, padding=(0, 0, 0)) # self.conv3d_2 = Conv3D(128, 128, kernel_size=(3, 1, 1), stride=1, padding=(0, 0, 0)) self.conv3d_1 = Conv3D(64, 64, kernel_size=(1, 1, 1), stride=1, padding=(0, 0, 0)) self.conv3d_2 = Conv3D(128, 128, kernel_size=(1, 1, 1), stride=1, padding=(0, 0, 0)) self.conv1_1 = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_1 = nn.Conv2d(512 + 256, 256, kernel_size=3, stride=1, padding=1) self.conv5_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv6_1 = nn.Conv2d(256 + 128, 128, kernel_size=3, stride=1, padding=1) self.conv6_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv7_1 = nn.Conv2d(128 + 64, 64, kernel_size=3, stride=1, padding=1) self.conv7_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv8_1 = nn.Conv2d(64 + 32, 32, kernel_size=3, stride=1, padding=1) self.conv8_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1) self.bn1_1 = nn.BatchNorm2d(64) self.bn1_2 = nn.BatchNorm2d(64) self.bn2_1 = nn.BatchNorm2d(128) self.bn2_2 = nn.BatchNorm2d(128) self.bn3_1 = nn.BatchNorm2d(256) self.bn3_2 = nn.BatchNorm2d(256) self.bn4_1 = nn.BatchNorm2d(512) self.bn4_2 = nn.BatchNorm2d(512) self.bn5_1 = nn.BatchNorm2d(256) 
self.bn5_2 = nn.BatchNorm2d(256) self.bn6_1 = nn.BatchNorm2d(128) self.bn6_2 = nn.BatchNorm2d(128) self.bn7_1 = nn.BatchNorm2d(64) self.bn7_2 = nn.BatchNorm2d(64) self.bn8_1 = nn.BatchNorm2d(32) self.bn8_2 = nn.BatchNorm2d(32) def forward(self, x, x_1, x_2, x_3, x_4, batch): # -------------------------------- Decoder Path -------------------------------- x_5 = F.relu(self.bn5_1(self.conv5_1(torch.cat((F.interpolate(x_4, scale_factor=(2, 2)), x_3), dim=1)))) x_5 = F.relu(self.bn5_2(self.conv5_2(x_5))) x_2 = x_2.view(batch, -1, x_2.size(1), x_2.size(2), x_2.size(3)) x_2 = x_2.permute(0, 2, 1, 3, 4).contiguous() #x_2 = F.adaptive_max_pool3d(x_2, (1, None, None)) x_2 = x_2.permute(0, 2, 1, 3, 4).contiguous() x_2 = x_2.view(-1, x_2.size(2), x_2.size(3), x_2.size(4)).contiguous() x_6 = F.relu(self.bn6_1(self.conv6_1(torch.cat((F.interpolate(x_5, scale_factor=(2, 2)), x_2), dim=1)))) x_6 = F.relu(self.bn6_2(self.conv6_2(x_6))) x_1 = x_1.view(batch, -1, x_1.size(1), x_1.size(2), x_1.size(3)) x_1 = x_1.permute(0, 2, 1, 3, 4).contiguous() #x_1 = F.adaptive_max_pool3d(x_1, (1, None, None)) x_1 = x_1.permute(0, 2, 1, 3, 4).contiguous() x_1 = x_1.view(-1, x_1.size(2), x_1.size(3), x_1.size(4)).contiguous() x_7 = F.relu(self.bn7_1(self.conv7_1(torch.cat((F.interpolate(x_6, scale_factor=(2, 2)), x_1), dim=1)))) x_7 = F.relu(self.bn7_2(self.conv7_2(x_7))) x = x.view(batch, -1, x.size(1), x.size(2), x.size(3)) x = x.permute(0, 2, 1, 3, 4).contiguous() #x = F.adaptive_max_pool3d(x, (1, None, None)) x = x.permute(0, 2, 1, 3, 4).contiguous() x = x.view(-1, x.size(2), x.size(3), x.size(4)).contiguous() x_8 = F.relu(self.bn8_1(self.conv8_1(torch.cat((F.interpolate(x_7, scale_factor=(2, 2)), x), dim=1)))) res_x = F.relu(self.bn8_2(self.conv8_2(x_8))) return res_x, x_7, x_6, x_5 class adafusionlayer(nn.Module): def __init__(self,input_channel=128): super(adafusionlayer, self).__init__() self.attn = nn.Conv2d(input_channel, 1, kernel_size=1, stride=1) self.bn = nn.BatchNorm2d(1) def forward(self,x): _,c,h,w = x.size() num_agent = x.size()[0] fusion_weight = F.relu(self.bn(self.attn(x))) # fusion_weight = F.relu(self.attn(x)) fusion_weight = F.softmax(fusion_weight,dim=0).cuda() feat = torch.zeros(x[0].size()).cuda() for j in range(num_agent): feat = feat + (x[j]*(fusion_weight[j].repeat(c,1,1))) return feat class multifusionlayer(nn.Module): def __init__(self,input_channel=128): super(multifusionlayer,self).__init__() c1 = 64 c2 = 32 self.attn1 = nn.Conv2d(input_channel,c1,kernel_size=1, stride=1) self.attn2 = nn.Conv2d(c1,c2,kernel_size=1, stride=1) self.attn3 = nn.Conv2d(c2,1,kernel_size=1, stride=1) self.bn_1=nn.BatchNorm2d(c1) self.bn_2=nn.BatchNorm2d(c2) self.bn_3=nn.BatchNorm2d(1) def forward(self,x): _,c,h,w = x.size() num_agent = 5 x = F.relu(self.bn_1(self.attn1(x))) x = F.relu(self.bn_2(self.attn2(x))) x = F.relu(self.bn_3(self.attn3(x))) fusion_weight = F.softmax(x,dim=0).cuda() feat = torch.zeros(x[0].size()).cuda() for j in range(num_agent): feat = feat + (x[j]*(fusion_weight[j].repeat(c,1,1))) return feat class sigmoidfusionlayer(nn.Module): def __init__(self,input_channel=128): super(sigmoidfusionlayer, self).__init__() self.attn = nn.Conv2d(input_channel, 1, kernel_size=1, stride=1) self.bn = nn.BatchNorm2d(1) def forward(self,x): _,c,h,w = x.size() num_agent = 5 fusion_weight = F.sigmoid(F.relu(self.bn(self.attn(x)))).cuda() # fusion_weight = fusion_weight.sum(dim=0).cuda() feat = torch.zeros(x[0].size()).cuda() for j in range(num_agent): feat = feat + (x[j]*(fusion_weight[j])) return feat # class 
MLPfusionlayer(nn.Module): # def __init__(self,input_channel=128): # super(MLPfusionlayer,self).__init__() # self.MLP = class pairfusionlayer(nn.Module): def __init__(self,input_channel=512): super(pairfusionlayer, self).__init__() self.attn = nn.Conv2d(512, 1, kernel_size=1, stride=1) self.bn = nn.BatchNorm2d(1) def forward(self,x): _,c,h,w = x.size() num_agent = x.size()[0] cat_list=[] for i in range(num_agent): cat_list.append(torch.cat((x[0],x[i]))) feat_list=torch.stack(cat_list) # fusion_weight = F.relu(self.bn(self.attn(x))) fusion_weight = F.relu(self.attn(feat_list)) fusion_weight = F.softmax(fusion_weight,dim=0).cuda() feat = torch.zeros(x[0].size()).cuda() for j in range(num_agent): feat = feat + (x[j]*(fusion_weight[j].repeat(c,1,1))) return feat class pairfusionlayer_1(nn.Module): def __init__(self,input_channel=512): super(pairfusionlayer_1, self).__init__() self.conv1_1 = nn.Conv2d(512, 128, kernel_size=1, stride=1, padding=0) self.bn1_1 = nn.BatchNorm2d(128) self.conv1_2 = nn.Conv2d(128, 32, kernel_size=1, stride=1, padding=0) self.bn1_2 = nn.BatchNorm2d(32) self.conv1_3 = nn.Conv2d(32, 8, kernel_size=1, stride=1, padding=0) self.bn1_3 = nn.BatchNorm2d(8) self.conv1_4 = nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0) def forward(self,x): _,c,h,w = x.size() num_agent = x.size()[0] cat_list=[] for i in range(num_agent): cat_list.append(torch.cat((x[0],x[i]))) feat_list=torch.stack(cat_list) # # fusion_weight = F.relu(self.bn(self.attn(x))) # fusion_weight = F.relu(self.attn(feat_list)) x_1 = F.relu(self.bn1_1(self.conv1_1(feat_list))) x_1 = F.relu(self.bn1_2(self.conv1_2(x_1))) x_1 = F.relu(self.bn1_3(self.conv1_3(x_1))) fusion_weight = F.relu(self.conv1_4(x_1)) fusion_weight = F.softmax(fusion_weight,dim=0).cuda() feat = torch.zeros(x[0].size()).cuda() for j in range(num_agent): feat = feat + (x[j]*(fusion_weight[j].repeat(c,1,1))) return feat class pairfusionlayer_2(nn.Module): def __init__(self,input_channel=512): super(pairfusionlayer_2, self).__init__() self.conv1_1 = nn.Conv2d(512, 128, kernel_size=1, stride=1, padding=0) self.bn1_1 = nn.BatchNorm2d(128) self.conv1_2 = nn.Conv2d(128, 32, kernel_size=1, stride=1, padding=0) self.bn1_2 = nn.BatchNorm2d(32) self.conv1_3 = nn.Conv2d(32, 8, kernel_size=1, stride=1, padding=0) self.bn1_3 = nn.BatchNorm2d(8) self.conv1_4 = nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0) self.bn1_4 = nn.BatchNorm2d(1) def forward(self, x, scene): _,c,h,w = x.size() num_agent = x.size()[0] [scene, delta_t, forecast_model] = scene cat_list=[] for i in range(num_agent): cat_list.append(torch.cat((x[0],x[i]))) feat_list=torch.stack(cat_list) # # fusion_weight = F.relu(self.bn(self.attn(x))) # fusion_weight = F.relu(self.attn(feat_list)) x_1 = F.relu(self.bn1_1(self.conv1_1(feat_list))) x_1 = F.relu(self.bn1_2(self.conv1_2(x_1))) x_1 = F.relu(self.bn1_3(self.conv1_3(x_1))) fusion_weight = F.relu(self.bn1_4(self.conv1_4(x_1))) fusion_weight = F.softmax(fusion_weight,dim=0).cuda() if 0: weight_save = fusion_weight.to('cpu') weight_save = np.array(weight_save) time_save = time.localtime(time.time()) scene_id = scene[0].split('/')[-1].split('_')[0] scene_time = scene[0].split('/')[-1].split('_')[-1] path_save = './visualization/colla_weight/' if not os.path.exists(path_save): os.mkdir(path_save) path_save = path_save + str(time_save.tm_mon) + str(time_save.tm_mday) + '_' + forecast_model + '_' + str(int(delta_t[0][1])) + '/' if not os.path.exists(path_save): os.mkdir(path_save) path_save = path_save + scene_id + '/' if not 
os.path.exists(path_save): os.mkdir(path_save) path_save = path_save + scene_time + '/' if not os.path.exists(path_save): os.mkdir(path_save) ref = weight_save[0].max() for i in range(weight_save.shape[0]): weight_pic = weight_save[i][0] / ref pic = sns.heatmap(data = weight_pic, vmax = 1, vmin = 0, linewidths=.5, cmap="YlGnBu") pic_save = pic.get_figure() pic_save.savefig(path_save + str(i) + '.jpeg') plt.clf() feat = torch.zeros(x[0].size()).cuda() for j in range(num_agent): feat = feat + (x[j]*(fusion_weight[j].repeat(c,1,1))) return feat class pairfusionlayer_3(nn.Module): def __init__(self,input_channel=512): super(pairfusionlayer_3, self).__init__() self.conv1_1 = nn.Conv2d(512, 128, kernel_size=1, stride=1, padding=0) self.bn1_1 = nn.BatchNorm2d(128) self.conv1_2 = nn.Conv2d(128, 8, kernel_size=1, stride=1, padding=0) self.bn1_2 = nn.BatchNorm2d(8) # self.conv1_3 = nn.Conv2d(32, 8, kernel_size=1, stride=1, padding=0) # self.bn1_3 = nn.BatchNorm2d(8) self.conv1_4 = nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0) self.bn1_4 = nn.BatchNorm2d(1) def forward(self,x): _,c,h,w = x.size() num_agent = x.size()[0] cat_list=[] for i in range(num_agent): cat_list.append(torch.cat((x[0],x[i]))) feat_list=torch.stack(cat_list) # # fusion_weight = F.relu(self.bn(self.attn(x))) # fusion_weight = F.relu(self.attn(feat_list)) x_1 = F.relu(self.bn1_1(self.conv1_1(feat_list))) x_1 = F.relu(self.bn1_2(self.conv1_2(x_1))) # x_1 = F.relu(self.bn1_3(self.conv1_3(x_1))) fusion_weight = F.relu(self.bn1_4(self.conv1_4(x_1))) fusion_weight = F.softmax(fusion_weight,dim=0).cuda() feat = torch.zeros(x[0].size()).cuda() for j in range(num_agent): feat = feat + (x[j]*(fusion_weight[j].repeat(c,1,1))) return feat class pairfusionlayer_4(nn.Module): def __init__(self,input_channel=512): super(pairfusionlayer_4, self).__init__() self.conv1_1 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0) self.bn1_1 = nn.BatchNorm2d(256) # self.conv1_2 = nn.Conv2d(128, 8, kernel_size=1, stride=1, padding=0) # self.bn1_2 = nn.BatchNorm2d(8) # self.conv1_3 = nn.Conv2d(32, 8, kernel_size=1, stride=1, padding=0) # self.bn1_3 = nn.BatchNorm2d(8) self.conv1_4 = nn.Conv2d(256, 1, kernel_size=1, stride=1, padding=0) self.bn1_4 = nn.BatchNorm2d(1) def forward(self,x): _,c,h,w = x.size() num_agent = x.size()[0] cat_list=[] for i in range(num_agent): cat_list.append(torch.cat((x[0],x[i]))) feat_list=torch.stack(cat_list) # # fusion_weight = F.relu(self.bn(self.attn(x))) # fusion_weight = F.relu(self.attn(feat_list)) x_1 = F.relu(self.bn1_1(self.conv1_1(feat_list))) # x_1 = F.relu(self.bn1_2(self.conv1_2(x_1))) # x_1 = F.relu(self.bn1_3(self.conv1_3(x_1))) fusion_weight = F.relu(self.bn1_4(self.conv1_4(x_1))) # print(fusion_weight.size()) fusion_weight = F.softmax(fusion_weight,dim=0).cuda() # print(fusion_weight.size()) # ipdb.set_trace() feat = torch.zeros(x[0].size()).cuda() for j in range(num_agent): feat = feat + (x[j]*(fusion_weight[j].repeat(c,1,1))) return feat
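All of the fusion layers above share one pattern: a small conv stack produces a per-agent weight map, the weights are normalized with a softmax across the agent dimension, and the agents' feature maps are summed under those weights. A minimal, device-agnostic sketch of that pattern follows; the class name SimpleAgentFusion and the toy shapes are illustrative and not part of the original file, which additionally hard-codes .cuda() calls.

import torch
import torch.nn as nn
import torch.nn.functional as F

class SimpleAgentFusion(nn.Module):
    # hypothetical re-sketch of the adafusionlayer idea, without .cuda() calls
    def __init__(self, input_channel=128):
        super().__init__()
        self.attn = nn.Conv2d(input_channel, 1, kernel_size=1)

    def forward(self, x):
        # x: (num_agents, C, H, W); one spatial weight map per agent,
        # normalized across the agent dimension
        w = F.softmax(F.relu(self.attn(x)), dim=0)
        # broadcast the single-channel weights over C and sum over agents
        return (x * w).sum(dim=0)

feats = torch.randn(5, 128, 32, 32)   # five agents' feature maps
fused = SimpleAgentFusion()(feats)
print(fused.shape)                    # torch.Size([128, 32, 32])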
42.319114
970
0.599298
12,894
76,386
3.324259
0.03521
0.029046
0.059119
0.063855
0.828663
0.797168
0.776544
0.75935
0.724751
0.70884
0
0.092668
0.235436
76,386
1,804
971
42.342572
0.641262
0.181617
0
0.572093
0
0
0.001127
0.000467
0
0
0
0
0
1
0.060465
false
0
0.015814
0
0.136744
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
23fc27d233ce1e433513960d060fc322c56c97cb
95
py
Python
manager/generators/int.py
Exanis/dataset-manager
af2f2d4242417eb14240129ac6312a0ebdfd24ee
[ "MIT" ]
null
null
null
manager/generators/int.py
Exanis/dataset-manager
af2f2d4242417eb14240129ac6312a0ebdfd24ee
[ "MIT" ]
5
2018-11-22T13:32:17.000Z
2018-11-22T13:34:39.000Z
manager/generators/int.py
Exanis/dataset-manager
af2f2d4242417eb14240129ac6312a0ebdfd24ee
[ "MIT" ]
null
null
null
from random import randint


def int_generator(t, rank):
    return str(randint(t.min, t.max))
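A hedged usage sketch: int_generator only reads the min and max attributes of its first argument, so any simple object works. The field spec below is hypothetical, not part of the repository.

from types import SimpleNamespace

field = SimpleNamespace(min=0, max=100)  # stand-in for a dataset field spec
print(int_generator(field, rank=0))      # e.g. "42"; rank is accepted but unused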
15.833333
37
0.715789
16
95
4.1875
0.8125
0
0
0
0
0
0
0
0
0
0
0
0.168421
95
5
38
19
0.848101
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
f1cabbc119a3561fc8e800101777edfaf8274f1f
138
py
Python
kafka-utils/tests/bai_k8s_utils/test_kubernetes_test_client.py
gavinmbell/benchmark-ai-1
a697e67d68b843fe9350e55871dad867bab5d51d
[ "Apache-2.0" ]
6
2020-09-29T09:03:04.000Z
2022-03-14T06:52:25.000Z
kafka-utils/tests/bai_k8s_utils/test_kubernetes_test_client.py
gavinmbell/benchmark-ai-1
a697e67d68b843fe9350e55871dad867bab5d51d
[ "Apache-2.0" ]
null
null
null
kafka-utils/tests/bai_k8s_utils/test_kubernetes_test_client.py
gavinmbell/benchmark-ai-1
a697e67d68b843fe9350e55871dad867bab5d51d
[ "Apache-2.0" ]
4
2020-10-01T07:49:22.000Z
2021-06-16T19:44:12.000Z
def test_imports():
    from bai_k8s_utils.kubernetes_tests_client import KubernetesTestUtilsClient

    assert KubernetesTestUtilsClient
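The same import smoke test can be phrased generically with importlib; this sketch is not part of the original suite and assumes the import path shown above.

import importlib


def test_module_importable():
    # fails with ModuleNotFoundError or AssertionError if the package layout breaks
    mod = importlib.import_module("bai_k8s_utils.kubernetes_tests_client")
    assert hasattr(mod, "KubernetesTestUtilsClient")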
27.6
79
0.847826
14
138
8
0.928571
0
0
0
0
0
0
0
0
0
0
0.008264
0.123188
138
4
80
34.5
0.917355
0
0
0
0
0
0
0
0
0
0
0
0.333333
1
0.333333
true
0
0.666667
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
f1ef648331ecec2f366da19ded96a1372a08758d
29
py
Python
train/__init__.py
li012589/NeuralWavelet
6e593ded5cb4ae80579cbf56eb9c346d808669cb
[ "Apache-2.0" ]
28
2021-01-27T00:41:40.000Z
2022-02-14T10:11:51.000Z
train/__init__.py
li012589/NeuralWavelet
6e593ded5cb4ae80579cbf56eb9c346d808669cb
[ "Apache-2.0" ]
null
null
null
train/__init__.py
li012589/NeuralWavelet
6e593ded5cb4ae80579cbf56eb9c346d808669cb
[ "Apache-2.0" ]
6
2021-02-03T01:42:08.000Z
2021-12-03T17:47:19.000Z
from .train import forwardKLD
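The module only re-exports forwardKLD. For context, training by the forward KL divergence KL(p_data || p_model) reduces to minimizing the mean negative log-likelihood of the data under the model, since the data-entropy term is constant in the model parameters. A generic sketch under that assumption; the log_prob interface is hypothetical, not the repository's API.

import torch

def forward_kld_loss(log_prob, x):
    # KL(p_data || p_model) = E_{x~p_data}[log p_data(x) - log p_model(x)];
    # only the -log p_model(x) term depends on the model parameters
    return -log_prob(x).mean()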
29
29
0.862069
4
29
6.25
1
0
0
0
0
0
0
0
0
0
0
0
0.103448
29
1
29
29
0.961538
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
7b67afba08e4bec7b6bb1bb1928ea13a0ab1e96d
10,494
py
Python
herramientas/FiscalStimulusCOVID_to_GDP/barplotFiscalStimulus_to_GDP_LatAm_post2.py
DiazSalinas/COVID-19
c79bc5487363a76baa2b9eb282991077eaeddf14
[ "MIT" ]
24
2020-04-02T04:35:32.000Z
2020-08-11T00:48:06.000Z
herramientas/FiscalStimulusCOVID_to_GDP/barplotFiscalStimulus_to_GDP_LatAm_post2.py
DiazSalinas/COVID-19
c79bc5487363a76baa2b9eb282991077eaeddf14
[ "MIT" ]
26
2020-04-03T15:07:15.000Z
2020-09-01T08:12:08.000Z
herramientas/FiscalStimulusCOVID_to_GDP/barplotFiscalStimulus_to_GDP_LatAm_post2.py
DiazSalinas/COVID-19
c79bc5487363a76baa2b9eb282991077eaeddf14
[ "MIT" ]
21
2020-04-02T21:29:08.000Z
2020-09-01T19:25:22.000Z
import numpy as np import matplotlib.pyplot as plt import pandas import os from matplotlib import font_manager as fm, rcParams fig = plt.figure(figsize=(7,7)) ### Post colaboración Olivia Bordeu y Nacho Oliva. df = pandas.read_csv("asgdpPunto.csv",sep=";",encoding= 'unicode_escape') fpath = os.path.join(rcParams["datapath"],"../Montserrat-Regular.ttf") prop = fm.FontProperties(fname="../Montserrat-Regular.ttf") fname = os.path.split(fpath)[1] color_blue = tuple(np.array([38, 53, 134])/255.) # Make a fake dataset: country = np.array(df.values[:,0]) FiscalStimulus = np.array(df.values[:,1]) GrossDebt = np.array(df.values[:,3]) height = FiscalStimulus ##PRIMERO ESTIMULO FISCAL SOBRE PIB POR PAIS ######################################################################################## ## Recoletado de : https://www.segib.org/covid-19/ bars = country y_pos = np.arange(len(bars)) # Create bars plt.bar(y_pos, height,color=color_blue) # Create names on the x-axis plt.xticks(y_pos, bars) plt.title('This is a special font: {}'.format(fname), fontproperties=prop) plt.ylabel('Estímulo fiscal económico como (%) del PIB', fontproperties=prop, fontsize=12, labelpad= -4) plt.xlabel('País', fontproperties=prop, fontsize=12) plt.title(r"Estímulo fiscal económico COVID19 como porcentaje del PIB",fontproperties=prop, fontsize=12) plt.savefig("outputFiscalStimulus.png") # Show graphic plt.show() ##SEGUNDO DEUDA BRUTA SOBRE PIB POR PAIS ############################################################################################ ### IMF fig = plt.figure(figsize=(7,7)) height= GrossDebt # Create bars plt.bar(y_pos, height,color=color_blue) # Create names on the x-axis plt.xticks(y_pos, bars) plt.title('This is a special font: {}'.format(fname), fontproperties=prop) plt.ylabel('Deuda Bruta País como (%) del PIB', fontproperties=prop, fontsize=12, labelpad= -4) plt.xlabel('País', fontproperties=prop, fontsize=12) plt.title(r"Deuda Bruta como porcentaje del PIB",fontproperties=prop, fontsize=12) plt.savefig("outputGrossDebt.png") # Show graphic plt.show() ##TERCERO ESTIMULO FISCAL SOBRE PIB POR PAIS G20 ############################################################################################ ## BONUS / https://www.statista.com/statistics/1107572/covid-19-value-g20-stimulus-packages-share-gdp/ fig = plt.figure(figsize=(7,7)) df = pandas.read_csv("g20.csv",sep=";",encoding= 'unicode_escape') # Make a fake dataset: country = np.array(df.values[:,0]) FiscalStimulus = np.array(df.values[:,1]) height = FiscalStimulus bars = country y_pos = np.arange(len(bars)) # Create bars plt.bar(y_pos, height,color=color_blue) # Create names on the x-axis plt.xticks(y_pos, bars) plt.xticks(rotation=90, fontsize= 10, fontproperties=prop) plt.yticks(fontproperties=prop, fontsize= 12) plt.subplots_adjust(bottom=0.15) plt.title('This is a special font: {}'.format(fname), fontproperties=prop) plt.ylabel('Estímulo fiscal económico como (%) del PIB', fontproperties=prop, fontsize=10, labelpad= -4) plt.title(r"Estímulo fiscal económico COVID19 como porcentaje del PIB G20",fontproperties=prop, fontsize=12) plt.savefig("outputFiscalStimulusG20.png") # Show graphic plt.show() ########### NUEVO POST #1 fig = plt.figure(figsize=(7,7)) df = pandas.read_csv("g20.csv",sep=";",encoding= 'unicode_escape') # Make a fake dataset: country = np.array(df.values[:,0]) FiscalStimulus = np.array(df.values[:,1]) height = FiscalStimulus bars = country y_pos = np.arange(len(bars)) # Create bars plt.bar(y_pos, height,color=color_blue) # Create names on the x-axis 
plt.xticks(y_pos, bars) plt.xticks(fontproperties=prop,rotation=48, fontsize= 6) plt.yticks(fontproperties=prop, fontsize= 12) plt.title('This is a special font: {}'.format(fname), fontproperties=prop) plt.ylabel('Estímulo fiscal económico como (%) del PIB', fontproperties=prop, fontsize=10, labelpad= -4) plt.title(r"Estímulo fiscal económico COVID19 como porcentaje del PIB G20",fontproperties=prop, fontsize=12) plt.savefig("outputFiscalStimulusG20covid.png") # Show graphic plt.show() plt.close('all') # Numero 1 COVID #############################2 fig = plt.figure(figsize=(7,7)) df = pandas.read_csv("g20.csv",sep=";",encoding= 'unicode_escape') # Make a fake dataset: country = np.array(df.values[:,0]) country = np.array([r'Japón',r'EE-UU','Australia','Canada','Brasil','Francia','Alemania','Un. Europea', 'Argentina', 'Arabia Saud.', 'Rusia', 'Indonesia', 'China', r'Turquía', 'Italia', 'India', r'México']) FiscalStimulus = np.array(df.values[:,1]) country = np.append(country, 'Chile') FiscalStimulus = np.append(FiscalStimulus, 6.7) arrsort = FiscalStimulus.argsort() FiscalStimulus1 = FiscalStimulus[arrsort[::-1]] country1 = country[arrsort[::-1]] height = np.array(FiscalStimulus1,dtype=float) bars = country1 y_pos = np.arange(len(bars)) import matplotlib.colors as mcolors from matplotlib import cm greens = cm.get_cmap('bwr') test = (np.arange(18)+4)/18. #1 - (height - 6.7) #test[2:] = np.arange(16)/ 30. + 0.5 #h2 = height / height.max() #h2 = h2 / h2[2] #test2 = test - test[2] + 0.5 colors = greens(test) colors[5, :] = np.array([0,0,0,1]) # Create bars plt.bar(y_pos, height,color=colors) # Create names on the x-axis plt.xticks(y_pos, bars) plt.xticks(rotation=90, fontsize= 10, fontproperties=prop) plt.yticks(fontproperties=prop, fontsize= 12) plt.subplots_adjust(bottom=0.15) plt.title('This is a special font: {}'.format(fname), fontproperties=prop) plt.ylabel('Estímulo fiscal económico como (%) del PIB', fontproperties=prop, fontsize=12, labelpad=0) plt.xlim(-.75, 17.75) plt.title(r"Estímulo fiscal económico COVID-19 como porcentaje del PIB G20",fontproperties=prop, fontsize=12) plt.savefig("outputFiscalStimulusG20Lehman0.png") # Show graphic plt.show() """ plt.close() # Numero 1 #############################2 fig = plt.figure(figsize=(7,7)) df = pandas.read_csv("g20_2008.csv",sep=";",encoding= 'unicode_escape') # Make a fake dataset: country = np.array(df.values[:,0]) country = np.array([r'Japón',r'EE-UU','Australia','Canada','Brasil','Francia','Alemania','Un. Europea', 'Argentina', 'Arabia Saud.', 'Rusia', 'Indonesia', 'China', r'Turquía', 'Italia', 'India', r'México']) FiscalStimulus = np.array(df.values[:,1]) country = np.append(country, 'Chile') FiscalStimulus = np.append(FiscalStimulus, 2.8) arrsort = FiscalStimulus.argsort() FiscalStimulus1 = FiscalStimulus[arrsort[::-1]] country1 = country[arrsort[::-1]] height = np.array(FiscalStimulus1,dtype=float) bars = country1 y_pos = np.arange(len(bars)) import matplotlib.colors as mcolors from matplotlib import cm greens = cm.get_cmap('bwr') test = (np.arange(18)+1)/18. #1 - (height - 6.7) #test[2:] = np.arange(16)/ 30. 
+ 0.5 #h2 = height / height.max() #h2 = h2 / h2[2] #test2 = test - test[2] + 0.5 colors = greens(test) colors[8, :] = np.array([0,0,0,1]) # Create bars plt.bar(y_pos, height,color=colors) # Create names on the x-axis plt.xticks(y_pos, bars) plt.xticks(rotation=90, fontsize= 10, fontproperties=prop) plt.yticks(fontproperties=prop, fontsize= 12) plt.subplots_adjust(bottom=0.15) plt.title('This is a special font: {}'.format(fname), fontproperties=prop) plt.ylabel('Estímulo fiscal económico como (%) del PIB', fontproperties=prop, fontsize=12, labelpad=0) plt.xlim(-.75, 17.75) plt.title(r"Estímulo fiscal económico Crisis 2008 como porcentaje del PIB G20",fontproperties=prop, fontsize=12) plt.savefig("outputFiscalStimulusG20Lehman.png") # Show graphic plt.show() plt.close() # Numero 2 #############################3 BONUS DIFERENCIA fig = plt.figure(figsize=(7,7)) df = pandas.read_csv("g20_2008_diff.csv",sep=";",encoding= 'unicode_escape') # Make a fake dataset: #country = np.array(df.values[:,0]) FiscalStimulus = np.array(df.values[:,1]) height = FiscalStimulus height = np.append(height, 3.9) arrsort = height.argsort() FiscalStimulus2 = height[arrsort[::-1]] country2 = country[arrsort[::-1]] bars = country2 y_pos = np.arange(len(bars)) # Create bars plt.bar(y_pos[:11], FiscalStimulus2[:11],color='g') plt.bar(y_pos[11:], FiscalStimulus2[11:],color='r') plt.bar(y_pos[6], FiscalStimulus2[6],color='k') # Create names on the x-axis plt.xticks(y_pos, bars) plt.xticks(rotation=90, fontsize= 10, fontproperties=prop) plt.yticks(fontproperties=prop, fontsize= 12) plt.subplots_adjust(bottom=0.15) plt.xlim(-.75, 17.75) plt.ylim(-9, 19.75) plt.title('This is a special font: {}'.format(fname), fontproperties=prop) plt.ylabel('Diferencia Estímulo como (%) PIB 2008 v/s 2020', fontproperties=prop, fontsize=12, labelpad= 0) #plt.xlabel('País', fontproperties=prop, fontsize=12) #plt.title(r"Diferencia de Estímulo fiscal económico como (%) del PIB 2008 v/s 2020",fontproperties=prop, fontsize=12) plt.title(r"COVID v/s 2008 Estímulo Fiscal como % del PIB",fontproperties=prop, fontsize=12) plt.savefig("outputDIFFCOVIDLEHMAN.png") # Show graphic plt.show() plt.close() ############################4 BONUS fig = plt.figure(figsize=(7,7)) df = pandas.read_csv("g20_2008_debt.csv",sep=";",encoding= 'unicode_escape') # Make a fake dataset: FiscalStimulus = np.array(df.values[:,1]) height = FiscalStimulus height = np.append(height, 27.9) arrsort = height.argsort() height3 = height[arrsort[::-1]] country3 = country[arrsort[::-1]] bars = country3 y_pos = np.arange(len(bars)) greens = cm.get_cmap('Oranges') # Create bars plt.bar(y_pos, height3,color=greens(1-np.arange(18)/18.)) plt.bar(y_pos[-3], height3[-3],color='k') # Create names on the x-axis plt.xticks(y_pos, bars) plt.xticks(rotation=90, fontsize= 10, fontproperties=prop) plt.yticks(fontproperties=prop, fontsize= 12) plt.subplots_adjust(bottom=0.15) plt.xlim(-.75, 17.75) plt.title('This is a special font: {}'.format(fname), fontproperties=prop) plt.ylabel('Deuda Bruta País como (%) del PIB', fontproperties=prop, fontsize=12, labelpad= 0) #plt.xlabel('País', fontproperties=prop, fontsize=12) plt.title(r"Deuda Bruta como porcentaje del PIB G20",fontproperties=prop, fontsize=12) plt.savefig("outputGrossDebt_g20.png") # Show graphic plt.show() """
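Stripped of the dataset-specific details, the chart-building pattern the script repeats is: sort the values in descending order, reindex the labels with the same permutation, and draw a labelled bar chart. A self-contained sketch with made-up numbers:

import numpy as np
import matplotlib.pyplot as plt

country = np.array(["A", "B", "C", "D"])
stimulus = np.array([6.7, 2.1, 9.3, 4.0])   # illustrative values only
order = stimulus.argsort()[::-1]            # descending, as in the script
y_pos = np.arange(len(country))
plt.bar(y_pos, stimulus[order])
plt.xticks(y_pos, country[order], rotation=90)
plt.ylabel("Fiscal stimulus (% of GDP)")
plt.tight_layout()
plt.savefig("stimulus_sorted.png")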
29.231198
207
0.67248
1,472
10,494
4.753397
0.148098
0.105474
0.100329
0.100043
0.835644
0.81978
0.795627
0.775904
0.751322
0.737459
0
0.04203
0.136173
10,494
359
208
29.231198
0.72984
0.07833
0
0.585859
0
0
0.223084
0.03596
0
0
0
0
0
1
0
false
0
0.070707
0
0.070707
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
7bb6483268785e730a8df06bd2480ef269079207
64
py
Python
nxml/jax/__init__.py
yuneg11/NXML
fb79f10f35d18e86ba31dc86413a0f9a2afe4c0a
[ "MIT" ]
null
null
null
nxml/jax/__init__.py
yuneg11/NXML
fb79f10f35d18e86ba31dc86413a0f9a2afe4c0a
[ "MIT" ]
null
null
null
nxml/jax/__init__.py
yuneg11/NXML
fb79f10f35d18e86ba31dc86413a0f9a2afe4c0a
[ "MIT" ]
null
null
null
from . import nn
from . import utils
from . import experimental
16
26
0.765625
9
64
5.444444
0.555556
0.612245
0
0
0
0
0
0
0
0
0
0
0.1875
64
3
27
21.333333
0.942308
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
c885ee813f1a9eb1d1dc2c3446760ae654367721
155
py
Python
gdalhelpers/classes/__init__.py
JanCaha/gdalhelpers
925ecb2552b697b5970617484f1fc259f844ba04
[ "MIT" ]
null
null
null
gdalhelpers/classes/__init__.py
JanCaha/gdalhelpers
925ecb2552b697b5970617484f1fc259f844ba04
[ "MIT" ]
null
null
null
gdalhelpers/classes/__init__.py
JanCaha/gdalhelpers
925ecb2552b697b5970617484f1fc259f844ba04
[ "MIT" ]
null
null
null
""" Module that declares classes which extends functionality of **GDAL/OGR**. Classes: - `DEM` - extends functionality of raster dataset in **GDAL**. """
22.142857
73
0.709677
19
155
5.789474
0.736842
0.363636
0.4
0
0
0
0
0
0
0
0
0
0.148387
155
7
74
22.142857
0.833333
0.948387
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
c8861baed66e5e00dc05b8f9d836f46976b65e0f
44
py
Python
nfp/models/__init__.py
MolecularMaterials/MPNN-Mo2C
c0ea4cb793901b7ae86fdfc91e108f3912d7a750
[ "MIT" ]
18
2019-07-19T16:48:38.000Z
2021-08-05T11:45:06.000Z
easy_nlp/models/__init__.py
Moumeneb1/IRIT_INTERNSHIP
6a443508e9a6e26e46354c2d8282e360afdc02e7
[ "MIT" ]
3
2021-09-03T22:47:55.000Z
2022-02-16T07:54:19.000Z
easy_nlp/models/__init__.py
Moumeneb1/IRIT_INTERNSHIP
6a443508e9a6e26e46354c2d8282e360afdc02e7
[ "MIT" ]
3
2021-10-15T02:00:30.000Z
2022-01-19T06:29:05.000Z
from .models import *
from .losses import *
14.666667
21
0.727273
6
44
5.333333
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.181818
44
2
22
22
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
c8b9a2b06aebc8df25799491d881b9267bed4a89
66
py
Python
tests/test_Customer.py
fraser-langton/Quandoo
3a5e1241b645129d805213d01221ede8f2b79aa2
[ "MIT" ]
1
2019-08-08T11:05:28.000Z
2019-08-08T11:05:28.000Z
tests/test_Customer.py
fraser-langton/Quandoo
3a5e1241b645129d805213d01221ede8f2b79aa2
[ "MIT" ]
1
2021-01-31T23:16:09.000Z
2021-03-05T01:33:49.000Z
tests/test_Customer.py
fraser-langton/Quandoo
3a5e1241b645129d805213d01221ede8f2b79aa2
[ "MIT" ]
1
2020-08-19T09:06:42.000Z
2020-08-19T09:06:42.000Z
import unittest


class TestCustomer(unittest.TestCase):
    pass
11
38
0.772727
7
66
7.285714
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.166667
66
5
39
13.2
0.927273
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
cdfdec9e2550903cdc59926ed28bac61c4d8d0ef
4,868
py
Python
test/integration_tests/test_containers.py
poldracklab/bids-core
b87a1ef2d3e1c5a79a98c0f0ba82b1b2634bce0e
[ "MIT" ]
1
2016-03-09T01:24:02.000Z
2016-03-09T01:24:02.000Z
test/integration_tests/test_containers.py
poldracklab/bids-core
b87a1ef2d3e1c5a79a98c0f0ba82b1b2634bce0e
[ "MIT" ]
15
2016-02-17T19:11:32.000Z
2018-04-12T23:33:06.000Z
test/integration_tests/test_containers.py
poldracklab/bids-core
b87a1ef2d3e1c5a79a98c0f0ba82b1b2634bce0e
[ "MIT" ]
4
2017-04-05T17:34:59.000Z
2018-01-22T01:40:51.000Z
import requests
import json
import time

from nose.tools import with_setup

import logging
log = logging.getLogger(__name__)
sh = logging.StreamHandler()
log.addHandler(sh)

base_url = 'http://localhost:8080/api'
adm_user = 'test@user.com'
test_data = type('', (object,), {})()


def setup_db():
    global session
    session = requests.Session()
    # all the requests will be performed as root
    session.params = {'user': adm_user, 'root': True}
    # Create a group
    test_data.group_id = 'test_group_' + str(int(time.time() * 1000))
    payload = {'_id': test_data.group_id}
    payload = json.dumps(payload)
    r = session.post(base_url + '/groups', data=payload)
    assert r.ok
    test_data.group_id_1 = 'test_group_' + str(int(time.time() * 1000))
    payload = {'_id': test_data.group_id_1}
    payload = json.dumps(payload)
    r = session.post(base_url + '/groups', data=payload)
    assert r.ok


def teardown_db():
    r = session.delete(base_url + '/groups/' + test_data.group_id)
    assert r.ok
    r = session.delete(base_url + '/groups/' + test_data.group_id_1)
    assert r.ok


@with_setup(setup_db, teardown_db)
def test_projects():
    payload = {'group': test_data.group_id, 'label': 'test_project', 'public': False}
    payload = json.dumps(payload)
    r = session.post(base_url + '/projects', data=payload)
    assert r.ok
    _id = json.loads(r.content)['_id']
    r = session.get(base_url + '/projects/' + _id)
    assert r.ok
    payload = {'group': test_data.group_id_1}
    payload = json.dumps(payload)
    r = session.put(base_url + '/projects/' + _id, data=payload)
    assert r.ok
    r = session.delete(base_url + '/projects/' + _id)
    assert r.ok


@with_setup(setup_db, teardown_db)
def test_sessions():
    payload = {'group': test_data.group_id, 'label': 'test_project', 'public': False}
    payload = json.dumps(payload)
    r = session.post(base_url + '/projects', data=payload)
    assert r.ok
    pid = json.loads(r.content)['_id']
    payload = {'project': pid, 'label': 'session_testing', 'public': False}
    payload = json.dumps(payload)
    r = session.post(base_url + '/sessions', data=payload)
    assert r.ok
    _id = json.loads(r.content)['_id']
    r = session.get(base_url + '/sessions/' + _id)
    assert r.ok
    payload = {'group': test_data.group_id, 'label': 'test_project_1', 'public': False}
    payload = json.dumps(payload)
    r = session.post(base_url + '/projects', data=payload)
    new_pid = json.loads(r.content)['_id']
    assert r.ok
    payload = {'project': new_pid}
    payload = json.dumps(payload)
    r = session.put(base_url + '/sessions/' + _id, data=payload)
    assert r.ok
    r = session.delete(base_url + '/sessions/' + _id)
    assert r.ok
    r = session.get(base_url + '/sessions/' + _id)
    assert r.status_code == 404
    r = session.delete(base_url + '/projects/' + pid)
    assert r.ok
    r = session.delete(base_url + '/projects/' + new_pid)
    assert r.ok


@with_setup(setup_db, teardown_db)
def test_acquisitions():
    payload = {'group': test_data.group_id, 'label': 'test_project', 'public': False}
    payload = json.dumps(payload)
    r = session.post(base_url + '/projects', data=payload)
    assert r.ok
    pid = json.loads(r.content)['_id']
    payload = {'project': pid, 'label': 'session_testing', 'public': False}
    payload = json.dumps(payload)
    r = session.post(base_url + '/sessions', data=payload)
    assert r.ok
    sid = json.loads(r.content)['_id']
    payload = {'project': pid, 'label': 'session_testing_1', 'public': False}
    payload = json.dumps(payload)
    r = session.post(base_url + '/sessions', data=payload)
    assert r.ok
    new_sid = json.loads(r.content)['_id']
    payload = {'session': sid, 'label': 'acq_testing', 'public': False}
    payload = json.dumps(payload)
    r = session.post(base_url + '/acquisitions', data=payload)
    assert r.ok
    aid = json.loads(r.content)['_id']
    r = session.get(base_url + '/acquisitions/' + aid)
    assert r.ok
    payload = {'session': new_sid}
    payload = json.dumps(payload)
    r = session.put(base_url + '/acquisitions/' + aid, data=payload)
    assert r.ok
    r = session.delete(base_url + '/acquisitions/' + aid)
    assert r.ok
    r = session.get(base_url + '/acquisitions/' + aid)
    assert r.status_code == 404
    r = session.delete(base_url + '/sessions/' + sid)
    assert r.ok
    r = session.delete(base_url + '/sessions/' + new_sid)
    assert r.ok
    r = session.delete(base_url + '/projects/' + pid)
    assert r.ok
27.817143
69
0.603533
635
4,868
4.440945
0.124409
0.071986
0.082979
0.106028
0.833333
0.807447
0.796809
0.775177
0.765603
0.660284
0
0.006561
0.248562
4,868
174
70
27.977011
0.764352
0.011709
0
0.603774
0
0
0.133735
0
0
0
0
0
0.176101
1
0.031447
false
0
0.031447
0
0.062893
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
a81218fbf87506be751616884825808a896f6524
39
py
Python
alphatools/fundamentals/__init__.py
marketneutral/alphatools
72b668381f21d77c0b52f920358df3d8008e909f
[ "Apache-2.0" ]
302
2018-08-29T01:59:03.000Z
2022-03-26T03:40:09.000Z
alphatools/fundamentals/__init__.py
webclinic017/alphatools
72b668381f21d77c0b52f920358df3d8008e909f
[ "Apache-2.0" ]
7
2018-08-29T15:07:13.000Z
2020-11-27T16:58:26.000Z
alphatools/fundamentals/__init__.py
webclinic017/alphatools
72b668381f21d77c0b52f920358df3d8008e909f
[ "Apache-2.0" ]
64
2019-04-24T13:09:03.000Z
2022-02-08T00:28:53.000Z
from .fundamentals import Fundamentals
19.5
38
0.871795
4
39
8.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.102564
39
1
39
39
0.971429
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
b5395bf708c3dac5ee02df02f567165ff9ec0f78
173
py
Python
SimplestSimulatedAnnleaning/__init__.py
PasaOpasen/SimplestSimulatedAnnealing
b07bad69c7b85f104df1928656abe91de218862a
[ "MIT" ]
1
2020-12-21T14:53:50.000Z
2020-12-21T14:53:50.000Z
SimplestSimulatedAnnleaning/__init__.py
PasaOpasen/SimplestSimulatedAnnealing
b07bad69c7b85f104df1928656abe91de218862a
[ "MIT" ]
null
null
null
SimplestSimulatedAnnleaning/__init__.py
PasaOpasen/SimplestSimulatedAnnealing
b07bad69c7b85f104df1928656abe91de218862a
[ "MIT" ]
null
null
null
from .cooling import Cooling
from .mut_examples import simple_continual_mutation, continual_mutation_with_temperature
from .simulated_annealing import SimulatedAnnealing
24.714286
88
0.884393
20
173
7.3
0.65
0.232877
0
0
0
0
0
0
0
0
0
0
0.092486
173
6
89
28.833333
0.929936
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
a935c7c3bd36a32f977ced10e84b798ab9c12916
38
py
Python
dbSetup.py
rithik/CampusMarketplace
ee1c93849e5d58ab9d07eca339b569ba875fb30a
[ "MIT" ]
1
2020-03-10T07:23:55.000Z
2020-03-10T07:23:55.000Z
dbSetup.py
rithik/CampusMarketplace
ee1c93849e5d58ab9d07eca339b569ba875fb30a
[ "MIT" ]
3
2020-06-05T17:41:19.000Z
2021-09-07T23:46:24.000Z
dbSetup.py
rithik/CampusMarketplace
ee1c93849e5d58ab9d07eca339b569ba875fb30a
[ "MIT" ]
3
2017-11-17T15:59:45.000Z
2021-08-09T18:25:25.000Z
from database import init_db

init_db()
19
28
0.842105
7
38
4.285714
0.714286
0.4
0
0
0
0
0
0
0
0
0
0
0.105263
38
2
29
19
0.882353
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
a93c93f87010a998a50722cdf480d66300b22361
61
py
Python
python/ql/src/Imports/from_import_fixed.py
vadi2/codeql
a806a4f08696d241ab295a286999251b56a6860c
[ "MIT" ]
4,036
2020-04-29T00:09:57.000Z
2022-03-31T14:16:38.000Z
python/ql/src/Imports/from_import_fixed.py
vadi2/codeql
a806a4f08696d241ab295a286999251b56a6860c
[ "MIT" ]
2,970
2020-04-28T17:24:18.000Z
2022-03-31T22:40:46.000Z
python/ql/src/Imports/from_import_fixed.py
ScriptBox99/github-codeql
2ecf0d3264db8fb4904b2056964da469372a235c
[ "MIT" ]
794
2020-04-29T00:28:25.000Z
2022-03-30T08:21:46.000Z
import sys


def main():
    sys.stdout.write("Hello World!")
12.2
36
0.655738
9
61
4.444444
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.180328
61
4
37
15.25
0.8
0
0
0
0
0
0.196721
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
a9587477159c6768fb043941ae7f81189f843f07
14,313
py
Python
paktrade.py
tanveerntu/paktextiles
0e43fcc08f53cac87d784c0325b4be7899bea4db
[ "MIT" ]
null
null
null
paktrade.py
tanveerntu/paktextiles
0e43fcc08f53cac87d784c0325b4be7899bea4db
[ "MIT" ]
null
null
null
paktrade.py
tanveerntu/paktextiles
0e43fcc08f53cac87d784c0325b4be7899bea4db
[ "MIT" ]
null
null
null
from plotly import graph_objs as go
import pandas as pd
import streamlit as st

########################
#######################
# Setting page configuration and title for SEO
st.set_page_config(
    page_title='Pakistan Trade Statistics',
    page_icon='✅',
    layout='wide'
)

#########################
#########################
# ---- HIDE STREAMLIT STYLE ----
hide_st_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)

########################################
########################################
########################################
# data
df = pd.read_csv('paktrade_pbs.csv')

##############
# fig = go.Figure()
# add subplot properties when initializing fig variable
from plotly.subplots import make_subplots
fig = make_subplots(rows=2, cols=1, shared_xaxes=True,
                    vertical_spacing=0.01, row_heights=[0.65, 0.35])

###############
# Add traces
fig.add_trace(go.Scatter(
    x=df["year"], y=df["export_US$B"], name="Exports",
    text=df['export_US$B'],
    texttemplate='%{text:.3s}',  # '%{text:.3s}' shortens the text to 3 significant digits
    mode="markers+lines", textposition="bottom right",
    textfont=dict(family="fjalla one, sans-serif", color="green", size=20),
    marker=dict(size=12, color="green"),
    line=dict(width=5, color="green")),
    row=1, col=1)

# Add traces
fig.add_trace(go.Scatter(
    x=df["year"], y=df["import_US$B"], name="Imports",
    text=df['import_US$B'],
    texttemplate='%{text:.3s}',  # '%{text:.3s}' shortens the text to 3 significant digits
    mode="markers+lines", textposition="bottom right",
    textfont=dict(family="fjalla one, sans-serif", color="red", size=20),
    marker=dict(size=12, color="red"),
    line=dict(width=5, color="red")),
    row=1, col=1)

# Plot the trade balance on the 2nd row
# val = df['balance_US$B']
# colors = ['green' if val >= 0
#           else 'red' for val in df['balance_US$B']]
fig.add_trace(go.Bar(
    x=df['year'], y=df['balance_US$B'], name='Trade Balance',
    # text=df['balance_US$B'],     # text on bars
    # textfont_size=24,            # text on bars
    # textfont_family='roboto',
    # texttemplate='%{text:.3s}',  # shortens the text to 3 significant digits
    marker_color='red',  # bar colors
), row=2, col=1)

###############
from PIL import Image
image = Image.open('logo.png')
# st.image(logo.png)
fig.add_layout_image(
    dict(
        source=image,
        xref="paper", yref="paper",
        x=1, y=-0.2,           # image position on chart
        sizex=0.1, sizey=0.1,  # image size on chart
        xanchor="right", yanchor="bottom"
    ))

# layout
fig.update_layout(
    autosize=False,
    height=650, width=1050,
    # legend_traceorder="reversed",
    margin=dict(t=60, b=120, l=40, r=40),
    plot_bgcolor='#ffffff',
    paper_bgcolor='#ffffff',
)

###############
# update axes
fig.update_xaxes(showline=True, linewidth=8, linecolor='black', row=1, col=1)
fig.update_yaxes(showline=True, linewidth=2, linecolor='black', row=1, col=1)
fig.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1)
fig.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(side='right', title='US$ Billion',
                 title_font=dict(family='Roboto', color='black', size=20), row=1, col=1)
fig.update_yaxes(side='right', title='Trade Balance',
                 title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99', row=1, col=1)
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99', row=2, col=1)

###############
# title
fig.add_annotation(
    text="Pakistan Exports and Imports",
    font=dict(family='Fjalla one', color='#006BA2', size=36),
    xref="x domain", yref="y domain",
    x=0, y=1.18, showarrow=False, arrowhead=1)

# subtitle
fig.add_annotation(
    text="1950-51 to 2020-21",
    font=dict(family='roboto', color='black', size=24),
    xref="x domain", yref="y domain",
    x=0, y=1.06, showarrow=False, arrowhead=1)

# data reference
fig.add_annotation(
    text="Source: Pakistan Bureau of Statistics",
    font=dict(family='Fjalla one', color='#758D99', size=20),
    xref="x domain", yref="y domain",
    x=0, y=-0.9, showarrow=False, arrowhead=1)

# Adding only the last date point value/text
fig.add_trace(go.Scatter(
    x=[df['year'].iloc[-1]], y=[df['export_US$B'].iloc[-1]],
    text=[df['export_US$B'].iloc[-1]], name='', mode='markers+text',
    marker=dict(color='green', size=14), textposition='top center',
    textfont=dict(family="fjalla one, sans-serif", color="green", size=20),
    texttemplate='$%{text:.3s}B',  # shortens the text to 3 significant digits
    showlegend=False))

# Adding only the last date point value/text
fig.add_trace(go.Scatter(
    x=[df['year'].iloc[-1]], y=[df['import_US$B'].iloc[-1]],
    text=[df['import_US$B'].iloc[-1]], name='', mode='markers+text',
    marker=dict(color='red', size=14), textposition='top center',
    textfont=dict(family="fjalla one, sans-serif", color="red", size=20),
    texttemplate='$%{text:.3s}B',  # shortens the text to 3 significant digits
    showlegend=False))

# Adding only the last date point value/text
fig.add_trace(go.Scatter(
    x=[df['year'].iloc[-1]], y=[df['balance_US$B'].iloc[-1]],
    text=[df['balance_US$B'].iloc[-1]], name='', mode='markers+text',
    marker=dict(color='red', size=14), textposition='bottom center',
    textfont=dict(family="fjalla one, sans-serif", color="red", size=20),
    texttemplate='$%{text:.3s}B',  # shortens the text to 3 significant digits
    showlegend=False), row=2, col=1)

# legend
fig.update_layout(legend=dict(
    orientation="h",
    font=dict(family='Roboto', color='#758D99', size=16),
    yanchor="bottom", y=1.02,
    xanchor="right", x=1))

######################
# show figure in streamlit web app
st.plotly_chart(fig, use_container_width=True)  # use_container_width=True makes the figure size responsive
# config={'responsive': True}

##############################
##############################
#######################################
########################################
########################################
# data
df1 = pd.read_csv('monthly_trade.csv')

# calculating year-to-date (YTD) values and adding new columns for them
df1['imports_ytd_21_22'] = df1['imports_21_22B'].cumsum()
df1['imports_ytd_20_21'] = df1['imports_20_21B'].cumsum()
df1['exports_ytd_20_21'] = df1['exports_20_21B'].cumsum()
df1['exports_ytd_21_22'] = df1['exports_21_22B'].cumsum()
df1['balance_ytd_20_21'] = df1['balance_20_21B'].cumsum()
df1['balance_ytd_21_22'] = df1['balance_21_22B'].cumsum()

##############
# fig = go.Figure()
# add subplot properties when initializing fig variable
from plotly.subplots import make_subplots
fig = make_subplots(rows=2, cols=1, shared_xaxes=True,
                    vertical_spacing=0.01, row_heights=[0.65, 0.35])

###############
# Add traces
fig.add_trace(go.Scatter(
    x=df1["month"], y=df1["imports_ytd_21_22"], name="Imports 21-22",
    text=df1['imports_ytd_21_22'],
    texttemplate='%{text:.3s}B',  # '%{text:.3s}' shortens the text to 3 significant digits
    mode="markers+lines+text", textposition="bottom right",
    textfont=dict(family="fjalla one, sans-serif", color="red", size=20),
    marker=dict(size=12, color="red"),
    line=dict(width=5, color="red")),
    row=1, col=1)

fig.add_trace(go.Scatter(
    x=df1["month"], y=df1["imports_ytd_20_21"], name="Imports 20-21",
    text=df1['imports_ytd_20_21'],
    texttemplate='%{text:.3s}B',
    mode="markers+lines+text", textposition="bottom right",
    textfont=dict(family="fjalla one, sans-serif", color="brown", size=20),
    marker=dict(size=12, color="brown"),
    line=dict(width=5, color="brown")),
    row=1, col=1)

fig.add_trace(go.Scatter(
    x=df1["month"], y=df1["exports_ytd_20_21"], name="Exports 20-21",
    text=df1['exports_ytd_20_21'],
    texttemplate='%{text:.3s}B',
    mode="markers+lines+text", textposition="bottom right",
    textfont=dict(family="fjalla one, sans-serif", color="lightgreen", size=20),
    marker=dict(size=12, color="lightgreen"),
    line=dict(width=5, color="lightgreen")),
    row=1, col=1)

fig.add_trace(go.Scatter(
    x=df1["month"], y=df1["exports_ytd_21_22"], name="Exports 21-22",
    text=df1['exports_ytd_21_22'],
    texttemplate='%{text:.3s}B',
    mode="markers+lines+text", textposition="bottom right",
    textfont=dict(family="fjalla one, sans-serif", color="green", size=20),
    marker=dict(size=12, color="green"),
    line=dict(width=5, color="green")),
    row=1, col=1)

# Plot the trade balances on the 2nd row
# val = df['balance_US$B']
# colors = ['green' if val >= 0
#           else 'red' for val in df['balance_US$B']]
fig.add_trace(go.Scatter(
    x=df1['month'], y=df1['balance_ytd_20_21'], name='Trade Balance 20-21',
    text=df1['balance_ytd_20_21'],
    texttemplate='%{text:.3s}B',
    mode="markers+lines+text", textposition="top right",
    textfont=dict(family="fjalla one, sans-serif", color="lightblue", size=20),
    marker=dict(size=12, color="lightblue"),
    line=dict(width=5, color="lightblue")),
    row=2, col=1)

fig.add_trace(go.Scatter(
    x=df1['month'], y=df1['balance_ytd_21_22'], name='Trade Balance 21-22',
    text=df1['balance_ytd_21_22'],
    texttemplate='%{text:.3s}B',
    mode="markers+lines+text", textposition="top right",
    textfont=dict(family="fjalla one, sans-serif", color="orange", size=20),
    marker=dict(size=12, color="orange"),
    line=dict(width=5, color="orange")),
    row=2, col=1)

###############
from PIL import Image
image = Image.open('logo.png')
# st.image(logo.png)
fig.add_layout_image(
    dict(
        source=image,
        xref="paper", yref="paper",
        x=1, y=-0.2,           # image position on chart
        sizex=0.1, sizey=0.1,  # image size on chart
        xanchor="right", yanchor="bottom"
    ))

# layout
fig.update_layout(
    autosize=False,
    height=650, width=1050,
    # legend_traceorder="reversed",
    margin=dict(t=80, b=100, l=40, r=40),
    plot_bgcolor='#ffffff',
    paper_bgcolor='#ffffff',
)

###############
# update axes
fig.update_xaxes(showline=True, linewidth=8, linecolor='black', row=1, col=1)
fig.update_yaxes(showline=True, linewidth=2, linecolor='black', row=1, col=1)
fig.update_yaxes(showline=True, linewidth=2, linecolor='black', row=2, col=1)
fig.update_xaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(tickangle=0, tickfont=dict(family='Roboto', color='black', size=24))
fig.update_yaxes(side='right', title='US$ Billion',
                 title_font=dict(family='Roboto', color='black', size=20), row=1, col=1)
fig.update_yaxes(side='right', title='Trade Balance',
                 title_font=dict(family='Roboto', color='black', size=20), row=2, col=1)
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99', row=1, col=1)
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#758D99', row=2, col=1)

###############
# title
fig.add_annotation(
    text="Pakistan Exports and Imports",
    font=dict(family='Fjalla one', color='#006BA2', size=36),
    xref="x domain", yref="y domain",
    x=0, y=1.21, showarrow=False, arrowhead=1)

# subtitle
fig.add_annotation(
    text="2020-21 vs. 2021-22 (cumulative figures till recent month)",
    font=dict(family='roboto', color='black', size=24),
    xref="x domain", yref="y domain",
    x=0, y=1.09, showarrow=False, arrowhead=1)

# data reference
fig.add_annotation(
    text="Source: Pakistan Bureau of Statistics",
    font=dict(family='Fjalla one', color='#758D99', size=20),
    xref="x domain", yref="y domain",
    x=0, y=-0.85, showarrow=False, arrowhead=1)

# legend
fig.update_layout(legend=dict(
    orientation="h",
    font=dict(family='Roboto', color='#758D99', size=16),
    yanchor="bottom", y=1.05,
    xanchor="right", x=1.07))

######################
# show figure in streamlit web app
st.plotly_chart(fig, use_container_width=True)  # use_container_width=True makes the figure size responsive

##############################
##############################
38.475806
126
0.550828
1,827
14,313
4.22277
0.134647
0.034997
0.014517
0.036941
0.861309
0.820091
0.8035
0.789501
0.789501
0.776021
0
0.048926
0.251729
14,313
371
127
38.579515
0.671335
0.130161
0
0.655039
0
0
0.22012
0
0
0
0
0
0
1
0
false
0
0.085271
0
0.085271
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
a96e5af6dc57b78ffc15f7336dc345de33aa2d22
173
py
Python
server/gateway/__init__.py
wazatoki/IotLogger
9baec9bff7762fbc0d279207fabf8902d9650a2d
[ "MIT" ]
null
null
null
server/gateway/__init__.py
wazatoki/IotLogger
9baec9bff7762fbc0d279207fabf8902d9650a2d
[ "MIT" ]
7
2021-03-11T00:57:20.000Z
2022-02-27T07:53:56.000Z
server/gateway/__init__.py
wazatoki/IotLogger
9baec9bff7762fbc0d279207fabf8902d9650a2d
[ "MIT" ]
null
null
null
from gateway import device_item, device, parsed, asynchronous, cyclic, index

# __all__ entries must be strings for "from gateway import *" to work
__all__ = [
    "device_item",
    "device",
    "parsed",
    "asynchronous",
    "cyclic",
    "index",
]
17.3
76
0.67052
18
173
6.111111
0.555556
0.181818
0.290909
0.4
0.818182
0.818182
0.818182
0
0
0
0
0
0.248555
173
10
77
17.3
0.846154
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.111111
0
0.111111
0
1
0
0
null
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
8d71bb3516fa105c148383390912c7a9ae755f5b
36
py
Python
app/dist_service/__init__.py
en-medina/RPI_REACTOR_CSTR_TOG
1771123c6cdac6e8be1c21508d921c35fd68db25
[ "MIT" ]
null
null
null
app/dist_service/__init__.py
en-medina/RPI_REACTOR_CSTR_TOG
1771123c6cdac6e8be1c21508d921c35fd68db25
[ "MIT" ]
null
null
null
app/dist_service/__init__.py
en-medina/RPI_REACTOR_CSTR_TOG
1771123c6cdac6e8be1c21508d921c35fd68db25
[ "MIT" ]
null
null
null
from .dist_service import init_dist
18
35
0.861111
6
36
4.833333
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.111111
36
1
36
36
0.90625
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
8d7c3d6caf69454c342ee698631166218b7b9489
27
py
Python
include-file/myscript.py
michael-kotliar/cwl-patterns
3ec5194f5b63d5dcb1fc3aa5bd89080fb1a6da2d
[ "Apache-2.0" ]
14
2020-05-13T07:47:28.000Z
2021-08-20T04:01:11.000Z
include-file/myscript.py
michael-kotliar/cwl-patterns
3ec5194f5b63d5dcb1fc3aa5bd89080fb1a6da2d
[ "Apache-2.0" ]
4
2020-08-04T15:45:07.000Z
2022-03-30T07:35:25.000Z
include-file/myscript.py
michael-kotliar/cwl-patterns
3ec5194f5b63d5dcb1fc3aa5bd89080fb1a6da2d
[ "Apache-2.0" ]
6
2020-08-20T02:47:35.000Z
2022-03-04T20:01:31.000Z
print("$(inputs.message)")
13.5
26
0.666667
3
27
6
1
0
0
0
0
0
0
0
0
0
0
0
0.037037
27
1
27
27
0.692308
0
0
0
0
0
0.62963
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
573836479151e7abb3f811b87e616f2e15985926
76
py
Python
tests/data/__init__.py
nstott/simpleflow
483602deb745a09b59ad6e24052dd5096c54fad2
[ "MIT" ]
69
2015-02-24T00:49:40.000Z
2022-02-05T02:35:04.000Z
tests/data/__init__.py
nstott/simpleflow
483602deb745a09b59ad6e24052dd5096c54fad2
[ "MIT" ]
295
2015-02-06T11:02:00.000Z
2022-03-21T11:01:34.000Z
tests/data/__init__.py
nstott/simpleflow
483602deb745a09b59ad6e24052dd5096c54fad2
[ "MIT" ]
27
2015-08-31T22:14:42.000Z
2022-02-08T07:25:01.000Z
from .activities import *
from .constants import *
from .workflows import *
19
25
0.763158
9
76
6.444444
0.555556
0.344828
0
0
0
0
0
0
0
0
0
0
0.157895
76
3
26
25.333333
0.90625
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
574ae44cc60be2fce5ac3d982b4d25b1a8c7fe27
128
py
Python
models/base_model.py
vlivashkin/modnet
2d43084804851cbc879d12deba0e3eab023044ee
[ "Apache-2.0" ]
28
2019-05-27T01:46:14.000Z
2022-02-14T13:51:06.000Z
models/base_model.py
vlivashkin/modnet
2d43084804851cbc879d12deba0e3eab023044ee
[ "Apache-2.0" ]
1
2019-08-06T08:49:17.000Z
2019-08-06T08:49:17.000Z
models/base_model.py
vlivashkin/modnet
2d43084804851cbc879d12deba0e3eab023044ee
[ "Apache-2.0" ]
8
2019-04-25T17:03:27.000Z
2021-05-07T16:52:25.000Z
import abc
from abc import ABC


class BaseModel(ABC):
    @abc.abstractmethod
    def fit_transform(self, graph):
        pass
14.222222
35
0.6875
17
128
5.117647
0.705882
0.206897
0
0
0
0
0
0
0
0
0
0
0.242188
128
8
36
16
0.896907
0
0
0
0
0
0
0
0
0
0
0
0
1
0.166667
false
0.166667
0.333333
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
6
9386cde69106b743a868c9336e9e28d0e77ea69e
20,478
py
Python
test_cephfs-cli.py
Hacky-DH/cephfs-cli
9e7673a65ffa3e7833dc4637f61aef6adb752625
[ "MIT" ]
null
null
null
test_cephfs-cli.py
Hacky-DH/cephfs-cli
9e7673a65ffa3e7833dc4637f61aef6adb752625
[ "MIT" ]
null
null
null
test_cephfs-cli.py
Hacky-DH/cephfs-cli
9e7673a65ffa3e7833dc4637f61aef6adb752625
[ "MIT" ]
null
null
null
import pytest
import os
import sys
from errno import *
import re
import json

cephfs_cli = pytest.importorskip("cephfs-cli")

addr = '172.28.218.70,172.28.160.165,172.28.217.100'
key = 'AQBNzwhc8ru/IBAAef0NABDSntpt5Q8TQp4AWw=='
user = 'test_cephfs_user'
root = '/pytestdir/test_cephfs_user'


@pytest.yield_fixture(scope='function')
def suit():
    try:
        os.remove(cephfs_cli.last_work_dir)
    except OSError:
        pass
    sys.argv = ["cephfs_cli_test"]
    yield


def test_zero(suit, capsys):
    with pytest.raises(SystemExit) as err:
        cephfs_cli.main()
    assert 2 == err.value.code
    _, err = capsys.readouterr()
    assert "error: too few arguments" in err, err


def test_version(suit, capsys):
    sys.argv.append("-v")
    assert 0 == cephfs_cli.main()
    out, _ = capsys.readouterr()
    assert "0.0.1.2018" in out, out


def test_config_zero(suit, capsys):
    sys.argv.extend(["-vv", "config"])
    assert 0 == cephfs_cli.main()
    out, err = capsys.readouterr()
    assert "current user info" in out
    assert not err


def test_config_not_exist_file(suit, capsys):
    sys.argv.extend(["-vv", "config", "-c", "/tmp/notexist/conf/ceph.conf"])
    with pytest.raises(SystemExit) as err:
        cephfs_cli.main()
    assert 2 == err.value.code
    out, err = capsys.readouterr()
    assert ("error: argument -c/--conf: can't open '/tmp/notexist/conf/ceph.conf': "
            "[Errno 2] No such file or directory: '/tmp/notexist/conf/ceph.conf'") in err, err


def test_config_invalid_file(suit, capfd, tmpdir):
    conf = tmpdir.mkdir("conf").join("ceph.conf")
    conf.write("ceph invalid conf")
    sys.argv.extend(["-vv", "config", "-c", str(conf)])
    assert EPERM == cephfs_cli.main()
    conf.remove()
    _, err = capfd.readouterr()
    assert "no monitors specified to connect to" in err, err
    assert "user login cephfs failed" in err, err


def test_config_valid_file_no_key(suit, capfd, tmpdir):
    conf = tmpdir.mkdir("conf").join("ceph.conf")
    conf.write("[global]\nmon host = {}\n".format(addr))
    sys.argv.extend(["-vv", "config", "-c", str(conf)])
    assert EPERM == cephfs_cli.main()
    conf.remove()
    out, err = capfd.readouterr()
    # assert "[ERROR] Unable to open cephfs : (95) Operation not supported" in out, out
    assert "user login cephfs failed" in err, err


def test_config_file_admin_with_key_file(suit, capfd, tmpdir):
    conf = tmpdir.mkdir("conf").join("ceph.conf")
    k = tmpdir.join("conf").join("key")
    k.write(key)
    conf.write("[global]\nmon host = {}\nkeyfile = {}".format(addr, str(k)))
    sys.argv.extend(["-vv", "config", "-c", str(conf)])
    assert 0 == cephfs_cli.main()
    conf.remove()
    k.remove()
    out, _ = capfd.readouterr()
    assert "connect cephfs admin:/ successfully" in out, out
    assert "admin:/ login cephfs successfully" in out, out


def test_config_file_user_with_key_file(suit, capfd, tmpdir):
    conf = tmpdir.mkdir("conf").join("ceph.conf")
    key = tmpdir.join("conf").join("key")
    key.write("AQCiNnVbMEXFChAAdbI90BUZMGlDCeVl9QvPNA==")
    conf.write("[global]\nmon host = 172.28.217.102,172.18.178.106\nkeyfile = " + str(key) + "\n")
    sys.argv.extend(["-vv", "config", "-c", str(conf), "-n", "test_cephfs_user"])
    assert 0 == cephfs_cli.main()
    conf.remove()
    key.remove()
    out, _ = capfd.readouterr()
    assert "connect cephfs test_cephfs_user:/test_cephfs_user successfully" in out, out
    assert "test_cephfs_user:/test_cephfs_user login cephfs successfully" in out, out


def test_config_file_user_with_key_file_with_root(suit, capfd, tmpdir):
    conf = tmpdir.mkdir("conf").join("ceph.conf")
    key = tmpdir.join("conf").join("key")
    key.write("AQCiNnVbMEXFChAAdbI90BUZMGlDCeVl9QvPNA==")
    conf.write("[global]\nmon host = 172.28.217.102,172.18.178.106\nkeyfile = " + str(key) + "\n")
    sys.argv.extend(["-vv", "-r", "/test_cephfs_user", "config", "-c", str(conf),
                     "-n", "test_cephfs_user"])
    assert 0 == cephfs_cli.main()
    out, _ = capfd.readouterr()
    assert "connect cephfs test_cephfs_user:/test_cephfs_user successfully" in out, out
    assert "test_cephfs_user:/test_cephfs_user login cephfs successfully" in out, out


def test_config_file_user_with_key_file_with_invalid_root(suit, capfd, tmpdir):
    conf = tmpdir.mkdir("conf").join("ceph.conf")
    key = tmpdir.join("conf").join("key")
    key.write("AQCiNnVbMEXFChAAdbI90BUZMGlDCeVl9QvPNA==")
    conf.write("[global]\nmon host = 172.28.217.102,172.18.178.106\nkeyfile = " + str(key) + "\n")
    sys.argv.extend(["-vv", "-r", "/invalid_root", "config", "-c", str(conf),
                     "-n", "test_cephfs_user"])
    assert EPERM == cephfs_cli.main()
    out, err = capfd.readouterr()
    conf.remove()
    key.remove()
    assert ("[ERROR] Unable to open cephfs /invalid_root: (1) "
            "Operation not permitted") in out, out
    assert "user login cephfs failed" in err, err


def test_config_args_user_a_no_key(suit, capfd):
    sys.argv.extend(["-vv", "-r", "/test_cephfs_user", "config", "-a",
                     "172.28.217.102,172.18.178.106", "-n", "test_cephfs_user"])
    assert EPERM == cephfs_cli.main()
    out, err = capfd.readouterr()
    assert ("[ERROR] Unable to open cephfs /test_cephfs_user: (95) "
            "Operation not supported") in out, out
    assert "user login cephfs failed" in err, err


def test_config_args_user_a_with_key(suit, capfd, tmpdir):
    key = tmpdir.mkdir("conf").join("key")
    key.write("AQCiNnVbMEXFChAAdbI90BUZMGlDCeVl9QvPNA==")
    sys.argv.extend(["-vv", "config", "-a", "172.28.217.102,172.18.178.106", "-n",
                     "test_cephfs_user", "-k", str(key)])
    assert 0 == cephfs_cli.main()
    out, _ = capfd.readouterr()
    key.remove()
    assert "connect cephfs test_cephfs_user:/test_cephfs_user successfully" in out, out
    assert "test_cephfs_user:/test_cephfs_user login cephfs successfully" in out, out


def test_config_args_user_a_with_invalid_key(suit, capfd, tmpdir):
    key = tmpdir.mkdir("conf").join("key")
    key.write("invalid key")
    sys.argv.extend(["-vv", "config", "-a", "172.28.217.102,172.18.178.106", "-n",
                     "test_cephfs_user", "-k", str(key)])
    assert EPERM == cephfs_cli.main()
    out, err = capfd.readouterr()
    key.remove()
    assert ("[ERROR] Unable to open cephfs /test_cephfs_user: (22) "
            "Invalid argument") in out, out
    assert "user login cephfs failed" in err, err


def test_config_args_user_a_with_invalid_root(suit, capfd, tmpdir):
    key = tmpdir.mkdir("conf").join("key")
    key.write("AQCiNnVbMEXFChAAdbI90BUZMGlDCeVl9QvPNA==")
    sys.argv.extend(["-vv", "-r", "/invalid_root", "config", "-a",
                     "172.28.217.102,172.18.178.106", "-n",
                     "test_cephfs_user", "-k", str(key)])
    assert EPERM == cephfs_cli.main()
    out, err = capfd.readouterr()
    assert ("[ERROR] Unable to open cephfs /invalid_root: (1) "
            "Operation not permitted") in out, out
    assert "user login cephfs failed" in err, err


def test_config_args_user_a_with_group(suit, capfd, tmpdir):
    key = tmpdir.mkdir("conf").join("key")
    key.write("AQCiNnVbMEXFChAAdbI90BUZMGlDCeVl9QvPNA==")
    sys.argv.extend(["-vv", "-r", "/test_group", "config", "-a",
                     "172.28.217.102,172.18.178.106", "-n",
                     "test_cephfs_user", "-k", str(key)])
    assert 0 == cephfs_cli.main()
    key.remove()
    out, _ = capfd.readouterr()
    assert "connect cephfs test_cephfs_user:/test_group successfully" in out, out
    assert "test_cephfs_user:/test_group login cephfs successfully" in out, out


def test_user_info_file_config(suit, capfd, tmpdir):
    key = tmpdir.mkdir("conf").join("key")
    info = tmpdir.join("conf").join("user.info")
    key.write("AQCiNnVbMEXFChAAdbI90BUZMGlDCeVl9QvPNA==")
    sys.argv.extend(["-vv", "-i", str(info), "config", "-a",
                     "172.28.217.102,172.18.178.106", "-n",
                     "test_cephfs_user", "-k", str(key)])
    assert 0 == cephfs_cli.main()
    out, _ = capfd.readouterr()
    assert "connect cephfs test_cephfs_user:/test_cephfs_user successfully" in out, out
    assert "test_cephfs_user:/test_cephfs_user login cephfs successfully" in out, out
    assert os.path.exists(str(info))
    # load user info
    sys.argv = ["cephfs_cli_test", "-vv", "-i", str(info), "config"]
    assert 0 == cephfs_cli.main()
    out, _ = capfd.readouterr()
    assert "Root path: /test_cephfs_user" in out, out
    key.remove()
    info.remove()


info = "/tmp/test_user.info"
test_dir = "/pytest_dir/"
test_file = "/pytest_file"


@pytest.fixture(scope='function')
def config(suit, tmpdir):
    with open(info, 'w') as f:
        data = {"cephconf": None,
                "root": "/test_cephfs_user",
                "name": "test_cephfs_user",
                "key": "AQCiNnVbMEXFChAAdbI90BUZMGlDCeVl9QvPNA==",
                "cephaddr": "172.28.217.102,172.18.178.106"}
        json.dump(data, f)
    assert os.path.exists(info)
    sys.argv = ["cephfs_cli_test"]


def test_upload_zero(suit, capsys):
    sys.argv.extend(["-vv", "upload"])
    with pytest.raises(SystemExit) as err:
        cephfs_cli.main()
    assert 2 == err.value.code
    _, err = capsys.readouterr()
    assert "error: too few arguments" in err, err


def test_upload_one_arg(suit, capsys):
    sys.argv.extend(["-vv", "-i", info, "upload", "src"])
    with pytest.raises(SystemExit) as err:
        cephfs_cli.main()
    assert 2 == err.value.code
    _, err = capsys.readouterr()
    assert "error: too few arguments" in err, err


def test_upload_one_not_exist_file(suit, capfd):
    sys.argv.extend(["-vv", "-i", info, "upload", "noexistfile", test_file])
    assert 0 == cephfs_cli.main()
    _, err = capfd.readouterr()
    assert "upload local path [noexistfile] No such file or directory" in err, err


def remove(f, capfd):
    sys.argv = ["cephfs_cli_test", "-i", info, "remove", f]
    assert 0 == cephfs_cli.main()
    capfd.readouterr()


def test_upload_file_to_file(config, capfd, tmpdir):
    src = tmpdir.join("src_file")
    src.write("hello string from pytest")
    sys.argv.extend(["-vv", "-i", info, "upload", str(src), test_file])
    assert 0 == cephfs_cli.main()
    src.remove()
    out, err = capfd.readouterr()
    assert re.search(r"upload local path \[.*src_file\] "
                     r"to cephfs path \[/pytest_file\] successfully", out), out
    assert len(err) == 0
    remove(test_file, capfd)


def test_upload_file_to_dir(config, capfd, tmpdir):
    src = tmpdir.join("src_file")
    src.write("hello string from pytest")
    sys.argv.extend(["-vv", "-i", info, "upload", str(src), test_dir])
    assert 0 == cephfs_cli.main()
    src.remove()
    out, err = capfd.readouterr()
    assert re.search(r"upload local path \[.*src_file\] "
                     r"to cephfs path \[/pytest_dir/src_file\] successfully", out), out
    assert len(err) == 0
    remove(test_dir, capfd)


def test_upload_dir_to_dir(config, capfd, tmpdir):
    src = tmpdir.mkdir("folder").join("src_file")
    src.write("hello string from pytest")
    sys.argv.extend(["-vv", "-i", info, "upload", src.dirname, test_dir])
    assert 0 == cephfs_cli.main()
    src.remove()
    out, err = capfd.readouterr()
    assert re.search(r"upload local path \[.*folder\] "
                     r"to cephfs path \[/pytest_dir/\] successfully", out), out
    assert len(err) == 0
    remove(test_dir, capfd)


def test_upload_dir_to_file(config, capfd, tmpdir):
    src = tmpdir.mkdir("folder").join("src_file")
    src.write("hello string from pytest")
    # upload a file
    sys.argv = ["cephfs_cli_test", "-i", info, "upload", str(src), test_file]
    assert 0 == cephfs_cli.main()
    capfd.readouterr()
    # upload dir to an existing file
    sys.argv = ["cephfs_cli_test", "-i", info, "upload", src.dirname, test_file]
    assert EPERM == cephfs_cli.main()
    src.remove()
    _, err = capfd.readouterr()
    assert "to cephfs exist file [/pytest_file] is not allowed" in err, err
    remove(test_file, capfd)


def test_upload_multi_file(config, capfd, tmpdir):
    src = tmpdir.join("src_file")
    src.write("hello string from pytest")
    src2 = tmpdir.join("src_file2")
    src2.write("hello string from pytest again")
    sys.argv.extend(["-vv", "-i", info, "upload", str(src), str(src2), test_dir])
    assert 0 == cephfs_cli.main()
    src.remove()
    src2.remove()
    out, err = capfd.readouterr()
    assert re.search(r"upload local path \[.*src_file\] "
                     r"to cephfs path \[/pytest_dir/src_file\] successfully", out), out
    assert re.search(r"upload local path \[.*src_file2\] "
                     r"to cephfs path \[/pytest_dir/src_file\] successfully", out), out
    assert len(err) == 0
    remove(test_dir, capfd)


def test_upload_one_file_with_invalid_group(config, capfd, tmpdir):
    src = tmpdir.join("src_file")
    src.write("hello string from pytest")
    sys.argv.extend(["-vv", "-i", info, "-r", "test_invalid", "upload", str(src), test_file])
    assert EPERM == cephfs_cli.main()
    src.remove()
    out, err = capfd.readouterr()
    assert ("[ERROR] Unable to open cephfs /test_invalid: (1) "
            "Operation not permitted") in out
    assert "user login cephfs failed" in err


def test_upload_one_file_with_group(config, capfd, tmpdir):
    src = tmpdir.join("src_file")
    src.write("hello string from pytest")
    sys.argv.extend(["-vv", "-i", info, "-r", "test_group", "upload", str(src), test_file])
    assert 0 == cephfs_cli.main()
    src.remove()
    out, err = capfd.readouterr()
    assert re.search(r"upload local path \[.*src_file\] "
                     r"to cephfs path \[/pytest_file\] successfully", out), out
    assert len(err) == 0
    remove(test_file, capfd)


def test_download_zero(suit, capsys):
    sys.argv.extend(["-vv", "download"])
    with pytest.raises(SystemExit) as err:
        cephfs_cli.main()
    assert 2 == err.value.code
    _, err = capsys.readouterr()
    assert "error: too few arguments" in err, err


def test_download_one_arg(suit, capsys):
    sys.argv.extend(["-vv", "download", "abc"])
    with pytest.raises(SystemExit) as err:
        cephfs_cli.main()
    assert 2 == err.value.code
    _, err = capsys.readouterr()
    assert "error: too few arguments" in err, err


def test_download_not_exist_file(config, capfd):
    sys.argv.extend(["-i", info, "download", "abc", "local_file"])
    assert ENOENT == cephfs_cli.main()
    _, err = capfd.readouterr()
    assert "download path [abc] No such file or directory" in err, err


def upload(capfd, tmpdir, dst=test_dir):
    # upload a file
    src = tmpdir.join("src_file")
    src.write("hello string from pytest")
    sys.argv = ["cephfs_cli_test", "-i", info, "upload", str(src), dst]
    assert 0 == cephfs_cli.main()
    capfd.readouterr()


def test_download_file_to_file(config, capfd, tmpdir):
    upload(capfd, tmpdir)
    sys.argv = ["cephfs_cli_test", "-i", info, "download", test_dir + "src_file", "local_file"]
    assert 0 == cephfs_cli.main()
    out, err = capfd.readouterr()
    assert ("download to local path [local_file] from cephfs path "
            "[/pytest_dir/src_file] successfully") in out, out
    assert len(err) == 0


def test_download_file_to_dir(config, capfd, tmpdir):
    upload(capfd, tmpdir)
    os.mkdir("local_dir")
    sys.argv = ["cephfs_cli_test", "-i", info, "download", test_dir + "src_file", "local_dir"]
    assert 0 == cephfs_cli.main()
    out, err = capfd.readouterr()
    assert ("download to local path [local_dir/src_file] from cephfs path "
            "[/pytest_dir/src_file] successfully") in out, out
    assert len(err) == 0
    import shutil
    shutil.rmtree("local_dir")


def test_download_file_to_dir2(config, capfd, tmpdir):
    upload(capfd, tmpdir)
    sys.argv = ["cephfs_cli_test", "-i", info, "download", test_dir + "src_file",
                "local_dir/dir2/"]
    assert 0 == cephfs_cli.main()
    out, err = capfd.readouterr()
    assert ("download to local path [local_dir/dir2/src_file] from cephfs path "
            "[/pytest_dir/src_file] successfully") in out, out
    assert len(err) == 0
    import shutil
    shutil.rmtree("local_dir")


def test_download_dir_to_file(config, capfd, tmpdir):
    upload(capfd, tmpdir)
    sys.argv = ["cephfs_cli_test", "-i", info, "download", test_dir, "local_file"]
    assert EPERM == cephfs_cli.main()
    _, err = capfd.readouterr()
    assert "download directory [/pytest_dir/] is not supported" in err, err


def test_download_dir_to_dir(config, capfd, tmpdir):
    upload(capfd, tmpdir)
    sys.argv = ["cephfs_cli_test", "-i", info, "download", test_dir, "local_dir/dir2/"]
    assert EPERM == cephfs_cli.main()
    _, err = capfd.readouterr()
    assert "download directory [/pytest_dir/] is not supported" in err, err
    import shutil
    shutil.rmtree("local_dir")


def test_remove_zero(suit, capsys):
    sys.argv.extend(["-vv", "remove"])
    with pytest.raises(SystemExit) as err:
        cephfs_cli.main()
    assert 2 == err.value.code
    _, err = capsys.readouterr()
    assert "error: too few arguments" in err, err


def test_remove_multi(config, capfd, tmpdir):
    upload(capfd, tmpdir, "/test_file1")
    upload(capfd, tmpdir, "/test_file2")
    sys.argv = ["cephfs_cli_test", "-i", info, "remove", "/test_file1", "/test_file2"]
    assert 0 == cephfs_cli.main()
    out, err = capfd.readouterr()
    assert "remove cephfs path [/test_file1] successfully" in out, out
    assert "remove cephfs path [/test_file2] successfully" in out, out
    assert len(err) == 0


def test_pwd(config, capfd):
    sys.argv.extend(["-i", info, "pwd"])
    assert 0 == cephfs_cli.main()
    out, _ = capfd.readouterr()
    assert "/" in out, out


def test_mkdir_zero(config, capsys):
    sys.argv.extend(["-i", info, "mkdir"])
    with pytest.raises(SystemExit) as err:
        cephfs_cli.main()
    assert 2 == err.value.code
    _, err = capsys.readouterr()
    assert "error: too few arguments" in err, err


def test_mkdir_one(config, capfd):
    sys.argv.extend(["-i", info, "mkdir", test_dir])
    assert 0 == cephfs_cli.main()
    out, _ = capfd.readouterr()
    assert "mkdir path [/pytest_dir/] successfully" in out
    # remove dir
    sys.argv = ["cephfs_cli_test", "-i", info, "remove", test_dir]
    assert 0 == cephfs_cli.main()
    out, _ = capfd.readouterr()
    assert "remove cephfs path [/pytest_dir/] successfully" in out, out


def test_mkdir_multi(config, capfd):
    sys.argv.extend(["-i", info, "mkdir", test_dir, "/other_dir"])
    assert 0 == cephfs_cli.main()
    out, _ = capfd.readouterr()
    assert "mkdir path [/pytest_dir/] successfully" in out
    assert "mkdir path [/other_dir/] successfully" in out
    # remove dirs
    sys.argv = ["cephfs_cli_test", "-i", info, "remove", test_dir, "/other_dir"]
    assert 0 == cephfs_cli.main()
    out, _ = capfd.readouterr()
    assert "remove cephfs path [/pytest_dir/] successfully" in out, out
    assert "remove cephfs path [/other_dir] successfully" in out, out


def test_chdir_zero(suit, capsys):
    sys.argv = ["cephfs_cli_test", "-i", info, "cd"]
    with pytest.raises(SystemExit) as err:
        cephfs_cli.main()
    assert 2 == err.value.code
    _, err = capsys.readouterr()
    assert "error: too few arguments" in err, err


def test_chdir(config, capfd):
    sys.argv.extend(["-i", info, "mkdir", test_dir])
    assert 0 == cephfs_cli.main()
    capfd.readouterr()
    sys.argv = ["cephfs_cli_test", "-i", info, "pwd"]
    assert 0 == cephfs_cli.main()
    capfd.readouterr()
    sys.argv = ["cephfs_cli_test", "-i", info, "cd", test_dir]
    assert 0 == cephfs_cli.main()
    out, _ = capfd.readouterr()
    assert "chdir path [/pytest_dir/] successfully" in out, out
    sys.argv = ["cephfs_cli_test", "-i", info, "pwd"]
    assert 0 == cephfs_cli.main()
    out, _ = capfd.readouterr()
    assert test_dir[0:-1] in out, out
    sys.argv = ["cephfs_cli_test", "-i", info, "remove", test_dir]
    assert 0 == cephfs_cli.main()
    capfd.readouterr()


def test_listdir_zero(config, capfd):
    sys.argv = ["cephfs_cli_test", "-i", info, "ls"]
    assert 0 == cephfs_cli.main()
    capfd.readouterr()


def test_listdir_empty(config, capfd):
    sys.argv = ["cephfs_cli_test", "-i", info, "mkdir", test_dir]
    assert 0 == cephfs_cli.main()
    capfd.readouterr()
    sys.argv = ["cephfs_cli_test", "-i", info, "ls", test_dir]
    assert 0 == cephfs_cli.main()
    out, err = capfd.readouterr()
    assert "empty directory" in out
    assert len(err) == 0
    remove(test_dir, capfd)


def test_listdir(config, capfd, tmpdir):
    upload(capfd, tmpdir)
    sys.argv = ["cephfs_cli_test", "-i", info, "ls", test_dir]
    assert 0 == cephfs_cli.main()
    out, err = capfd.readouterr()
    assert "src_file" in out
    assert len(err) == 0
    remove(test_dir, capfd)
38.492481
89
0.659977
2,925
20,478
4.451624
0.059145
0.057369
0.05591
0.044236
0.878043
0.84356
0.811919
0.77183
0.731127
0.678519
0
0.02212
0.178777
20,478
531
90
38.564972
0.752156
0.008302
0
0.622318
0
0.012876
0.292104
0.065366
0
0
0
0
0.287554
1
0.10515
false
0.002146
0.021459
0
0.126609
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
938c25d2e878788ef60edba4b13077f48b76427f
34
py
Python
mg_hello.py
calleengman/github-playground
f2095c78ffec6eab4332663583965b8f8bce89aa
[ "MIT" ]
50
2017-01-12T03:15:02.000Z
2021-08-31T20:26:03.000Z
mg_hello.py
calleengman/github-playground
f2095c78ffec6eab4332663583965b8f8bce89aa
[ "MIT" ]
20
2017-01-30T11:50:30.000Z
2021-07-23T07:49:33.000Z
mg_hello.py
calleengman/github-playground
f2095c78ffec6eab4332663583965b8f8bce89aa
[ "MIT" ]
124
2017-01-13T00:17:37.000Z
2022-03-26T19:16:11.000Z
print("hello")
print("hello again")
11.333333
19
0.735294
5
34
5
0.6
0.8
0
0
0
0
0
0
0
0
0
0
0.147059
34
2
20
17
0.862069
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
1
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
6
93a6ef0feeda9e2040845c4b8ee8a8fe9583504f
169
py
Python
clisops/__init__.py
cehbrecht/clisops
7d80bcbc21ee8a6248f88bb590a1fed33a060bfd
[ "BSD-3-Clause" ]
null
null
null
clisops/__init__.py
cehbrecht/clisops
7d80bcbc21ee8a6248f88bb590a1fed33a060bfd
[ "BSD-3-Clause" ]
null
null
null
clisops/__init__.py
cehbrecht/clisops
7d80bcbc21ee8a6248f88bb590a1fed33a060bfd
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
"""Top-level package for clisops."""

from .__version__ import __author__
from .__version__ import __email__
from .__version__ import __version__
28.166667
36
0.757396
20
169
5.2
0.65
0.317308
0.490385
0
0
0
0
0
0
0
0
0.006757
0.12426
169
5
37
33.8
0.695946
0.313609
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
93b40c42782e2f1eba83c3a3042a052b26a31b39
26
py
Python
docker/farmapi/ansible_playbook/templates/config.py
biothings/biothings-farm
76bb0077ce3b2618bbab7ce41d2bdeceadb88a78
[ "Apache-2.0" ]
null
null
null
docker/farmapi/ansible_playbook/templates/config.py
biothings/biothings-farm
76bb0077ce3b2618bbab7ce41d2bdeceadb88a78
[ "Apache-2.0" ]
3
2019-10-23T18:26:32.000Z
2019-10-25T15:07:36.000Z
docker/farmapi/ansible_playbook/templates/config.py
biothings/biothings-farm
76bb0077ce3b2618bbab7ce41d2bdeceadb88a78
[ "Apache-2.0" ]
null
null
null
from config_web import *
8.666667
24
0.769231
4
26
4.75
1
0
0
0
0
0
0
0
0
0
0
0
0.192308
26
2
25
13
0.904762
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
93d0741798b665cd637c509b97668992eef33ece
35
py
Python
batch_jobs/bucket_replicate/__init__.py
uc-cdis/aws-batch-jobs
ce765a67a22f1646d849bd674dc0bce5e9cfcb8b
[ "Apache-2.0" ]
null
null
null
batch_jobs/bucket_replicate/__init__.py
uc-cdis/aws-batch-jobs
ce765a67a22f1646d849bd674dc0bce5e9cfcb8b
[ "Apache-2.0" ]
1
2020-05-12T16:56:43.000Z
2020-05-12T16:56:43.000Z
batch_jobs/bucket_replicate/__init__.py
uc-cdis/aws-batch-jobs
ce765a67a22f1646d849bd674dc0bce5e9cfcb8b
[ "Apache-2.0" ]
1
2021-02-19T17:05:49.000Z
2021-02-19T17:05:49.000Z
from . import bucket_replicate_job
17.5
34
0.857143
5
35
5.6
1
0
0
0
0
0
0
0
0
0
0
0
0.114286
35
1
35
35
0.903226
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f511e5a9c2f23d4142a6ced3ff2b9d75e602b2b5
296
py
Python
src/symbol_table/__init__.py
AAU-PSix/canary
93b07d23cd9380adc03a6aa1291a13eaa3b3008c
[ "MIT" ]
null
null
null
src/symbol_table/__init__.py
AAU-PSix/canary
93b07d23cd9380adc03a6aa1291a13eaa3b3008c
[ "MIT" ]
null
null
null
src/symbol_table/__init__.py
AAU-PSix/canary
93b07d23cd9380adc03a6aa1291a13eaa3b3008c
[ "MIT" ]
null
null
null
from .types import *
from .c_types import *
from .declaration import *
from .lexical_declaration import *
from .lexical_symbol_table import *
from .lexical_declaration import *
from .node import *
from .symbol_table_filler import *
from .lexical_symbol_table_builder import *
from .tree import *
26.909091
43
0.797297
40
296
5.65
0.3
0.39823
0.300885
0.247788
0.539823
0.336283
0
0
0
0
0
0
0.135135
296
10
44
29.6
0.882813
0
0
0.2
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f53d4e4d6998f34db73ad6c94211157a827b0da4
82
py
Python
simuvex/simuvex/storage/file.py
Ruide/angr-dev
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
[ "BSD-2-Clause" ]
86
2015-08-06T23:25:07.000Z
2022-02-17T14:58:22.000Z
simuvex/simuvex/storage/file.py
Ruide/angr-dev
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
[ "BSD-2-Clause" ]
132
2015-09-10T19:06:59.000Z
2018-10-04T20:36:45.000Z
simuvex/simuvex/storage/file.py
Ruide/angr-dev
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
[ "BSD-2-Clause" ]
80
2015-08-07T10:30:20.000Z
2020-03-21T14:45:28.000Z
print '... Importing simuvex/storage/file.py ...'
from angr.storage.file import *
27.333333
49
0.719512
11
82
5.363636
0.818182
0.372881
0
0
0
0
0
0
0
0
0
0
0.109756
82
2
50
41
0.808219
0
0
0
0
0
0.5
0.280488
0
0
0
0
0
0
null
null
0
1
null
null
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
1
0
6
f584452f98c757df73146db181504992709e6cbe
113
py
Python
platypush/message/response/camera/__init__.py
RichardChiang/platypush
1777ebb0516118cdef20046a92caab496fa7c6cb
[ "MIT" ]
228
2018-01-30T11:17:09.000Z
2022-03-24T11:22:26.000Z
platypush/message/response/camera/__init__.py
RichardChiang/platypush
1777ebb0516118cdef20046a92caab496fa7c6cb
[ "MIT" ]
167
2017-12-11T19:35:38.000Z
2022-03-27T14:45:30.000Z
platypush/message/response/camera/__init__.py
BlackLight/runbullet
8d26c8634d2677b4402f0a21b9ab8244b44640db
[ "MIT" ]
16
2018-05-03T07:31:56.000Z
2021-12-05T19:27:37.000Z
from platypush.message.response import Response


class CameraResponse(Response):
    pass


# vim:sw=4:ts=4:et:
12.555556
47
0.743363
16
113
5.25
0.8125
0
0
0
0
0
0
0
0
0
0
0.020833
0.150442
113
8
48
14.125
0.854167
0.150442
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
f58fcc363adb67fee9df5554f7dee3401b7bf1ee
322
py
Python
utils/__init__.py
Walter-Feng/myModule
f8cf065d52153ef3d386d10be1771e80cf5af4e5
[ "MIT" ]
null
null
null
utils/__init__.py
Walter-Feng/myModule
f8cf065d52153ef3d386d10be1771e80cf5af4e5
[ "MIT" ]
null
null
null
utils/__init__.py
Walter-Feng/myModule
f8cf065d52153ef3d386d10be1771e80cf5af4e5
[ "MIT" ]
null
null
null
import numpy as np

flatten = lambda l: [item for sublist in l for item in sublist]
transpose = lambda l: list(map(list, zip(*l)))


def pick_indexed_element(target_list, index):
    return [i[index] for i in target_list]


def np_sort_by_column(target_list, index):
    return target_list[target_list[:, index].argsort()]
23
63
0.736025
54
322
4.203704
0.5
0.220264
0.198238
0.185022
0
0
0
0
0
0
0
0
0.15528
322
13
64
24.769231
0.834559
0
0
0
0
0
0
0
0
0
0
0
0
1
0.285714
false
0
0.142857
0.285714
0.571429
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
193ded94fd61e3778499ff0ad96b8b1a158e81b5
102
py
Python
dobby-pi/screens/manager.py
brebory/dobby-pi
ae97bab652cc571c7a6071ef6eb01f88bb6bc9df
[ "MIT" ]
null
null
null
dobby-pi/screens/manager.py
brebory/dobby-pi
ae97bab652cc571c7a6071ef6eb01f88bb6bc9df
[ "MIT" ]
null
null
null
dobby-pi/screens/manager.py
brebory/dobby-pi
ae97bab652cc571c7a6071ef6eb01f88bb6bc9df
[ "MIT" ]
null
null
null
from kivy.uix.screenmanager import ScreenManager


class DobbyScreenManager(ScreenManager):
    pass
25.5
49
0.813725
10
102
8.3
0.8
0
0
0
0
0
0
0
0
0
0
0
0.137255
102
4
50
25.5
0.943182
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
1974508e5fa754fbeeb043c8d4543f165221c439
96
py
Python
venv/lib/python3.8/site-packages/distlib/resources.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
2
2022-03-13T01:58:52.000Z
2022-03-31T06:07:54.000Z
venv/lib/python3.8/site-packages/distlib/resources.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
19
2021-11-20T04:09:18.000Z
2022-03-23T15:05:55.000Z
venv/lib/python3.8/site-packages/distlib/resources.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
null
null
null
/home/runner/.cache/pip/pool/2f/06/cf/92c73403524c6e2e979ee3dd301527f375fb04fb85356a8f184288ebdf
96
96
0.895833
9
96
9.555556
1
0
0
0
0
0
0
0
0
0
0
0.4375
0
96
1
96
96
0.458333
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
6
5fed41a9355d2cac88a5ee271224e627658f8140
40
py
Python
GmailWrapper_JE/je_gmail/core/__init__.py
JE-Chen/je_old_repo
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
[ "MIT" ]
2
2020-12-30T06:37:10.000Z
2020-12-30T07:27:45.000Z
GmailWrapper_JE/je_gmail/core/__init__.py
JE-Chen/je_old_repo
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
[ "MIT" ]
null
null
null
GmailWrapper_JE/je_gmail/core/__init__.py
JE-Chen/je_old_repo
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
[ "MIT" ]
null
null
null
from je_gmail.core.gmail_core import *
20
39
0.8
7
40
4.285714
0.714286
0.6
0
0
0
0
0
0
0
0
0
0
0.125
40
1
40
40
0.857143
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
270a1974827c141bf19510c84d423333224f9baa
132
py
Python
samples/led-trigger.py
sen-den/stem-python-ev3dev2
2e93f943ab7b86250823057385a6ea759e47a743
[ "MIT" ]
1
2021-05-16T14:43:43.000Z
2021-05-16T14:43:43.000Z
samples/led-trigger.py
sen-den/stem-python-ev3dev2
2e93f943ab7b86250823057385a6ea759e47a743
[ "MIT" ]
null
null
null
samples/led-trigger.py
sen-den/stem-python-ev3dev2
2e93f943ab7b86250823057385a6ea759e47a743
[ "MIT" ]
1
2019-11-30T10:32:51.000Z
2019-11-30T10:32:51.000Z
from ev3dev.ev3 import *

Leds.set(Leds.LEFT, brightness_pct=0.5, trigger='timer')
Leds.set(Leds.LEFT, delay_on=3000, delay_off=500)
33
56
0.765152
24
132
4.083333
0.75
0.142857
0.22449
0.306122
0
0
0
0
0
0
0
0.090164
0.075758
132
3
57
44
0.713115
0
0
0
0
0
0.037879
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
272c562e37fb3326681ae9890fb273154231fd09
62
py
Python
app/bz150_script.py
estellespanneut/example-open-source-repo-2021
413735c3aac710999229970736553c393b23a49f
[ "MIT" ]
null
null
null
app/bz150_script.py
estellespanneut/example-open-source-repo-2021
413735c3aac710999229970736553c393b23a49f
[ "MIT" ]
1
2021-06-15T23:05:23.000Z
2021-06-15T23:05:23.000Z
app/bz150_script.py
estellespanneut/example-open-source-repo-2021
413735c3aac710999229970736553c393b23a49f
[ "MIT" ]
78
2021-03-15T21:54:31.000Z
2021-07-28T05:41:32.000Z
print("hello") print("hello again") print("hello again again")
20.666667
26
0.725806
9
62
5
0.333333
0.666667
0.666667
0
0
0
0
0
0
0
0
0
0.080645
62
3
26
20.666667
0.789474
0
0
0
0
0
0.52381
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
2736601d66cacf0bb464687e8faa3d901e0c415d
75
py
Python
CodeWars/7 Kyu/Area of an annulus.py
anubhab-code/Competitive-Programming
de28cb7d44044b9e7d8bdb475da61e37c018ac35
[ "MIT" ]
null
null
null
CodeWars/7 Kyu/Area of an annulus.py
anubhab-code/Competitive-Programming
de28cb7d44044b9e7d8bdb475da61e37c018ac35
[ "MIT" ]
null
null
null
CodeWars/7 Kyu/Area of an annulus.py
anubhab-code/Competitive-Programming
de28cb7d44044b9e7d8bdb475da61e37c018ac35
[ "MIT" ]
null
null
null
from math import pi


def annulus_area(r):
    return round(r * r / 4 * pi, 2)
18.75
33
0.653333
15
75
3.2
0.8
0
0
0
0
0
0
0
0
0
0
0.034483
0.226667
75
4
33
18.75
0.793103
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6